From 70b5e5ffea42d09cae85b580406f098fa1433c79 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Thu, 30 May 2024 11:29:41 +0800 Subject: [PATCH 01/29] feat(build): Support to specify THIRDPARTY_ROOT by setting the environment variable (#2035) Building Pegasus thirdparty libraries costs long time, it would be meaningful to reuse a built thirdparty directory when build Pegasus source code in different directories. This patch introduces an environment variable `PEGASUS_THIRDPARTY_ROOT` to indicate the thirdparty directory, if it has been built, it can be skipt to save time and disk space. --- CMakeLists.txt | 8 ++++++-- run.sh | 3 ++- scripts/compile_thrift.py | 6 +++--- scripts/pack_client.sh | 2 +- scripts/pack_server.sh | 12 ++++++------ scripts/pack_tools.sh | 12 ++++++------ scripts/recompile_thrift.sh | 1 - src/sample/run.sh | 22 +++++++++++++++++++++- 8 files changed, 45 insertions(+), 21 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5cc4c70044..0ee9d60405 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,8 +35,12 @@ include(BaseFunctions) set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE) set(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR}) -set(THIRDPARTY_ROOT ${PROJECT_ROOT}/thirdparty) -set(THIRDPARTY_INSTALL_DIR ${PROJECT_ROOT}/thirdparty/output) +if ("$ENV{THIRDPARTY_ROOT}" STREQUAL "") + set(THIRDPARTY_ROOT ${PROJECT_ROOT}/thirdparty) +else() + set(THIRDPARTY_ROOT $ENV{THIRDPARTY_ROOT}) +endif() +set(THIRDPARTY_INSTALL_DIR ${THIRDPARTY_ROOT}/output) message(STATUS "THIRDPARTY_INSTALL_DIR = ${THIRDPARTY_INSTALL_DIR}") set(BUILD_DIR ${PROJECT_ROOT}/src/builder) diff --git a/run.sh b/run.sh index abfd90d1ad..7e1b413213 100755 --- a/run.sh +++ b/run.sh @@ -24,7 +24,8 @@ ROOT="$(cd "$(dirname "$0")" && pwd)" export BUILD_ROOT_DIR=${ROOT}/build export BUILD_LATEST_DIR=${BUILD_ROOT_DIR}/latest export REPORT_DIR="$ROOT/test_report" -export THIRDPARTY_ROOT=$ROOT/thirdparty +# It's possible to specify THIRDPARTY_ROOT by setting the environment variable 
PEGASUS_THIRDPARTY_ROOT. +export THIRDPARTY_ROOT=${PEGASUS_THIRDPARTY_ROOT:-"$ROOT/thirdparty"} ARCH_TYPE='' arch_output=$(arch) if [ "$arch_output"x == "x86_64"x ]; then diff --git a/scripts/compile_thrift.py b/scripts/compile_thrift.py index 376a3c50c0..f6d46a4d20 100755 --- a/scripts/compile_thrift.py +++ b/scripts/compile_thrift.py @@ -220,13 +220,13 @@ def add_hook(name, path, func, args): if __name__ == "__main__": root_dir = os.getcwd() - thrift_exe = root_dir + "/thirdparty/output/bin/thrift" + thrift_exe = os.environ['THIRDPARTY_ROOT'] + "/output/bin/thrift" print("thrift_exe = " + thrift_exe) print("root_dir = " + root_dir) if not os.path.isfile(thrift_exe): - print("Error: can't find compiler %s\nPlease build thrift in thirdparty/" % thrift_exe) - sys.exit() + print("Error: can't find compiler %s\nPlease build thrift in %s/" % (thrift_exe, os.environ['THIRDPARTY_ROOT'])) + sys.exit(1) ctor_kv_pair = " kv_pair(const std::string& _key, const std::string& _val): key(_key), value(_val) {\n }" ctor_configuration_proposal_action = " configuration_proposal_action(::dsn::rpc_address t, ::dsn::rpc_address n, config_type::type tp): target(t), node(n), type(tp) {}" diff --git a/scripts/pack_client.sh b/scripts/pack_client.sh index e15212cb9a..66021465c1 100755 --- a/scripts/pack_client.sh +++ b/scripts/pack_client.sh @@ -109,7 +109,7 @@ mkdir -p ${pack}/lib copy_file ${BUILD_LATEST_DIR}/output/lib/libpegasus_client_static.a ${pack}/lib # TODO(yingchun): make sure shared lib works well too # copy_file ${BUILD_LATEST_DIR}/output/lib/libpegasus_client_shared.so ${pack}/lib -copy_file ./thirdparty/output/lib/libboost*.so.1.69.0 ${pack}/lib +copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/lib ln -sf `ls ${pack}/lib | grep libboost_system` ${pack}/lib/libboost_system.so ln -sf `ls ${pack}/lib | grep libboost_filesystem` ${pack}/lib/libboost_filesystem.so ln -sf `ls ${pack}/lib | grep libboost_regex` ${pack}/lib/libboost_regex.so diff --git 
a/scripts/pack_server.sh b/scripts/pack_server.sh index 2ff6e8446d..775002128e 100755 --- a/scripts/pack_server.sh +++ b/scripts/pack_server.sh @@ -116,15 +116,15 @@ copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_replica_server.so ${pack}/bin copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_utils.so ${pack}/bin if [ "$use_jemalloc" == "on" ]; then - copy_file ./thirdparty/output/lib/libjemalloc.so.2 ${pack}/bin - copy_file ./thirdparty/output/lib/libprofiler.so.0 ${pack}/bin + copy_file ${THIRDPARTY_ROOT}/output/lib/libjemalloc.so.2 ${pack}/bin + copy_file ${THIRDPARTY_ROOT}/output/lib/libprofiler.so.0 ${pack}/bin else - copy_file ./thirdparty/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/bin + copy_file ${THIRDPARTY_ROOT}/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/bin fi -copy_file ./thirdparty/output/lib/libboost*.so.1.69.0 ${pack}/bin -copy_file ./thirdparty/output/lib/libhdfs* ${pack}/bin -copy_file ./thirdparty/output/lib/librocksdb.so.8 ${pack}/bin +copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/bin +copy_file ${THIRDPARTY_ROOT}/output/lib/libhdfs* ${pack}/bin +copy_file ${THIRDPARTY_ROOT}/output/lib/librocksdb.so.8 ${pack}/bin copy_file ./scripts/sendmail.sh ${pack}/bin copy_file ./src/server/config.ini ${pack}/bin copy_file ./src/server/config.min.ini ${pack}/bin diff --git a/scripts/pack_tools.sh b/scripts/pack_tools.sh index ce386b3a49..f948f1700c 100755 --- a/scripts/pack_tools.sh +++ b/scripts/pack_tools.sh @@ -125,15 +125,15 @@ mkdir -p ${pack}/lib copy_file ${BUILD_LATEST_DIR}/output/lib/*.so* ${pack}/lib/ if [ "$use_jemalloc" == "on" ]; then - copy_file ./thirdparty/output/lib/libjemalloc.so.2 ${pack}/lib/ - copy_file ./thirdparty/output/lib/libprofiler.so.0 ${pack}/lib/ + copy_file ${THIRDPARTY_ROOT}/output/lib/libjemalloc.so.2 ${pack}/lib/ + copy_file ${THIRDPARTY_ROOT}/output/lib/libprofiler.so.0 ${pack}/lib/ else - copy_file ./thirdparty/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/lib/ + copy_file 
${THIRDPARTY_ROOT}/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/lib/ fi -copy_file ./thirdparty/output/lib/libboost*.so.1.69.0 ${pack}/lib/ -copy_file ./thirdparty/output/lib/libhdfs* ${pack}/lib/ -copy_file ./thirdparty/output/lib/librocksdb.so.8 ${pack}/lib/ +copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/lib/ +copy_file ${THIRDPARTY_ROOT}/output/lib/libhdfs* ${pack}/lib/ +copy_file ${THIRDPARTY_ROOT}/output/lib/librocksdb.so.8 ${pack}/lib/ copy_file `get_stdcpp_lib $custom_gcc` ${pack}/lib/ pack_tools_lib() { diff --git a/scripts/recompile_thrift.sh b/scripts/recompile_thrift.sh index 52e5397be2..72135f8aa9 100755 --- a/scripts/recompile_thrift.sh +++ b/scripts/recompile_thrift.sh @@ -17,7 +17,6 @@ # under the License. cd `dirname $0` -THIRDPARTY_ROOT=../thirdparty if [ ! -d "$THIRDPARTY_ROOT" ]; then echo "ERROR: THIRDPARTY_ROOT not set" diff --git a/src/sample/run.sh b/src/sample/run.sh index 646446ef83..5da1a91689 100755 --- a/src/sample/run.sh +++ b/src/sample/run.sh @@ -16,5 +16,25 @@ # specific language governing permissions and limitations # under the License. -export LD_LIBRARY_PATH=`pwd`/../../../../../thirdparty/output/lib:`pwd`/../../lib:/usr/lib/jvm/java-1.8.0-openjdk/jre/lib/amd64/server +if [ ! -d "$PEGASUS_THIRDPARTY_ROOT" ]; then + echo "ERROR: PEGASUS_THIRDPARTY_ROOT not set" + exit 1 +fi + +if [ ! 
-d "$JAVA_HOME" ]; then + echo "ERROR: JAVA_HOME not set" + exit 1 +fi + +ARCH_TYPE='' +arch_output=$(arch) +if [ "$arch_output"x == "x86_64"x ]; then + ARCH_TYPE="amd64" +elif [ "$arch_output"x == "aarch64"x ]; then + ARCH_TYPE="aarch64" +else + echo "WARNING: unsupported CPU architecture '$arch_output', use 'x86_64' as default" +fi +export LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/${ARCH_TYPE}:${JAVA_HOME}/jre/lib/${ARCH_TYPE}/server:${PEGASUS_THIRDPARTY_ROOT}/output/lib:$(pwd)/../../lib:${LD_LIBRARY_PATH} + ./sample onebox temp From b7e6a8ec62e1430e003068cb0a99024c7890f9dd Mon Sep 17 00:00:00 2001 From: Samunroyu <36890229+Samunroyu@users.noreply.github.com> Date: Thu, 30 May 2024 11:32:05 +0800 Subject: [PATCH 02/29] feat(shell): add JSON format data output to some backup_policy commands (#2030) Add JSON output to some backup policy commands to facilitate the writing of automation scripts. Backup policy commands including: - ls_backup_policy - query_backup_policy ls_backup_policy Output example by Tabler format ``` [p1] backup_provider_type : hdfs_service backup_interval : 86400s app_ids : {3} start_time : 03:36 status : enabled backup_history_count : 1 [p2] backup_provider_type : hdfs_service backup_interval : 86400s app_ids : {3} start_time : 20:25 status : enabled backup_history_count : 1 ``` ls_backup_policy Output example by JSON format ``` { "p1": { "backup_provider_type": "hdfs_service", "backup_interval": "86400s", "app_ids": "{3}", "start_time": "03:36", "status": "enabled", "backup_history_count": "1" }, "p2": { "backup_provider_type": "hdfs_service", "backup_interval": "86400s", "app_ids": "{3}", "start_time": "20:25", "status": "enabled", "backup_history_count": "1" } } ``` query_backup_policy Output example by Tabler format ``` [p1] backup_provider_type : hdfs_service backup_interval : 86400s app_ids : {3} start_time : 03:36 status : enabled backup_history_count : 1 [backup_info] id start_time end_time app_ids 1716781003199 2024-05-27 03:36:43 - {3} 
[p2] backup_provider_type : hdfs_service backup_interval : 86400s app_ids : {3} start_time : 20:25 status : enabled backup_history_count : 1 [backup_info] id start_time end_time app_ids 1716840160297 2024-05-27 20:02:40 - {3} ``` query_backup_policy Output example by JSON format ``` { "p1": { "backup_provider_type": "hdfs_service", "backup_interval": "86400s", "app_ids": "{3}", "start_time": "03:36", "status": "enabled", "backup_history_count": "1" }, "p1_backup_info": { "1716781003199": { "id": "1716781003199", "start_time": "2024-05-27 03:36:43", "end_time": "-", "app_ids": "{3}" } }, "p2": { "backup_provider_type": "hdfs_service", "backup_interval": "86400s", "app_ids": "{3}", "start_time": "20:25", "status": "enabled", "backup_history_count": "1" }, "p2_backup_info": { "1716840160297": { "id": "1716840160297", "start_time": "2024-05-27 20:02:40", "end_time": "-", "app_ids": "{3}" } } } ``` --- src/client/replication_ddl_client.cpp | 76 ++++++++++++++++----------- src/client/replication_ddl_client.h | 5 +- src/meta/meta_backup_service.cpp | 3 +- src/shell/commands/cold_backup.cpp | 67 ++++++++--------------- src/shell/main.cpp | 9 +++- 5 files changed, 77 insertions(+), 83 deletions(-) diff --git a/src/client/replication_ddl_client.cpp b/src/client/replication_ddl_client.cpp index a71241362e..13a2e5181a 100644 --- a/src/client/replication_ddl_client.cpp +++ b/src/client/replication_ddl_client.cpp @@ -38,6 +38,7 @@ #include "backup_types.h" #include "common//duplication_common.h" +#include "common/backup_common.h" #include "common/bulk_load_common.h" #include "common/gpid.h" #include "common/manual_compact.h" @@ -1139,20 +1140,19 @@ dsn::error_code replication_ddl_client::enable_backup_policy(const std::string & } } -static void print_policy_entry(const policy_entry &entry) +static dsn::utils::table_printer print_policy_entry(const policy_entry &entry) { - dsn::utils::table_printer tp; - tp.add_row_name_and_data(" name", entry.policy_name); - 
tp.add_row_name_and_data(" backup_provider_type", entry.backup_provider_type); - tp.add_row_name_and_data(" backup_interval", entry.backup_interval_seconds + "s"); - tp.add_row_name_and_data(" app_ids", fmt::format("{{{}}}", fmt::join(entry.app_ids, ", "))); - tp.add_row_name_and_data(" start_time", entry.start_time); - tp.add_row_name_and_data(" status", entry.is_disable ? "disabled" : "enabled"); - tp.add_row_name_and_data(" backup_history_count", entry.backup_history_count_to_keep); - tp.output(std::cout); + dsn::utils::table_printer tp(entry.policy_name); + tp.add_row_name_and_data("backup_provider_type", entry.backup_provider_type); + tp.add_row_name_and_data("backup_interval", entry.backup_interval_seconds + "s"); + tp.add_row_name_and_data("app_ids", fmt::format("{{{}}}", fmt::join(entry.app_ids, ", "))); + tp.add_row_name_and_data("start_time", entry.start_time); + tp.add_row_name_and_data("status", entry.is_disable ? "disabled" : "enabled"); + tp.add_row_name_and_data("backup_history_count", entry.backup_history_count_to_keep); + return tp; } -static void print_backup_entry(const backup_entry &bentry) +static void print_backup_entry(dsn::utils::table_printer &tp, const backup_entry &bentry) { char start_time[30] = {'\0'}; char end_time[30] = {'\0'}; @@ -1164,15 +1164,13 @@ static void print_backup_entry(const backup_entry &bentry) ::dsn::utils::time_ms_to_date_time(bentry.end_time_ms, end_time, 30); } - dsn::utils::table_printer tp; - tp.add_row_name_and_data(" id", bentry.backup_id); - tp.add_row_name_and_data(" start_time", start_time); - tp.add_row_name_and_data(" end_time", end_time); - tp.add_row_name_and_data(" app_ids", fmt::format("{{{}}}", fmt::join(bentry.app_ids, ", "))); - tp.output(std::cout); + tp.add_row(bentry.backup_id); + tp.append_data(start_time); + tp.append_data(end_time); + tp.append_data(fmt::format("{{{}}}", fmt::join(bentry.app_ids, ", "))); } -dsn::error_code replication_ddl_client::ls_backup_policy() +dsn::error_code 
replication_ddl_client::ls_backup_policy(bool json) { auto req = std::make_shared(); req->policy_names.clear(); @@ -1187,21 +1185,26 @@ dsn::error_code replication_ddl_client::ls_backup_policy() configuration_query_backup_policy_response resp; ::dsn::unmarshall(resp_task->get_response(), resp); + std::streambuf *buf; + std::ofstream of; + buf = std::cout.rdbuf(); + std::ostream out(buf); + if (resp.err != ERR_OK) { return resp.err; } else { + dsn::utils::multi_table_printer mtp; for (int32_t idx = 0; idx < resp.policys.size(); idx++) { - std::cout << "[" << idx + 1 << "]" << std::endl; - print_policy_entry(resp.policys[idx]); - std::cout << std::endl; + dsn::utils::table_printer tp = print_policy_entry(resp.policys[idx]); + mtp.add(std::move(tp)); } + mtp.output(out, json ? tp_output_format::kJsonPretty : tp_output_format::kTabular); } return ERR_OK; } -dsn::error_code -replication_ddl_client::query_backup_policy(const std::vector &policy_names, - int backup_info_cnt) +dsn::error_code replication_ddl_client::query_backup_policy( + const std::vector &policy_names, int backup_info_cnt, bool json) { auto req = std::make_shared(); req->policy_names = policy_names; @@ -1217,23 +1220,32 @@ replication_ddl_client::query_backup_policy(const std::vector &poli configuration_query_backup_policy_response resp; ::dsn::unmarshall(resp_task->get_response(), resp); + std::streambuf *buf; + std::ofstream of; + buf = std::cout.rdbuf(); + std::ostream out(buf); + if (resp.err != ERR_OK) { return resp.err; } else { + dsn::utils::multi_table_printer mtp; for (int32_t idx = 0; idx < resp.policys.size(); idx++) { - if (idx != 0) { - std::cout << "************************" << std::endl; - } const policy_entry &pentry = resp.policys[idx]; - std::cout << "policy_info:" << std::endl; - print_policy_entry(pentry); - std::cout << std::endl << "backup_infos:" << std::endl; + dsn::utils::table_printer tp_policy = print_policy_entry(pentry); + mtp.add(std::move(tp_policy)); const std::vector 
&backup_infos = resp.backup_infos[idx]; + dsn::utils::table_printer tp_backup(pentry.policy_name + "_" + + cold_backup_constant::BACKUP_INFO); + tp_backup.add_title("id"); + tp_backup.add_column("start_time"); + tp_backup.add_column("end_time"); + tp_backup.add_column("app_ids"); for (int bi_idx = 0; bi_idx < backup_infos.size(); bi_idx++) { - std::cout << "[" << (bi_idx + 1) << "]" << std::endl; - print_backup_entry(backup_infos[bi_idx]); + print_backup_entry(tp_backup, backup_infos[bi_idx]); } + mtp.add(std::move(tp_backup)); } + mtp.output(out, json ? tp_output_format::kJsonPretty : tp_output_format::kTabular); } return ERR_OK; } diff --git a/src/client/replication_ddl_client.h b/src/client/replication_ddl_client.h index 12710e7854..b36b2cf325 100644 --- a/src/client/replication_ddl_client.h +++ b/src/client/replication_ddl_client.h @@ -179,14 +179,15 @@ class replication_ddl_client error_with query_backup(int32_t app_id, int64_t backup_id); - dsn::error_code ls_backup_policy(); + dsn::error_code ls_backup_policy(bool json); dsn::error_code disable_backup_policy(const std::string &policy_name); dsn::error_code enable_backup_policy(const std::string &policy_name); dsn::error_code query_backup_policy(const std::vector &policy_names, - int backup_info_cnt); + int backup_info_cnt, + bool json); dsn::error_code update_backup_policy(const std::string &policy_name, const std::vector &add_appids, diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index 3e924635ae..6423e40b1a 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -1468,8 +1468,7 @@ bool backup_service::is_valid_policy_name_unlocked(const std::string &policy_nam // BACKUP_INFO and policy_name should not be the same, because they are in the same level in the // output when query the policy details, use different names to distinguish the respective // contents. 
- static const std::set kReservedNames = {cold_backup_constant::BACKUP_INFO}; - if (kReservedNames.count(policy_name) == 1) { + if (policy_name.find(cold_backup_constant::BACKUP_INFO) != std::string::npos) { hint_message = "policy name is reserved"; return false; } diff --git a/src/shell/commands/cold_backup.cpp b/src/shell/commands/cold_backup.cpp index 4dfd41473c..f235234c64 100644 --- a/src/shell/commands/cold_backup.cpp +++ b/src/shell/commands/cold_backup.cpp @@ -32,7 +32,9 @@ #include #include "client/replication_ddl_client.h" +#include "shell/argh.h" #include "shell/command_executor.h" +#include "shell/command_helper.h" #include "shell/commands.h" #include "shell/sds/sds.h" #include "utils/error_code.h" @@ -143,64 +145,39 @@ bool add_backup_policy(command_executor *e, shell_context *sc, arguments args) bool ls_backup_policy(command_executor *e, shell_context *sc, arguments args) { - ::dsn::error_code err = sc->ddl_client->ls_backup_policy(); + argh::parser cmd(args.argc, args.argv); + const bool json = cmd[{"-j", "--json"}]; + + ::dsn::error_code err = sc->ddl_client->ls_backup_policy(json); if (err != ::dsn::ERR_OK) { std::cout << "ls backup policy failed" << std::endl; - } else { - std::cout << std::endl << "ls backup policy succeed" << std::endl; } return true; } bool query_backup_policy(command_executor *e, shell_context *sc, arguments args) { - static struct option long_options[] = {{"policy_name", required_argument, 0, 'p'}, - {"backup_info_cnt", required_argument, 0, 'b'}, - {0, 0, 0, 0}}; + const std::string query_backup_policy_help = " [-b|--backup_info_cnt] [-j|--json]"; + argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); + RETURN_FALSE_IF_NOT(cmd.pos_args().size() > 1, + "invalid command, should be in the form of '{}'", + query_backup_policy_help); + + int param_index = 1; std::vector policy_names; - int backup_info_cnt = 3; + PARSE_STRS(policy_names); - optind = 0; - while (true) { - int option_index = 0; - int 
c; - c = getopt_long(args.argc, args.argv, "p:b:", long_options, &option_index); - if (c == -1) - break; - switch (c) { - case 'p': { - std::vector names; - ::dsn::utils::split_args(optarg, names, ','); - for (const auto &policy_name : names) { - if (policy_name.empty()) { - fprintf(stderr, "invalid, empty policy_name, just ignore\n"); - continue; - } else { - policy_names.emplace_back(policy_name); - } - } - } break; - case 'b': - backup_info_cnt = atoi(optarg); - if (backup_info_cnt <= 0) { - fprintf(stderr, "invalid backup_info_cnt %s\n", optarg); - return false; - } - break; - default: - return false; - } - } - if (policy_names.empty()) { - fprintf(stderr, "empty policy_name, please assign policy_name you want to query\n"); - return false; - } - ::dsn::error_code ret = sc->ddl_client->query_backup_policy(policy_names, backup_info_cnt); + uint32_t backup_info_cnt; + PARSE_OPT_UINT(backup_info_cnt, 3, {"-b", "--backup_info_cnt"}); + + const bool json = cmd[{"-j", "--json"}]; + + ::dsn::error_code ret = + sc->ddl_client->query_backup_policy(policy_names, backup_info_cnt, json); if (ret != ::dsn::ERR_OK) { fprintf(stderr, "query backup policy failed, err = %s\n", ret.to_string()); - } else { - std::cout << std::endl << "query backup policy succeed" << std::endl; } + return true; } diff --git a/src/shell/main.cpp b/src/shell/main.cpp index 4b4934eedf..34bddc4c65 100644 --- a/src/shell/main.cpp +++ b/src/shell/main.cpp @@ -413,11 +413,16 @@ static command_executor commands[] = { "<-c|--backup_history_cnt num>", add_backup_policy, }, - {"ls_backup_policy", "list the names of the subsistent backup policies", "", ls_backup_policy}, + { + "ls_backup_policy", + "list the names of the subsistent backup policies", + "[-j|--json]", + ls_backup_policy, + }, { "query_backup_policy", "query subsistent backup policy and last backup infos", - "<-p|--policy_name p1,p2...> [-b|--backup_info_cnt num]", + "<-p|--policy_name p1,p2...> [-b|--backup_info_cnt num] [-j|--json]", 
query_backup_policy, }, { From d9ef442f465a04b4386d5af0d3541fe19358c4c6 Mon Sep 17 00:00:00 2001 From: Samunroyu <36890229+Samunroyu@users.noreply.github.com> Date: Fri, 31 May 2024 11:01:12 +0800 Subject: [PATCH 03/29] chore(pack): support to pack server binaries separately (#2034) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a new flag `--separate_servers` to indicate whether to pack `pegasus_collector`,`pegasus_meta_server` and `pegasus_replica_server` binaries, otherwise a combined `pegasus_server` binary will be packed in the pegasus_server_xxx.tar. When build server in option `--separate_servers`,the corresponding option to use pack command is: ``` ./run.sh pack_server -s or ./run.sh pack_server --separate_servers ./run.sh pack_tools -s or ./run.sh pack_tools --separate_servers ``` --- .github/actions/build_pegasus/action.yaml | 4 +-- .github/workflows/lint_and_test_cpp.yaml | 3 +- scripts/pack_client.sh | 6 ---- scripts/pack_common.sh | 6 +++- scripts/pack_server.sh | 35 ++++++++++++++--------- scripts/pack_tools.sh | 24 +++++++++++----- 6 files changed, 48 insertions(+), 30 deletions(-) diff --git a/.github/actions/build_pegasus/action.yaml b/.github/actions/build_pegasus/action.yaml index ad640f2fa5..d0093ffb66 100644 --- a/.github/actions/build_pegasus/action.yaml +++ b/.github/actions/build_pegasus/action.yaml @@ -38,12 +38,12 @@ runs: shell: bash - name: Pack Server run: | - ./run.sh pack_server -j + ./run.sh pack_server -j ${PACK_OPTIONS} rm -rf pegasus-server-* shell: bash - name: Pack Tools run: | - ./run.sh pack_tools -j + ./run.sh pack_tools -j ${PACK_OPTIONS} rm -rf pegasus-tools-* shell: bash - name: Clear Build Files diff --git a/.github/workflows/lint_and_test_cpp.yaml b/.github/workflows/lint_and_test_cpp.yaml index dba6497715..8f6e0e300a 100644 --- a/.github/workflows/lint_and_test_cpp.yaml +++ b/.github/workflows/lint_and_test_cpp.yaml @@ -418,7 +418,8 @@ jobs: runs-on: 
ubuntu-latest env: USE_JEMALLOC: OFF - BUILD_OPTIONS: -t debug --test + BUILD_OPTIONS: -t debug --test --separate_servers + PACK_OPTIONS: --separate_servers container: image: apache/pegasus:thirdparties-bin-centos7-${{ github.base_ref }} steps: diff --git a/scripts/pack_client.sh b/scripts/pack_client.sh index 66021465c1..c28bb7dfd3 100755 --- a/scripts/pack_client.sh +++ b/scripts/pack_client.sh @@ -39,12 +39,6 @@ then exit 1 fi -if [ ! -f ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ] -then - echo "ERROR: ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server not found" - exit 1 -fi - if [ ! -f ${BUILD_LATEST_DIR}/CMakeCache.txt ] then echo "ERROR: ${BUILD_LATEST_DIR}/CMakeCache.txt not found" diff --git a/scripts/pack_common.sh b/scripts/pack_common.sh index d22555d330..d1080f48dc 100755 --- a/scripts/pack_common.sh +++ b/scripts/pack_common.sh @@ -20,7 +20,11 @@ set -e function get_stdcpp_lib() { - libname=`ldd ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server 2>/dev/null | grep libstdc++` + if [[ $2 == "false" ]]; then + libname=`ldd ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server 2>/dev/null | grep libstdc++` + else + libname=`ldd ${BUILD_LATEST_DIR}/output/bin/pegasus_meta_server/pegasus_meta_server 2>/dev/null | grep libstdc++` + fi libname=`echo $libname | cut -f1 -d" "` if [ $1 = "true" ]; then gcc_path=`which gcc` diff --git a/scripts/pack_server.sh b/scripts/pack_server.sh index 775002128e..5f1172e918 100755 --- a/scripts/pack_server.sh +++ b/scripts/pack_server.sh @@ -27,6 +27,7 @@ function usage() { echo " -g|--custom-gcc" echo " -k|--keytab-file" echo " -j|--use-jemalloc" + echo " -s|--separate_servers" exit 0 } @@ -39,11 +40,6 @@ if [ ! -f src/include/pegasus/git_commit.h ]; then exit 1 fi -if [ ! -f ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ]; then - echo "ERROR: ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server not found" - exit 1 -fi - if [ ! 
-f ${BUILD_LATEST_DIR}/CMakeCache.txt ]; then echo "ERROR: ${BUILD_LATEST_DIR}/CMakeCache.txt not found" exit 1 @@ -77,7 +73,8 @@ fi custom_gcc="false" keytab_file="" -use_jemalloc="off" +use_jemalloc="false" +separate_servers="false" while [[ $# > 0 ]]; do option_key="$1" @@ -97,7 +94,10 @@ while [[ $# > 0 ]]; do shift ;; -j | --use-jemalloc) - use_jemalloc="on" + use_jemalloc="true" + ;; + -s | --separate_servers) + separate_servers="true" ;; *) echo "ERROR: unknown option \"$option_key\"" @@ -110,12 +110,17 @@ while [[ $# > 0 ]]; do done mkdir -p ${pack}/bin -copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ${pack}/bin +if [[ $separate_servers == "false" ]]; then + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ${pack}/bin +else + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_meta_server/pegasus_meta_server ${pack}/bin + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_replica_server/pegasus_replica_server ${pack}/bin +fi copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_meta_server.so ${pack}/bin copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_replica_server.so ${pack}/bin copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_utils.so ${pack}/bin -if [ "$use_jemalloc" == "on" ]; then +if [ "$use_jemalloc" == "true" ]; then copy_file ${THIRDPARTY_ROOT}/output/lib/libjemalloc.so.2 ${pack}/bin copy_file ${THIRDPARTY_ROOT}/output/lib/libprofiler.so.0 ${pack}/bin else @@ -130,14 +135,18 @@ copy_file ./src/server/config.ini ${pack}/bin copy_file ./src/server/config.min.ini ${pack}/bin copy_file ./scripts/config_hdfs.sh ${pack}/bin -copy_file "$(get_stdcpp_lib $custom_gcc)" "${pack}/bin" +copy_file "$(get_stdcpp_lib $custom_gcc $separate_servers)" "${pack}/bin" pack_server_lib() { - pack_system_lib "${pack}/bin" server "$1" + if [[ $2 == "false" ]]; then + pack_system_lib "${pack}/bin" server "$1" + else + pack_system_lib "${pack}/bin" meta_server "$1" + fi } -pack_server_lib crypto -pack_server_lib ssl +pack_server_lib crypto 
$separate_servers +pack_server_lib ssl $separate_servers # Pack hadoop-related files. # If you want to use hdfs service to backup/restore/bulkload pegasus tables, diff --git a/scripts/pack_tools.sh b/scripts/pack_tools.sh index f948f1700c..de1d6cdf2c 100755 --- a/scripts/pack_tools.sh +++ b/scripts/pack_tools.sh @@ -27,6 +27,7 @@ function usage() echo " -p|--update-package-template " echo " -g|--custom-gcc" echo " -j|--use-jemalloc" + echo " -s|--separate_servers" exit 0 } @@ -82,7 +83,8 @@ if [ -n "$MINOS_CONFIG_FILE" ]; then fi custom_gcc="false" -use_jemalloc="off" +use_jemalloc="false" +separate_servers="false" while [[ $# > 0 ]]; do option_key="$1" @@ -98,7 +100,10 @@ while [[ $# > 0 ]]; do usage ;; -j|--use-jemalloc) - use_jemalloc="on" + use_jemalloc="true" + ;; + -s | --separate_servers) + separate_servers="true" ;; *) echo "ERROR: unknown option \"$option_key\"" @@ -114,7 +119,12 @@ mkdir -p ${pack} copy_file ./run.sh ${pack}/ mkdir -p ${pack}/bin -cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_server ${pack}/bin/ +if [[ $separate_servers == "false" ]]; then + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ${pack}/bin +else + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_meta_server/pegasus_meta_server ${pack}/bin + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_replica_server/pegasus_replica_server ${pack}/bin +fi cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_shell ${pack}/bin/ cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_bench ${pack}/bin/ cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_kill_test ${pack}/bin/ @@ -124,7 +134,7 @@ cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_pressureclient ${pack}/bin/ mkdir -p ${pack}/lib copy_file ${BUILD_LATEST_DIR}/output/lib/*.so* ${pack}/lib/ -if [ "$use_jemalloc" == "on" ]; then +if [ "$use_jemalloc" == "true" ]; then copy_file ${THIRDPARTY_ROOT}/output/lib/libjemalloc.so.2 ${pack}/lib/ copy_file ${THIRDPARTY_ROOT}/output/lib/libprofiler.so.0 ${pack}/lib/ else @@ -134,14 +144,14 
@@ fi copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/lib/ copy_file ${THIRDPARTY_ROOT}/output/lib/libhdfs* ${pack}/lib/ copy_file ${THIRDPARTY_ROOT}/output/lib/librocksdb.so.8 ${pack}/lib/ -copy_file `get_stdcpp_lib $custom_gcc` ${pack}/lib/ +copy_file `get_stdcpp_lib $custom_gcc $separate_servers` ${pack}/lib/ pack_tools_lib() { pack_system_lib "${pack}/lib" shell "$1" } -pack_tools_lib crypto -pack_tools_lib ssl +pack_tools_lib crypto $separate_servers +pack_tools_lib ssl $separate_servers chmod -x ${pack}/lib/* From 956a9bc17a258d10d6da8bb4c372c9f7adb04309 Mon Sep 17 00:00:00 2001 From: Dan Wang Date: Tue, 4 Jun 2024 11:53:05 +0800 Subject: [PATCH 04/29] fix(duplication): add warning message while trying to add a duplication that has been existing for the same table with the same remote cluster (#2038) https://github.com/apache/incubator-pegasus/issues/2039 --- .../duplication/meta_duplication_service.cpp | 26 +++++++--------- .../duplication/meta_duplication_service.h | 4 +-- .../test/meta_duplication_service_test.cpp | 31 +++++++++++-------- src/shell/command_helper.h | 11 +++++-- src/shell/commands/duplication.cpp | 25 +++++++++------ src/utils/error_code.h | 3 ++ 6 files changed, 59 insertions(+), 41 deletions(-) diff --git a/src/meta/duplication/meta_duplication_service.cpp b/src/meta/duplication/meta_duplication_service.cpp index 4c432736cb..77642c1f52 100644 --- a/src/meta/duplication/meta_duplication_service.cpp +++ b/src/meta/duplication/meta_duplication_service.cpp @@ -241,6 +241,7 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) std::shared_ptr app; duplication_info_s_ptr dup; + error_code resp_err = ERR_OK; { zauto_read_lock l(app_lock()); @@ -273,13 +274,13 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) if (dup) { // The duplication for the same app to the same remote cluster has existed. 
- remote_app_name = dup->remote_app_name; - remote_replica_count = dup->remote_replica_count; - LOG_INFO("no need to add duplication, since it has existed: app_name={}, " + resp_err = ERR_DUP_EXIST; + LOG_INFO("[{}] duplication has been existing: app_name={}, " "remote_cluster_name={}, remote_app_name={}", + dup->log_prefix(), request.app_name, request.remote_cluster_name, - remote_app_name); + dup->remote_app_name); } else { // Check if other apps of this cluster are duplicated to the same remote app. for (const auto & [ app_name, cur_app_state ] : _state->_exist_apps) { @@ -313,15 +314,14 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) app); } - do_add_duplication(app, dup, rpc, remote_app_name, remote_replica_count); + do_add_duplication(app, dup, rpc, resp_err); } // ThreadPool(WRITE): THREAD_POOL_META_STATE void meta_duplication_service::do_add_duplication(std::shared_ptr &app, duplication_info_s_ptr &dup, duplication_add_rpc &rpc, - const std::string &remote_app_name, - const int32_t remote_replica_count) + const error_code &resp_err) { const auto &ec = dup->start(rpc.request().is_duplicating_checkpoint); LOG_ERROR_DUP_HINT_AND_RETURN_IF_NOT(ec == ERR_OK, @@ -335,10 +335,8 @@ void meta_duplication_service::do_add_duplication(std::shared_ptr &ap auto value = dup->to_json_blob(); std::queue nodes({get_duplication_path(*app), std::to_string(dup->id)}); _meta_svc->get_meta_storage()->create_node_recursively( - std::move(nodes), - std::move(value), - [app, this, dup, rpc, remote_app_name, remote_replica_count]() mutable { - LOG_INFO("[{}] add duplication successfully [app_name: {}, follower: {}]", + std::move(nodes), std::move(value), [app, this, dup, rpc, resp_err]() mutable { + LOG_INFO("[{}] add duplication successfully [app_name: {}, remote_cluster_name: {}]", dup->log_prefix(), app->app_name, dup->remote_cluster_name); @@ -347,11 +345,11 @@ void meta_duplication_service::do_add_duplication(std::shared_ptr &ap dup->persist_status(); 
auto &resp = rpc.response(); - resp.err = ERR_OK; + resp.err = resp_err; resp.appid = app->app_id; resp.dupid = dup->id; - resp.__set_remote_app_name(remote_app_name); - resp.__set_remote_replica_count(remote_replica_count); + resp.__set_remote_app_name(dup->remote_app_name); + resp.__set_remote_replica_count(dup->remote_replica_count); zauto_write_lock l(app_lock()); refresh_duplicating_no_lock(app); diff --git a/src/meta/duplication/meta_duplication_service.h b/src/meta/duplication/meta_duplication_service.h index 2bcda880ef..3f06d63265 100644 --- a/src/meta/duplication/meta_duplication_service.h +++ b/src/meta/duplication/meta_duplication_service.h @@ -31,6 +31,7 @@ #include "utils/fmt_logging.h" namespace dsn { +class error_code; class host_port; class zrwlock_nr; @@ -81,8 +82,7 @@ class meta_duplication_service void do_add_duplication(std::shared_ptr &app, duplication_info_s_ptr &dup, duplication_add_rpc &rpc, - const std::string &remote_app_name, - const int32_t remote_replica_count); + const error_code &resp_err); void do_modify_duplication(std::shared_ptr &app, duplication_info_s_ptr &dup, diff --git a/src/meta/test/meta_duplication_service_test.cpp b/src/meta/test/meta_duplication_service_test.cpp index 0e0ed1f40f..621a34d9cc 100644 --- a/src/meta/test/meta_duplication_service_test.cpp +++ b/src/meta/test/meta_duplication_service_test.cpp @@ -57,6 +57,7 @@ #include "meta/server_state.h" #include "meta/test/misc/misc.h" #include "meta_test_base.h" +#include "runtime/api_layer1.h" #include "runtime/rpc/rpc_address.h" #include "runtime/rpc/rpc_host_port.h" #include "utils/blob.h" @@ -399,7 +400,7 @@ class meta_duplication_service_test : public meta_test_base struct TestData { std::string app_name; - std::string remote; + std::string remote_cluster_name; bool specified; std::string remote_app_name; @@ -414,13 +415,14 @@ class meta_duplication_service_test : public meta_test_base kTestRemoteAppName, kTestRemoteReplicaCount, ERR_OK}, - // A duplication that has 
been added would be found with its original remote_app_name. + // Add a duplication that has been existing for the same table with the same remote + // cluster. {kTestAppName, kTestRemoteClusterName, - true, + false, kTestRemoteAppName, kTestRemoteReplicaCount, - ERR_OK}, + ERR_DUP_EXIST}, // The general case that duplicating to remote cluster with same remote_app_name. {kTestSameAppName, kTestRemoteClusterName, @@ -477,10 +479,12 @@ class meta_duplication_service_test : public meta_test_base for (auto test : tests) { duplication_add_response resp; if (test.specified) { - resp = create_dup( - test.app_name, test.remote, test.remote_app_name, test.remote_replica_count); + resp = create_dup(test.app_name, + test.remote_cluster_name, + test.remote_app_name, + test.remote_replica_count); } else { - resp = create_dup_unspecified(test.app_name, test.remote); + resp = create_dup_unspecified(test.app_name, test.remote_cluster_name); } ASSERT_EQ(test.wec, resp.err); @@ -494,7 +498,7 @@ class meta_duplication_service_test : public meta_test_base ASSERT_TRUE(dup != nullptr); ASSERT_EQ(app->app_id, dup->app_id); ASSERT_EQ(duplication_status::DS_PREPARE, dup->_status); - ASSERT_EQ(test.remote, dup->remote_cluster_name); + ASSERT_EQ(test.remote_cluster_name, dup->remote_cluster_name); ASSERT_EQ(test.remote_app_name, resp.remote_app_name); ASSERT_EQ(test.remote_app_name, dup->remote_app_name); ASSERT_EQ(test.remote_replica_count, resp.remote_replica_count); @@ -524,23 +528,24 @@ TEST_F(meta_duplication_service_test, dup_op_upon_unavail_app) create_app(test_app_unavail); find_app(test_app_unavail)->status = app_status::AS_DROPPED; - dupid_t test_dup = create_dup(kTestAppName).dupid; - struct TestData { std::string app; - error_code wec; } tests[] = { {test_app_not_exist, ERR_APP_NOT_EXIST}, {test_app_unavail, ERR_APP_NOT_EXIST}, - {kTestAppName, ERR_OK}, }; for (auto test : tests) { + const auto &resp = create_dup(test.app); + ASSERT_EQ(test.wec, resp.err); + ASSERT_EQ(test.wec, 
query_dup_info(test.app).err); - ASSERT_EQ(test.wec, create_dup(test.app).err); + + // For the response with some error, `dupid` doesn't matter. + dupid_t test_dup = test.wec == ERR_OK ? resp.dupid : static_cast(dsn_now_s()); ASSERT_EQ(test.wec, change_dup_status(test.app, test_dup, duplication_status::DS_REMOVED).err); } diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h index cc1b35686f..965eb64df0 100644 --- a/src/shell/command_helper.h +++ b/src/shell/command_helper.h @@ -71,16 +71,21 @@ "ERROR: {}\n", \ fmt::format(msg, ##__VA_ARGS__)) -#define SHELL_PRINTLN_WARNING(msg, ...) \ +#define SHELL_PRINT_WARNING_BASE(msg, ...) \ fmt::print(stdout, \ fmt::emphasis::bold | fmt::fg(fmt::color::yellow), \ - "WARNING: {}\n", \ + "WARNING: {}", \ fmt::format(msg, ##__VA_ARGS__)) +#define SHELL_PRINT_WARNING(msg, ...) SHELL_PRINT_WARNING_BASE(msg, ##__VA_ARGS__) + +#define SHELL_PRINTLN_WARNING(msg, ...) \ + SHELL_PRINT_WARNING_BASE("{}\n", fmt::format(msg, ##__VA_ARGS__)) + #define SHELL_PRINT_OK_BASE(msg, ...) \ fmt::print(stdout, fmt::emphasis::bold | fmt::fg(fmt::color::green), msg, ##__VA_ARGS__) -#define SHELL_PRINT_OK(msg, ...) SHELL_PRINT_OK_BASE("{}", fmt::format(msg, ##__VA_ARGS__)) +#define SHELL_PRINT_OK(msg, ...) SHELL_PRINT_OK_BASE(msg, ##__VA_ARGS__) #define SHELL_PRINTLN_OK(msg, ...) 
SHELL_PRINT_OK_BASE("{}\n", fmt::format(msg, ##__VA_ARGS__)) diff --git a/src/shell/commands/duplication.cpp b/src/shell/commands/duplication.cpp index cd7c07ad6d..82237af1a7 100644 --- a/src/shell/commands/duplication.cpp +++ b/src/shell/commands/duplication.cpp @@ -102,7 +102,7 @@ bool add_dup(command_executor *e, shell_context *sc, arguments args) hint = err_resp.get_value().hint; } - if (!err) { + if (!err && err.code() != dsn::ERR_DUP_EXIST) { SHELL_PRINTLN_ERROR( "adding duplication failed [app_name: {}, remote_cluster_name: {}, " "is_duplicating_checkpoint: {}, remote_app_name: {}, remote_replica_count: {}, " @@ -121,15 +121,22 @@ bool add_dup(command_executor *e, shell_context *sc, arguments args) return true; } + if (err.code() == dsn::ERR_DUP_EXIST) { + SHELL_PRINT_WARNING("duplication has been existing"); + } else { + SHELL_PRINT_OK("adding duplication succeed"); + } + const auto &resp = err_resp.get_value(); - SHELL_PRINT_OK( - "adding duplication succeed [app_name: {}, remote_cluster_name: {}, appid: {}, dupid: " - "{}, is_duplicating_checkpoint: {}", - app_name, - remote_cluster_name, - resp.appid, - resp.dupid, - is_duplicating_checkpoint); + SHELL_PRINT_OK(" [app_name: {}, remote_cluster_name: {}, appid: {}, dupid: {}", + app_name, + remote_cluster_name, + resp.appid, + resp.dupid); + + if (err) { + SHELL_PRINT_OK(", is_duplicating_checkpoint: {}", is_duplicating_checkpoint); + } if (resp.__isset.remote_app_name) { SHELL_PRINT_OK(", remote_app_name: {}", resp.remote_app_name); diff --git a/src/utils/error_code.h b/src/utils/error_code.h index 04df97947a..023ec2b253 100644 --- a/src/utils/error_code.h +++ b/src/utils/error_code.h @@ -182,6 +182,9 @@ DEFINE_ERR_CODE(ERR_RDB_CORRUPTION) DEFINE_ERR_CODE(ERR_DISK_IO_ERROR) DEFINE_ERR_CODE(ERR_CURL_FAILED) + +DEFINE_ERR_CODE(ERR_DUP_EXIST) + } // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::error_code); From 37b59d944748083f1de91e0a57a813aea5f1aba6 Mon Sep 17 00:00:00 2001 From: Samunroyu 
<36890229+Samunroyu@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:45:01 +0800 Subject: [PATCH 05/29] fix(shell): Add PARSE_OPT_STRS marco for shell command execute flag input mode (#2040) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The marco `PARSE_STRS` execute strs with `param_index`,and it only execute the number of params_index of input strs. The marco `PARSE_OPT_STRS` can execute input strs with flag. The historical flag input mode should be continued. --- src/shell/command_helper.h | 6 ++++++ src/shell/commands/cold_backup.cpp | 23 +++++++++++++++++++---- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h index 965eb64df0..250e28fec4 100644 --- a/src/shell/command_helper.h +++ b/src/shell/command_helper.h @@ -929,6 +929,12 @@ class aggregate_stats_calcs } \ } while (false) +#define PARSE_OPT_STRS(container, def_val, ...) \ + do { \ + const auto param = cmd(__VA_ARGS__, (def_val)).str(); \ + ::dsn::utils::split_args(param.c_str(), container, ','); \ + } while (false) + // A helper macro to parse command argument, the result is filled in an uint32_t variable named // 'value'. 
#define PARSE_UINT(value) \ diff --git a/src/shell/commands/cold_backup.cpp b/src/shell/commands/cold_backup.cpp index f235234c64..35cc2ff2e7 100644 --- a/src/shell/commands/cold_backup.cpp +++ b/src/shell/commands/cold_backup.cpp @@ -27,7 +27,9 @@ #include #include #include +#include #include +#include #include #include @@ -157,15 +159,28 @@ bool ls_backup_policy(command_executor *e, shell_context *sc, arguments args) bool query_backup_policy(command_executor *e, shell_context *sc, arguments args) { - const std::string query_backup_policy_help = " [-b|--backup_info_cnt] [-j|--json]"; + const std::string query_backup_policy_help = + "<-p|--policy_name> [-b|--backup_info_cnt] [-j|--json]"; argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); - RETURN_FALSE_IF_NOT(cmd.pos_args().size() > 1, + RETURN_FALSE_IF_NOT(cmd.params().size() >= 1, "invalid command, should be in the form of '{}'", query_backup_policy_help); - int param_index = 1; std::vector policy_names; - PARSE_STRS(policy_names); + PARSE_OPT_STRS(policy_names, "", {"-p", "--policy_name"}); + + if (policy_names.empty()) { + SHELL_PRINTLN_ERROR( + "invalid command, policy_name should be in the form of 'val1,val2,val3' and " + "should not be empty"); + return false; + } + + std::set str_set(policy_names.begin(), policy_names.end()); + if (str_set.size() != policy_names.size()) { + SHELL_PRINTLN_ERROR("invalid command, policy_name has duplicate values"); + return false; + } uint32_t backup_info_cnt; PARSE_OPT_UINT(backup_info_cnt, 3, {"-b", "--backup_info_cnt"}); From 1ea072e1a4003f0e8375ea92ffae2190adc80594 Mon Sep 17 00:00:00 2001 From: lengyuexuexuan <46274877+lengyuexuexuan@users.noreply.github.com> Date: Thu, 20 Jun 2024 11:18:51 +0800 Subject: [PATCH 06/29] chore(go-client): add generation thrift files of go-client (#1917) https://github.com/apache/incubator-pegasus/issues/1881 By uploading generation thrift files, the go client can be used directly by users through "go get" 
without the need to compile it locally. --- .gitignore | 5 - .licenserc.yaml | 10 +- go-client/admin/client_test.go | 2 +- go-client/idl/admin/GoUnusedProtection__.go | 6 + go-client/idl/admin/backup-consts.go | 27 + go-client/idl/admin/backup.go | 5299 +++++ go-client/idl/admin/bulk_load-consts.go | 27 + go-client/idl/admin/bulk_load.go | 4536 +++++ go-client/idl/admin/duplication-consts.go | 27 + go-client/idl/admin/duplication.go | 2606 +++ go-client/idl/admin/meta_admin-consts.go | 27 + go-client/idl/admin/meta_admin.go | 16081 ++++++++++++++++ go-client/idl/admin/metadata-consts.go | 27 + go-client/idl/admin/metadata.go | 1373 ++ go-client/idl/admin/partition_split-consts.go | 27 + go-client/idl/admin/partition_split.go | 3245 ++++ go-client/idl/cmd/GoUnusedProtection__.go | 6 + go-client/idl/cmd/command-consts.go | 22 + go-client/idl/cmd/command.go | 535 + go-client/idl/radmin/GoUnusedProtection__.go | 6 + go-client/idl/radmin/replica_admin-consts.go | 29 + go-client/idl/radmin/replica_admin.go | 3681 ++++ .../idl/replication/GoUnusedProtection__.go | 6 + .../idl/replication/dsn.layer2-consts.go | 25 + go-client/idl/replication/dsn.layer2.go | 2136 ++ go-client/idl/rrdb/GoUnusedProtection__.go | 6 + go-client/idl/rrdb/meta-remote/meta-remote.go | 183 + go-client/idl/rrdb/rrdb-consts.go | 27 + go-client/idl/rrdb/rrdb-remote/rrdb-remote.go | 536 + go-client/idl/rrdb/rrdb.go | 12123 ++++++++++++ 30 files changed, 52636 insertions(+), 10 deletions(-) create mode 100644 go-client/idl/admin/GoUnusedProtection__.go create mode 100644 go-client/idl/admin/backup-consts.go create mode 100644 go-client/idl/admin/backup.go create mode 100644 go-client/idl/admin/bulk_load-consts.go create mode 100644 go-client/idl/admin/bulk_load.go create mode 100644 go-client/idl/admin/duplication-consts.go create mode 100644 go-client/idl/admin/duplication.go create mode 100644 go-client/idl/admin/meta_admin-consts.go create mode 100644 go-client/idl/admin/meta_admin.go create mode 
100644 go-client/idl/admin/metadata-consts.go create mode 100644 go-client/idl/admin/metadata.go create mode 100644 go-client/idl/admin/partition_split-consts.go create mode 100644 go-client/idl/admin/partition_split.go create mode 100644 go-client/idl/cmd/GoUnusedProtection__.go create mode 100644 go-client/idl/cmd/command-consts.go create mode 100644 go-client/idl/cmd/command.go create mode 100644 go-client/idl/radmin/GoUnusedProtection__.go create mode 100644 go-client/idl/radmin/replica_admin-consts.go create mode 100644 go-client/idl/radmin/replica_admin.go create mode 100644 go-client/idl/replication/GoUnusedProtection__.go create mode 100644 go-client/idl/replication/dsn.layer2-consts.go create mode 100644 go-client/idl/replication/dsn.layer2.go create mode 100644 go-client/idl/rrdb/GoUnusedProtection__.go create mode 100755 go-client/idl/rrdb/meta-remote/meta-remote.go create mode 100644 go-client/idl/rrdb/rrdb-consts.go create mode 100755 go-client/idl/rrdb/rrdb-remote/rrdb-remote.go create mode 100644 go-client/idl/rrdb/rrdb.go diff --git a/.gitignore b/.gitignore index 8873dcf800..d458cd5c10 100644 --- a/.gitignore +++ b/.gitignore @@ -342,11 +342,6 @@ package-lock.json # ============= # go-client/bin go-client/coverage.txt -go-client/idl/admin/ -go-client/idl/cmd/ -go-client/idl/radmin/ -go-client/idl/replication/ -go-client/idl/rrdb/ thirdparty/output/ diff --git a/.licenserc.yaml b/.licenserc.yaml index 3a15f3201d..6128a75b7c 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -42,10 +42,12 @@ header: - '**/*.pdf' # Special files for golang. - '**/go.sum' - # TODO(wangdan): Generated files for go client, could generate dynamically? 
- - 'go-client/idl/base/GoUnusedProtection__.go' - - 'go-client/idl/base/dsn_err_string.go' - - 'go-client/idl/base/rocskdb_err_string.go' + - 'go-client/idl/admin/**' + - 'go-client/idl/base/**' + - 'go-client/idl/cmd/**' + - 'go-client/idl/radmin/**' + - 'go-client/idl/replication/**' + - 'go-client/idl/rrdb/**' # Special files for nodejs. - '**/.npmigonre' # Special files for python. diff --git a/go-client/admin/client_test.go b/go-client/admin/client_test.go index 6d8b12adec..ac15550770 100644 --- a/go-client/admin/client_test.go +++ b/go-client/admin/client_test.go @@ -175,7 +175,7 @@ func TestAdmin_ListNodes(t *testing.T) { for i, node := range nodes { // Each node should be alive. assert.Equal(t, admin.NodeStatus_NS_ALIVE, node.Status) - actualReplicaServerPorts[i] = node.Address.GetPort() + actualReplicaServerPorts[i] = node.GetNode().GetPort() } // Match elements without extra ordering. diff --git a/go-client/idl/admin/GoUnusedProtection__.go b/go-client/idl/admin/GoUnusedProtection__.go new file mode 100644 index 0000000000..86e6c7e055 --- /dev/null +++ b/go-client/idl/admin/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +var GoUnusedProtection__ int diff --git a/go-client/idl/admin/backup-consts.go b/go-client/idl/admin/backup-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/backup-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/backup.go b/go-client/idl/admin/backup.go new file mode 100644 index 0000000000..268cb2a5ab --- /dev/null +++ b/go-client/idl/admin/backup.go @@ -0,0 +1,5299 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +// Attributes: +// - PolicyName +// - BackupProviderType +type PolicyInfo struct { + PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` + BackupProviderType string `thrift:"backup_provider_type,2" db:"backup_provider_type" json:"backup_provider_type"` +} + +func NewPolicyInfo() *PolicyInfo { + return &PolicyInfo{} +} + +func (p *PolicyInfo) GetPolicyName() string { + return p.PolicyName +} + +func (p *PolicyInfo) GetBackupProviderType() string { + return p.BackupProviderType +} +func (p *PolicyInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if 
fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PolicyInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *PolicyInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *PolicyInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("policy_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PolicyInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) + } + if err := 
oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) + } + return err +} + +func (p *PolicyInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_provider_type: ", p), err) + } + return err +} + +func (p *PolicyInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PolicyInfo(%+v)", *p) +} + +// Attributes: +// - ClusterName +// - PolicyName +// - TimeStamp +// - AppName +// - AppID +// - NewAppName_ +// - BackupProviderName +// - SkipBadPartition +// - RestorePath +type ConfigurationRestoreRequest struct { + ClusterName string `thrift:"cluster_name,1" db:"cluster_name" json:"cluster_name"` + PolicyName string `thrift:"policy_name,2" db:"policy_name" json:"policy_name"` + TimeStamp int64 `thrift:"time_stamp,3" db:"time_stamp" json:"time_stamp"` + AppName string `thrift:"app_name,4" db:"app_name" json:"app_name"` + AppID int32 `thrift:"app_id,5" db:"app_id" json:"app_id"` + NewAppName_ string `thrift:"new_app_name,6" db:"new_app_name" json:"new_app_name"` + BackupProviderName string `thrift:"backup_provider_name,7" db:"backup_provider_name" json:"backup_provider_name"` + SkipBadPartition bool `thrift:"skip_bad_partition,8" db:"skip_bad_partition" json:"skip_bad_partition"` + RestorePath *string 
`thrift:"restore_path,9" db:"restore_path" json:"restore_path,omitempty"` +} + +func NewConfigurationRestoreRequest() *ConfigurationRestoreRequest { + return &ConfigurationRestoreRequest{} +} + +func (p *ConfigurationRestoreRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *ConfigurationRestoreRequest) GetPolicyName() string { + return p.PolicyName +} + +func (p *ConfigurationRestoreRequest) GetTimeStamp() int64 { + return p.TimeStamp +} + +func (p *ConfigurationRestoreRequest) GetAppName() string { + return p.AppName +} + +func (p *ConfigurationRestoreRequest) GetAppID() int32 { + return p.AppID +} + +func (p *ConfigurationRestoreRequest) GetNewAppName_() string { + return p.NewAppName_ +} + +func (p *ConfigurationRestoreRequest) GetBackupProviderName() string { + return p.BackupProviderName +} + +func (p *ConfigurationRestoreRequest) GetSkipBadPartition() bool { + return p.SkipBadPartition +} + +var ConfigurationRestoreRequest_RestorePath_DEFAULT string + +func (p *ConfigurationRestoreRequest) GetRestorePath() string { + if !p.IsSetRestorePath() { + return ConfigurationRestoreRequest_RestorePath_DEFAULT + } + return *p.RestorePath +} +func (p *ConfigurationRestoreRequest) IsSetRestorePath() bool { + return p.RestorePath != nil +} + +func (p *ConfigurationRestoreRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil 
{ + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p 
*ConfigurationRestoreRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TimeStamp = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.NewAppName_ = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.BackupProviderName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.SkipBadPartition = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.RestorePath = &v + } + return nil +} + +func (p *ConfigurationRestoreRequest) Write(oprot 
thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_restore_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRestoreRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cluster_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.policy_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("time_stamp", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:time_stamp: ", p), err) + } + if err := oprot.WriteI64(int64(p.TimeStamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.time_stamp (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:time_stamp: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err) + } + return err +} + +func (p 
*ConfigurationRestoreRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:new_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.NewAppName_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_app_name (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:new_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_name", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:backup_provider_name: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_name (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:backup_provider_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("skip_bad_partition", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:skip_bad_partition: ", p), err) + } + if err := oprot.WriteBool(bool(p.SkipBadPartition)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.skip_bad_partition (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:skip_bad_partition: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetRestorePath() { + if err := 
oprot.WriteFieldBegin("restore_path", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:restore_path: ", p), err) + } + if err := oprot.WriteString(string(*p.RestorePath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.restore_path (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:restore_path: ", p), err) + } + } + return err +} + +func (p *ConfigurationRestoreRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRestoreRequest(%+v)", *p) +} + +// Attributes: +// - Pid +// - Policy +// - AppName +// - BackupID +// - BackupPath +type BackupRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Policy *PolicyInfo `thrift:"policy,2" db:"policy" json:"policy"` + AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` + BackupID int64 `thrift:"backup_id,4" db:"backup_id" json:"backup_id"` + BackupPath *string `thrift:"backup_path,5" db:"backup_path" json:"backup_path,omitempty"` +} + +func NewBackupRequest() *BackupRequest { + return &BackupRequest{} +} + +var BackupRequest_Pid_DEFAULT *base.Gpid + +func (p *BackupRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BackupRequest_Pid_DEFAULT + } + return p.Pid +} + +var BackupRequest_Policy_DEFAULT *PolicyInfo + +func (p *BackupRequest) GetPolicy() *PolicyInfo { + if !p.IsSetPolicy() { + return BackupRequest_Policy_DEFAULT + } + return p.Policy +} + +func (p *BackupRequest) GetAppName() string { + return p.AppName +} + +func (p *BackupRequest) GetBackupID() int64 { + return p.BackupID +} + +var BackupRequest_BackupPath_DEFAULT string + +func (p *BackupRequest) GetBackupPath() string { + if !p.IsSetBackupPath() { + return BackupRequest_BackupPath_DEFAULT + } + return *p.BackupPath +} +func (p *BackupRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BackupRequest) 
IsSetPolicy() bool { + return p.Policy != nil +} + +func (p *BackupRequest) IsSetBackupPath() bool { + return p.BackupPath != nil +} + +func (p *BackupRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BackupRequest) ReadField2(iprot thrift.TProtocol) error { + p.Policy = &PolicyInfo{} + if err := p.Policy.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Policy), err) + } + return nil +} + +func (p *BackupRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BackupRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupPath = &v + } + return nil +} + +func (p *BackupRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write 
field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy: ", p), err) + } + if err := p.Policy.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Policy), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:backup_id: ", p), err) + } + return err 
+} + +func (p *BackupRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupPath() { + if err := oprot.WriteFieldBegin("backup_path", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_path: ", p), err) + } + if err := oprot.WriteString(string(*p.BackupPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_path (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_path: ", p), err) + } + } + return err +} + +func (p *BackupRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Pid +// - Progress +// - PolicyName +// - BackupID +// - CheckpointTotalSize +type BackupResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Pid *base.Gpid `thrift:"pid,2" db:"pid" json:"pid"` + Progress int32 `thrift:"progress,3" db:"progress" json:"progress"` + PolicyName string `thrift:"policy_name,4" db:"policy_name" json:"policy_name"` + BackupID int64 `thrift:"backup_id,5" db:"backup_id" json:"backup_id"` + CheckpointTotalSize int64 `thrift:"checkpoint_total_size,6" db:"checkpoint_total_size" json:"checkpoint_total_size"` +} + +func NewBackupResponse() *BackupResponse { + return &BackupResponse{} +} + +var BackupResponse_Err_DEFAULT *base.ErrorCode + +func (p *BackupResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return BackupResponse_Err_DEFAULT + } + return p.Err +} + +var BackupResponse_Pid_DEFAULT *base.Gpid + +func (p *BackupResponse) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BackupResponse_Pid_DEFAULT + } + return p.Pid +} + +func (p *BackupResponse) GetProgress() int32 { + return p.Progress +} + +func (p *BackupResponse) GetPolicyName() string { + return p.PolicyName +} + +func (p *BackupResponse) GetBackupID() int64 { + return p.BackupID +} 
+ +func (p *BackupResponse) GetCheckpointTotalSize() int64 { + return p.CheckpointTotalSize +} +func (p *BackupResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *BackupResponse) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BackupResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *BackupResponse) ReadField2(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BackupResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Progress = v + } + return nil +} + +func (p *BackupResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *BackupResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.CheckpointTotalSize = v + } + return nil +} + +func (p *BackupResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + 
if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:pid: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("progress", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progress: ", p), err) + } + if err := oprot.WriteI32(int32(p.Progress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.progress (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progress: ", p), err) + } + return err +} + 
+func (p *BackupResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:policy_name: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_id: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("checkpoint_total_size", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:checkpoint_total_size: ", p), err) + } + if err := oprot.WriteI64(int64(p.CheckpointTotalSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.checkpoint_total_size (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:checkpoint_total_size: ", p), err) + } + return err +} + +func (p *BackupResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupResponse(%+v)", *p) +} + +// Attributes: +// - Pid +// - PolicyName +type BackupClearRequest struct { + Pid *base.Gpid 
`thrift:"pid,1" db:"pid" json:"pid"` + PolicyName string `thrift:"policy_name,2" db:"policy_name" json:"policy_name"` +} + +func NewBackupClearRequest() *BackupClearRequest { + return &BackupClearRequest{} +} + +var BackupClearRequest_Pid_DEFAULT *base.Gpid + +func (p *BackupClearRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BackupClearRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *BackupClearRequest) GetPolicyName() string { + return p.PolicyName +} +func (p *BackupClearRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BackupClearRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupClearRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BackupClearRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err 
:= iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *BackupClearRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_clear_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupClearRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *BackupClearRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy_name: ", p), err) + } + return err +} + +func (p *BackupClearRequest) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("BackupClearRequest(%+v)", *p) +} + +// Attributes: +// - PolicyName +// - AddAppids +// - RemovalAppids +// - NewBackupIntervalSec_ +// - BackupHistoryCountToKeep +// - IsDisable +// - StartTime +type ConfigurationModifyBackupPolicyRequest struct { + PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` + AddAppids []int32 `thrift:"add_appids,2" db:"add_appids" json:"add_appids,omitempty"` + RemovalAppids []int32 `thrift:"removal_appids,3" db:"removal_appids" json:"removal_appids,omitempty"` + NewBackupIntervalSec_ *int64 `thrift:"new_backup_interval_sec,4" db:"new_backup_interval_sec" json:"new_backup_interval_sec,omitempty"` + BackupHistoryCountToKeep *int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep,omitempty"` + IsDisable *bool `thrift:"is_disable,6" db:"is_disable" json:"is_disable,omitempty"` + StartTime *string `thrift:"start_time,7" db:"start_time" json:"start_time,omitempty"` +} + +func NewConfigurationModifyBackupPolicyRequest() *ConfigurationModifyBackupPolicyRequest { + return &ConfigurationModifyBackupPolicyRequest{} +} + +func (p *ConfigurationModifyBackupPolicyRequest) GetPolicyName() string { + return p.PolicyName +} + +var ConfigurationModifyBackupPolicyRequest_AddAppids_DEFAULT []int32 + +func (p *ConfigurationModifyBackupPolicyRequest) GetAddAppids() []int32 { + return p.AddAppids +} + +var ConfigurationModifyBackupPolicyRequest_RemovalAppids_DEFAULT []int32 + +func (p *ConfigurationModifyBackupPolicyRequest) GetRemovalAppids() []int32 { + return p.RemovalAppids +} + +var ConfigurationModifyBackupPolicyRequest_NewBackupIntervalSec__DEFAULT int64 + +func (p *ConfigurationModifyBackupPolicyRequest) GetNewBackupIntervalSec_() int64 { + if !p.IsSetNewBackupIntervalSec_() { + return ConfigurationModifyBackupPolicyRequest_NewBackupIntervalSec__DEFAULT + } + return *p.NewBackupIntervalSec_ +} + +var 
ConfigurationModifyBackupPolicyRequest_BackupHistoryCountToKeep_DEFAULT int32 + +func (p *ConfigurationModifyBackupPolicyRequest) GetBackupHistoryCountToKeep() int32 { + if !p.IsSetBackupHistoryCountToKeep() { + return ConfigurationModifyBackupPolicyRequest_BackupHistoryCountToKeep_DEFAULT + } + return *p.BackupHistoryCountToKeep +} + +var ConfigurationModifyBackupPolicyRequest_IsDisable_DEFAULT bool + +func (p *ConfigurationModifyBackupPolicyRequest) GetIsDisable() bool { + if !p.IsSetIsDisable() { + return ConfigurationModifyBackupPolicyRequest_IsDisable_DEFAULT + } + return *p.IsDisable +} + +var ConfigurationModifyBackupPolicyRequest_StartTime_DEFAULT string + +func (p *ConfigurationModifyBackupPolicyRequest) GetStartTime() string { + if !p.IsSetStartTime() { + return ConfigurationModifyBackupPolicyRequest_StartTime_DEFAULT + } + return *p.StartTime +} +func (p *ConfigurationModifyBackupPolicyRequest) IsSetAddAppids() bool { + return p.AddAppids != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetRemovalAppids() bool { + return p.RemovalAppids != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetNewBackupIntervalSec_() bool { + return p.NewBackupIntervalSec_ != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetBackupHistoryCountToKeep() bool { + return p.BackupHistoryCountToKeep != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetIsDisable() bool { + return p.IsDisable != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetStartTime() bool { + return p.StartTime != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + 
break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error 
{ + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.AddAppids = tSlice + for i := 0; i < size; i++ { + var _elem0 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem0 = v + } + p.AddAppids = append(p.AddAppids, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.RemovalAppids = tSlice + for i := 0; i < size; i++ { + var _elem1 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem1 = v + } + p.RemovalAppids = append(p.RemovalAppids, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.NewBackupIntervalSec_ = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupHistoryCountToKeep = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.IsDisable = &v + } + return nil +} + +func (p 
*ConfigurationModifyBackupPolicyRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.StartTime = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_modify_backup_policy_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetAddAppids() { + if err := 
oprot.WriteFieldBegin("add_appids", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:add_appids: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.AddAppids)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.AddAppids { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:add_appids: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetRemovalAppids() { + if err := oprot.WriteFieldBegin("removal_appids", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:removal_appids: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.RemovalAppids)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RemovalAppids { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:removal_appids: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetNewBackupIntervalSec_() { + if err := oprot.WriteFieldBegin("new_backup_interval_sec", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:new_backup_interval_sec: ", p), err) + } + if err := oprot.WriteI64(int64(*p.NewBackupIntervalSec_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_backup_interval_sec (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:new_backup_interval_sec: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupHistoryCountToKeep() { + if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) + } + if err := oprot.WriteI32(int32(*p.BackupHistoryCountToKeep)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetIsDisable() { + if err := oprot.WriteFieldBegin("is_disable", thrift.BOOL, 6); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 6:is_disable: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsDisable)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_disable (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:is_disable: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetStartTime() { + if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:start_time: ", p), err) + } + if err := oprot.WriteString(string(*p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:start_time: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationModifyBackupPolicyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationModifyBackupPolicyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationModifyBackupPolicyResponse() *ConfigurationModifyBackupPolicyResponse { + return &ConfigurationModifyBackupPolicyResponse{} +} + +var ConfigurationModifyBackupPolicyResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationModifyBackupPolicyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationModifyBackupPolicyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationModifyBackupPolicyResponse) GetHintMessage() string { + return p.HintMessage +} +func (p 
*ConfigurationModifyBackupPolicyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_modify_backup_policy_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin 
error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationModifyBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationModifyBackupPolicyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationModifyBackupPolicyResponse(%+v)", *p) +} + +// Attributes: +// - BackupProviderType +// - PolicyName +// - AppIds +// - BackupIntervalSeconds +// - BackupHistoryCountToKeep +// - StartTime +type ConfigurationAddBackupPolicyRequest struct { + BackupProviderType string 
`thrift:"backup_provider_type,1" db:"backup_provider_type" json:"backup_provider_type"` + PolicyName string `thrift:"policy_name,2" db:"policy_name" json:"policy_name"` + AppIds []int32 `thrift:"app_ids,3" db:"app_ids" json:"app_ids"` + BackupIntervalSeconds int64 `thrift:"backup_interval_seconds,4" db:"backup_interval_seconds" json:"backup_interval_seconds"` + BackupHistoryCountToKeep int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep"` + StartTime string `thrift:"start_time,6" db:"start_time" json:"start_time"` +} + +func NewConfigurationAddBackupPolicyRequest() *ConfigurationAddBackupPolicyRequest { + return &ConfigurationAddBackupPolicyRequest{} +} + +func (p *ConfigurationAddBackupPolicyRequest) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *ConfigurationAddBackupPolicyRequest) GetPolicyName() string { + return p.PolicyName +} + +func (p *ConfigurationAddBackupPolicyRequest) GetAppIds() []int32 { + return p.AppIds +} + +func (p *ConfigurationAddBackupPolicyRequest) GetBackupIntervalSeconds() int64 { + return p.BackupIntervalSeconds +} + +func (p *ConfigurationAddBackupPolicyRequest) GetBackupHistoryCountToKeep() int32 { + return p.BackupHistoryCountToKeep +} + +func (p *ConfigurationAddBackupPolicyRequest) GetStartTime() string { + return p.StartTime +} +func (p *ConfigurationAddBackupPolicyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + 
return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + 
tSlice := make([]int32, 0, size) + p.AppIds = tSlice + for i := 0; i < size; i++ { + var _elem2 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem2 = v + } + p.AppIds = append(p.AppIds, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.BackupIntervalSeconds = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupHistoryCountToKeep = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_add_backup_policy_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); 
err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_provider_type: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy_name: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_ids", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_ids: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.AppIds)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.AppIds { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_ids: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_interval_seconds", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:backup_interval_seconds: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupIntervalSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_interval_seconds (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:backup_interval_seconds: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) + } + if err := oprot.WriteI32(int32(p.BackupHistoryCountToKeep)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_time: ", p), err) + } + if err := oprot.WriteString(string(p.StartTime)); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T.start_time (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_time: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationAddBackupPolicyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationAddBackupPolicyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationAddBackupPolicyResponse() *ConfigurationAddBackupPolicyResponse { + return &ConfigurationAddBackupPolicyResponse{} +} + +var ConfigurationAddBackupPolicyResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationAddBackupPolicyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationAddBackupPolicyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationAddBackupPolicyResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationAddBackupPolicyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationAddBackupPolicyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + 
return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_add_backup_policy_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationAddBackupPolicyResponse(%+v)", *p) +} + +// Attributes: +// - PolicyName +// - BackupProviderType +// - BackupIntervalSeconds +// - AppIds +// - BackupHistoryCountToKeep +// - StartTime +// - IsDisable +type PolicyEntry struct { + PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` + BackupProviderType string `thrift:"backup_provider_type,2" db:"backup_provider_type" json:"backup_provider_type"` + BackupIntervalSeconds string `thrift:"backup_interval_seconds,3" db:"backup_interval_seconds" json:"backup_interval_seconds"` + AppIds []int32 `thrift:"app_ids,4" db:"app_ids" json:"app_ids"` + BackupHistoryCountToKeep int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep"` + StartTime string `thrift:"start_time,6" db:"start_time" json:"start_time"` + IsDisable bool `thrift:"is_disable,7" db:"is_disable" json:"is_disable"` +} + +func NewPolicyEntry() *PolicyEntry { + return &PolicyEntry{} +} + +func (p *PolicyEntry) GetPolicyName() string { + return p.PolicyName +} + +func (p *PolicyEntry) GetBackupProviderType() 
string { + return p.BackupProviderType +} + +func (p *PolicyEntry) GetBackupIntervalSeconds() string { + return p.BackupIntervalSeconds +} + +func (p *PolicyEntry) GetAppIds() []int32 { + return p.AppIds +} + +func (p *PolicyEntry) GetBackupHistoryCountToKeep() int32 { + return p.BackupHistoryCountToKeep +} + +func (p *PolicyEntry) GetStartTime() string { + return p.StartTime +} + +func (p *PolicyEntry) GetIsDisable() bool { + return p.IsDisable +} +func (p *PolicyEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.SET { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { 
+ return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PolicyEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *PolicyEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *PolicyEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupIntervalSeconds = v + } + return nil +} + +func (p *PolicyEntry) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]int32, 0, size) + p.AppIds = tSet + for i := 0; i < size; i++ { + var _elem3 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem3 = v + } + p.AppIds = append(p.AppIds, _elem3) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + return nil +} + +func (p *PolicyEntry) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + 
p.BackupHistoryCountToKeep = v + } + return nil +} + +func (p *PolicyEntry) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *PolicyEntry) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.IsDisable = v + } + return nil +} + +func (p *PolicyEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("policy_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PolicyEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) + } + 
return err +} + +func (p *PolicyEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_provider_type: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_interval_seconds", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_interval_seconds: ", p), err) + } + if err := oprot.WriteString(string(p.BackupIntervalSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_interval_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_interval_seconds: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_ids", thrift.SET, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_ids: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.I32, len(p.AppIds)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(p.AppIds); i++ { + for j := i + 1; j < len(p.AppIds); j++ { + if reflect.DeepEqual(p.AppIds[i], p.AppIds[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.AppIds[i])) + } + } + } + for _, v := range p.AppIds { + if err := oprot.WriteI32(int32(v)); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_ids: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) + } + if err := oprot.WriteI32(int32(p.BackupHistoryCountToKeep)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_time: ", p), err) + } + if err := oprot.WriteString(string(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_time: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_disable", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_disable: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsDisable)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_disable (7) field write error: ", p), err) + } + if 
err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_disable: ", p), err) + } + return err +} + +func (p *PolicyEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PolicyEntry(%+v)", *p) +} + +// Attributes: +// - BackupID +// - StartTimeMs +// - EndTimeMs +// - AppIds +type BackupEntry struct { + BackupID int64 `thrift:"backup_id,1" db:"backup_id" json:"backup_id"` + StartTimeMs int64 `thrift:"start_time_ms,2" db:"start_time_ms" json:"start_time_ms"` + EndTimeMs int64 `thrift:"end_time_ms,3" db:"end_time_ms" json:"end_time_ms"` + AppIds []int32 `thrift:"app_ids,4" db:"app_ids" json:"app_ids"` +} + +func NewBackupEntry() *BackupEntry { + return &BackupEntry{} +} + +func (p *BackupEntry) GetBackupID() int64 { + return p.BackupID +} + +func (p *BackupEntry) GetStartTimeMs() int64 { + return p.StartTimeMs +} + +func (p *BackupEntry) GetEndTimeMs() int64 { + return p.EndTimeMs +} + +func (p *BackupEntry) GetAppIds() []int32 { + return p.AppIds +} +func (p *BackupEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.SET { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.StartTimeMs = v + } + return nil +} + +func (p *BackupEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.EndTimeMs = v + } + return nil +} + +func (p *BackupEntry) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]int32, 0, size) + p.AppIds = tSet + for i := 0; i < size; i++ { + var _elem4 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem4 = v + } + p.AppIds = append(p.AppIds, _elem4) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + return nil +} + +func (p *BackupEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct 
begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_id: ", p), err) + } + return err +} + +func (p *BackupEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time_ms", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:start_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.StartTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time_ms (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:start_time_ms: ", p), err) + } + return err +} + +func (p *BackupEntry) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("end_time_ms", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:end_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.EndTimeMs)); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T.end_time_ms (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:end_time_ms: ", p), err) + } + return err +} + +func (p *BackupEntry) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_ids", thrift.SET, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_ids: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.I32, len(p.AppIds)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(p.AppIds); i++ { + for j := i + 1; j < len(p.AppIds); j++ { + if reflect.DeepEqual(p.AppIds[i], p.AppIds[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.AppIds[i])) + } + } + } + for _, v := range p.AppIds { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_ids: ", p), err) + } + return err +} + +func (p *BackupEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupEntry(%+v)", *p) +} + +// Attributes: +// - PolicyNames +// - BackupInfoCount +type ConfigurationQueryBackupPolicyRequest struct { + PolicyNames []string `thrift:"policy_names,1" db:"policy_names" json:"policy_names"` + BackupInfoCount int32 `thrift:"backup_info_count,2" db:"backup_info_count" json:"backup_info_count"` +} + +func NewConfigurationQueryBackupPolicyRequest() *ConfigurationQueryBackupPolicyRequest { + return &ConfigurationQueryBackupPolicyRequest{} +} + +func (p *ConfigurationQueryBackupPolicyRequest) GetPolicyNames() []string { + return p.PolicyNames +} + +func (p *ConfigurationQueryBackupPolicyRequest) GetBackupInfoCount() int32 { + return p.BackupInfoCount +} +func (p *ConfigurationQueryBackupPolicyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return 
err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.PolicyNames = tSlice + for i := 0; i < size; i++ { + var _elem5 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem5 = v + } + p.PolicyNames = append(p.PolicyNames, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupInfoCount = v + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_backup_policy_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_names", thrift.LIST, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_names: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.PolicyNames)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PolicyNames { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_names: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_info_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_info_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.BackupInfoCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_info_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_info_count: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryBackupPolicyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Policys +// - BackupInfos +// - HintMsg +type ConfigurationQueryBackupPolicyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Policys []*PolicyEntry `thrift:"policys,2" db:"policys" json:"policys"` + BackupInfos [][]*BackupEntry `thrift:"backup_infos,3" db:"backup_infos" json:"backup_infos"` + HintMsg *string `thrift:"hint_msg,4" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewConfigurationQueryBackupPolicyResponse() 
*ConfigurationQueryBackupPolicyResponse { + return &ConfigurationQueryBackupPolicyResponse{} +} + +var ConfigurationQueryBackupPolicyResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationQueryBackupPolicyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationQueryBackupPolicyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationQueryBackupPolicyResponse) GetPolicys() []*PolicyEntry { + return p.Policys +} + +func (p *ConfigurationQueryBackupPolicyResponse) GetBackupInfos() [][]*BackupEntry { + return p.BackupInfos +} + +var ConfigurationQueryBackupPolicyResponse_HintMsg_DEFAULT string + +func (p *ConfigurationQueryBackupPolicyResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return ConfigurationQueryBackupPolicyResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *ConfigurationQueryBackupPolicyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*PolicyEntry, 0, size) + p.Policys = tSlice + for i := 0; i < size; i++ { + _elem6 := &PolicyEntry{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.Policys = append(p.Policys, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([][]*BackupEntry, 0, size) + p.BackupInfos = tSlice + for i := 0; i < size; i++ { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BackupEntry, 0, size) + _elem7 := tSlice + for i := 0; i < size; i++ 
{
			_elem8 := &BackupEntry{}
			if err := _elem8.Read(iprot); err != nil {
				return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
			}
			_elem7 = append(_elem7, _elem8)
		}
		if err := iprot.ReadListEnd(); err != nil {
			return thrift.PrependError("error reading list end: ", err)
		}
		p.BackupInfos = append(p.BackupInfos, _elem7)
	}
	if err := iprot.ReadListEnd(); err != nil {
		return thrift.PrependError("error reading list end: ", err)
	}
	return nil
}

func (p *ConfigurationQueryBackupPolicyResponse) ReadField4(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 4: ", err)
	} else {
		p.HintMsg = &v
	}
	return nil
}

func (p *ConfigurationQueryBackupPolicyResponse) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("configuration_query_backup_policy_response"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
		if err := p.writeField2(oprot); err != nil {
			return err
		}
		if err := p.writeField3(oprot); err != nil {
			return err
		}
		if err := p.writeField4(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *ConfigurationQueryBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err)
	}
	if err := p.Err.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err)
	}
	return err
}

func (p *ConfigurationQueryBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("policys", thrift.LIST, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policys: ", p), err)
	}
	if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Policys)); err != nil {
		return thrift.PrependError("error writing list begin: ", err)
	}
	for _, v := range p.Policys {
		if err := v.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
		}
	}
	if err := oprot.WriteListEnd(); err != nil {
		return thrift.PrependError("error writing list end: ", err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policys: ", p), err)
	}
	return err
}

// writeField3 serializes backup_infos, a list of lists; the inner range
// variable shadows the outer one, as emitted by the generator.
func (p *ConfigurationQueryBackupPolicyResponse) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("backup_infos", thrift.LIST, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_infos: ", p), err)
	}
	if err := oprot.WriteListBegin(thrift.LIST, len(p.BackupInfos)); err != nil {
		return thrift.PrependError("error writing list begin: ", err)
	}
	for _, v := range p.BackupInfos {
		if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil {
			return thrift.PrependError("error writing list begin: ", err)
		}
		for _, v := range v {
			if err := v.Write(oprot); err != nil {
				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
			}
		}
		if err := oprot.WriteListEnd(); err != nil {
			return thrift.PrependError("error writing list end: ", err)
		}
	}
	if err := oprot.WriteListEnd(); err != nil {
		return thrift.PrependError("error writing list end: ", err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_infos: ", p), err)
	}
	return err
}

// writeField4 serializes the optional hint_msg field only when it is set.
func (p *ConfigurationQueryBackupPolicyResponse) writeField4(oprot thrift.TProtocol) (err error) {
	if p.IsSetHintMsg() {
		if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 4); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint_msg: ", p), err)
		}
		if err := oprot.WriteString(string(*p.HintMsg)); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T.hint_msg (4) field write error: ", p), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint_msg: ", p), err)
		}
	}
	return err
}

// String implements fmt.Stringer; safe on a nil receiver.
func (p *ConfigurationQueryBackupPolicyResponse) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("ConfigurationQueryBackupPolicyResponse(%+v)", *p)
}

// Attributes:
//  - Pid
//  - RestoreStatus
//  - Progress
//  - Reason
type ConfigurationReportRestoreStatusRequest struct {
	Pid           *base.Gpid      `thrift:"pid,1" db:"pid" json:"pid"`
	RestoreStatus *base.ErrorCode `thrift:"restore_status,2" db:"restore_status" json:"restore_status"`
	Progress      int32           `thrift:"progress,3" db:"progress" json:"progress"`
	Reason        *string         `thrift:"reason,4" db:"reason" json:"reason,omitempty"`
}

func NewConfigurationReportRestoreStatusRequest() *ConfigurationReportRestoreStatusRequest {
	return &ConfigurationReportRestoreStatusRequest{}
}

var ConfigurationReportRestoreStatusRequest_Pid_DEFAULT *base.Gpid

func (p *ConfigurationReportRestoreStatusRequest) GetPid() *base.Gpid {
	if !p.IsSetPid() {
		return ConfigurationReportRestoreStatusRequest_Pid_DEFAULT
	}
	return p.Pid
}

var ConfigurationReportRestoreStatusRequest_RestoreStatus_DEFAULT *base.ErrorCode

func (p *ConfigurationReportRestoreStatusRequest) GetRestoreStatus() *base.ErrorCode {
	if !p.IsSetRestoreStatus() {
		return ConfigurationReportRestoreStatusRequest_RestoreStatus_DEFAULT
	}
	return p.RestoreStatus
}

func (p *ConfigurationReportRestoreStatusRequest) GetProgress() int32 {
	return p.Progress
}

var ConfigurationReportRestoreStatusRequest_Reason_DEFAULT string

func (p *ConfigurationReportRestoreStatusRequest) GetReason() string {
	if !p.IsSetReason() {
		return ConfigurationReportRestoreStatusRequest_Reason_DEFAULT
	}
	return *p.Reason
}
func (p *ConfigurationReportRestoreStatusRequest) IsSetPid() bool {
	return p.Pid != nil
}

func (p *ConfigurationReportRestoreStatusRequest) IsSetRestoreStatus() bool {
	return p.RestoreStatus != nil
}

func (p *ConfigurationReportRestoreStatusRequest) IsSetReason() bool {
	return p.Reason != nil
}

// Read deserializes the request; unknown fields are skipped.
func (p *ConfigurationReportRestoreStatusRequest) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 2:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField2(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 3:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField3(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 4:
			if fieldTypeId == thrift.STRING {
				if err := p.ReadField4(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

func (p *ConfigurationReportRestoreStatusRequest) ReadField1(iprot thrift.TProtocol) error {
	p.Pid = &base.Gpid{}
	if err := p.Pid.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err)
	}
	return nil
}

func (p *ConfigurationReportRestoreStatusRequest) ReadField2(iprot thrift.TProtocol) error {
	p.RestoreStatus = &base.ErrorCode{}
	if err := p.RestoreStatus.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RestoreStatus), err)
	}
	return nil
}

func (p *ConfigurationReportRestoreStatusRequest) ReadField3(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 3: ", err)
	} else {
		p.Progress = v
	}
	return nil
}

func (p *ConfigurationReportRestoreStatusRequest) ReadField4(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 4: ", err)
	} else {
		p.Reason = &v
	}
	return nil
}

func (p *ConfigurationReportRestoreStatusRequest) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("configuration_report_restore_status_request"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
		if err := p.writeField2(oprot); err != nil {
			return err
		}
		if err := p.writeField3(oprot); err != nil {
			return err
		}
		if err := p.writeField4(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *ConfigurationReportRestoreStatusRequest) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err)
	}
	if err := p.Pid.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err)
	}
	return err
}

func (p *ConfigurationReportRestoreStatusRequest) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("restore_status", thrift.STRUCT, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:restore_status: ", p), err)
	}
	if err := p.RestoreStatus.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RestoreStatus), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:restore_status: ", p), err)
	}
	return err
}

func (p *ConfigurationReportRestoreStatusRequest) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("progress", thrift.I32, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progress: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.Progress)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.progress (3) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progress: ", p), err)
	}
	return err
}

// writeField4 serializes the optional reason field only when it is set.
func (p *ConfigurationReportRestoreStatusRequest) writeField4(oprot thrift.TProtocol) (err error) {
	if p.IsSetReason() {
		if err := oprot.WriteFieldBegin("reason", thrift.STRING, 4); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:reason: ", p), err)
		}
		if err := oprot.WriteString(string(*p.Reason)); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T.reason (4) field write error: ", p), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:reason: ", p), err)
		}
	}
	return err
}

// String implements fmt.Stringer; safe on a nil receiver.
func (p *ConfigurationReportRestoreStatusRequest) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("ConfigurationReportRestoreStatusRequest(%+v)", *p)
}

// Attributes:
//  - Err
type ConfigurationReportRestoreStatusResponse struct {
	Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"`
}

func NewConfigurationReportRestoreStatusResponse() *ConfigurationReportRestoreStatusResponse {
	return &ConfigurationReportRestoreStatusResponse{}
}

var ConfigurationReportRestoreStatusResponse_Err_DEFAULT *base.ErrorCode

func (p *ConfigurationReportRestoreStatusResponse) GetErr() *base.ErrorCode {
	if !p.IsSetErr() {
		return ConfigurationReportRestoreStatusResponse_Err_DEFAULT
	}
	return p.Err
}
func (p *ConfigurationReportRestoreStatusResponse) IsSetErr() bool {
	return p.Err != nil
}

// Read deserializes the response; unknown fields are skipped.
func (p *ConfigurationReportRestoreStatusResponse) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
+ if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_report_restore_status_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationReportRestoreStatusResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationReportRestoreStatusResponse(%+v)", *p) +} + +// Attributes: +// - RestoreAppID +type ConfigurationQueryRestoreRequest struct { + 
RestoreAppID int32 `thrift:"restore_app_id,1" db:"restore_app_id" json:"restore_app_id"` +} + +func NewConfigurationQueryRestoreRequest() *ConfigurationQueryRestoreRequest { + return &ConfigurationQueryRestoreRequest{} +} + +func (p *ConfigurationQueryRestoreRequest) GetRestoreAppID() int32 { + return p.RestoreAppID +} +func (p *ConfigurationQueryRestoreRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryRestoreRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.RestoreAppID = v + } + return nil +} + +func (p *ConfigurationQueryRestoreRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_restore_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err 
!= nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:restore_app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.RestoreAppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.restore_app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:restore_app_id: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryRestoreRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - RestoreStatus +// - RestoreProgress +type ConfigurationQueryRestoreResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + RestoreStatus []*base.ErrorCode `thrift:"restore_status,2" db:"restore_status" json:"restore_status"` + RestoreProgress []int32 `thrift:"restore_progress,3" db:"restore_progress" json:"restore_progress"` +} + +func NewConfigurationQueryRestoreResponse() *ConfigurationQueryRestoreResponse { + return &ConfigurationQueryRestoreResponse{} +} + +var ConfigurationQueryRestoreResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationQueryRestoreResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationQueryRestoreResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationQueryRestoreResponse) GetRestoreStatus() []*base.ErrorCode { + return p.RestoreStatus +} + +func (p *ConfigurationQueryRestoreResponse) GetRestoreProgress() []int32 { + return p.RestoreProgress +} +func (p *ConfigurationQueryRestoreResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationQueryRestoreResponse) Read(iprot 
thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.ErrorCode, 0, size) + p.RestoreStatus = tSlice + for i := 0; i < size; i++ { + _elem9 := &base.ErrorCode{} + if err := _elem9.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), 
err) + } + p.RestoreStatus = append(p.RestoreStatus, _elem9) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.RestoreProgress = tSlice + for i := 0; i < size; i++ { + var _elem10 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem10 = v + } + p.RestoreProgress = append(p.RestoreProgress, _elem10) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_restore_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_status", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:restore_status: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RestoreStatus)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RestoreStatus { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:restore_status: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_progress", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:restore_progress: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.RestoreProgress)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RestoreProgress { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:restore_progress: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryRestoreResponse(%+v)", *p) +} + +// Attributes: +// - BackupProviderType +// - AppID +// - BackupPath +type StartBackupAppRequest struct { + BackupProviderType string `thrift:"backup_provider_type,1" db:"backup_provider_type" json:"backup_provider_type"` + AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` + BackupPath *string `thrift:"backup_path,3" db:"backup_path" json:"backup_path,omitempty"` +} + +func NewStartBackupAppRequest() *StartBackupAppRequest { + return &StartBackupAppRequest{} +} + +func (p *StartBackupAppRequest) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *StartBackupAppRequest) GetAppID() int32 { + return p.AppID +} + +var StartBackupAppRequest_BackupPath_DEFAULT string + +func (p *StartBackupAppRequest) GetBackupPath() string { + if !p.IsSetBackupPath() { + return StartBackupAppRequest_BackupPath_DEFAULT + } + return *p.BackupPath +} +func (p *StartBackupAppRequest) IsSetBackupPath() bool { + return p.BackupPath != nil +} + +func (p *StartBackupAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err 
+ } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBackupAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *StartBackupAppRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *StartBackupAppRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupPath = &v + } + return nil +} + +func (p *StartBackupAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return 
thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBackupAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_provider_type: ", p), err) + } + return err +} + +func (p *StartBackupAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) + } + return err +} + +func (p *StartBackupAppRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupPath() { + if err := oprot.WriteFieldBegin("backup_path", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_path: ", p), err) + } + if err := oprot.WriteString(string(*p.BackupPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_path (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_path: ", p), err) + } + } + return err 
+} + +func (p *StartBackupAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBackupAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +// - BackupID +type StartBackupAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` + BackupID *int64 `thrift:"backup_id,3" db:"backup_id" json:"backup_id,omitempty"` +} + +func NewStartBackupAppResponse() *StartBackupAppResponse { + return &StartBackupAppResponse{} +} + +var StartBackupAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartBackupAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartBackupAppResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartBackupAppResponse) GetHintMessage() string { + return p.HintMessage +} + +var StartBackupAppResponse_BackupID_DEFAULT int64 + +func (p *StartBackupAppResponse) GetBackupID() int64 { + if !p.IsSetBackupID() { + return StartBackupAppResponse_BackupID_DEFAULT + } + return *p.BackupID +} +func (p *StartBackupAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartBackupAppResponse) IsSetBackupID() bool { + return p.BackupID != nil +} + +func (p *StartBackupAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else 
{ + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBackupAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartBackupAppResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *StartBackupAppResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupID = &v + } + return nil +} + +func (p *StartBackupAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p 
*StartBackupAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartBackupAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *StartBackupAppResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupID() { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(*p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_id: ", p), err) + } + } + return err +} + +func (p *StartBackupAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBackupAppResponse(%+v)", *p) +} + +// Attributes: +// - BackupID +// - AppName +// - BackupProviderType +// - BackupPath +// - StartTimeMs +// - EndTimeMs +// - 
IsBackupFailed +type BackupItem struct { + BackupID int64 `thrift:"backup_id,1" db:"backup_id" json:"backup_id"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + BackupProviderType string `thrift:"backup_provider_type,3" db:"backup_provider_type" json:"backup_provider_type"` + BackupPath string `thrift:"backup_path,4" db:"backup_path" json:"backup_path"` + StartTimeMs int64 `thrift:"start_time_ms,5" db:"start_time_ms" json:"start_time_ms"` + EndTimeMs int64 `thrift:"end_time_ms,6" db:"end_time_ms" json:"end_time_ms"` + IsBackupFailed bool `thrift:"is_backup_failed,7" db:"is_backup_failed" json:"is_backup_failed"` +} + +func NewBackupItem() *BackupItem { + return &BackupItem{} +} + +func (p *BackupItem) GetBackupID() int64 { + return p.BackupID +} + +func (p *BackupItem) GetAppName() string { + return p.AppName +} + +func (p *BackupItem) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *BackupItem) GetBackupPath() string { + return p.BackupPath +} + +func (p *BackupItem) GetStartTimeMs() int64 { + return p.StartTimeMs +} + +func (p *BackupItem) GetEndTimeMs() int64 { + return p.EndTimeMs +} + +func (p *BackupItem) GetIsBackupFailed() bool { + return p.IsBackupFailed +} +func (p *BackupItem) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupItem) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupItem) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BackupItem) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p 
*BackupItem) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.BackupPath = v + } + return nil +} + +func (p *BackupItem) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.StartTimeMs = v + } + return nil +} + +func (p *BackupItem) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.EndTimeMs = v + } + return nil +} + +func (p *BackupItem) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.IsBackupFailed = v + } + return nil +} + +func (p *BackupItem) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_item"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupItem) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_id: ", p), err) + } + return err +} + +func (p *BackupItem) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *BackupItem) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_provider_type: ", p), err) + } + return err +} + +func (p *BackupItem) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_path", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:backup_path: ", p), err) + } + if err := oprot.WriteString(string(p.BackupPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_path (4) field write error: ", p), 
err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:backup_path: ", p), err) + } + return err +} + +func (p *BackupItem) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time_ms", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:start_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.StartTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time_ms (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:start_time_ms: ", p), err) + } + return err +} + +func (p *BackupItem) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("end_time_ms", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:end_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.EndTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.end_time_ms (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:end_time_ms: ", p), err) + } + return err +} + +func (p *BackupItem) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_backup_failed", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_backup_failed: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsBackupFailed)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_backup_failed (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_backup_failed: ", p), err) + } + return err +} + +func (p *BackupItem) String() string { + if p == nil { + return "" + } + 
return fmt.Sprintf("BackupItem(%+v)", *p) +} + +// Attributes: +// - AppID +// - BackupID +type QueryBackupStatusRequest struct { + AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` + BackupID *int64 `thrift:"backup_id,2" db:"backup_id" json:"backup_id,omitempty"` +} + +func NewQueryBackupStatusRequest() *QueryBackupStatusRequest { + return &QueryBackupStatusRequest{} +} + +func (p *QueryBackupStatusRequest) GetAppID() int32 { + return p.AppID +} + +var QueryBackupStatusRequest_BackupID_DEFAULT int64 + +func (p *QueryBackupStatusRequest) GetBackupID() int64 { + if !p.IsSetBackupID() { + return QueryBackupStatusRequest_BackupID_DEFAULT + } + return *p.BackupID +} +func (p *QueryBackupStatusRequest) IsSetBackupID() bool { + return p.BackupID != nil +} + +func (p *QueryBackupStatusRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBackupStatusRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); 
err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *QueryBackupStatusRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupID = &v + } + return nil +} + +func (p *QueryBackupStatusRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBackupStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) + } + return err +} + +func (p *QueryBackupStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupID() { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(*p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (2) field write error: ", p), err) + } + 
if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_id: ", p), err) + } + } + return err +} + +func (p *QueryBackupStatusRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBackupStatusRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +// - BackupItems +type QueryBackupStatusResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` + BackupItems []*BackupItem `thrift:"backup_items,3" db:"backup_items" json:"backup_items,omitempty"` +} + +func NewQueryBackupStatusResponse() *QueryBackupStatusResponse { + return &QueryBackupStatusResponse{} +} + +var QueryBackupStatusResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryBackupStatusResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryBackupStatusResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryBackupStatusResponse) GetHintMessage() string { + return p.HintMessage +} + +var QueryBackupStatusResponse_BackupItems_DEFAULT []*BackupItem + +func (p *QueryBackupStatusResponse) GetBackupItems() []*BackupItem { + return p.BackupItems +} +func (p *QueryBackupStatusResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryBackupStatusResponse) IsSetBackupItems() bool { + return p.BackupItems != nil +} + +func (p *QueryBackupStatusResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBackupStatusResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryBackupStatusResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *QueryBackupStatusResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BackupItem, 0, size) + p.BackupItems = tSlice + for i := 0; i < size; i++ { + _elem11 := &BackupItem{} + if err := _elem11.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) + } + p.BackupItems = append(p.BackupItems, _elem11) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBackupStatusResponse) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("query_backup_status_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBackupStatusResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryBackupStatusResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *QueryBackupStatusResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupItems() { + if err := oprot.WriteFieldBegin("backup_items", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field begin error 3:backup_items: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BackupItems)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BackupItems { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_items: ", p), err) + } + } + return err +} + +func (p *QueryBackupStatusResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBackupStatusResponse(%+v)", *p) +} diff --git a/go-client/idl/admin/bulk_load-consts.go b/go-client/idl/admin/bulk_load-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/bulk_load-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/bulk_load.go b/go-client/idl/admin/bulk_load.go new file mode 100644 index 0000000000..f7529538c8 --- /dev/null +++ b/go-client/idl/admin/bulk_load.go @@ -0,0 +1,4536 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type BulkLoadStatus int64 + +const ( + BulkLoadStatus_BLS_INVALID BulkLoadStatus = 0 + BulkLoadStatus_BLS_DOWNLOADING BulkLoadStatus = 1 + BulkLoadStatus_BLS_DOWNLOADED BulkLoadStatus = 2 + BulkLoadStatus_BLS_INGESTING BulkLoadStatus = 3 + BulkLoadStatus_BLS_SUCCEED BulkLoadStatus = 4 + BulkLoadStatus_BLS_FAILED BulkLoadStatus = 5 + BulkLoadStatus_BLS_PAUSING BulkLoadStatus = 6 + BulkLoadStatus_BLS_PAUSED BulkLoadStatus = 7 + BulkLoadStatus_BLS_CANCELED BulkLoadStatus = 8 +) + +func (p BulkLoadStatus) String() string { + switch p { + case BulkLoadStatus_BLS_INVALID: + return "BLS_INVALID" + case BulkLoadStatus_BLS_DOWNLOADING: + return "BLS_DOWNLOADING" + case BulkLoadStatus_BLS_DOWNLOADED: + return "BLS_DOWNLOADED" + case BulkLoadStatus_BLS_INGESTING: + return "BLS_INGESTING" + case BulkLoadStatus_BLS_SUCCEED: + return "BLS_SUCCEED" + case BulkLoadStatus_BLS_FAILED: + return "BLS_FAILED" + case 
BulkLoadStatus_BLS_PAUSING: + return "BLS_PAUSING" + case BulkLoadStatus_BLS_PAUSED: + return "BLS_PAUSED" + case BulkLoadStatus_BLS_CANCELED: + return "BLS_CANCELED" + } + return "" +} + +func BulkLoadStatusFromString(s string) (BulkLoadStatus, error) { + switch s { + case "BLS_INVALID": + return BulkLoadStatus_BLS_INVALID, nil + case "BLS_DOWNLOADING": + return BulkLoadStatus_BLS_DOWNLOADING, nil + case "BLS_DOWNLOADED": + return BulkLoadStatus_BLS_DOWNLOADED, nil + case "BLS_INGESTING": + return BulkLoadStatus_BLS_INGESTING, nil + case "BLS_SUCCEED": + return BulkLoadStatus_BLS_SUCCEED, nil + case "BLS_FAILED": + return BulkLoadStatus_BLS_FAILED, nil + case "BLS_PAUSING": + return BulkLoadStatus_BLS_PAUSING, nil + case "BLS_PAUSED": + return BulkLoadStatus_BLS_PAUSED, nil + case "BLS_CANCELED": + return BulkLoadStatus_BLS_CANCELED, nil + } + return BulkLoadStatus(0), fmt.Errorf("not a valid BulkLoadStatus string") +} + +func BulkLoadStatusPtr(v BulkLoadStatus) *BulkLoadStatus { return &v } + +func (p BulkLoadStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *BulkLoadStatus) UnmarshalText(text []byte) error { + q, err := BulkLoadStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *BulkLoadStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = BulkLoadStatus(v) + return nil +} + +func (p *BulkLoadStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type IngestionStatus int64 + +const ( + IngestionStatus_IS_INVALID IngestionStatus = 0 + IngestionStatus_IS_RUNNING IngestionStatus = 1 + IngestionStatus_IS_SUCCEED IngestionStatus = 2 + IngestionStatus_IS_FAILED IngestionStatus = 3 +) + +func (p IngestionStatus) String() string { + switch p { + case IngestionStatus_IS_INVALID: + return "IS_INVALID" + case IngestionStatus_IS_RUNNING: + return 
"IS_RUNNING" + case IngestionStatus_IS_SUCCEED: + return "IS_SUCCEED" + case IngestionStatus_IS_FAILED: + return "IS_FAILED" + } + return "" +} + +func IngestionStatusFromString(s string) (IngestionStatus, error) { + switch s { + case "IS_INVALID": + return IngestionStatus_IS_INVALID, nil + case "IS_RUNNING": + return IngestionStatus_IS_RUNNING, nil + case "IS_SUCCEED": + return IngestionStatus_IS_SUCCEED, nil + case "IS_FAILED": + return IngestionStatus_IS_FAILED, nil + } + return IngestionStatus(0), fmt.Errorf("not a valid IngestionStatus string") +} + +func IngestionStatusPtr(v IngestionStatus) *IngestionStatus { return &v } + +func (p IngestionStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *IngestionStatus) UnmarshalText(text []byte) error { + q, err := IngestionStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *IngestionStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = IngestionStatus(v) + return nil +} + +func (p *IngestionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type BulkLoadControlType int64 + +const ( + BulkLoadControlType_BLC_PAUSE BulkLoadControlType = 0 + BulkLoadControlType_BLC_RESTART BulkLoadControlType = 1 + BulkLoadControlType_BLC_CANCEL BulkLoadControlType = 2 + BulkLoadControlType_BLC_FORCE_CANCEL BulkLoadControlType = 3 +) + +func (p BulkLoadControlType) String() string { + switch p { + case BulkLoadControlType_BLC_PAUSE: + return "BLC_PAUSE" + case BulkLoadControlType_BLC_RESTART: + return "BLC_RESTART" + case BulkLoadControlType_BLC_CANCEL: + return "BLC_CANCEL" + case BulkLoadControlType_BLC_FORCE_CANCEL: + return "BLC_FORCE_CANCEL" + } + return "" +} + +func BulkLoadControlTypeFromString(s string) (BulkLoadControlType, error) { + switch s { + case "BLC_PAUSE": + return BulkLoadControlType_BLC_PAUSE, nil 
+ case "BLC_RESTART": + return BulkLoadControlType_BLC_RESTART, nil + case "BLC_CANCEL": + return BulkLoadControlType_BLC_CANCEL, nil + case "BLC_FORCE_CANCEL": + return BulkLoadControlType_BLC_FORCE_CANCEL, nil + } + return BulkLoadControlType(0), fmt.Errorf("not a valid BulkLoadControlType string") +} + +func BulkLoadControlTypePtr(v BulkLoadControlType) *BulkLoadControlType { return &v } + +func (p BulkLoadControlType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *BulkLoadControlType) UnmarshalText(text []byte) error { + q, err := BulkLoadControlTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *BulkLoadControlType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = BulkLoadControlType(v) + return nil +} + +func (p *BulkLoadControlType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Files +// - FileTotalSize +type BulkLoadMetadata struct { + Files []*FileMeta `thrift:"files,1" db:"files" json:"files"` + FileTotalSize int64 `thrift:"file_total_size,2" db:"file_total_size" json:"file_total_size"` +} + +func NewBulkLoadMetadata() *BulkLoadMetadata { + return &BulkLoadMetadata{} +} + +func (p *BulkLoadMetadata) GetFiles() []*FileMeta { + return p.Files +} + +func (p *BulkLoadMetadata) GetFileTotalSize() int64 { + return p.FileTotalSize +} +func (p *BulkLoadMetadata) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); 
err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BulkLoadMetadata) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*FileMeta, 0, size) + p.Files = tSlice + for i := 0; i < size; i++ { + _elem0 := &FileMeta{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Files = append(p.Files, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BulkLoadMetadata) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.FileTotalSize = v + } + return nil +} + +func (p *BulkLoadMetadata) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("bulk_load_metadata"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return 
thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BulkLoadMetadata) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("files", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:files: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Files)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Files { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:files: ", p), err) + } + return err +} + +func (p *BulkLoadMetadata) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("file_total_size", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:file_total_size: ", p), err) + } + if err := oprot.WriteI64(int64(p.FileTotalSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.file_total_size (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:file_total_size: ", p), err) + } + return err +} + +func (p *BulkLoadMetadata) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BulkLoadMetadata(%+v)", *p) +} + +// Attributes: +// - AppName +// - ClusterName +// - FileProviderType +// - RemoteRootPath +// - IngestBehind +type StartBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + ClusterName string `thrift:"cluster_name,2" db:"cluster_name" json:"cluster_name"` + FileProviderType string `thrift:"file_provider_type,3" 
db:"file_provider_type" json:"file_provider_type"` + RemoteRootPath string `thrift:"remote_root_path,4" db:"remote_root_path" json:"remote_root_path"` + IngestBehind bool `thrift:"ingest_behind,5" db:"ingest_behind" json:"ingest_behind"` +} + +func NewStartBulkLoadRequest() *StartBulkLoadRequest { + return &StartBulkLoadRequest{} +} + +func (p *StartBulkLoadRequest) GetAppName() string { + return p.AppName +} + +func (p *StartBulkLoadRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *StartBulkLoadRequest) GetFileProviderType() string { + return p.FileProviderType +} + +func (p *StartBulkLoadRequest) GetRemoteRootPath() string { + return p.RemoteRootPath +} + +func (p *StartBulkLoadRequest) GetIngestBehind() bool { + return p.IngestBehind +} +func (p *StartBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId 
== thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.FileProviderType = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.RemoteRootPath = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IngestBehind = v + } + return nil +} + +func (p *StartBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); 
err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:cluster_name: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("file_provider_type", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:file_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.FileProviderType)); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T.file_provider_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:file_provider_type: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_root_path", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:remote_root_path: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteRootPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_root_path (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:remote_root_path: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ingest_behind", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:ingest_behind: ", p), err) + } + if err := oprot.WriteBool(bool(p.IngestBehind)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ingest_behind (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:ingest_behind: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type StartBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewStartBulkLoadResponse() *StartBulkLoadResponse { + return &StartBulkLoadResponse{} +} + +var StartBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p 
*StartBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartBulkLoadResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *StartBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *StartBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("start_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *StartBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - DownloadProgress +// - DownloadStatus +// - IngestStatus +// - IsCleanedUp +// - IsPaused +type PartitionBulkLoadState struct { + DownloadProgress int32 
`thrift:"download_progress,1" db:"download_progress" json:"download_progress"` + DownloadStatus *base.ErrorCode `thrift:"download_status,2" db:"download_status" json:"download_status,omitempty"` + IngestStatus IngestionStatus `thrift:"ingest_status,3" db:"ingest_status" json:"ingest_status"` + IsCleanedUp bool `thrift:"is_cleaned_up,4" db:"is_cleaned_up" json:"is_cleaned_up"` + IsPaused bool `thrift:"is_paused,5" db:"is_paused" json:"is_paused"` +} + +func NewPartitionBulkLoadState() *PartitionBulkLoadState { + return &PartitionBulkLoadState{ + IngestStatus: 0, + } +} + +var PartitionBulkLoadState_DownloadProgress_DEFAULT int32 = 0 + +func (p *PartitionBulkLoadState) GetDownloadProgress() int32 { + return p.DownloadProgress +} + +var PartitionBulkLoadState_DownloadStatus_DEFAULT *base.ErrorCode + +func (p *PartitionBulkLoadState) GetDownloadStatus() *base.ErrorCode { + if !p.IsSetDownloadStatus() { + return PartitionBulkLoadState_DownloadStatus_DEFAULT + } + return p.DownloadStatus +} + +var PartitionBulkLoadState_IngestStatus_DEFAULT IngestionStatus = 0 + +func (p *PartitionBulkLoadState) GetIngestStatus() IngestionStatus { + return p.IngestStatus +} + +var PartitionBulkLoadState_IsCleanedUp_DEFAULT bool = false + +func (p *PartitionBulkLoadState) GetIsCleanedUp() bool { + return p.IsCleanedUp +} + +var PartitionBulkLoadState_IsPaused_DEFAULT bool = false + +func (p *PartitionBulkLoadState) GetIsPaused() bool { + return p.IsPaused +} +func (p *PartitionBulkLoadState) IsSetDownloadProgress() bool { + return p.DownloadProgress != PartitionBulkLoadState_DownloadProgress_DEFAULT +} + +func (p *PartitionBulkLoadState) IsSetDownloadStatus() bool { + return p.DownloadStatus != nil +} + +func (p *PartitionBulkLoadState) IsSetIngestStatus() bool { + return p.IngestStatus != PartitionBulkLoadState_IngestStatus_DEFAULT +} + +func (p *PartitionBulkLoadState) IsSetIsCleanedUp() bool { + return p.IsCleanedUp != PartitionBulkLoadState_IsCleanedUp_DEFAULT +} + +func (p 
*PartitionBulkLoadState) IsSetIsPaused() bool { + return p.IsPaused != PartitionBulkLoadState_IsPaused_DEFAULT +} + +func (p *PartitionBulkLoadState) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + 
p.DownloadProgress = v + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField2(iprot thrift.TProtocol) error { + p.DownloadStatus = &base.ErrorCode{} + if err := p.DownloadStatus.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DownloadStatus), err) + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := IngestionStatus(v) + p.IngestStatus = temp + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.IsCleanedUp = v + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IsPaused = v + } + return nil +} + +func (p *PartitionBulkLoadState) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("partition_bulk_load_state"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PartitionBulkLoadState) writeField1(oprot thrift.TProtocol) (err error) { + if 
p.IsSetDownloadProgress() { + if err := oprot.WriteFieldBegin("download_progress", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:download_progress: ", p), err) + } + if err := oprot.WriteI32(int32(p.DownloadProgress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.download_progress (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:download_progress: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDownloadStatus() { + if err := oprot.WriteFieldBegin("download_status", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:download_status: ", p), err) + } + if err := p.DownloadStatus.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DownloadStatus), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:download_status: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetIngestStatus() { + if err := oprot.WriteFieldBegin("ingest_status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:ingest_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.IngestStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ingest_status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:ingest_status: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetIsCleanedUp() { + if err := oprot.WriteFieldBegin("is_cleaned_up", thrift.BOOL, 4); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:is_cleaned_up: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsCleanedUp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_cleaned_up (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:is_cleaned_up: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetIsPaused() { + if err := oprot.WriteFieldBegin("is_paused", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_paused: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsPaused)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_paused (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_paused: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PartitionBulkLoadState(%+v)", *p) +} + +// Attributes: +// - Pid +// - AppName +// - Primary +// - RemoteProviderName +// - ClusterName +// - Ballot +// - MetaBulkLoadStatus +// - QueryBulkLoadMetadata +// - RemoteRootPath +// - HpPrimary +type BulkLoadRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + Primary *base.RPCAddress `thrift:"primary,3" db:"primary" json:"primary"` + RemoteProviderName string `thrift:"remote_provider_name,4" db:"remote_provider_name" json:"remote_provider_name"` + ClusterName string `thrift:"cluster_name,5" db:"cluster_name" json:"cluster_name"` + Ballot int64 `thrift:"ballot,6" db:"ballot" json:"ballot"` + MetaBulkLoadStatus BulkLoadStatus `thrift:"meta_bulk_load_status,7" db:"meta_bulk_load_status" 
json:"meta_bulk_load_status"` + QueryBulkLoadMetadata bool `thrift:"query_bulk_load_metadata,8" db:"query_bulk_load_metadata" json:"query_bulk_load_metadata"` + RemoteRootPath string `thrift:"remote_root_path,9" db:"remote_root_path" json:"remote_root_path"` + HpPrimary *base.HostPort `thrift:"hp_primary,10" db:"hp_primary" json:"hp_primary,omitempty"` +} + +func NewBulkLoadRequest() *BulkLoadRequest { + return &BulkLoadRequest{} +} + +var BulkLoadRequest_Pid_DEFAULT *base.Gpid + +func (p *BulkLoadRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BulkLoadRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *BulkLoadRequest) GetAppName() string { + return p.AppName +} + +var BulkLoadRequest_Primary_DEFAULT *base.RPCAddress + +func (p *BulkLoadRequest) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return BulkLoadRequest_Primary_DEFAULT + } + return p.Primary +} + +func (p *BulkLoadRequest) GetRemoteProviderName() string { + return p.RemoteProviderName +} + +func (p *BulkLoadRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *BulkLoadRequest) GetBallot() int64 { + return p.Ballot +} + +func (p *BulkLoadRequest) GetMetaBulkLoadStatus() BulkLoadStatus { + return p.MetaBulkLoadStatus +} + +func (p *BulkLoadRequest) GetQueryBulkLoadMetadata() bool { + return p.QueryBulkLoadMetadata +} + +func (p *BulkLoadRequest) GetRemoteRootPath() string { + return p.RemoteRootPath +} + +var BulkLoadRequest_HpPrimary_DEFAULT *base.HostPort + +func (p *BulkLoadRequest) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return BulkLoadRequest_HpPrimary_DEFAULT + } + return p.HpPrimary +} +func (p *BulkLoadRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BulkLoadRequest) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *BulkLoadRequest) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *BulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { 
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != 
nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField3(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *BulkLoadRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.RemoteProviderName = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField7(iprot 
thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + temp := BulkLoadStatus(v) + p.MetaBulkLoadStatus = temp + } + return nil +} + +func (p *BulkLoadRequest) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.QueryBulkLoadMetadata = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.RemoteRootPath = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField10(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *BulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); 
err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:primary: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_provider_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
4:remote_provider_name: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteProviderName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_provider_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:remote_provider_name: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:cluster_name: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:ballot: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_bulk_load_status", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:meta_bulk_load_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetaBulkLoadStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_bulk_load_status (7) field write error: ", p), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:meta_bulk_load_status: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("query_bulk_load_metadata", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:query_bulk_load_metadata: ", p), err) + } + if err := oprot.WriteBool(bool(p.QueryBulkLoadMetadata)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.query_bulk_load_metadata (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:query_bulk_load_metadata: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_root_path", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:remote_root_path: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteRootPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_root_path (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:remote_root_path: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:hp_primary: ", p), err) + } + } + return 
err +} + +func (p *BulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Pid +// - AppName +// - PrimaryBulkLoadStatus +// - GroupBulkLoadState +// - Metadata +// - TotalDownloadProgress +// - IsGroupIngestionFinished +// - IsGroupBulkLoadContextCleanedUp +// - IsGroupBulkLoadPaused +// - HpGroupBulkLoadState +type BulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Pid *base.Gpid `thrift:"pid,2" db:"pid" json:"pid"` + AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` + PrimaryBulkLoadStatus BulkLoadStatus `thrift:"primary_bulk_load_status,4" db:"primary_bulk_load_status" json:"primary_bulk_load_status"` + GroupBulkLoadState map[*base.RPCAddress]*PartitionBulkLoadState `thrift:"group_bulk_load_state,5" db:"group_bulk_load_state" json:"group_bulk_load_state"` + Metadata *BulkLoadMetadata `thrift:"metadata,6" db:"metadata" json:"metadata,omitempty"` + TotalDownloadProgress *int32 `thrift:"total_download_progress,7" db:"total_download_progress" json:"total_download_progress,omitempty"` + IsGroupIngestionFinished *bool `thrift:"is_group_ingestion_finished,8" db:"is_group_ingestion_finished" json:"is_group_ingestion_finished,omitempty"` + IsGroupBulkLoadContextCleanedUp *bool `thrift:"is_group_bulk_load_context_cleaned_up,9" db:"is_group_bulk_load_context_cleaned_up" json:"is_group_bulk_load_context_cleaned_up,omitempty"` + IsGroupBulkLoadPaused *bool `thrift:"is_group_bulk_load_paused,10" db:"is_group_bulk_load_paused" json:"is_group_bulk_load_paused,omitempty"` + HpGroupBulkLoadState map[*base.HostPort]*PartitionBulkLoadState `thrift:"hp_group_bulk_load_state,11" db:"hp_group_bulk_load_state" json:"hp_group_bulk_load_state,omitempty"` +} + +func NewBulkLoadResponse() *BulkLoadResponse { + return &BulkLoadResponse{} +} + +var BulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *BulkLoadResponse) GetErr() 
*base.ErrorCode { + if !p.IsSetErr() { + return BulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +var BulkLoadResponse_Pid_DEFAULT *base.Gpid + +func (p *BulkLoadResponse) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BulkLoadResponse_Pid_DEFAULT + } + return p.Pid +} + +func (p *BulkLoadResponse) GetAppName() string { + return p.AppName +} + +func (p *BulkLoadResponse) GetPrimaryBulkLoadStatus() BulkLoadStatus { + return p.PrimaryBulkLoadStatus +} + +func (p *BulkLoadResponse) GetGroupBulkLoadState() map[*base.RPCAddress]*PartitionBulkLoadState { + return p.GroupBulkLoadState +} + +var BulkLoadResponse_Metadata_DEFAULT *BulkLoadMetadata + +func (p *BulkLoadResponse) GetMetadata() *BulkLoadMetadata { + if !p.IsSetMetadata() { + return BulkLoadResponse_Metadata_DEFAULT + } + return p.Metadata +} + +var BulkLoadResponse_TotalDownloadProgress_DEFAULT int32 + +func (p *BulkLoadResponse) GetTotalDownloadProgress() int32 { + if !p.IsSetTotalDownloadProgress() { + return BulkLoadResponse_TotalDownloadProgress_DEFAULT + } + return *p.TotalDownloadProgress +} + +var BulkLoadResponse_IsGroupIngestionFinished_DEFAULT bool + +func (p *BulkLoadResponse) GetIsGroupIngestionFinished() bool { + if !p.IsSetIsGroupIngestionFinished() { + return BulkLoadResponse_IsGroupIngestionFinished_DEFAULT + } + return *p.IsGroupIngestionFinished +} + +var BulkLoadResponse_IsGroupBulkLoadContextCleanedUp_DEFAULT bool + +func (p *BulkLoadResponse) GetIsGroupBulkLoadContextCleanedUp() bool { + if !p.IsSetIsGroupBulkLoadContextCleanedUp() { + return BulkLoadResponse_IsGroupBulkLoadContextCleanedUp_DEFAULT + } + return *p.IsGroupBulkLoadContextCleanedUp +} + +var BulkLoadResponse_IsGroupBulkLoadPaused_DEFAULT bool + +func (p *BulkLoadResponse) GetIsGroupBulkLoadPaused() bool { + if !p.IsSetIsGroupBulkLoadPaused() { + return BulkLoadResponse_IsGroupBulkLoadPaused_DEFAULT + } + return *p.IsGroupBulkLoadPaused +} + +var BulkLoadResponse_HpGroupBulkLoadState_DEFAULT 
map[*base.HostPort]*PartitionBulkLoadState + +func (p *BulkLoadResponse) GetHpGroupBulkLoadState() map[*base.HostPort]*PartitionBulkLoadState { + return p.HpGroupBulkLoadState +} +func (p *BulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *BulkLoadResponse) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BulkLoadResponse) IsSetMetadata() bool { + return p.Metadata != nil +} + +func (p *BulkLoadResponse) IsSetTotalDownloadProgress() bool { + return p.TotalDownloadProgress != nil +} + +func (p *BulkLoadResponse) IsSetIsGroupIngestionFinished() bool { + return p.IsGroupIngestionFinished != nil +} + +func (p *BulkLoadResponse) IsSetIsGroupBulkLoadContextCleanedUp() bool { + return p.IsGroupBulkLoadContextCleanedUp != nil +} + +func (p *BulkLoadResponse) IsSetIsGroupBulkLoadPaused() bool { + return p.IsGroupBulkLoadPaused != nil +} + +func (p *BulkLoadResponse) IsSetHpGroupBulkLoadState() bool { + return p.HpGroupBulkLoadState != nil +} + +func (p *BulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if 
fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.MAP { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.MAP { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error 
reading struct: ", p.Err), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BulkLoadResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := BulkLoadStatus(v) + p.PrimaryBulkLoadStatus = temp + } + return nil +} + +func (p *BulkLoadResponse) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.RPCAddress]*PartitionBulkLoadState, size) + p.GroupBulkLoadState = tMap + for i := 0; i < size; i++ { + _key1 := &base.RPCAddress{} + if err := _key1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key1), err) + } + _val2 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val2), err) + } + p.GroupBulkLoadState[_key1] = _val2 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField6(iprot thrift.TProtocol) error { + p.Metadata = &BulkLoadMetadata{} + if err := p.Metadata.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Metadata), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.TotalDownloadProgress = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.IsGroupIngestionFinished = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.IsGroupBulkLoadContextCleanedUp = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.IsGroupBulkLoadPaused = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField11(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.HostPort]*PartitionBulkLoadState, size) + p.HpGroupBulkLoadState = tMap + for i := 0; i < size; i++ { + _key3 := &base.HostPort{} + if err := _key3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key3), err) + } + _val4 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val4), err) + } + p.HpGroupBulkLoadState[_key3] = _val4 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *BulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := 
p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:pid: ", p), err) + } + return 
err +} + +func (p *BulkLoadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary_bulk_load_status", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:primary_bulk_load_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.PrimaryBulkLoadStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.primary_bulk_load_status (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:primary_bulk_load_status: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("group_bulk_load_state", thrift.MAP, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:group_bulk_load_state: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(p.GroupBulkLoadState)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.GroupBulkLoadState { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err 
!= nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:group_bulk_load_state: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMetadata() { + if err := oprot.WriteFieldBegin("metadata", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:metadata: ", p), err) + } + if err := p.Metadata.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Metadata), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:metadata: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalDownloadProgress() { + if err := oprot.WriteFieldBegin("total_download_progress", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:total_download_progress: ", p), err) + } + if err := oprot.WriteI32(int32(*p.TotalDownloadProgress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_download_progress (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:total_download_progress: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetIsGroupIngestionFinished() { + if err := oprot.WriteFieldBegin("is_group_ingestion_finished", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:is_group_ingestion_finished: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsGroupIngestionFinished)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_group_ingestion_finished (8) 
field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:is_group_ingestion_finished: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsGroupBulkLoadContextCleanedUp() { + if err := oprot.WriteFieldBegin("is_group_bulk_load_context_cleaned_up", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:is_group_bulk_load_context_cleaned_up: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsGroupBulkLoadContextCleanedUp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_group_bulk_load_context_cleaned_up (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:is_group_bulk_load_context_cleaned_up: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetIsGroupBulkLoadPaused() { + if err := oprot.WriteFieldBegin("is_group_bulk_load_paused", thrift.BOOL, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:is_group_bulk_load_paused: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsGroupBulkLoadPaused)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_group_bulk_load_paused (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:is_group_bulk_load_paused: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetHpGroupBulkLoadState() { + if err := oprot.WriteFieldBegin("hp_group_bulk_load_state", thrift.MAP, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:hp_group_bulk_load_state: ", p), err) + } + if 
err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(p.HpGroupBulkLoadState)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.HpGroupBulkLoadState { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:hp_group_bulk_load_state: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Target +// - Config +// - ProviderName +// - ClusterName +// - MetaBulkLoadStatus +// - RemoteRootPath +// - HpTarget +type GroupBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Target *base.RPCAddress `thrift:"target,2" db:"target" json:"target"` + Config *ReplicaConfiguration `thrift:"config,3" db:"config" json:"config"` + ProviderName string `thrift:"provider_name,4" db:"provider_name" json:"provider_name"` + ClusterName string `thrift:"cluster_name,5" db:"cluster_name" json:"cluster_name"` + MetaBulkLoadStatus BulkLoadStatus `thrift:"meta_bulk_load_status,6" db:"meta_bulk_load_status" json:"meta_bulk_load_status"` + RemoteRootPath string `thrift:"remote_root_path,7" db:"remote_root_path" json:"remote_root_path"` + HpTarget *base.HostPort `thrift:"hp_target,8" db:"hp_target" json:"hp_target,omitempty"` +} + +func NewGroupBulkLoadRequest() *GroupBulkLoadRequest { + return &GroupBulkLoadRequest{} +} + +func (p *GroupBulkLoadRequest) GetAppName() string { + return p.AppName +} + +var 
GroupBulkLoadRequest_Target_DEFAULT *base.RPCAddress + +func (p *GroupBulkLoadRequest) GetTarget() *base.RPCAddress { + if !p.IsSetTarget() { + return GroupBulkLoadRequest_Target_DEFAULT + } + return p.Target +} + +var GroupBulkLoadRequest_Config_DEFAULT *ReplicaConfiguration + +func (p *GroupBulkLoadRequest) GetConfig() *ReplicaConfiguration { + if !p.IsSetConfig() { + return GroupBulkLoadRequest_Config_DEFAULT + } + return p.Config +} + +func (p *GroupBulkLoadRequest) GetProviderName() string { + return p.ProviderName +} + +func (p *GroupBulkLoadRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *GroupBulkLoadRequest) GetMetaBulkLoadStatus() BulkLoadStatus { + return p.MetaBulkLoadStatus +} + +func (p *GroupBulkLoadRequest) GetRemoteRootPath() string { + return p.RemoteRootPath +} + +var GroupBulkLoadRequest_HpTarget_DEFAULT *base.HostPort + +func (p *GroupBulkLoadRequest) GetHpTarget() *base.HostPort { + if !p.IsSetHpTarget() { + return GroupBulkLoadRequest_HpTarget_DEFAULT + } + return p.HpTarget +} +func (p *GroupBulkLoadRequest) IsSetTarget() bool { + return p.Target != nil +} + +func (p *GroupBulkLoadRequest) IsSetConfig() bool { + return p.Config != nil +} + +func (p *GroupBulkLoadRequest) IsSetHpTarget() bool { + return p.HpTarget != nil +} + +func (p *GroupBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); 
err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + p.Target = &base.RPCAddress{} + if err := p.Target.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error 
reading struct: ", p.Target), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField3(iprot thrift.TProtocol) error { + p.Config = &ReplicaConfiguration{ + Status: 0, + } + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ProviderName = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + temp := BulkLoadStatus(v) + p.MetaBulkLoadStatus = temp + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.RemoteRootPath = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField8(iprot thrift.TProtocol) error { + p.HpTarget = &base.HostPort{} + if err := p.HpTarget.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpTarget), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("group_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := 
p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *GroupBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:target: ", p), err) + } + if err := p.Target.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:target: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:config: ", p), err) + 
} + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:config: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("provider_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:provider_name: ", p), err) + } + if err := oprot.WriteString(string(p.ProviderName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.provider_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:provider_name: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:cluster_name: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_bulk_load_status", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:meta_bulk_load_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetaBulkLoadStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_bulk_load_status (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 6:meta_bulk_load_status: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_root_path", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:remote_root_path: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteRootPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_root_path (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:remote_root_path: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHpTarget() { + if err := oprot.WriteFieldBegin("hp_target", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hp_target: ", p), err) + } + if err := p.HpTarget.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpTarget), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hp_target: ", p), err) + } + } + return err +} + +func (p *GroupBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GroupBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Status +// - BulkLoadState +type GroupBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Status BulkLoadStatus `thrift:"status,2" db:"status" json:"status"` + BulkLoadState *PartitionBulkLoadState `thrift:"bulk_load_state,3" db:"bulk_load_state" json:"bulk_load_state"` +} + +func NewGroupBulkLoadResponse() *GroupBulkLoadResponse { + return &GroupBulkLoadResponse{} +} + +var GroupBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *GroupBulkLoadResponse) 
GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return GroupBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +func (p *GroupBulkLoadResponse) GetStatus() BulkLoadStatus { + return p.Status +} + +var GroupBulkLoadResponse_BulkLoadState_DEFAULT *PartitionBulkLoadState + +func (p *GroupBulkLoadResponse) GetBulkLoadState() *PartitionBulkLoadState { + if !p.IsSetBulkLoadState() { + return GroupBulkLoadResponse_BulkLoadState_DEFAULT + } + return p.BulkLoadState +} +func (p *GroupBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *GroupBulkLoadResponse) IsSetBulkLoadState() bool { + return p.BulkLoadState != nil +} + +func (p *GroupBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *GroupBulkLoadResponse) 
ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *GroupBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := BulkLoadStatus(v) + p.Status = temp + } + return nil +} + +func (p *GroupBulkLoadResponse) ReadField3(iprot thrift.TProtocol) error { + p.BulkLoadState = &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := p.BulkLoadState.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BulkLoadState), err) + } + return nil +} + +func (p *GroupBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("group_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *GroupBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), 
err) + } + return err +} + +func (p *GroupBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:status: ", p), err) + } + return err +} + +func (p *GroupBulkLoadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("bulk_load_state", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:bulk_load_state: ", p), err) + } + if err := p.BulkLoadState.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BulkLoadState), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:bulk_load_state: ", p), err) + } + return err +} + +func (p *GroupBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GroupBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Metadata +// - IngestBehind +// - Ballot +// - VerifyBeforeIngest +type IngestionRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Metadata *BulkLoadMetadata `thrift:"metadata,2" db:"metadata" json:"metadata"` + IngestBehind bool `thrift:"ingest_behind,3" db:"ingest_behind" json:"ingest_behind"` + Ballot int64 `thrift:"ballot,4" db:"ballot" json:"ballot"` + VerifyBeforeIngest bool `thrift:"verify_before_ingest,5" db:"verify_before_ingest" json:"verify_before_ingest"` +} + +func NewIngestionRequest() *IngestionRequest { + return &IngestionRequest{} +} + +func (p *IngestionRequest) GetAppName() 
string { + return p.AppName +} + +var IngestionRequest_Metadata_DEFAULT *BulkLoadMetadata + +func (p *IngestionRequest) GetMetadata() *BulkLoadMetadata { + if !p.IsSetMetadata() { + return IngestionRequest_Metadata_DEFAULT + } + return p.Metadata +} + +func (p *IngestionRequest) GetIngestBehind() bool { + return p.IngestBehind +} + +func (p *IngestionRequest) GetBallot() int64 { + return p.Ballot +} + +func (p *IngestionRequest) GetVerifyBeforeIngest() bool { + return p.VerifyBeforeIngest +} +func (p *IngestionRequest) IsSetMetadata() bool { + return p.Metadata != nil +} + +func (p *IngestionRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IngestionRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *IngestionRequest) ReadField2(iprot thrift.TProtocol) error { + p.Metadata = &BulkLoadMetadata{} + if err := p.Metadata.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Metadata), err) + } + return nil +} + +func (p *IngestionRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.IngestBehind = v + } + return nil +} + +func (p *IngestionRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *IngestionRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.VerifyBeforeIngest = v + } + return nil +} + +func (p *IngestionRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ingestion_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err 
!= nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IngestionRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("metadata", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:metadata: ", p), err) + } + if err := p.Metadata.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Metadata), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:metadata: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ingest_behind", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:ingest_behind: ", p), err) + } + if err := oprot.WriteBool(bool(p.IngestBehind)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ingest_behind (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:ingest_behind: ", p), err) + } + return err +} + +func 
(p *IngestionRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ballot: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("verify_before_ingest", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:verify_before_ingest: ", p), err) + } + if err := oprot.WriteBool(bool(p.VerifyBeforeIngest)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.verify_before_ingest (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:verify_before_ingest: ", p), err) + } + return err +} + +func (p *IngestionRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IngestionRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - RocksdbError +type IngestionResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + RocksdbError int32 `thrift:"rocksdb_error,2" db:"rocksdb_error" json:"rocksdb_error"` +} + +func NewIngestionResponse() *IngestionResponse { + return &IngestionResponse{} +} + +var IngestionResponse_Err_DEFAULT *base.ErrorCode + +func (p *IngestionResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return IngestionResponse_Err_DEFAULT + } + return p.Err +} + +func (p *IngestionResponse) GetRocksdbError() int32 { + return p.RocksdbError +} +func (p *IngestionResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *IngestionResponse) 
Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IngestionResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *IngestionResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.RocksdbError = v + } + return nil +} + +func (p *IngestionResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ingestion_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return 
thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IngestionResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *IngestionResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("rocksdb_error", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:rocksdb_error: ", p), err) + } + if err := oprot.WriteI32(int32(p.RocksdbError)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.rocksdb_error (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:rocksdb_error: ", p), err) + } + return err +} + +func (p *IngestionResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IngestionResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Type +type ControlBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Type BulkLoadControlType `thrift:"type,2" db:"type" json:"type"` +} + +func NewControlBulkLoadRequest() *ControlBulkLoadRequest { + return &ControlBulkLoadRequest{} +} + +func (p *ControlBulkLoadRequest) GetAppName() string { + return p.AppName +} + +func (p *ControlBulkLoadRequest) GetType() BulkLoadControlType { + return p.Type +} +func (p *ControlBulkLoadRequest) Read(iprot thrift.TProtocol) 
error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ControlBulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := BulkLoadControlType(v) + p.Type = temp + } + return nil +} + +func (p *ControlBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return 
thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ControlBulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:type: ", p), err) + } + return err +} + +func (p *ControlBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type ControlBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg *string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewControlBulkLoadResponse() *ControlBulkLoadResponse { + return &ControlBulkLoadResponse{} +} + +var ControlBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *ControlBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ControlBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + 
+var ControlBulkLoadResponse_HintMsg_DEFAULT string + +func (p *ControlBulkLoadResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return ControlBulkLoadResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *ControlBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ControlBulkLoadResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *ControlBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ControlBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func 
(p *ControlBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ControlBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + } + return err +} + +func (p *ControlBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type QueryBulkLoadRequest struct { + AppName 
string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewQueryBulkLoadRequest() *QueryBulkLoadRequest { + return &QueryBulkLoadRequest{} +} + +func (p *QueryBulkLoadRequest) GetAppName() string { + return p.AppName +} +func (p *QueryBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBulkLoadRequest) 
writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - AppName +// - AppStatus +// - PartitionsStatus +// - MaxReplicaCount +// - BulkLoadStates +// - HintMsg +// - IsBulkLoading +// - HpBulkLoadStates +type QueryBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + AppStatus BulkLoadStatus `thrift:"app_status,3" db:"app_status" json:"app_status"` + PartitionsStatus []BulkLoadStatus `thrift:"partitions_status,4" db:"partitions_status" json:"partitions_status"` + MaxReplicaCount int32 `thrift:"max_replica_count,5" db:"max_replica_count" json:"max_replica_count"` + BulkLoadStates []map[*base.RPCAddress]*PartitionBulkLoadState `thrift:"bulk_load_states,6" db:"bulk_load_states" json:"bulk_load_states"` + HintMsg *string `thrift:"hint_msg,7" db:"hint_msg" json:"hint_msg,omitempty"` + IsBulkLoading *bool `thrift:"is_bulk_loading,8" db:"is_bulk_loading" json:"is_bulk_loading,omitempty"` + HpBulkLoadStates []map[*base.HostPort]*PartitionBulkLoadState `thrift:"hp_bulk_load_states,9" db:"hp_bulk_load_states" json:"hp_bulk_load_states,omitempty"` +} + +func NewQueryBulkLoadResponse() *QueryBulkLoadResponse { + return &QueryBulkLoadResponse{} +} + +var QueryBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p 
*QueryBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryBulkLoadResponse) GetAppName() string { + return p.AppName +} + +func (p *QueryBulkLoadResponse) GetAppStatus() BulkLoadStatus { + return p.AppStatus +} + +func (p *QueryBulkLoadResponse) GetPartitionsStatus() []BulkLoadStatus { + return p.PartitionsStatus +} + +func (p *QueryBulkLoadResponse) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +func (p *QueryBulkLoadResponse) GetBulkLoadStates() []map[*base.RPCAddress]*PartitionBulkLoadState { + return p.BulkLoadStates +} + +var QueryBulkLoadResponse_HintMsg_DEFAULT string + +func (p *QueryBulkLoadResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return QueryBulkLoadResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} + +var QueryBulkLoadResponse_IsBulkLoading_DEFAULT bool + +func (p *QueryBulkLoadResponse) GetIsBulkLoading() bool { + if !p.IsSetIsBulkLoading() { + return QueryBulkLoadResponse_IsBulkLoading_DEFAULT + } + return *p.IsBulkLoading +} + +var QueryBulkLoadResponse_HpBulkLoadStates_DEFAULT []map[*base.HostPort]*PartitionBulkLoadState + +func (p *QueryBulkLoadResponse) GetHpBulkLoadStates() []map[*base.HostPort]*PartitionBulkLoadState { + return p.HpBulkLoadStates +} +func (p *QueryBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryBulkLoadResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *QueryBulkLoadResponse) IsSetIsBulkLoading() bool { + return p.IsBulkLoading != nil +} + +func (p *QueryBulkLoadResponse) IsSetHpBulkLoadStates() bool { + return p.HpBulkLoadStates != nil +} + +func (p *QueryBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.LIST { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + 
} + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := BulkLoadStatus(v) + p.AppStatus = temp + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]BulkLoadStatus, 0, size) + p.PartitionsStatus = tSlice + for i := 0; i < size; i++ { + var _elem5 BulkLoadStatus + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + temp := BulkLoadStatus(v) + _elem5 = temp + } + p.PartitionsStatus = append(p.PartitionsStatus, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return 
thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]map[*base.RPCAddress]*PartitionBulkLoadState, 0, size) + p.BulkLoadStates = tSlice + for i := 0; i < size; i++ { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.RPCAddress]*PartitionBulkLoadState, size) + _elem6 := tMap + for i := 0; i < size; i++ { + _key7 := &base.RPCAddress{} + if err := _key7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key7), err) + } + _val8 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val8.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val8), err) + } + _elem6[_key7] = _val8 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + p.BulkLoadStates = append(p.BulkLoadStates, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.IsBulkLoading = &v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]map[*base.HostPort]*PartitionBulkLoadState, 0, size) + p.HpBulkLoadStates = tSlice + for i := 0; i < size; i++ { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return 
thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.HostPort]*PartitionBulkLoadState, size) + _elem9 := tMap + for i := 0; i < size; i++ { + _key10 := &base.HostPort{} + if err := _key10.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key10), err) + } + _val11 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val11.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val11), err) + } + _elem9[_key10] = _val11 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + p.HpBulkLoadStates = append(p.HpBulkLoadStates, _elem9) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) writeField1(oprot 
thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_status: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions_status", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partitions_status: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.PartitionsStatus)); err != nil { + 
return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PartitionsStatus { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partitions_status: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:max_replica_count: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("bulk_load_states", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:bulk_load_states: ", p), err) + } + if err := oprot.WriteListBegin(thrift.MAP, len(p.BulkLoadStates)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BulkLoadStates { + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range v { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error 
writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:bulk_load_states: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hint_msg: ", p), err) + } + } + return err +} + +func (p *QueryBulkLoadResponse) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetIsBulkLoading() { + if err := oprot.WriteFieldBegin("is_bulk_loading", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:is_bulk_loading: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsBulkLoading)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_bulk_loading (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:is_bulk_loading: ", p), err) + } + } + return err +} + +func (p *QueryBulkLoadResponse) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetHpBulkLoadStates() { + if err := oprot.WriteFieldBegin("hp_bulk_load_states", thrift.LIST, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:hp_bulk_load_states: ", p), err) + } + if err := 
oprot.WriteListBegin(thrift.MAP, len(p.HpBulkLoadStates)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpBulkLoadStates { + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range v { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:hp_bulk_load_states: ", p), err) + } + } + return err +} + +func (p *QueryBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type ClearBulkLoadStateRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewClearBulkLoadStateRequest() *ClearBulkLoadStateRequest { + return &ClearBulkLoadStateRequest{} +} + +func (p *ClearBulkLoadStateRequest) GetAppName() string { + return p.AppName +} +func (p *ClearBulkLoadStateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := 
p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ClearBulkLoadStateRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ClearBulkLoadStateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_state_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClearBulkLoadStateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ClearBulkLoadStateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClearBulkLoadStateRequest(%+v)", *p) +} + +// Attributes: +// - 
Err +// - HintMsg +type ClearBulkLoadStateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewClearBulkLoadStateResponse() *ClearBulkLoadStateResponse { + return &ClearBulkLoadStateResponse{} +} + +var ClearBulkLoadStateResponse_Err_DEFAULT *base.ErrorCode + +func (p *ClearBulkLoadStateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ClearBulkLoadStateResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ClearBulkLoadStateResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *ClearBulkLoadStateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ClearBulkLoadStateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ClearBulkLoadStateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ClearBulkLoadStateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *ClearBulkLoadStateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_state_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClearBulkLoadStateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ClearBulkLoadStateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *ClearBulkLoadStateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClearBulkLoadStateResponse(%+v)", *p) +} diff --git a/go-client/idl/admin/duplication-consts.go b/go-client/idl/admin/duplication-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/duplication-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/duplication.go b/go-client/idl/admin/duplication.go new file mode 100644 index 0000000000..fa55de71f6 --- /dev/null +++ b/go-client/idl/admin/duplication.go @@ -0,0 +1,2606 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
// DuplicationStatus mirrors the thrift enum duplication_status: the
// lifecycle of a table-level duplication (init → prepare → app → log,
// plus pause/removed).
type DuplicationStatus int64

const (
	DuplicationStatus_DS_INIT    DuplicationStatus = 0
	DuplicationStatus_DS_PREPARE DuplicationStatus = 1
	DuplicationStatus_DS_APP     DuplicationStatus = 2
	DuplicationStatus_DS_LOG     DuplicationStatus = 3
	DuplicationStatus_DS_PAUSE   DuplicationStatus = 4
	DuplicationStatus_DS_REMOVED DuplicationStatus = 5
)

// duplicationStatusNames maps each enum value to its wire name.
var duplicationStatusNames = map[DuplicationStatus]string{
	DuplicationStatus_DS_INIT:    "DS_INIT",
	DuplicationStatus_DS_PREPARE: "DS_PREPARE",
	DuplicationStatus_DS_APP:     "DS_APP",
	DuplicationStatus_DS_LOG:     "DS_LOG",
	DuplicationStatus_DS_PAUSE:   "DS_PAUSE",
	DuplicationStatus_DS_REMOVED: "DS_REMOVED",
}

// String returns the enum's name, or "" for an unknown value.
func (p DuplicationStatus) String() string {
	if name, ok := duplicationStatusNames[p]; ok {
		return name
	}
	return ""
}

// DuplicationStatusFromString parses an enum name; unknown names yield an
// error and the zero value.
func DuplicationStatusFromString(s string) (DuplicationStatus, error) {
	for value, name := range duplicationStatusNames {
		if name == s {
			return value, nil
		}
	}
	return DuplicationStatus(0), fmt.Errorf("not a valid DuplicationStatus string")
}

// DuplicationStatusPtr returns a pointer to v (helper for optional fields).
func DuplicationStatusPtr(v DuplicationStatus) *DuplicationStatus { return &v }

// MarshalText implements encoding.TextMarshaler.
func (p DuplicationStatus) MarshalText() ([]byte, error) {
	return []byte(p.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (p *DuplicationStatus) UnmarshalText(text []byte) error {
	parsed, err := DuplicationStatusFromString(string(text))
	if err != nil {
		return err
	}
	*p = parsed
	return nil
}

// Scan implements database/sql.Scanner; only int64 sources are accepted.
func (p *DuplicationStatus) Scan(value interface{}) error {
	v, ok := value.(int64)
	if !ok {
		return errors.New("Scan value is not int64")
	}
	*p = DuplicationStatus(v)
	return nil
}

// Value implements database/sql/driver.Valuer; nil receiver yields NULL.
func (p *DuplicationStatus) Value() (driver.Value, error) {
	if p == nil {
		return nil, nil
	}
	return int64(*p), nil
}

// DuplicationFailMode mirrors the thrift enum duplication_fail_mode: how a
// duplication reacts to unrecoverable errors.
type DuplicationFailMode int64

const (
	DuplicationFailMode_FAIL_SLOW DuplicationFailMode = 0
	DuplicationFailMode_FAIL_SKIP DuplicationFailMode = 1
	DuplicationFailMode_FAIL_FAST DuplicationFailMode = 2
)

// duplicationFailModeNames maps each enum value to its wire name.
var duplicationFailModeNames = map[DuplicationFailMode]string{
	DuplicationFailMode_FAIL_SLOW: "FAIL_SLOW",
	DuplicationFailMode_FAIL_SKIP: "FAIL_SKIP",
	DuplicationFailMode_FAIL_FAST: "FAIL_FAST",
}

// String returns the enum's name, or "" for an unknown value.
func (p DuplicationFailMode) String() string {
	if name, ok := duplicationFailModeNames[p]; ok {
		return name
	}
	return ""
}

// DuplicationFailModeFromString parses an enum name; unknown names yield an
// error and the zero value.
func DuplicationFailModeFromString(s string) (DuplicationFailMode, error) {
	for value, name := range duplicationFailModeNames {
		if name == s {
			return value, nil
		}
	}
	return DuplicationFailMode(0), fmt.Errorf("not a valid DuplicationFailMode string")
}

// DuplicationFailModePtr returns a pointer to v (helper for optional fields).
func DuplicationFailModePtr(v DuplicationFailMode) *DuplicationFailMode { return &v }

// MarshalText implements encoding.TextMarshaler.
func (p DuplicationFailMode) MarshalText() ([]byte, error) {
	return []byte(p.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (p *DuplicationFailMode) UnmarshalText(text []byte) error {
	parsed, err := DuplicationFailModeFromString(string(text))
	if err != nil {
		return err
	}
	*p = parsed
	return nil
}

// Scan implements database/sql.Scanner; only int64 sources are accepted.
func (p *DuplicationFailMode) Scan(value interface{}) error {
	v, ok := value.(int64)
	if !ok {
		return errors.New("Scan value is not int64")
	}
	*p = DuplicationFailMode(v)
	return nil
}

// Value implements database/sql/driver.Valuer; nil receiver yields NULL.
func (p *DuplicationFailMode) Value() (driver.Value, error) {
	if p == nil {
		return nil, nil
	}
	return int64(*p), nil
}

// Attributes:
//  - AppName
//  - RemoteClusterName
//  - IsDuplicatingCheckpoint
//  - RemoteAppName
//  - RemoteReplicaCount
//
// DuplicationAddRequest asks the meta server to start duplicating a table
// to a remote cluster; fields 4 and 5 are optional overrides for the
// remote table name and replica count.
type DuplicationAddRequest struct {
	AppName                 string  `thrift:"app_name,1" db:"app_name" json:"app_name"`
	RemoteClusterName       string  `thrift:"remote_cluster_name,2" db:"remote_cluster_name" json:"remote_cluster_name"`
	IsDuplicatingCheckpoint bool    `thrift:"is_duplicating_checkpoint,3" db:"is_duplicating_checkpoint" json:"is_duplicating_checkpoint"`
	RemoteAppName           *string `thrift:"remote_app_name,4" db:"remote_app_name" json:"remote_app_name,omitempty"`
	RemoteReplicaCount      *int32  `thrift:"remote_replica_count,5" db:"remote_replica_count" json:"remote_replica_count,omitempty"`
}
`thrift:"remote_cluster_name,2" db:"remote_cluster_name" json:"remote_cluster_name"` + IsDuplicatingCheckpoint bool `thrift:"is_duplicating_checkpoint,3" db:"is_duplicating_checkpoint" json:"is_duplicating_checkpoint"` + RemoteAppName *string `thrift:"remote_app_name,4" db:"remote_app_name" json:"remote_app_name,omitempty"` + RemoteReplicaCount *int32 `thrift:"remote_replica_count,5" db:"remote_replica_count" json:"remote_replica_count,omitempty"` +} + +func NewDuplicationAddRequest() *DuplicationAddRequest { + return &DuplicationAddRequest{ + IsDuplicatingCheckpoint: true, + } +} + +func (p *DuplicationAddRequest) GetAppName() string { + return p.AppName +} + +func (p *DuplicationAddRequest) GetRemoteClusterName() string { + return p.RemoteClusterName +} + +var DuplicationAddRequest_IsDuplicatingCheckpoint_DEFAULT bool = true + +func (p *DuplicationAddRequest) GetIsDuplicatingCheckpoint() bool { + return p.IsDuplicatingCheckpoint +} + +var DuplicationAddRequest_RemoteAppName_DEFAULT string + +func (p *DuplicationAddRequest) GetRemoteAppName() string { + if !p.IsSetRemoteAppName() { + return DuplicationAddRequest_RemoteAppName_DEFAULT + } + return *p.RemoteAppName +} + +var DuplicationAddRequest_RemoteReplicaCount_DEFAULT int32 + +func (p *DuplicationAddRequest) GetRemoteReplicaCount() int32 { + if !p.IsSetRemoteReplicaCount() { + return DuplicationAddRequest_RemoteReplicaCount_DEFAULT + } + return *p.RemoteReplicaCount +} +func (p *DuplicationAddRequest) IsSetIsDuplicatingCheckpoint() bool { + return p.IsDuplicatingCheckpoint != DuplicationAddRequest_IsDuplicatingCheckpoint_DEFAULT +} + +func (p *DuplicationAddRequest) IsSetRemoteAppName() bool { + return p.RemoteAppName != nil +} + +func (p *DuplicationAddRequest) IsSetRemoteReplicaCount() bool { + return p.RemoteReplicaCount != nil +} + +func (p *DuplicationAddRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read 
error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationAddRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.RemoteClusterName = v + } + return nil +} + +func 
(p *DuplicationAddRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.IsDuplicatingCheckpoint = v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.RemoteAppName = &v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.RemoteReplicaCount = &v + } + return nil +} + +func (p *DuplicationAddRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_add_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationAddRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *DuplicationAddRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_cluster_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:remote_cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_cluster_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:remote_cluster_name: ", p), err) + } + return err +} + +func (p *DuplicationAddRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetIsDuplicatingCheckpoint() { + if err := oprot.WriteFieldBegin("is_duplicating_checkpoint", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_duplicating_checkpoint: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsDuplicatingCheckpoint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_duplicating_checkpoint (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:is_duplicating_checkpoint: ", p), err) + } + } + return err +} + +func (p *DuplicationAddRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteAppName() { + if err := oprot.WriteFieldBegin("remote_app_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:remote_app_name: ", p), err) + } + if err := oprot.WriteString(string(*p.RemoteAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_app_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 4:remote_app_name: ", p), err) + } + } + return err +} + +func (p *DuplicationAddRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReplicaCount() { + if err := oprot.WriteFieldBegin("remote_replica_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:remote_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.RemoteReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_replica_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:remote_replica_count: ", p), err) + } + } + return err +} + +func (p *DuplicationAddRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationAddRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +// - Dupid +// - Hint +// - RemoteAppName +// - RemoteReplicaCount +type DuplicationAddResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` + Dupid int32 `thrift:"dupid,3" db:"dupid" json:"dupid"` + Hint *string `thrift:"hint,4" db:"hint" json:"hint,omitempty"` + RemoteAppName *string `thrift:"remote_app_name,5" db:"remote_app_name" json:"remote_app_name,omitempty"` + RemoteReplicaCount *int32 `thrift:"remote_replica_count,6" db:"remote_replica_count" json:"remote_replica_count,omitempty"` +} + +func NewDuplicationAddResponse() *DuplicationAddResponse { + return &DuplicationAddResponse{} +} + +var DuplicationAddResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationAddResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationAddResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationAddResponse) GetAppid() int32 { + return p.Appid +} + +func (p *DuplicationAddResponse) GetDupid() int32 { + return p.Dupid 
+} + +var DuplicationAddResponse_Hint_DEFAULT string + +func (p *DuplicationAddResponse) GetHint() string { + if !p.IsSetHint() { + return DuplicationAddResponse_Hint_DEFAULT + } + return *p.Hint +} + +var DuplicationAddResponse_RemoteAppName_DEFAULT string + +func (p *DuplicationAddResponse) GetRemoteAppName() string { + if !p.IsSetRemoteAppName() { + return DuplicationAddResponse_RemoteAppName_DEFAULT + } + return *p.RemoteAppName +} + +var DuplicationAddResponse_RemoteReplicaCount_DEFAULT int32 + +func (p *DuplicationAddResponse) GetRemoteReplicaCount() int32 { + if !p.IsSetRemoteReplicaCount() { + return DuplicationAddResponse_RemoteReplicaCount_DEFAULT + } + return *p.RemoteReplicaCount +} +func (p *DuplicationAddResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationAddResponse) IsSetHint() bool { + return p.Hint != nil +} + +func (p *DuplicationAddResponse) IsSetRemoteAppName() bool { + return p.RemoteAppName != nil +} + +func (p *DuplicationAddResponse) IsSetRemoteReplicaCount() bool { + return p.RemoteReplicaCount != nil +} + +func (p *DuplicationAddResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } 
else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationAddResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationAddResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Hint = &v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.RemoteAppName = &v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.RemoteReplicaCount = &v + } + return nil +} + +func (p *DuplicationAddResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_add_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationAddResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationAddResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) + } + return err +} + +func (p *DuplicationAddResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:dupid: ", p), err) + } + return err +} + +func (p *DuplicationAddResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHint() { + if err := oprot.WriteFieldBegin("hint", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint: ", p), err) + } + if err := oprot.WriteString(string(*p.Hint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint: ", p), err) + } + } + return err +} + +func (p *DuplicationAddResponse) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteAppName() { + if err := oprot.WriteFieldBegin("remote_app_name", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:remote_app_name: ", p), err) + } + if err := oprot.WriteString(string(*p.RemoteAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_app_name (5) field write error: ", p), err) + 
} + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:remote_app_name: ", p), err) + } + } + return err +} + +func (p *DuplicationAddResponse) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReplicaCount() { + if err := oprot.WriteFieldBegin("remote_replica_count", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:remote_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.RemoteReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_replica_count (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:remote_replica_count: ", p), err) + } + } + return err +} + +func (p *DuplicationAddResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationAddResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Dupid +// - Status +// - FailMode +type DuplicationModifyRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Dupid int32 `thrift:"dupid,2" db:"dupid" json:"dupid"` + Status *DuplicationStatus `thrift:"status,3" db:"status" json:"status,omitempty"` + FailMode *DuplicationFailMode `thrift:"fail_mode,4" db:"fail_mode" json:"fail_mode,omitempty"` +} + +func NewDuplicationModifyRequest() *DuplicationModifyRequest { + return &DuplicationModifyRequest{} +} + +func (p *DuplicationModifyRequest) GetAppName() string { + return p.AppName +} + +func (p *DuplicationModifyRequest) GetDupid() int32 { + return p.Dupid +} + +var DuplicationModifyRequest_Status_DEFAULT DuplicationStatus + +func (p *DuplicationModifyRequest) GetStatus() DuplicationStatus { + if !p.IsSetStatus() { + return DuplicationModifyRequest_Status_DEFAULT + } + return *p.Status +} + +var DuplicationModifyRequest_FailMode_DEFAULT DuplicationFailMode + +func (p 
*DuplicationModifyRequest) GetFailMode() DuplicationFailMode { + if !p.IsSetFailMode() { + return DuplicationModifyRequest_FailMode_DEFAULT + } + return *p.FailMode +} +func (p *DuplicationModifyRequest) IsSetStatus() bool { + return p.Status != nil +} + +func (p *DuplicationModifyRequest) IsSetFailMode() bool { + return p.FailMode != nil +} + +func (p *DuplicationModifyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return 
thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := DuplicationStatus(v) + p.Status = &temp + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := DuplicationFailMode(v) + p.FailMode = &temp + } + return nil +} + +func (p *DuplicationModifyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_modify_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationModifyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *DuplicationModifyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dupid: ", p), err) + } + return err +} + +func (p *DuplicationModifyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err := oprot.WriteFieldBegin("status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) + } + if err := oprot.WriteI32(int32(*p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) + } + } + return err +} + +func (p *DuplicationModifyRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFailMode() { + if err := oprot.WriteFieldBegin("fail_mode", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:fail_mode: ", p), err) + } + if err := oprot.WriteI32(int32(*p.FailMode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fail_mode (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:fail_mode: ", p), err) + } + } + return err +} + 
+func (p *DuplicationModifyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationModifyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +type DuplicationModifyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` +} + +func NewDuplicationModifyResponse() *DuplicationModifyResponse { + return &DuplicationModifyResponse{} +} + +var DuplicationModifyResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationModifyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationModifyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationModifyResponse) GetAppid() int32 { + return p.Appid +} +func (p *DuplicationModifyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationModifyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationModifyResponse) 
ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationModifyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *DuplicationModifyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_modify_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationModifyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationModifyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (2) 
field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) + } + return err +} + +func (p *DuplicationModifyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationModifyResponse(%+v)", *p) +} + +// Attributes: +// - Dupid +// - Status +// - Remote +// - CreateTs +// - Progress +// - FailMode +// - RemoteAppName +// - RemoteReplicaCount +type DuplicationEntry struct { + Dupid int32 `thrift:"dupid,1" db:"dupid" json:"dupid"` + Status DuplicationStatus `thrift:"status,2" db:"status" json:"status"` + Remote string `thrift:"remote,3" db:"remote" json:"remote"` + CreateTs int64 `thrift:"create_ts,4" db:"create_ts" json:"create_ts"` + Progress map[int32]int64 `thrift:"progress,5" db:"progress" json:"progress,omitempty"` + // unused field # 6 + FailMode *DuplicationFailMode `thrift:"fail_mode,7" db:"fail_mode" json:"fail_mode,omitempty"` + RemoteAppName *string `thrift:"remote_app_name,8" db:"remote_app_name" json:"remote_app_name,omitempty"` + RemoteReplicaCount *int32 `thrift:"remote_replica_count,9" db:"remote_replica_count" json:"remote_replica_count,omitempty"` +} + +func NewDuplicationEntry() *DuplicationEntry { + return &DuplicationEntry{} +} + +func (p *DuplicationEntry) GetDupid() int32 { + return p.Dupid +} + +func (p *DuplicationEntry) GetStatus() DuplicationStatus { + return p.Status +} + +func (p *DuplicationEntry) GetRemote() string { + return p.Remote +} + +func (p *DuplicationEntry) GetCreateTs() int64 { + return p.CreateTs +} + +var DuplicationEntry_Progress_DEFAULT map[int32]int64 + +func (p *DuplicationEntry) GetProgress() map[int32]int64 { + return p.Progress +} + +var DuplicationEntry_FailMode_DEFAULT DuplicationFailMode + +func (p *DuplicationEntry) GetFailMode() DuplicationFailMode { + if !p.IsSetFailMode() { + return DuplicationEntry_FailMode_DEFAULT + } + return *p.FailMode +} + +var 
DuplicationEntry_RemoteAppName_DEFAULT string + +func (p *DuplicationEntry) GetRemoteAppName() string { + if !p.IsSetRemoteAppName() { + return DuplicationEntry_RemoteAppName_DEFAULT + } + return *p.RemoteAppName +} + +var DuplicationEntry_RemoteReplicaCount_DEFAULT int32 + +func (p *DuplicationEntry) GetRemoteReplicaCount() int32 { + if !p.IsSetRemoteReplicaCount() { + return DuplicationEntry_RemoteReplicaCount_DEFAULT + } + return *p.RemoteReplicaCount +} +func (p *DuplicationEntry) IsSetProgress() bool { + return p.Progress != nil +} + +func (p *DuplicationEntry) IsSetFailMode() bool { + return p.FailMode != nil +} + +func (p *DuplicationEntry) IsSetRemoteAppName() bool { + return p.RemoteAppName != nil +} + +func (p *DuplicationEntry) IsSetRemoteReplicaCount() bool { + return p.RemoteReplicaCount != nil +} + +func (p *DuplicationEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return 
err + } + } + case 5: + if fieldTypeId == thrift.MAP { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := DuplicationStatus(v) + p.Status = temp + } + return nil +} + +func (p *DuplicationEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Remote = v + } + return nil +} + +func (p *DuplicationEntry) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.CreateTs = v + } + return nil 
+} + +func (p *DuplicationEntry) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]int64, size) + p.Progress = tMap + for i := 0; i < size; i++ { + var _key0 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 int64 + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val1 = v + } + p.Progress[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DuplicationEntry) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + temp := DuplicationFailMode(v) + p.FailMode = &temp + } + return nil +} + +func (p *DuplicationEntry) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.RemoteAppName = &v + } + return nil +} + +func (p *DuplicationEntry) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.RemoteReplicaCount = &v + } + return nil +} + +func (p *DuplicationEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := 
p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:dupid: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:status: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:remote: ", p), err) + } + if err := oprot.WriteString(string(p.Remote)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote (3) field write error: ", p), err) + 
} + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:remote: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("create_ts", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:create_ts: ", p), err) + } + if err := oprot.WriteI64(int64(p.CreateTs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.create_ts (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:create_ts: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetProgress() { + if err := oprot.WriteFieldBegin("progress", thrift.MAP, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:progress: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I64, len(p.Progress)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Progress { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteI64(int64(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:progress: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetFailMode() { + if err := oprot.WriteFieldBegin("fail_mode", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:fail_mode: ", p), err) + } + if err := oprot.WriteI32(int32(*p.FailMode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fail_mode (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:fail_mode: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteAppName() { + if err := oprot.WriteFieldBegin("remote_app_name", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:remote_app_name: ", p), err) + } + if err := oprot.WriteString(string(*p.RemoteAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_app_name (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:remote_app_name: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReplicaCount() { + if err := oprot.WriteFieldBegin("remote_replica_count", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:remote_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.RemoteReplicaCount)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.remote_replica_count (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:remote_replica_count: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationEntry(%+v)", *p) +} + +// Attributes: +// - AppName +type DuplicationQueryRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewDuplicationQueryRequest() *DuplicationQueryRequest { + return &DuplicationQueryRequest{} +} + +func (p *DuplicationQueryRequest) GetAppName() string { + return p.AppName +} +func (p *DuplicationQueryRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationQueryRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *DuplicationQueryRequest) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("duplication_query_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationQueryRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *DuplicationQueryRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationQueryRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +// - EntryList +type DuplicationQueryResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + // unused field # 2 + Appid int32 `thrift:"appid,3" db:"appid" json:"appid"` + EntryList []*DuplicationEntry `thrift:"entry_list,4" db:"entry_list" json:"entry_list"` +} + +func NewDuplicationQueryResponse() *DuplicationQueryResponse { + return &DuplicationQueryResponse{} +} + +var DuplicationQueryResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationQueryResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationQueryResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationQueryResponse) GetAppid() int32 { + return p.Appid +} + +func (p *DuplicationQueryResponse) GetEntryList() 
[]*DuplicationEntry { + return p.EntryList +} +func (p *DuplicationQueryResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationQueryResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationQueryResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationQueryResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *DuplicationQueryResponse) ReadField4(iprot thrift.TProtocol) error 
{ + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DuplicationEntry, 0, size) + p.EntryList = tSlice + for i := 0; i < size; i++ { + _elem2 := &DuplicationEntry{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.EntryList = append(p.EntryList, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *DuplicationQueryResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_query_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationQueryResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationQueryResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 3); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 3:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:appid: ", p), err) + } + return err +} + +func (p *DuplicationQueryResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("entry_list", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:entry_list: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.EntryList)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.EntryList { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:entry_list: ", p), err) + } + return err +} + +func (p *DuplicationQueryResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationQueryResponse(%+v)", *p) +} + +// Attributes: +// - Dupid +// - ConfirmedDecree +// - CheckpointPrepared +type DuplicationConfirmEntry struct { + Dupid int32 `thrift:"dupid,1" db:"dupid" json:"dupid"` + ConfirmedDecree int64 `thrift:"confirmed_decree,2" db:"confirmed_decree" json:"confirmed_decree"` + CheckpointPrepared bool `thrift:"checkpoint_prepared,3" db:"checkpoint_prepared" json:"checkpoint_prepared"` +} + +func NewDuplicationConfirmEntry() *DuplicationConfirmEntry { + return &DuplicationConfirmEntry{} +} + +func (p *DuplicationConfirmEntry) GetDupid() int32 { + return p.Dupid +} + +func (p *DuplicationConfirmEntry) 
GetConfirmedDecree() int64 { + return p.ConfirmedDecree +} + +var DuplicationConfirmEntry_CheckpointPrepared_DEFAULT bool = false + +func (p *DuplicationConfirmEntry) GetCheckpointPrepared() bool { + return p.CheckpointPrepared +} +func (p *DuplicationConfirmEntry) IsSetCheckpointPrepared() bool { + return p.CheckpointPrepared != DuplicationConfirmEntry_CheckpointPrepared_DEFAULT +} + +func (p *DuplicationConfirmEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationConfirmEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationConfirmEntry) ReadField2(iprot thrift.TProtocol) error 
{ + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ConfirmedDecree = v + } + return nil +} + +func (p *DuplicationConfirmEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.CheckpointPrepared = v + } + return nil +} + +func (p *DuplicationConfirmEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_confirm_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationConfirmEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:dupid: ", p), err) + } + return err +} + +func (p *DuplicationConfirmEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("confirmed_decree", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:confirmed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.ConfirmedDecree)); err != 
nil { + return thrift.PrependError(fmt.Sprintf("%T.confirmed_decree (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:confirmed_decree: ", p), err) + } + return err +} + +func (p *DuplicationConfirmEntry) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCheckpointPrepared() { + if err := oprot.WriteFieldBegin("checkpoint_prepared", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:checkpoint_prepared: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckpointPrepared)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.checkpoint_prepared (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:checkpoint_prepared: ", p), err) + } + } + return err +} + +func (p *DuplicationConfirmEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationConfirmEntry(%+v)", *p) +} + +// Attributes: +// - Node +// - ConfirmList +// - HpNode +type DuplicationSyncRequest struct { + Node *base.RPCAddress `thrift:"node,1" db:"node" json:"node"` + ConfirmList map[*base.Gpid][]*DuplicationConfirmEntry `thrift:"confirm_list,2" db:"confirm_list" json:"confirm_list"` + HpNode *base.HostPort `thrift:"hp_node,3" db:"hp_node" json:"hp_node"` +} + +func NewDuplicationSyncRequest() *DuplicationSyncRequest { + return &DuplicationSyncRequest{} +} + +var DuplicationSyncRequest_Node_DEFAULT *base.RPCAddress + +func (p *DuplicationSyncRequest) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return DuplicationSyncRequest_Node_DEFAULT + } + return p.Node +} + +func (p *DuplicationSyncRequest) GetConfirmList() map[*base.Gpid][]*DuplicationConfirmEntry { + return p.ConfirmList +} + +var DuplicationSyncRequest_HpNode_DEFAULT *base.HostPort + +func (p *DuplicationSyncRequest) GetHpNode() 
*base.HostPort { + if !p.IsSetHpNode() { + return DuplicationSyncRequest_HpNode_DEFAULT + } + return p.HpNode +} +func (p *DuplicationSyncRequest) IsSetNode() bool { + return p.Node != nil +} + +func (p *DuplicationSyncRequest) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *DuplicationSyncRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.MAP { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationSyncRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *DuplicationSyncRequest) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return 
thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.Gpid][]*DuplicationConfirmEntry, size) + p.ConfirmList = tMap + for i := 0; i < size; i++ { + _key3 := &base.Gpid{} + if err := _key3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key3), err) + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DuplicationConfirmEntry, 0, size) + _val4 := tSlice + for i := 0; i < size; i++ { + _elem5 := &DuplicationConfirmEntry{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + _val4 = append(_val4, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + p.ConfirmList[_key3] = _val4 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DuplicationSyncRequest) ReadField3(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *DuplicationSyncRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_sync_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + 
+func (p *DuplicationSyncRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node: ", p), err) + } + return err +} + +func (p *DuplicationSyncRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("confirm_list", thrift.MAP, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:confirm_list: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.LIST, len(p.ConfirmList)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.ConfirmList { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:confirm_list: ", p), err) + } + return err +} + +func (p *DuplicationSyncRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 3); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node: ", p), err) + } + return err +} + +func (p *DuplicationSyncRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationSyncRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - DupMap +type DuplicationSyncResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + DupMap map[int32]map[int32]*DuplicationEntry `thrift:"dup_map,2" db:"dup_map" json:"dup_map"` +} + +func NewDuplicationSyncResponse() *DuplicationSyncResponse { + return &DuplicationSyncResponse{} +} + +var DuplicationSyncResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationSyncResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationSyncResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationSyncResponse) GetDupMap() map[int32]map[int32]*DuplicationEntry { + return p.DupMap +} +func (p *DuplicationSyncResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationSyncResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.MAP { + if err := p.ReadField2(iprot); err != nil { + return 
err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationSyncResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationSyncResponse) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]map[int32]*DuplicationEntry, size) + p.DupMap = tMap + for i := 0; i < size; i++ { + var _key6 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key6 = v + } + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]*DuplicationEntry, size) + _val7 := tMap + for i := 0; i < size; i++ { + var _key8 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key8 = v + } + _val9 := &DuplicationEntry{} + if err := _val9.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val9), err) + } + _val7[_key8] = _val9 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + p.DupMap[_key6] = _val7 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DuplicationSyncResponse) Write(oprot 
thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_sync_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationSyncResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationSyncResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dup_map", thrift.MAP, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dup_map: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.MAP, len(p.DupMap)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.DupMap { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range v { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dup_map: ", p), err) + } + return err +} + +func (p *DuplicationSyncResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationSyncResponse(%+v)", *p) +} diff --git a/go-client/idl/admin/meta_admin-consts.go b/go-client/idl/admin/meta_admin-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/meta_admin-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/meta_admin.go b/go-client/idl/admin/meta_admin.go new file mode 100644 index 0000000000..4a335e282d --- /dev/null +++ b/go-client/idl/admin/meta_admin.go @@ -0,0 +1,16081 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type ConfigType int64 + +const ( + ConfigType_CT_INVALID ConfigType = 0 + ConfigType_CT_ASSIGN_PRIMARY ConfigType = 1 + ConfigType_CT_UPGRADE_TO_PRIMARY ConfigType = 2 + ConfigType_CT_ADD_SECONDARY ConfigType = 3 + ConfigType_CT_UPGRADE_TO_SECONDARY ConfigType = 4 + ConfigType_CT_DOWNGRADE_TO_SECONDARY ConfigType = 5 + ConfigType_CT_DOWNGRADE_TO_INACTIVE ConfigType = 6 + ConfigType_CT_REMOVE ConfigType = 7 + ConfigType_CT_ADD_SECONDARY_FOR_LB ConfigType = 8 + ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT ConfigType = 9 + ConfigType_CT_DROP_PARTITION ConfigType = 10 + ConfigType_CT_REGISTER_CHILD ConfigType = 11 +) + +func (p ConfigType) String() string { + switch p { + case ConfigType_CT_INVALID: + return "CT_INVALID" + case ConfigType_CT_ASSIGN_PRIMARY: + return "CT_ASSIGN_PRIMARY" + case ConfigType_CT_UPGRADE_TO_PRIMARY: + return "CT_UPGRADE_TO_PRIMARY" + case ConfigType_CT_ADD_SECONDARY: + return 
"CT_ADD_SECONDARY" + case ConfigType_CT_UPGRADE_TO_SECONDARY: + return "CT_UPGRADE_TO_SECONDARY" + case ConfigType_CT_DOWNGRADE_TO_SECONDARY: + return "CT_DOWNGRADE_TO_SECONDARY" + case ConfigType_CT_DOWNGRADE_TO_INACTIVE: + return "CT_DOWNGRADE_TO_INACTIVE" + case ConfigType_CT_REMOVE: + return "CT_REMOVE" + case ConfigType_CT_ADD_SECONDARY_FOR_LB: + return "CT_ADD_SECONDARY_FOR_LB" + case ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT: + return "CT_PRIMARY_FORCE_UPDATE_BALLOT" + case ConfigType_CT_DROP_PARTITION: + return "CT_DROP_PARTITION" + case ConfigType_CT_REGISTER_CHILD: + return "CT_REGISTER_CHILD" + } + return "" +} + +func ConfigTypeFromString(s string) (ConfigType, error) { + switch s { + case "CT_INVALID": + return ConfigType_CT_INVALID, nil + case "CT_ASSIGN_PRIMARY": + return ConfigType_CT_ASSIGN_PRIMARY, nil + case "CT_UPGRADE_TO_PRIMARY": + return ConfigType_CT_UPGRADE_TO_PRIMARY, nil + case "CT_ADD_SECONDARY": + return ConfigType_CT_ADD_SECONDARY, nil + case "CT_UPGRADE_TO_SECONDARY": + return ConfigType_CT_UPGRADE_TO_SECONDARY, nil + case "CT_DOWNGRADE_TO_SECONDARY": + return ConfigType_CT_DOWNGRADE_TO_SECONDARY, nil + case "CT_DOWNGRADE_TO_INACTIVE": + return ConfigType_CT_DOWNGRADE_TO_INACTIVE, nil + case "CT_REMOVE": + return ConfigType_CT_REMOVE, nil + case "CT_ADD_SECONDARY_FOR_LB": + return ConfigType_CT_ADD_SECONDARY_FOR_LB, nil + case "CT_PRIMARY_FORCE_UPDATE_BALLOT": + return ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT, nil + case "CT_DROP_PARTITION": + return ConfigType_CT_DROP_PARTITION, nil + case "CT_REGISTER_CHILD": + return ConfigType_CT_REGISTER_CHILD, nil + } + return ConfigType(0), fmt.Errorf("not a valid ConfigType string") +} + +func ConfigTypePtr(v ConfigType) *ConfigType { return &v } + +func (p ConfigType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *ConfigType) UnmarshalText(text []byte) error { + q, err := ConfigTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return 
nil +} + +func (p *ConfigType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = ConfigType(v) + return nil +} + +func (p *ConfigType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type NodeStatus int64 + +const ( + NodeStatus_NS_INVALID NodeStatus = 0 + NodeStatus_NS_ALIVE NodeStatus = 1 + NodeStatus_NS_UNALIVE NodeStatus = 2 +) + +func (p NodeStatus) String() string { + switch p { + case NodeStatus_NS_INVALID: + return "NS_INVALID" + case NodeStatus_NS_ALIVE: + return "NS_ALIVE" + case NodeStatus_NS_UNALIVE: + return "NS_UNALIVE" + } + return "" +} + +func NodeStatusFromString(s string) (NodeStatus, error) { + switch s { + case "NS_INVALID": + return NodeStatus_NS_INVALID, nil + case "NS_ALIVE": + return NodeStatus_NS_ALIVE, nil + case "NS_UNALIVE": + return NodeStatus_NS_UNALIVE, nil + } + return NodeStatus(0), fmt.Errorf("not a valid NodeStatus string") +} + +func NodeStatusPtr(v NodeStatus) *NodeStatus { return &v } + +func (p NodeStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *NodeStatus) UnmarshalText(text []byte) error { + q, err := NodeStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *NodeStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = NodeStatus(v) + return nil +} + +func (p *NodeStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type AppEnvOperation int64 + +const ( + AppEnvOperation_APP_ENV_OP_INVALID AppEnvOperation = 0 + AppEnvOperation_APP_ENV_OP_SET AppEnvOperation = 1 + AppEnvOperation_APP_ENV_OP_DEL AppEnvOperation = 2 + AppEnvOperation_APP_ENV_OP_CLEAR AppEnvOperation = 3 +) + +func (p AppEnvOperation) String() string { + switch p { + case AppEnvOperation_APP_ENV_OP_INVALID: + return 
"APP_ENV_OP_INVALID" + case AppEnvOperation_APP_ENV_OP_SET: + return "APP_ENV_OP_SET" + case AppEnvOperation_APP_ENV_OP_DEL: + return "APP_ENV_OP_DEL" + case AppEnvOperation_APP_ENV_OP_CLEAR: + return "APP_ENV_OP_CLEAR" + } + return "" +} + +func AppEnvOperationFromString(s string) (AppEnvOperation, error) { + switch s { + case "APP_ENV_OP_INVALID": + return AppEnvOperation_APP_ENV_OP_INVALID, nil + case "APP_ENV_OP_SET": + return AppEnvOperation_APP_ENV_OP_SET, nil + case "APP_ENV_OP_DEL": + return AppEnvOperation_APP_ENV_OP_DEL, nil + case "APP_ENV_OP_CLEAR": + return AppEnvOperation_APP_ENV_OP_CLEAR, nil + } + return AppEnvOperation(0), fmt.Errorf("not a valid AppEnvOperation string") +} + +func AppEnvOperationPtr(v AppEnvOperation) *AppEnvOperation { return &v } + +func (p AppEnvOperation) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AppEnvOperation) UnmarshalText(text []byte) error { + q, err := AppEnvOperationFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AppEnvOperation) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AppEnvOperation(v) + return nil +} + +func (p *AppEnvOperation) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type MetaFunctionLevel int64 + +const ( + MetaFunctionLevel_fl_stopped MetaFunctionLevel = 100 + MetaFunctionLevel_fl_blind MetaFunctionLevel = 200 + MetaFunctionLevel_fl_freezed MetaFunctionLevel = 300 + MetaFunctionLevel_fl_steady MetaFunctionLevel = 400 + MetaFunctionLevel_fl_lively MetaFunctionLevel = 500 + MetaFunctionLevel_fl_invalid MetaFunctionLevel = 10000 +) + +func (p MetaFunctionLevel) String() string { + switch p { + case MetaFunctionLevel_fl_stopped: + return "fl_stopped" + case MetaFunctionLevel_fl_blind: + return "fl_blind" + case MetaFunctionLevel_fl_freezed: + return "fl_freezed" + case 
MetaFunctionLevel_fl_steady: + return "fl_steady" + case MetaFunctionLevel_fl_lively: + return "fl_lively" + case MetaFunctionLevel_fl_invalid: + return "fl_invalid" + } + return "" +} + +func MetaFunctionLevelFromString(s string) (MetaFunctionLevel, error) { + switch s { + case "fl_stopped": + return MetaFunctionLevel_fl_stopped, nil + case "fl_blind": + return MetaFunctionLevel_fl_blind, nil + case "fl_freezed": + return MetaFunctionLevel_fl_freezed, nil + case "fl_steady": + return MetaFunctionLevel_fl_steady, nil + case "fl_lively": + return MetaFunctionLevel_fl_lively, nil + case "fl_invalid": + return MetaFunctionLevel_fl_invalid, nil + } + return MetaFunctionLevel(0), fmt.Errorf("not a valid MetaFunctionLevel string") +} + +func MetaFunctionLevelPtr(v MetaFunctionLevel) *MetaFunctionLevel { return &v } + +func (p MetaFunctionLevel) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *MetaFunctionLevel) UnmarshalText(text []byte) error { + q, err := MetaFunctionLevelFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *MetaFunctionLevel) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = MetaFunctionLevel(v) + return nil +} + +func (p *MetaFunctionLevel) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type BalancerRequestType int64 + +const ( + BalancerRequestType_move_primary BalancerRequestType = 0 + BalancerRequestType_copy_primary BalancerRequestType = 1 + BalancerRequestType_copy_secondary BalancerRequestType = 2 +) + +func (p BalancerRequestType) String() string { + switch p { + case BalancerRequestType_move_primary: + return "move_primary" + case BalancerRequestType_copy_primary: + return "copy_primary" + case BalancerRequestType_copy_secondary: + return "copy_secondary" + } + return "" +} + +func BalancerRequestTypeFromString(s string) (BalancerRequestType, 
error) { + switch s { + case "move_primary": + return BalancerRequestType_move_primary, nil + case "copy_primary": + return BalancerRequestType_copy_primary, nil + case "copy_secondary": + return BalancerRequestType_copy_secondary, nil + } + return BalancerRequestType(0), fmt.Errorf("not a valid BalancerRequestType string") +} + +func BalancerRequestTypePtr(v BalancerRequestType) *BalancerRequestType { return &v } + +func (p BalancerRequestType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *BalancerRequestType) UnmarshalText(text []byte) error { + q, err := BalancerRequestTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *BalancerRequestType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = BalancerRequestType(v) + return nil +} + +func (p *BalancerRequestType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Info +// - Config +// - Type +// - Node +// - HostNode +// - MetaSplitStatus +// - HpNode +type ConfigurationUpdateRequest struct { + Info *replication.AppInfo `thrift:"info,1" db:"info" json:"info"` + Config *replication.PartitionConfiguration `thrift:"config,2" db:"config" json:"config"` + Type ConfigType `thrift:"type,3" db:"type" json:"type"` + Node *base.RPCAddress `thrift:"node,4" db:"node" json:"node"` + HostNode *base.RPCAddress `thrift:"host_node,5" db:"host_node" json:"host_node"` + MetaSplitStatus *SplitStatus `thrift:"meta_split_status,6" db:"meta_split_status" json:"meta_split_status,omitempty"` + HpNode *base.HostPort `thrift:"hp_node,7" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewConfigurationUpdateRequest() *ConfigurationUpdateRequest { + return &ConfigurationUpdateRequest{ + Type: 0, + } +} + +var ConfigurationUpdateRequest_Info_DEFAULT *replication.AppInfo + +func (p *ConfigurationUpdateRequest) GetInfo() 
*replication.AppInfo { + if !p.IsSetInfo() { + return ConfigurationUpdateRequest_Info_DEFAULT + } + return p.Info +} + +var ConfigurationUpdateRequest_Config_DEFAULT *replication.PartitionConfiguration + +func (p *ConfigurationUpdateRequest) GetConfig() *replication.PartitionConfiguration { + if !p.IsSetConfig() { + return ConfigurationUpdateRequest_Config_DEFAULT + } + return p.Config +} + +func (p *ConfigurationUpdateRequest) GetType() ConfigType { + return p.Type +} + +var ConfigurationUpdateRequest_Node_DEFAULT *base.RPCAddress + +func (p *ConfigurationUpdateRequest) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return ConfigurationUpdateRequest_Node_DEFAULT + } + return p.Node +} + +var ConfigurationUpdateRequest_HostNode_DEFAULT *base.RPCAddress + +func (p *ConfigurationUpdateRequest) GetHostNode() *base.RPCAddress { + if !p.IsSetHostNode() { + return ConfigurationUpdateRequest_HostNode_DEFAULT + } + return p.HostNode +} + +var ConfigurationUpdateRequest_MetaSplitStatus_DEFAULT SplitStatus + +func (p *ConfigurationUpdateRequest) GetMetaSplitStatus() SplitStatus { + if !p.IsSetMetaSplitStatus() { + return ConfigurationUpdateRequest_MetaSplitStatus_DEFAULT + } + return *p.MetaSplitStatus +} + +var ConfigurationUpdateRequest_HpNode_DEFAULT *base.HostPort + +func (p *ConfigurationUpdateRequest) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return ConfigurationUpdateRequest_HpNode_DEFAULT + } + return p.HpNode +} +func (p *ConfigurationUpdateRequest) IsSetInfo() bool { + return p.Info != nil +} + +func (p *ConfigurationUpdateRequest) IsSetConfig() bool { + return p.Config != nil +} + +func (p *ConfigurationUpdateRequest) IsSetNode() bool { + return p.Node != nil +} + +func (p *ConfigurationUpdateRequest) IsSetHostNode() bool { + return p.HostNode != nil +} + +func (p *ConfigurationUpdateRequest) IsSetMetaSplitStatus() bool { + return p.MetaSplitStatus != nil +} + +func (p *ConfigurationUpdateRequest) IsSetHpNode() bool { + return p.HpNode != nil +} 
+ +func (p *ConfigurationUpdateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField1(iprot thrift.TProtocol) error { + p.Info = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.Info.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField2(iprot thrift.TProtocol) error { + p.Config = &replication.PartitionConfiguration{} + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := ConfigType(v) + p.Type = temp + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField4(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField5(iprot thrift.TProtocol) error { + p.HostNode = &base.RPCAddress{} + if err := p.HostNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HostNode), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + temp := SplitStatus(v) + p.MetaSplitStatus = &temp + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField7(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), 
err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:info: ", p), err) + } + if err := p.Info.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:info: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:config: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:node: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("host_node", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:host_node: ", p), err) + } + if err := p.HostNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HostNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:host_node: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMetaSplitStatus() { + if err := oprot.WriteFieldBegin("meta_split_status", thrift.I32, 6); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 6:meta_split_status: ", p), err) + } + if err := oprot.WriteI32(int32(*p.MetaSplitStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_split_status (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:meta_split_status: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hp_node: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Config +type ConfigurationUpdateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Config *replication.PartitionConfiguration `thrift:"config,2" db:"config" json:"config"` +} + +func NewConfigurationUpdateResponse() *ConfigurationUpdateResponse { + return &ConfigurationUpdateResponse{} +} + +var ConfigurationUpdateResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationUpdateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationUpdateResponse_Err_DEFAULT + } + return p.Err +} + +var ConfigurationUpdateResponse_Config_DEFAULT *replication.PartitionConfiguration + +func (p *ConfigurationUpdateResponse) GetConfig() *replication.PartitionConfiguration { + if !p.IsSetConfig() { + return 
ConfigurationUpdateResponse_Config_DEFAULT + } + return p.Config +} +func (p *ConfigurationUpdateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationUpdateResponse) IsSetConfig() bool { + return p.Config != nil +} + +func (p *ConfigurationUpdateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) ReadField2(iprot thrift.TProtocol) error { + p.Config = &replication.PartitionConfiguration{} + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) Write(oprot thrift.TProtocol) error { + if err 
:= oprot.WriteStructBegin("configuration_update_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:config: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateResponse(%+v)", *p) +} + +// Attributes: +// - GeoTags +// - TotalCapacityMb +type ReplicaServerInfo struct { + GeoTags map[string]string `thrift:"geo_tags,1" db:"geo_tags" json:"geo_tags"` + TotalCapacityMb int64 
`thrift:"total_capacity_mb,2" db:"total_capacity_mb" json:"total_capacity_mb"` +} + +func NewReplicaServerInfo() *ReplicaServerInfo { + return &ReplicaServerInfo{} +} + +func (p *ReplicaServerInfo) GetGeoTags() map[string]string { + return p.GeoTags +} + +func (p *ReplicaServerInfo) GetTotalCapacityMb() int64 { + return p.TotalCapacityMb +} +func (p *ReplicaServerInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaServerInfo) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.GeoTags = tMap + for i := 0; i < size; i++ { + var _key0 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error 
reading field 0: ", err) + } else { + _val1 = v + } + p.GeoTags[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *ReplicaServerInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TotalCapacityMb = v + } + return nil +} + +func (p *ReplicaServerInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_server_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaServerInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("geo_tags", thrift.MAP, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:geo_tags: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.GeoTags)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.GeoTags { + if err := oprot.WriteString(string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:geo_tags: ", p), err) + } + return err +} + +func (p *ReplicaServerInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("total_capacity_mb", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_capacity_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.TotalCapacityMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_capacity_mb (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:total_capacity_mb: ", p), err) + } + return err +} + +func (p *ReplicaServerInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaServerInfo(%+v)", *p) +} + +// Attributes: +// - Node +// - StoredReplicas +// - Info +// - HpNode +type ConfigurationQueryByNodeRequest struct { + Node *base.RPCAddress `thrift:"node,1" db:"node" json:"node"` + StoredReplicas []*ReplicaInfo `thrift:"stored_replicas,2" db:"stored_replicas" json:"stored_replicas,omitempty"` + Info *ReplicaServerInfo `thrift:"info,3" db:"info" json:"info,omitempty"` + HpNode *base.HostPort `thrift:"hp_node,4" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewConfigurationQueryByNodeRequest() *ConfigurationQueryByNodeRequest { + return &ConfigurationQueryByNodeRequest{} +} + +var ConfigurationQueryByNodeRequest_Node_DEFAULT *base.RPCAddress + +func (p *ConfigurationQueryByNodeRequest) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return ConfigurationQueryByNodeRequest_Node_DEFAULT + } + return p.Node +} + +var ConfigurationQueryByNodeRequest_StoredReplicas_DEFAULT []*ReplicaInfo + +func (p 
*ConfigurationQueryByNodeRequest) GetStoredReplicas() []*ReplicaInfo { + return p.StoredReplicas +} + +var ConfigurationQueryByNodeRequest_Info_DEFAULT *ReplicaServerInfo + +func (p *ConfigurationQueryByNodeRequest) GetInfo() *ReplicaServerInfo { + if !p.IsSetInfo() { + return ConfigurationQueryByNodeRequest_Info_DEFAULT + } + return p.Info +} + +var ConfigurationQueryByNodeRequest_HpNode_DEFAULT *base.HostPort + +func (p *ConfigurationQueryByNodeRequest) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return ConfigurationQueryByNodeRequest_HpNode_DEFAULT + } + return p.HpNode +} +func (p *ConfigurationQueryByNodeRequest) IsSetNode() bool { + return p.Node != nil +} + +func (p *ConfigurationQueryByNodeRequest) IsSetStoredReplicas() bool { + return p.StoredReplicas != nil +} + +func (p *ConfigurationQueryByNodeRequest) IsSetInfo() bool { + return p.Info != nil +} + +func (p *ConfigurationQueryByNodeRequest) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *ConfigurationQueryByNodeRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + 
case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ReplicaInfo, 0, size) + p.StoredReplicas = tSlice + for i := 0; i < size; i++ { + _elem2 := &ReplicaInfo{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.StoredReplicas = append(p.StoredReplicas, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField3(iprot thrift.TProtocol) error { + p.Info = &ReplicaServerInfo{} + if err := p.Info.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField4(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) 
Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_by_node_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node: ", p), err) + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetStoredReplicas() { + if err := oprot.WriteFieldBegin("stored_replicas", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stored_replicas: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.StoredReplicas)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.StoredReplicas { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return 
thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stored_replicas: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetInfo() { + if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:info: ", p), err) + } + if err := p.Info.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:info: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hp_node: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryByNodeRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Partitions +// - GcReplicas +type ConfigurationQueryByNodeResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Partitions []*ConfigurationUpdateRequest `thrift:"partitions,2" db:"partitions" json:"partitions"` + GcReplicas []*ReplicaInfo `thrift:"gc_replicas,3" db:"gc_replicas" json:"gc_replicas,omitempty"` +} + +func NewConfigurationQueryByNodeResponse() 
*ConfigurationQueryByNodeResponse { + return &ConfigurationQueryByNodeResponse{} +} + +var ConfigurationQueryByNodeResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationQueryByNodeResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationQueryByNodeResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationQueryByNodeResponse) GetPartitions() []*ConfigurationUpdateRequest { + return p.Partitions +} + +var ConfigurationQueryByNodeResponse_GcReplicas_DEFAULT []*ReplicaInfo + +func (p *ConfigurationQueryByNodeResponse) GetGcReplicas() []*ReplicaInfo { + return p.GcReplicas +} +func (p *ConfigurationQueryByNodeResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationQueryByNodeResponse) IsSetGcReplicas() bool { + return p.GcReplicas != nil +} + +func (p *ConfigurationQueryByNodeResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := 
iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ConfigurationUpdateRequest, 0, size) + p.Partitions = tSlice + for i := 0; i < size; i++ { + _elem3 := &ConfigurationUpdateRequest{ + Type: 0, + } + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Partitions = append(p.Partitions, _elem3) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ReplicaInfo, 0, size) + p.GcReplicas = tSlice + for i := 0; i < size; i++ { + _elem4 := &ReplicaInfo{} + if err := _elem4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.GcReplicas = append(p.GcReplicas, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_by_node_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + 
} + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationQueryByNodeResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partitions: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partitions: ", p), err) + } + return err +} + +func (p *ConfigurationQueryByNodeResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetGcReplicas() { + if err 
:= oprot.WriteFieldBegin("gc_replicas", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:gc_replicas: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.GcReplicas)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.GcReplicas { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:gc_replicas: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryByNodeResponse(%+v)", *p) +} + +// Attributes: +// - RecoveryNodes +// - SkipBadNodes +// - SkipLostPartitions +// - HpRecoveryNodes +type ConfigurationRecoveryRequest struct { + RecoveryNodes []*base.RPCAddress `thrift:"recovery_nodes,1" db:"recovery_nodes" json:"recovery_nodes"` + SkipBadNodes bool `thrift:"skip_bad_nodes,2" db:"skip_bad_nodes" json:"skip_bad_nodes"` + SkipLostPartitions bool `thrift:"skip_lost_partitions,3" db:"skip_lost_partitions" json:"skip_lost_partitions"` + HpRecoveryNodes []*base.HostPort `thrift:"hp_recovery_nodes,4" db:"hp_recovery_nodes" json:"hp_recovery_nodes,omitempty"` +} + +func NewConfigurationRecoveryRequest() *ConfigurationRecoveryRequest { + return &ConfigurationRecoveryRequest{} +} + +func (p *ConfigurationRecoveryRequest) GetRecoveryNodes() []*base.RPCAddress { + return p.RecoveryNodes +} + +func (p *ConfigurationRecoveryRequest) GetSkipBadNodes() bool { + return p.SkipBadNodes +} + +func (p *ConfigurationRecoveryRequest) GetSkipLostPartitions() bool { + return p.SkipLostPartitions +} + +var ConfigurationRecoveryRequest_HpRecoveryNodes_DEFAULT 
[]*base.HostPort + +func (p *ConfigurationRecoveryRequest) GetHpRecoveryNodes() []*base.HostPort { + return p.HpRecoveryNodes +} +func (p *ConfigurationRecoveryRequest) IsSetHpRecoveryNodes() bool { + return p.HpRecoveryNodes != nil +} + +func (p *ConfigurationRecoveryRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.RPCAddress, 0, size) + 
p.RecoveryNodes = tSlice + for i := 0; i < size; i++ { + _elem5 := &base.RPCAddress{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.RecoveryNodes = append(p.RecoveryNodes, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.SkipBadNodes = v + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SkipLostPartitions = v + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.HostPort, 0, size) + p.HpRecoveryNodes = tSlice + for i := 0; i < size; i++ { + _elem6 := &base.HostPort{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.HpRecoveryNodes = append(p.HpRecoveryNodes, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recovery_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil 
{ + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("recovery_nodes", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:recovery_nodes: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RecoveryNodes)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RecoveryNodes { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:recovery_nodes: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("skip_bad_nodes", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:skip_bad_nodes: ", p), err) + } + if err := oprot.WriteBool(bool(p.SkipBadNodes)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.skip_bad_nodes (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:skip_bad_nodes: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("skip_lost_partitions", thrift.BOOL, 3); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:skip_lost_partitions: ", p), err) + } + if err := oprot.WriteBool(bool(p.SkipLostPartitions)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.skip_lost_partitions (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:skip_lost_partitions: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHpRecoveryNodes() { + if err := oprot.WriteFieldBegin("hp_recovery_nodes", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hp_recovery_nodes: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HpRecoveryNodes)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpRecoveryNodes { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hp_recovery_nodes: ", p), err) + } + } + return err +} + +func (p *ConfigurationRecoveryRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecoveryRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationRecoveryResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationRecoveryResponse() *ConfigurationRecoveryResponse { + return &ConfigurationRecoveryResponse{} +} + +var ConfigurationRecoveryResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationRecoveryResponse) 
GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationRecoveryResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationRecoveryResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationRecoveryResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationRecoveryResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecoveryResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationRecoveryResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationRecoveryResponse) Write(oprot 
thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recovery_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecoveryResponse(%+v)", *p) +} + +// Attributes: +// - PartitionCount +// - ReplicaCount +// - SuccessIfExist +// - AppType +// - 
IsStateful +// - Envs +type CreateAppOptions struct { + PartitionCount int32 `thrift:"partition_count,1" db:"partition_count" json:"partition_count"` + ReplicaCount int32 `thrift:"replica_count,2" db:"replica_count" json:"replica_count"` + SuccessIfExist bool `thrift:"success_if_exist,3" db:"success_if_exist" json:"success_if_exist"` + AppType string `thrift:"app_type,4" db:"app_type" json:"app_type"` + IsStateful bool `thrift:"is_stateful,5" db:"is_stateful" json:"is_stateful"` + Envs map[string]string `thrift:"envs,6" db:"envs" json:"envs"` +} + +func NewCreateAppOptions() *CreateAppOptions { + return &CreateAppOptions{} +} + +func (p *CreateAppOptions) GetPartitionCount() int32 { + return p.PartitionCount +} + +func (p *CreateAppOptions) GetReplicaCount() int32 { + return p.ReplicaCount +} + +func (p *CreateAppOptions) GetSuccessIfExist() bool { + return p.SuccessIfExist +} + +func (p *CreateAppOptions) GetAppType() string { + return p.AppType +} + +func (p *CreateAppOptions) GetIsStateful() bool { + return p.IsStateful +} + +func (p *CreateAppOptions) GetEnvs() map[string]string { + return p.Envs +} +func (p *CreateAppOptions) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := 
p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.MAP { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CreateAppOptions) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *CreateAppOptions) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ReplicaCount = v + } + return nil +} + +func (p *CreateAppOptions) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SuccessIfExist = v + } + return nil +} + +func (p *CreateAppOptions) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppType = v + } + return nil +} + +func (p *CreateAppOptions) ReadField5(iprot thrift.TProtocol) error { 
+ if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IsStateful = v + } + return nil +} + +func (p *CreateAppOptions) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Envs = tMap + for i := 0; i < size; i++ { + var _key7 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key7 = v + } + var _val8 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val8 = v + } + p.Envs[_key7] = _val8 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *CreateAppOptions) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("create_app_options"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CreateAppOptions) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write 
field begin error 1:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:partition_count: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.ReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:replica_count: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("success_if_exist", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:success_if_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.SuccessIfExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.success_if_exist (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:success_if_exist: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_type: ", p), err) + } + if err := oprot.WriteString(string(p.AppType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_type (4) field write error: ", p), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_type: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_stateful: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_stateful (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_stateful: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("envs", thrift.MAP, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:envs: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Envs)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Envs { + if err := oprot.WriteString(string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:envs: ", p), err) + } + return err +} + +func (p *CreateAppOptions) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CreateAppOptions(%+v)", *p) +} + +// Attributes: +// - AppName +// - Options +type ConfigurationCreateAppRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Options *CreateAppOptions `thrift:"options,2" db:"options" json:"options"` +} + +func NewConfigurationCreateAppRequest() *ConfigurationCreateAppRequest { + return &ConfigurationCreateAppRequest{} +} + +func (p *ConfigurationCreateAppRequest) GetAppName() string { + return p.AppName +} + +var ConfigurationCreateAppRequest_Options_DEFAULT *CreateAppOptions + +func (p *ConfigurationCreateAppRequest) GetOptions() *CreateAppOptions { + if !p.IsSetOptions() { + return ConfigurationCreateAppRequest_Options_DEFAULT + } + return p.Options +} +func (p *ConfigurationCreateAppRequest) IsSetOptions() bool { + return p.Options != nil +} + +func (p *ConfigurationCreateAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if 
err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationCreateAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationCreateAppRequest) ReadField2(iprot thrift.TProtocol) error { + p.Options = &CreateAppOptions{} + if err := p.Options.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Options), err) + } + return nil +} + +func (p *ConfigurationCreateAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_create_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationCreateAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("options", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:options: ", p), err) + } + if err := p.Options.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Options), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:options: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationCreateAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +type ConfigurationCreateAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` +} + +func NewConfigurationCreateAppResponse() *ConfigurationCreateAppResponse { + return &ConfigurationCreateAppResponse{} +} + +var ConfigurationCreateAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationCreateAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationCreateAppResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationCreateAppResponse) GetAppid() int32 { + return p.Appid +} +func (p *ConfigurationCreateAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationCreateAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + 
switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationCreateAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationCreateAppResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *ConfigurationCreateAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_create_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationCreateAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationCreateAppResponse(%+v)", *p) +} + +// Attributes: +// - SuccessIfNotExist +// - ReserveSeconds +type DropAppOptions struct { + SuccessIfNotExist bool `thrift:"success_if_not_exist,1" db:"success_if_not_exist" json:"success_if_not_exist"` + ReserveSeconds *int64 `thrift:"reserve_seconds,2" db:"reserve_seconds" json:"reserve_seconds,omitempty"` +} + +func NewDropAppOptions() *DropAppOptions { + return &DropAppOptions{} +} + +func (p *DropAppOptions) GetSuccessIfNotExist() bool { + return p.SuccessIfNotExist +} + +var DropAppOptions_ReserveSeconds_DEFAULT int64 + +func (p *DropAppOptions) GetReserveSeconds() int64 { + if !p.IsSetReserveSeconds() { + return DropAppOptions_ReserveSeconds_DEFAULT + } + return *p.ReserveSeconds +} +func (p *DropAppOptions) IsSetReserveSeconds() bool { + return p.ReserveSeconds != nil +} + +func (p *DropAppOptions) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DropAppOptions) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.SuccessIfNotExist = v + } + return nil +} + +func (p *DropAppOptions) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ReserveSeconds = &v + } + return nil +} + +func (p *DropAppOptions) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("drop_app_options"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := 
oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DropAppOptions) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("success_if_not_exist", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:success_if_not_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.SuccessIfNotExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.success_if_not_exist (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:success_if_not_exist: ", p), err) + } + return err +} + +func (p *DropAppOptions) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetReserveSeconds() { + if err := oprot.WriteFieldBegin("reserve_seconds", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:reserve_seconds: ", p), err) + } + if err := oprot.WriteI64(int64(*p.ReserveSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.reserve_seconds (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:reserve_seconds: ", p), err) + } + } + return err +} + +func (p *DropAppOptions) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DropAppOptions(%+v)", *p) +} + +// Attributes: +// - AppName +// - Options +type ConfigurationDropAppRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Options *DropAppOptions `thrift:"options,2" db:"options" json:"options"` +} + +func NewConfigurationDropAppRequest() *ConfigurationDropAppRequest { + return &ConfigurationDropAppRequest{} +} + +func (p *ConfigurationDropAppRequest) GetAppName() string { + return p.AppName +} + +var ConfigurationDropAppRequest_Options_DEFAULT 
*DropAppOptions + +func (p *ConfigurationDropAppRequest) GetOptions() *DropAppOptions { + if !p.IsSetOptions() { + return ConfigurationDropAppRequest_Options_DEFAULT + } + return p.Options +} +func (p *ConfigurationDropAppRequest) IsSetOptions() bool { + return p.Options != nil +} + +func (p *ConfigurationDropAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationDropAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationDropAppRequest) ReadField2(iprot thrift.TProtocol) error { + p.Options = &DropAppOptions{} + if err := p.Options.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Options), err) + } + return nil +} + +func (p *ConfigurationDropAppRequest) Write(oprot thrift.TProtocol) error { + 
if err := oprot.WriteStructBegin("configuration_drop_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationDropAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationDropAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("options", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:options: ", p), err) + } + if err := p.Options.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Options), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:options: ", p), err) + } + return err +} + +func (p *ConfigurationDropAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationDropAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +type ConfigurationDropAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + 
+func NewConfigurationDropAppResponse() *ConfigurationDropAppResponse { + return &ConfigurationDropAppResponse{} +} + +var ConfigurationDropAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationDropAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationDropAppResponse_Err_DEFAULT + } + return p.Err +} +func (p *ConfigurationDropAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationDropAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationDropAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationDropAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_drop_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != 
nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationDropAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationDropAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationDropAppResponse(%+v)", *p) +} + +// Attributes: +// - OldAppName +// - NewAppName_ +type ConfigurationRenameAppRequest struct { + OldAppName string `thrift:"old_app_name,1" db:"old_app_name" json:"old_app_name"` + NewAppName_ string `thrift:"new_app_name,2" db:"new_app_name" json:"new_app_name"` +} + +func NewConfigurationRenameAppRequest() *ConfigurationRenameAppRequest { + return &ConfigurationRenameAppRequest{} +} + +func (p *ConfigurationRenameAppRequest) GetOldAppName() string { + return p.OldAppName +} + +func (p *ConfigurationRenameAppRequest) GetNewAppName_() string { + return p.NewAppName_ +} +func (p *ConfigurationRenameAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := 
p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRenameAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.OldAppName = v + } + return nil +} + +func (p *ConfigurationRenameAppRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewAppName_ = v + } + return nil +} + +func (p *ConfigurationRenameAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_rename_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRenameAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("old_app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin 
error 1:old_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.OldAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:old_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.NewAppName_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRenameAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationRenameAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationRenameAppResponse() *ConfigurationRenameAppResponse { + return &ConfigurationRenameAppResponse{} +} + +var ConfigurationRenameAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationRenameAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationRenameAppResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationRenameAppResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationRenameAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationRenameAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRenameAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationRenameAppResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationRenameAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_rename_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + 
return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRenameAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRenameAppResponse(%+v)", *p) +} + +// Attributes: +// - AppID +// - NewAppName_ +type ConfigurationRecallAppRequest struct { + AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` + NewAppName_ string `thrift:"new_app_name,2" db:"new_app_name" json:"new_app_name"` +} + +func NewConfigurationRecallAppRequest() *ConfigurationRecallAppRequest { + return &ConfigurationRecallAppRequest{} +} + +func (p *ConfigurationRecallAppRequest) GetAppID() int32 { + return p.AppID +} + +func (p *ConfigurationRecallAppRequest) 
GetNewAppName_() string { + return p.NewAppName_ +} +func (p *ConfigurationRecallAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecallAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ConfigurationRecallAppRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewAppName_ = v + } + return nil +} + +func (p *ConfigurationRecallAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recall_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err 
:= p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecallAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.NewAppName_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecallAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Info +type ConfigurationRecallAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Info *replication.AppInfo `thrift:"info,2" db:"info" json:"info"` +} + +func NewConfigurationRecallAppResponse() *ConfigurationRecallAppResponse { + return &ConfigurationRecallAppResponse{} +} + +var 
ConfigurationRecallAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationRecallAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationRecallAppResponse_Err_DEFAULT + } + return p.Err +} + +var ConfigurationRecallAppResponse_Info_DEFAULT *replication.AppInfo + +func (p *ConfigurationRecallAppResponse) GetInfo() *replication.AppInfo { + if !p.IsSetInfo() { + return ConfigurationRecallAppResponse_Info_DEFAULT + } + return p.Info +} +func (p *ConfigurationRecallAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationRecallAppResponse) IsSetInfo() bool { + return p.Info != nil +} + +func (p *ConfigurationRecallAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: 
", p.Err), err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) ReadField2(iprot thrift.TProtocol) error { + p.Info = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.Info.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recall_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:info: ", p), err) + } + if err := p.Info.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:info: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecallAppResponse(%+v)", *p) +} + +// Attributes: +// - Status +type ConfigurationListAppsRequest struct { + Status replication.AppStatus `thrift:"status,1" db:"status" json:"status"` +} + +func NewConfigurationListAppsRequest() *ConfigurationListAppsRequest { + return &ConfigurationListAppsRequest{ + Status: 0, + } +} + +func (p *ConfigurationListAppsRequest) GetStatus() replication.AppStatus { + return p.Status +} +func (p *ConfigurationListAppsRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListAppsRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := replication.AppStatus(v) + p.Status = temp + } + return nil +} + +func (p *ConfigurationListAppsRequest) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("configuration_list_apps_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListAppsRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *ConfigurationListAppsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListAppsRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Infos +type ConfigurationListAppsResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Infos []*replication.AppInfo `thrift:"infos,2" db:"infos" json:"infos"` +} + +func NewConfigurationListAppsResponse() *ConfigurationListAppsResponse { + return &ConfigurationListAppsResponse{} +} + +var ConfigurationListAppsResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationListAppsResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationListAppsResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationListAppsResponse) GetInfos() []*replication.AppInfo { + return p.Infos +} +func (p *ConfigurationListAppsResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p 
*ConfigurationListAppsResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*replication.AppInfo, 0, size) + p.Infos = tSlice + for i := 0; i < size; i++ { + _elem9 := &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := _elem9.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.Infos = append(p.Infos, _elem9) + } + if err := iprot.ReadListEnd(); err != nil { + return 
thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_list_apps_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationListAppsResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("infos", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Infos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Infos { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:infos: ", p), err) + } + return err +} + +func (p *ConfigurationListAppsResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListAppsResponse(%+v)", *p) +} + +// Attributes: +// - MetaServer +// - HpMetaServer +type QueryAppInfoRequest struct { + MetaServer *base.RPCAddress `thrift:"meta_server,1" db:"meta_server" json:"meta_server"` + HpMetaServer *base.HostPort `thrift:"hp_meta_server,2" db:"hp_meta_server" json:"hp_meta_server,omitempty"` +} + +func NewQueryAppInfoRequest() *QueryAppInfoRequest { + return &QueryAppInfoRequest{} +} + +var QueryAppInfoRequest_MetaServer_DEFAULT *base.RPCAddress + +func (p *QueryAppInfoRequest) GetMetaServer() *base.RPCAddress { + if !p.IsSetMetaServer() { + return QueryAppInfoRequest_MetaServer_DEFAULT + } + return p.MetaServer +} + +var QueryAppInfoRequest_HpMetaServer_DEFAULT *base.HostPort + +func (p *QueryAppInfoRequest) GetHpMetaServer() *base.HostPort { + if !p.IsSetHpMetaServer() { + return QueryAppInfoRequest_HpMetaServer_DEFAULT + } + return p.HpMetaServer +} +func (p *QueryAppInfoRequest) IsSetMetaServer() bool { + return p.MetaServer != nil +} + +func (p *QueryAppInfoRequest) IsSetHpMetaServer() bool { + return p.HpMetaServer != nil +} + +func (p *QueryAppInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := 
p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppInfoRequest) ReadField1(iprot thrift.TProtocol) error { + p.MetaServer = &base.RPCAddress{} + if err := p.MetaServer.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MetaServer), err) + } + return nil +} + +func (p *QueryAppInfoRequest) ReadField2(iprot thrift.TProtocol) error { + p.HpMetaServer = &base.HostPort{} + if err := p.HpMetaServer.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpMetaServer), err) + } + return nil +} + +func (p *QueryAppInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_server", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:meta_server: ", p), err) + } + if err := p.MetaServer.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
p.MetaServer), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:meta_server: ", p), err) + } + return err +} + +func (p *QueryAppInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHpMetaServer() { + if err := oprot.WriteFieldBegin("hp_meta_server", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hp_meta_server: ", p), err) + } + if err := p.HpMetaServer.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpMetaServer), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hp_meta_server: ", p), err) + } + } + return err +} + +func (p *QueryAppInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Apps +type QueryAppInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Apps []*replication.AppInfo `thrift:"apps,2" db:"apps" json:"apps"` +} + +func NewQueryAppInfoResponse() *QueryAppInfoResponse { + return &QueryAppInfoResponse{} +} + +var QueryAppInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryAppInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryAppInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryAppInfoResponse) GetApps() []*replication.AppInfo { + return p.Apps +} +func (p *QueryAppInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryAppInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId 
== thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryAppInfoResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*replication.AppInfo, 0, size) + p.Apps = tSlice + for i := 0; i < size; i++ { + _elem10 := &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := _elem10.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Apps = append(p.Apps, _elem10) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryAppInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := 
p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryAppInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("apps", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:apps: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Apps)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Apps { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:apps: ", p), err) + } + return err +} + +func (p *QueryAppInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppInfoResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Op +// - Keys +// - Values +// - ClearPrefix +type ConfigurationUpdateAppEnvRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + 
Op AppEnvOperation `thrift:"op,2" db:"op" json:"op"` + Keys []string `thrift:"keys,3" db:"keys" json:"keys,omitempty"` + Values []string `thrift:"values,4" db:"values" json:"values,omitempty"` + ClearPrefix *string `thrift:"clear_prefix,5" db:"clear_prefix" json:"clear_prefix,omitempty"` +} + +func NewConfigurationUpdateAppEnvRequest() *ConfigurationUpdateAppEnvRequest { + return &ConfigurationUpdateAppEnvRequest{ + Op: 0, + } +} + +func (p *ConfigurationUpdateAppEnvRequest) GetAppName() string { + return p.AppName +} + +func (p *ConfigurationUpdateAppEnvRequest) GetOp() AppEnvOperation { + return p.Op +} + +var ConfigurationUpdateAppEnvRequest_Keys_DEFAULT []string + +func (p *ConfigurationUpdateAppEnvRequest) GetKeys() []string { + return p.Keys +} + +var ConfigurationUpdateAppEnvRequest_Values_DEFAULT []string + +func (p *ConfigurationUpdateAppEnvRequest) GetValues() []string { + return p.Values +} + +var ConfigurationUpdateAppEnvRequest_ClearPrefix_DEFAULT string + +func (p *ConfigurationUpdateAppEnvRequest) GetClearPrefix() string { + if !p.IsSetClearPrefix() { + return ConfigurationUpdateAppEnvRequest_ClearPrefix_DEFAULT + } + return *p.ClearPrefix +} +func (p *ConfigurationUpdateAppEnvRequest) IsSetKeys() bool { + return p.Keys != nil +} + +func (p *ConfigurationUpdateAppEnvRequest) IsSetValues() bool { + return p.Values != nil +} + +func (p *ConfigurationUpdateAppEnvRequest) IsSetClearPrefix() bool { + return p.ClearPrefix != nil +} + +func (p *ConfigurationUpdateAppEnvRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err 
!= nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := AppEnvOperation(v) + p.Op = temp + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Keys = tSlice + for i := 0; i < size; i++ { + var _elem11 string + 
if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem11 = v + } + p.Keys = append(p.Keys, _elem11) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Values = tSlice + for i := 0; i < size; i++ { + var _elem12 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem12 = v + } + p.Values = append(p.Values, _elem12) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ClearPrefix = &v + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_app_env_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop 
error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("op", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:op: ", p), err) + } + if err := oprot.WriteI32(int32(p.Op)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.op (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:op: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetKeys() { + if err := oprot.WriteFieldBegin("keys", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Keys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Keys { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:keys: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetValues() { + if err := oprot.WriteFieldBegin("values", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:values: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:values: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetClearPrefix() { + if err := oprot.WriteFieldBegin("clear_prefix", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:clear_prefix: ", p), err) + } + if err := oprot.WriteString(string(*p.ClearPrefix)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.clear_prefix (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:clear_prefix: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateAppEnvRequest(%+v)", *p) 
+} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationUpdateAppEnvResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationUpdateAppEnvResponse() *ConfigurationUpdateAppEnvResponse { + return &ConfigurationUpdateAppEnvResponse{} +} + +var ConfigurationUpdateAppEnvResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationUpdateAppEnvResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationUpdateAppEnvResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationUpdateAppEnvResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationUpdateAppEnvResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationUpdateAppEnvResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) 
ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_app_env_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := 
oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateAppEnvResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - TriggerTime +// - TargetLevel +// - Bottommost +// - MaxRunningCount +type StartAppManualCompactRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + TriggerTime *int64 `thrift:"trigger_time,2" db:"trigger_time" json:"trigger_time,omitempty"` + TargetLevel *int32 `thrift:"target_level,3" db:"target_level" json:"target_level,omitempty"` + Bottommost *bool `thrift:"bottommost,4" db:"bottommost" json:"bottommost,omitempty"` + MaxRunningCount *int32 `thrift:"max_running_count,5" db:"max_running_count" json:"max_running_count,omitempty"` +} + +func NewStartAppManualCompactRequest() *StartAppManualCompactRequest { + return &StartAppManualCompactRequest{} +} + +func (p *StartAppManualCompactRequest) GetAppName() string { + return p.AppName +} + +var StartAppManualCompactRequest_TriggerTime_DEFAULT int64 + +func (p *StartAppManualCompactRequest) GetTriggerTime() int64 { + if !p.IsSetTriggerTime() { + return StartAppManualCompactRequest_TriggerTime_DEFAULT + } + return *p.TriggerTime +} + +var StartAppManualCompactRequest_TargetLevel_DEFAULT int32 + +func (p *StartAppManualCompactRequest) GetTargetLevel() int32 { + if !p.IsSetTargetLevel() { + return StartAppManualCompactRequest_TargetLevel_DEFAULT + } + return *p.TargetLevel +} + +var StartAppManualCompactRequest_Bottommost_DEFAULT bool + +func (p *StartAppManualCompactRequest) GetBottommost() bool { + if !p.IsSetBottommost() { + return 
StartAppManualCompactRequest_Bottommost_DEFAULT + } + return *p.Bottommost +} + +var StartAppManualCompactRequest_MaxRunningCount_DEFAULT int32 + +func (p *StartAppManualCompactRequest) GetMaxRunningCount() int32 { + if !p.IsSetMaxRunningCount() { + return StartAppManualCompactRequest_MaxRunningCount_DEFAULT + } + return *p.MaxRunningCount +} +func (p *StartAppManualCompactRequest) IsSetTriggerTime() bool { + return p.TriggerTime != nil +} + +func (p *StartAppManualCompactRequest) IsSetTargetLevel() bool { + return p.TargetLevel != nil +} + +func (p *StartAppManualCompactRequest) IsSetBottommost() bool { + return p.Bottommost != nil +} + +func (p *StartAppManualCompactRequest) IsSetMaxRunningCount() bool { + return p.MaxRunningCount != nil +} + +func (p *StartAppManualCompactRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + 
if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TriggerTime = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TargetLevel = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Bottommost = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.MaxRunningCount = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_app_manual_compact_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err 
:= p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartAppManualCompactRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *StartAppManualCompactRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTriggerTime() { + if err := oprot.WriteFieldBegin("trigger_time", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:trigger_time: ", p), err) + } + if err := oprot.WriteI64(int64(*p.TriggerTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trigger_time (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:trigger_time: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTargetLevel() { + if err := oprot.WriteFieldBegin("target_level", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:target_level: ", p), 
err) + } + if err := oprot.WriteI32(int32(*p.TargetLevel)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.target_level (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:target_level: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBottommost() { + if err := oprot.WriteFieldBegin("bottommost", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:bottommost: ", p), err) + } + if err := oprot.WriteBool(bool(*p.Bottommost)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.bottommost (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:bottommost: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxRunningCount() { + if err := oprot.WriteFieldBegin("max_running_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:max_running_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.MaxRunningCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_running_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:max_running_count: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartAppManualCompactRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type StartAppManualCompactResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func 
NewStartAppManualCompactResponse() *StartAppManualCompactResponse { + return &StartAppManualCompactResponse{} +} + +var StartAppManualCompactResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartAppManualCompactResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartAppManualCompactResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartAppManualCompactResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *StartAppManualCompactResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartAppManualCompactResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartAppManualCompactResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartAppManualCompactResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err 
:= iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *StartAppManualCompactResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_app_manual_compact_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartAppManualCompactResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartAppManualCompactResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *StartAppManualCompactResponse) String() string { + if p == nil { + 
return "" + } + return fmt.Sprintf("StartAppManualCompactResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type QueryAppManualCompactRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewQueryAppManualCompactRequest() *QueryAppManualCompactRequest { + return &QueryAppManualCompactRequest{} +} + +func (p *QueryAppManualCompactRequest) GetAppName() string { + return p.AppName +} +func (p *QueryAppManualCompactRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppManualCompactRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryAppManualCompactRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_manual_compact_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return 
thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppManualCompactRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryAppManualCompactRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppManualCompactRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +// - Progress +type QueryAppManualCompactResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` + Progress *int32 `thrift:"progress,3" db:"progress" json:"progress,omitempty"` +} + +func NewQueryAppManualCompactResponse() *QueryAppManualCompactResponse { + return &QueryAppManualCompactResponse{} +} + +var QueryAppManualCompactResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryAppManualCompactResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryAppManualCompactResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryAppManualCompactResponse) GetHintMsg() string { + return p.HintMsg +} + +var QueryAppManualCompactResponse_Progress_DEFAULT int32 + +func (p *QueryAppManualCompactResponse) GetProgress() int32 { + if !p.IsSetProgress() { + return QueryAppManualCompactResponse_Progress_DEFAULT + } + return *p.Progress +} +func (p *QueryAppManualCompactResponse) IsSetErr() bool { + return p.Err != nil +} + 
+func (p *QueryAppManualCompactResponse) IsSetProgress() bool { + return p.Progress != nil +} + +func (p *QueryAppManualCompactResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppManualCompactResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryAppManualCompactResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *QueryAppManualCompactResponse) ReadField3(iprot thrift.TProtocol) error { + 
if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Progress = &v + } + return nil +} + +func (p *QueryAppManualCompactResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_manual_compact_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppManualCompactResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryAppManualCompactResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + 
+func (p *QueryAppManualCompactResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetProgress() { + if err := oprot.WriteFieldBegin("progress", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progress: ", p), err) + } + if err := oprot.WriteI32(int32(*p.Progress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.progress (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progress: ", p), err) + } + } + return err +} + +func (p *QueryAppManualCompactResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppManualCompactResponse(%+v)", *p) +} + +// Attributes: +// - Status +// - Node +// - HpNode +type NodeInfo struct { + Status NodeStatus `thrift:"status,1" db:"status" json:"status"` + Node *base.RPCAddress `thrift:"node,2" db:"node" json:"node"` + HpNode *base.HostPort `thrift:"hp_node,3" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewNodeInfo() *NodeInfo { + return &NodeInfo{ + Status: 0, + } +} + +func (p *NodeInfo) GetStatus() NodeStatus { + return p.Status +} + +var NodeInfo_Node_DEFAULT *base.RPCAddress + +func (p *NodeInfo) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return NodeInfo_Node_DEFAULT + } + return p.Node +} + +var NodeInfo_HpNode_DEFAULT *base.HostPort + +func (p *NodeInfo) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return NodeInfo_HpNode_DEFAULT + } + return p.HpNode +} +func (p *NodeInfo) IsSetNode() bool { + return p.Node != nil +} + +func (p *NodeInfo) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *NodeInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NodeInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := NodeStatus(v) + p.Status = temp + } + return nil +} + +func (p *NodeInfo) ReadField2(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *NodeInfo) ReadField3(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *NodeInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("node_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := 
p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NodeInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *NodeInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node: ", p), err) + } + return err +} + +func (p *NodeInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node: ", p), err) + } + } + return err +} + +func (p *NodeInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NodeInfo(%+v)", *p) +} + +// Attributes: +// - Status +type ConfigurationListNodesRequest struct { + Status NodeStatus `thrift:"status,1" db:"status" json:"status"` +} + +func NewConfigurationListNodesRequest() *ConfigurationListNodesRequest { + return &ConfigurationListNodesRequest{ + Status: 0, + } +} + +func (p *ConfigurationListNodesRequest) GetStatus() NodeStatus { + return p.Status +} +func (p *ConfigurationListNodesRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListNodesRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := NodeStatus(v) + p.Status = temp + } + return nil +} + +func (p *ConfigurationListNodesRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_list_nodes_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListNodesRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *ConfigurationListNodesRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListNodesRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Infos +type ConfigurationListNodesResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Infos []*NodeInfo `thrift:"infos,2" db:"infos" json:"infos"` +} + +func NewConfigurationListNodesResponse() *ConfigurationListNodesResponse { + return &ConfigurationListNodesResponse{} +} + +var ConfigurationListNodesResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationListNodesResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationListNodesResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationListNodesResponse) GetInfos() []*NodeInfo { + return p.Infos +} +func (p *ConfigurationListNodesResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationListNodesResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*NodeInfo, 0, size) + p.Infos = tSlice + for i := 0; i < size; i++ { + _elem13 := &NodeInfo{ + Status: 0, + } + if err := _elem13.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem13), err) + } + p.Infos = append(p.Infos, _elem13) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("configuration_list_nodes_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationListNodesResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("infos", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Infos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Infos { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infos: ", p), err) + } + return err +} + +func (p *ConfigurationListNodesResponse) String() string { + if 
p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListNodesResponse(%+v)", *p) +} + +type ConfigurationClusterInfoRequest struct { +} + +func NewConfigurationClusterInfoRequest() *ConfigurationClusterInfoRequest { + return &ConfigurationClusterInfoRequest{} +} + +func (p *ConfigurationClusterInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationClusterInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_cluster_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationClusterInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Keys +// - Values +type ConfigurationClusterInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Keys []string `thrift:"keys,2" db:"keys" json:"keys"` + Values []string `thrift:"values,3" db:"values" json:"values"` +} + +func 
NewConfigurationClusterInfoResponse() *ConfigurationClusterInfoResponse { + return &ConfigurationClusterInfoResponse{} +} + +var ConfigurationClusterInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationClusterInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationClusterInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationClusterInfoResponse) GetKeys() []string { + return p.Keys +} + +func (p *ConfigurationClusterInfoResponse) GetValues() []string { + return p.Values +} +func (p *ConfigurationClusterInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationClusterInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) 
ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Keys = tSlice + for i := 0; i < size; i++ { + var _elem14 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem14 = v + } + p.Keys = append(p.Keys, _elem14) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Values = tSlice + for i := 0; i < size; i++ { + var _elem15 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem15 = v + } + p.Values = append(p.Values, _elem15) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_cluster_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + 
return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationClusterInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("keys", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Keys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Keys { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:keys: ", p), err) + } + return err +} + +func (p *ConfigurationClusterInfoResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("values", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:values: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:values: ", p), err) + } + return err +} + +func (p *ConfigurationClusterInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationClusterInfoResponse(%+v)", *p) +} + +// Attributes: +// - Level +type ConfigurationMetaControlRequest struct { + Level MetaFunctionLevel `thrift:"level,1" db:"level" json:"level"` +} + +func NewConfigurationMetaControlRequest() *ConfigurationMetaControlRequest { + return &ConfigurationMetaControlRequest{} +} + +func (p *ConfigurationMetaControlRequest) GetLevel() MetaFunctionLevel { + return p.Level +} +func (p *ConfigurationMetaControlRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationMetaControlRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := MetaFunctionLevel(v) + p.Level = temp + } + return nil +} + +func (p *ConfigurationMetaControlRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_meta_control_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationMetaControlRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("level", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:level: ", p), err) + } + if err := oprot.WriteI32(int32(p.Level)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.level (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:level: ", p), err) + } + return err +} + +func (p *ConfigurationMetaControlRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationMetaControlRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - OldLevel +type ConfigurationMetaControlResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + OldLevel MetaFunctionLevel `thrift:"old_level,2" db:"old_level" json:"old_level"` +} + +func NewConfigurationMetaControlResponse() *ConfigurationMetaControlResponse { + return &ConfigurationMetaControlResponse{} +} + +var ConfigurationMetaControlResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationMetaControlResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationMetaControlResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationMetaControlResponse) GetOldLevel() MetaFunctionLevel { + return p.OldLevel +} +func (p *ConfigurationMetaControlResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationMetaControlResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := 
iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationMetaControlResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationMetaControlResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := MetaFunctionLevel(v) + p.OldLevel = temp + } + return nil +} + +func (p *ConfigurationMetaControlResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_meta_control_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationMetaControlResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationMetaControlResponse) 
writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("old_level", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:old_level: ", p), err) + } + if err := oprot.WriteI32(int32(p.OldLevel)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_level (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:old_level: ", p), err) + } + return err +} + +func (p *ConfigurationMetaControlResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationMetaControlResponse(%+v)", *p) +} + +// Attributes: +// - Target +// - Node +// - Type +// - HpTarget +// - HpNode +type ConfigurationProposalAction struct { + Target *base.RPCAddress `thrift:"target,1" db:"target" json:"target"` + Node *base.RPCAddress `thrift:"node,2" db:"node" json:"node"` + Type ConfigType `thrift:"type,3" db:"type" json:"type"` + // unused field # 4 + HpTarget *base.HostPort `thrift:"hp_target,5" db:"hp_target" json:"hp_target,omitempty"` + HpNode *base.HostPort `thrift:"hp_node,6" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewConfigurationProposalAction() *ConfigurationProposalAction { + return &ConfigurationProposalAction{} +} + +var ConfigurationProposalAction_Target_DEFAULT *base.RPCAddress + +func (p *ConfigurationProposalAction) GetTarget() *base.RPCAddress { + if !p.IsSetTarget() { + return ConfigurationProposalAction_Target_DEFAULT + } + return p.Target +} + +var ConfigurationProposalAction_Node_DEFAULT *base.RPCAddress + +func (p *ConfigurationProposalAction) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return ConfigurationProposalAction_Node_DEFAULT + } + return p.Node +} + +func (p *ConfigurationProposalAction) GetType() ConfigType { + return p.Type +} + +var ConfigurationProposalAction_HpTarget_DEFAULT *base.HostPort + +func (p *ConfigurationProposalAction) 
GetHpTarget() *base.HostPort { + if !p.IsSetHpTarget() { + return ConfigurationProposalAction_HpTarget_DEFAULT + } + return p.HpTarget +} + +var ConfigurationProposalAction_HpNode_DEFAULT *base.HostPort + +func (p *ConfigurationProposalAction) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return ConfigurationProposalAction_HpNode_DEFAULT + } + return p.HpNode +} +func (p *ConfigurationProposalAction) IsSetTarget() bool { + return p.Target != nil +} + +func (p *ConfigurationProposalAction) IsSetNode() bool { + return p.Node != nil +} + +func (p *ConfigurationProposalAction) IsSetHpTarget() bool { + return p.HpTarget != nil +} + +func (p *ConfigurationProposalAction) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *ConfigurationProposalAction) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := 
p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField1(iprot thrift.TProtocol) error { + p.Target = &base.RPCAddress{} + if err := p.Target.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Target), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField2(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := ConfigType(v) + p.Type = temp + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField5(iprot thrift.TProtocol) error { + p.HpTarget = &base.HostPort{} + if err := p.HpTarget.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpTarget), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField6(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *ConfigurationProposalAction) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_proposal_action"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", 
p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationProposalAction) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:target: ", p), err) + } + if err := p.Target.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:target: ", p), err) + } + return err +} + +func (p *ConfigurationProposalAction) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node: ", p), err) + } + return err +} + +func (p *ConfigurationProposalAction) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:type: ", p), err) + } + if err := 
oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) + } + return err +} + +func (p *ConfigurationProposalAction) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpTarget() { + if err := oprot.WriteFieldBegin("hp_target", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_target: ", p), err) + } + if err := p.HpTarget.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpTarget), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_target: ", p), err) + } + } + return err +} + +func (p *ConfigurationProposalAction) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:hp_node: ", p), err) + } + } + return err +} + +func (p *ConfigurationProposalAction) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationProposalAction(%+v)", *p) +} + +// Attributes: +// - Gpid +// - ActionList +// - Force +// - BalanceType +type ConfigurationBalancerRequest struct { + Gpid *base.Gpid `thrift:"gpid,1" db:"gpid" json:"gpid"` + ActionList []*ConfigurationProposalAction `thrift:"action_list,2" db:"action_list" json:"action_list"` + Force bool `thrift:"force,3" db:"force" json:"force"` + BalanceType 
*BalancerRequestType `thrift:"balance_type,4" db:"balance_type" json:"balance_type,omitempty"` +} + +func NewConfigurationBalancerRequest() *ConfigurationBalancerRequest { + return &ConfigurationBalancerRequest{} +} + +var ConfigurationBalancerRequest_Gpid_DEFAULT *base.Gpid + +func (p *ConfigurationBalancerRequest) GetGpid() *base.Gpid { + if !p.IsSetGpid() { + return ConfigurationBalancerRequest_Gpid_DEFAULT + } + return p.Gpid +} + +func (p *ConfigurationBalancerRequest) GetActionList() []*ConfigurationProposalAction { + return p.ActionList +} + +var ConfigurationBalancerRequest_Force_DEFAULT bool = false + +func (p *ConfigurationBalancerRequest) GetForce() bool { + return p.Force +} + +var ConfigurationBalancerRequest_BalanceType_DEFAULT BalancerRequestType + +func (p *ConfigurationBalancerRequest) GetBalanceType() BalancerRequestType { + if !p.IsSetBalanceType() { + return ConfigurationBalancerRequest_BalanceType_DEFAULT + } + return *p.BalanceType +} +func (p *ConfigurationBalancerRequest) IsSetGpid() bool { + return p.Gpid != nil +} + +func (p *ConfigurationBalancerRequest) IsSetForce() bool { + return p.Force != ConfigurationBalancerRequest_Force_DEFAULT +} + +func (p *ConfigurationBalancerRequest) IsSetBalanceType() bool { + return p.BalanceType != nil +} + +func (p *ConfigurationBalancerRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); 
err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField1(iprot thrift.TProtocol) error { + p.Gpid = &base.Gpid{} + if err := p.Gpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Gpid), err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ConfigurationProposalAction, 0, size) + p.ActionList = tSlice + for i := 0; i < size; i++ { + _elem16 := &ConfigurationProposalAction{} + if err := _elem16.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem16), err) + } + p.ActionList = append(p.ActionList, _elem16) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Force = v + } + return nil +} + +func (p 
*ConfigurationBalancerRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := BalancerRequestType(v) + p.BalanceType = &temp + } + return nil +} + +func (p *ConfigurationBalancerRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_balancer_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("gpid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:gpid: ", p), err) + } + if err := p.Gpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Gpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:gpid: ", p), err) + } + return err +} + +func (p *ConfigurationBalancerRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("action_list", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:action_list: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ActionList)); err != nil { + return thrift.PrependError("error writing list begin: ", 
err) + } + for _, v := range p.ActionList { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:action_list: ", p), err) + } + return err +} + +func (p *ConfigurationBalancerRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetForce() { + if err := oprot.WriteFieldBegin("force", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:force: ", p), err) + } + if err := oprot.WriteBool(bool(p.Force)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.force (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:force: ", p), err) + } + } + return err +} + +func (p *ConfigurationBalancerRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBalanceType() { + if err := oprot.WriteFieldBegin("balance_type", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:balance_type: ", p), err) + } + if err := oprot.WriteI32(int32(*p.BalanceType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.balance_type (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:balance_type: ", p), err) + } + } + return err +} + +func (p *ConfigurationBalancerRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationBalancerRequest(%+v)", *p) +} + +// Attributes: +// - Err +type ConfigurationBalancerResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewConfigurationBalancerResponse() 
*ConfigurationBalancerResponse { + return &ConfigurationBalancerResponse{} +} + +var ConfigurationBalancerResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationBalancerResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationBalancerResponse_Err_DEFAULT + } + return p.Err +} +func (p *ConfigurationBalancerResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationBalancerResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationBalancerResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationBalancerResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_balancer_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return 
thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationBalancerResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationBalancerResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationBalancerResponse(%+v)", *p) +} + +// Attributes: +// - Pid +type DddDiagnoseRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` +} + +func NewDddDiagnoseRequest() *DddDiagnoseRequest { + return &DddDiagnoseRequest{} +} + +var DddDiagnoseRequest_Pid_DEFAULT *base.Gpid + +func (p *DddDiagnoseRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return DddDiagnoseRequest_Pid_DEFAULT + } + return p.Pid +} +func (p *DddDiagnoseRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *DddDiagnoseRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + 
if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddDiagnoseRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *DddDiagnoseRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_diagnose_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddDiagnoseRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *DddDiagnoseRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddDiagnoseRequest(%+v)", *p) +} + +// Attributes: +// - Node +// - DropTimeMs +// - IsAlive +// - IsCollected +// - Ballot +// - LastCommittedDecree +// - LastPreparedDecree +// - HpNode +type DddNodeInfo struct { + Node *base.RPCAddress `thrift:"node,1" db:"node" 
json:"node"` + DropTimeMs int64 `thrift:"drop_time_ms,2" db:"drop_time_ms" json:"drop_time_ms"` + IsAlive bool `thrift:"is_alive,3" db:"is_alive" json:"is_alive"` + IsCollected bool `thrift:"is_collected,4" db:"is_collected" json:"is_collected"` + Ballot int64 `thrift:"ballot,5" db:"ballot" json:"ballot"` + LastCommittedDecree int64 `thrift:"last_committed_decree,6" db:"last_committed_decree" json:"last_committed_decree"` + LastPreparedDecree int64 `thrift:"last_prepared_decree,7" db:"last_prepared_decree" json:"last_prepared_decree"` + HpNode *base.HostPort `thrift:"hp_node,8" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewDddNodeInfo() *DddNodeInfo { + return &DddNodeInfo{} +} + +var DddNodeInfo_Node_DEFAULT *base.RPCAddress + +func (p *DddNodeInfo) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return DddNodeInfo_Node_DEFAULT + } + return p.Node +} + +func (p *DddNodeInfo) GetDropTimeMs() int64 { + return p.DropTimeMs +} + +func (p *DddNodeInfo) GetIsAlive() bool { + return p.IsAlive +} + +func (p *DddNodeInfo) GetIsCollected() bool { + return p.IsCollected +} + +func (p *DddNodeInfo) GetBallot() int64 { + return p.Ballot +} + +func (p *DddNodeInfo) GetLastCommittedDecree() int64 { + return p.LastCommittedDecree +} + +func (p *DddNodeInfo) GetLastPreparedDecree() int64 { + return p.LastPreparedDecree +} + +var DddNodeInfo_HpNode_DEFAULT *base.HostPort + +func (p *DddNodeInfo) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return DddNodeInfo_HpNode_DEFAULT + } + return p.HpNode +} +func (p *DddNodeInfo) IsSetNode() bool { + return p.Node != nil +} + +func (p *DddNodeInfo) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *DddNodeInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d 
read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddNodeInfo) ReadField1(iprot thrift.TProtocol) error { + p.Node = 
&base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *DddNodeInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.DropTimeMs = v + } + return nil +} + +func (p *DddNodeInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.IsAlive = v + } + return nil +} + +func (p *DddNodeInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.IsCollected = v + } + return nil +} + +func (p *DddNodeInfo) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *DddNodeInfo) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.LastCommittedDecree = v + } + return nil +} + +func (p *DddNodeInfo) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.LastPreparedDecree = v + } + return nil +} + +func (p *DddNodeInfo) ReadField8(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *DddNodeInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_node_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p 
!= nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddNodeInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("drop_time_ms", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:drop_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.DropTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.drop_time_ms (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:drop_time_ms: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin("is_alive", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_alive: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsAlive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_alive (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:is_alive: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_collected", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:is_collected: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsCollected)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_collected (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:is_collected: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:ballot: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_committed_decree", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:last_committed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastCommittedDecree)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.last_committed_decree (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:last_committed_decree: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_prepared_decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:last_prepared_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastPreparedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_prepared_decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:last_prepared_decree: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hp_node: ", p), err) + } + } + return err +} + +func (p *DddNodeInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddNodeInfo(%+v)", *p) +} + +// Attributes: +// - Config +// - Dropped +// - Reason +type DddPartitionInfo struct { + Config *replication.PartitionConfiguration `thrift:"config,1" db:"config" json:"config"` + Dropped []*DddNodeInfo `thrift:"dropped,2" db:"dropped" json:"dropped"` + Reason string `thrift:"reason,3" db:"reason" json:"reason"` +} + +func NewDddPartitionInfo() *DddPartitionInfo { + return &DddPartitionInfo{} +} + +var 
DddPartitionInfo_Config_DEFAULT *replication.PartitionConfiguration + +func (p *DddPartitionInfo) GetConfig() *replication.PartitionConfiguration { + if !p.IsSetConfig() { + return DddPartitionInfo_Config_DEFAULT + } + return p.Config +} + +func (p *DddPartitionInfo) GetDropped() []*DddNodeInfo { + return p.Dropped +} + +func (p *DddPartitionInfo) GetReason() string { + return p.Reason +} +func (p *DddPartitionInfo) IsSetConfig() bool { + return p.Config != nil +} + +func (p *DddPartitionInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddPartitionInfo) ReadField1(iprot thrift.TProtocol) error { + p.Config = &replication.PartitionConfiguration{} + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error 
reading struct: ", p.Config), err) + } + return nil +} + +func (p *DddPartitionInfo) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DddNodeInfo, 0, size) + p.Dropped = tSlice + for i := 0; i < size; i++ { + _elem17 := &DddNodeInfo{} + if err := _elem17.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) + } + p.Dropped = append(p.Dropped, _elem17) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *DddPartitionInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Reason = v + } + return nil +} + +func (p *DddPartitionInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_partition_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddPartitionInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:config: ", p), err) + } + return err +} + +func (p *DddPartitionInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dropped", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dropped: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Dropped)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Dropped { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dropped: ", p), err) + } + return err +} + +func (p *DddPartitionInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("reason", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:reason: ", p), err) + } + if err := oprot.WriteString(string(p.Reason)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.reason (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:reason: ", p), err) + } + return err +} + +func (p *DddPartitionInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddPartitionInfo(%+v)", *p) +} + +// Attributes: +// - Err +// - Partitions +type DddDiagnoseResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Partitions []*DddPartitionInfo `thrift:"partitions,2" db:"partitions" json:"partitions"` +} + +func NewDddDiagnoseResponse() *DddDiagnoseResponse { + return &DddDiagnoseResponse{} +} + +var 
DddDiagnoseResponse_Err_DEFAULT *base.ErrorCode + +func (p *DddDiagnoseResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DddDiagnoseResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DddDiagnoseResponse) GetPartitions() []*DddPartitionInfo { + return p.Partitions +} +func (p *DddDiagnoseResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DddDiagnoseResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddDiagnoseResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DddDiagnoseResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DddPartitionInfo, 0, size) + p.Partitions = 
tSlice + for i := 0; i < size; i++ { + _elem18 := &DddPartitionInfo{} + if err := _elem18.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) + } + p.Partitions = append(p.Partitions, _elem18) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *DddDiagnoseResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_diagnose_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddDiagnoseResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DddDiagnoseResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partitions: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Partitions { + if err 
:= v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partitions: ", p), err) + } + return err +} + +func (p *DddDiagnoseResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddDiagnoseResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type ConfigurationGetMaxReplicaCountRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewConfigurationGetMaxReplicaCountRequest() *ConfigurationGetMaxReplicaCountRequest { + return &ConfigurationGetMaxReplicaCountRequest{} +} + +func (p *ConfigurationGetMaxReplicaCountRequest) GetAppName() string { + return p.AppName +} +func (p *ConfigurationGetMaxReplicaCountRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_get_max_replica_count_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationGetMaxReplicaCountRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - MaxReplicaCount +// - HintMessage +type ConfigurationGetMaxReplicaCountResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + MaxReplicaCount int32 `thrift:"max_replica_count,2" db:"max_replica_count" json:"max_replica_count"` + HintMessage string `thrift:"hint_message,3" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationGetMaxReplicaCountResponse() *ConfigurationGetMaxReplicaCountResponse { + return 
&ConfigurationGetMaxReplicaCountResponse{} +} + +var ConfigurationGetMaxReplicaCountResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationGetMaxReplicaCountResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationGetMaxReplicaCountResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +func (p *ConfigurationGetMaxReplicaCountResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationGetMaxReplicaCountResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) 
ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_get_max_replica_count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) 
+ } + return err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:max_replica_count: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationGetMaxReplicaCountResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - MaxReplicaCount +type ConfigurationSetMaxReplicaCountRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + MaxReplicaCount int32 `thrift:"max_replica_count,2" db:"max_replica_count" json:"max_replica_count"` +} + +func NewConfigurationSetMaxReplicaCountRequest() *ConfigurationSetMaxReplicaCountRequest { + return &ConfigurationSetMaxReplicaCountRequest{} +} + +func (p *ConfigurationSetMaxReplicaCountRequest) GetAppName() string { + return p.AppName +} + +func (p 
*ConfigurationSetMaxReplicaCountRequest) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} +func (p *ConfigurationSetMaxReplicaCountRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_set_max_replica_count_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", 
p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:max_replica_count: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationSetMaxReplicaCountRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - OldMaxReplicaCount +// - HintMessage +type ConfigurationSetMaxReplicaCountResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + 
OldMaxReplicaCount int32 `thrift:"old_max_replica_count,2" db:"old_max_replica_count" json:"old_max_replica_count"` + HintMessage string `thrift:"hint_message,3" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationSetMaxReplicaCountResponse() *ConfigurationSetMaxReplicaCountResponse { + return &ConfigurationSetMaxReplicaCountResponse{} +} + +var ConfigurationSetMaxReplicaCountResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationSetMaxReplicaCountResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationSetMaxReplicaCountResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) GetOldMaxReplicaCount() int32 { + return p.OldMaxReplicaCount +} + +func (p *ConfigurationSetMaxReplicaCountResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationSetMaxReplicaCountResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.OldMaxReplicaCount = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_set_max_replica_count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("old_max_replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:old_max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.OldMaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_max_replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:old_max_replica_count: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationSetMaxReplicaCountResponse(%+v)", *p) +} + +type AdminClient interface { + // Parameters: + // - Req + CreateApp(ctx context.Context, req *ConfigurationCreateAppRequest) (r *ConfigurationCreateAppResponse, err error) + // 
Parameters: + // - Req + DropApp(ctx context.Context, req *ConfigurationDropAppRequest) (r *ConfigurationDropAppResponse, err error) + // Parameters: + // - Req + RecallApp(ctx context.Context, req *ConfigurationRecallAppRequest) (r *ConfigurationRecallAppResponse, err error) + // Parameters: + // - Req + ListApps(ctx context.Context, req *ConfigurationListAppsRequest) (r *ConfigurationListAppsResponse, err error) + // Parameters: + // - Req + AddDuplication(ctx context.Context, req *DuplicationAddRequest) (r *DuplicationAddResponse, err error) + // Parameters: + // - Req + QueryDuplication(ctx context.Context, req *DuplicationQueryRequest) (r *DuplicationQueryResponse, err error) + // Parameters: + // - Req + ModifyDuplication(ctx context.Context, req *DuplicationModifyRequest) (r *DuplicationModifyResponse, err error) + // Parameters: + // - Req + QueryAppInfo(ctx context.Context, req *QueryAppInfoRequest) (r *QueryAppInfoResponse, err error) + // Parameters: + // - Req + UpdateAppEnv(ctx context.Context, req *ConfigurationUpdateAppEnvRequest) (r *ConfigurationUpdateAppEnvResponse, err error) + // Parameters: + // - Req + ListNodes(ctx context.Context, req *ConfigurationListNodesRequest) (r *ConfigurationListNodesResponse, err error) + // Parameters: + // - Req + QueryClusterInfo(ctx context.Context, req *ConfigurationClusterInfoRequest) (r *ConfigurationClusterInfoResponse, err error) + // Parameters: + // - Req + MetaControl(ctx context.Context, req *ConfigurationMetaControlRequest) (r *ConfigurationMetaControlResponse, err error) + // Parameters: + // - Req + QueryBackupPolicy(ctx context.Context, req *ConfigurationQueryBackupPolicyRequest) (r *ConfigurationQueryBackupPolicyResponse, err error) + // Parameters: + // - Req + Balance(ctx context.Context, req *ConfigurationBalancerRequest) (r *ConfigurationBalancerResponse, err error) + // Parameters: + // - Req + StartBackupApp(ctx context.Context, req *StartBackupAppRequest) (r *StartBackupAppResponse, err 
error) + // Parameters: + // - Req + QueryBackupStatus(ctx context.Context, req *QueryBackupStatusRequest) (r *QueryBackupStatusResponse, err error) + // Parameters: + // - Req + RestoreApp(ctx context.Context, req *ConfigurationRestoreRequest) (r *ConfigurationCreateAppResponse, err error) + // Parameters: + // - Req + StartPartitionSplit(ctx context.Context, req *StartPartitionSplitRequest) (r *StartPartitionSplitResponse, err error) + // Parameters: + // - Req + QuerySplitStatus(ctx context.Context, req *QuerySplitRequest) (r *QuerySplitResponse, err error) + // Parameters: + // - Req + ControlPartitionSplit(ctx context.Context, req *ControlSplitRequest) (r *ControlSplitResponse, err error) + // Parameters: + // - Req + StartBulkLoad(ctx context.Context, req *StartBulkLoadRequest) (r *StartBulkLoadResponse, err error) + // Parameters: + // - Req + QueryBulkLoadStatus(ctx context.Context, req *QueryBulkLoadRequest) (r *QueryBulkLoadResponse, err error) + // Parameters: + // - Req + ControlBulkLoad(ctx context.Context, req *ControlBulkLoadRequest) (r *ControlBulkLoadResponse, err error) + // Parameters: + // - Req + ClearBulkLoad(ctx context.Context, req *ClearBulkLoadStateRequest) (r *ClearBulkLoadStateResponse, err error) + // Parameters: + // - Req + StartManualCompact(ctx context.Context, req *StartAppManualCompactRequest) (r *StartAppManualCompactResponse, err error) + // Parameters: + // - Req + QueryManualCompact(ctx context.Context, req *QueryAppManualCompactRequest) (r *QueryAppManualCompactResponse, err error) +} + +type AdminClientClient struct { + c thrift.TClient +} + +func NewAdminClientClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AdminClientClient { + return &AdminClientClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewAdminClientClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AdminClientClient { + return &AdminClientClient{ + c: 
thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAdminClientClient(c thrift.TClient) *AdminClientClient { + return &AdminClientClient{ + c: c, + } +} + +func (p *AdminClientClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Req +func (p *AdminClientClient) CreateApp(ctx context.Context, req *ConfigurationCreateAppRequest) (r *ConfigurationCreateAppResponse, err error) { + var _args19 AdminClientCreateAppArgs + _args19.Req = req + var _result20 AdminClientCreateAppResult + if err = p.Client_().Call(ctx, "create_app", &_args19, &_result20); err != nil { + return + } + return _result20.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) DropApp(ctx context.Context, req *ConfigurationDropAppRequest) (r *ConfigurationDropAppResponse, err error) { + var _args21 AdminClientDropAppArgs + _args21.Req = req + var _result22 AdminClientDropAppResult + if err = p.Client_().Call(ctx, "drop_app", &_args21, &_result22); err != nil { + return + } + return _result22.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) RecallApp(ctx context.Context, req *ConfigurationRecallAppRequest) (r *ConfigurationRecallAppResponse, err error) { + var _args23 AdminClientRecallAppArgs + _args23.Req = req + var _result24 AdminClientRecallAppResult + if err = p.Client_().Call(ctx, "recall_app", &_args23, &_result24); err != nil { + return + } + return _result24.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ListApps(ctx context.Context, req *ConfigurationListAppsRequest) (r *ConfigurationListAppsResponse, err error) { + var _args25 AdminClientListAppsArgs + _args25.Req = req + var _result26 AdminClientListAppsResult + if err = p.Client_().Call(ctx, "list_apps", &_args25, &_result26); err != nil { + return + } + return _result26.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) AddDuplication(ctx context.Context, req *DuplicationAddRequest) (r 
*DuplicationAddResponse, err error) { + var _args27 AdminClientAddDuplicationArgs + _args27.Req = req + var _result28 AdminClientAddDuplicationResult + if err = p.Client_().Call(ctx, "add_duplication", &_args27, &_result28); err != nil { + return + } + return _result28.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryDuplication(ctx context.Context, req *DuplicationQueryRequest) (r *DuplicationQueryResponse, err error) { + var _args29 AdminClientQueryDuplicationArgs + _args29.Req = req + var _result30 AdminClientQueryDuplicationResult + if err = p.Client_().Call(ctx, "query_duplication", &_args29, &_result30); err != nil { + return + } + return _result30.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ModifyDuplication(ctx context.Context, req *DuplicationModifyRequest) (r *DuplicationModifyResponse, err error) { + var _args31 AdminClientModifyDuplicationArgs + _args31.Req = req + var _result32 AdminClientModifyDuplicationResult + if err = p.Client_().Call(ctx, "modify_duplication", &_args31, &_result32); err != nil { + return + } + return _result32.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryAppInfo(ctx context.Context, req *QueryAppInfoRequest) (r *QueryAppInfoResponse, err error) { + var _args33 AdminClientQueryAppInfoArgs + _args33.Req = req + var _result34 AdminClientQueryAppInfoResult + if err = p.Client_().Call(ctx, "query_app_info", &_args33, &_result34); err != nil { + return + } + return _result34.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) UpdateAppEnv(ctx context.Context, req *ConfigurationUpdateAppEnvRequest) (r *ConfigurationUpdateAppEnvResponse, err error) { + var _args35 AdminClientUpdateAppEnvArgs + _args35.Req = req + var _result36 AdminClientUpdateAppEnvResult + if err = p.Client_().Call(ctx, "update_app_env", &_args35, &_result36); err != nil { + return + } + return _result36.GetSuccess(), nil +} + +// 
Parameters: +// - Req +func (p *AdminClientClient) ListNodes(ctx context.Context, req *ConfigurationListNodesRequest) (r *ConfigurationListNodesResponse, err error) { + var _args37 AdminClientListNodesArgs + _args37.Req = req + var _result38 AdminClientListNodesResult + if err = p.Client_().Call(ctx, "list_nodes", &_args37, &_result38); err != nil { + return + } + return _result38.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryClusterInfo(ctx context.Context, req *ConfigurationClusterInfoRequest) (r *ConfigurationClusterInfoResponse, err error) { + var _args39 AdminClientQueryClusterInfoArgs + _args39.Req = req + var _result40 AdminClientQueryClusterInfoResult + if err = p.Client_().Call(ctx, "query_cluster_info", &_args39, &_result40); err != nil { + return + } + return _result40.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) MetaControl(ctx context.Context, req *ConfigurationMetaControlRequest) (r *ConfigurationMetaControlResponse, err error) { + var _args41 AdminClientMetaControlArgs + _args41.Req = req + var _result42 AdminClientMetaControlResult + if err = p.Client_().Call(ctx, "meta_control", &_args41, &_result42); err != nil { + return + } + return _result42.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryBackupPolicy(ctx context.Context, req *ConfigurationQueryBackupPolicyRequest) (r *ConfigurationQueryBackupPolicyResponse, err error) { + var _args43 AdminClientQueryBackupPolicyArgs + _args43.Req = req + var _result44 AdminClientQueryBackupPolicyResult + if err = p.Client_().Call(ctx, "query_backup_policy", &_args43, &_result44); err != nil { + return + } + return _result44.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) Balance(ctx context.Context, req *ConfigurationBalancerRequest) (r *ConfigurationBalancerResponse, err error) { + var _args45 AdminClientBalanceArgs + _args45.Req = req + var _result46 AdminClientBalanceResult 
+ if err = p.Client_().Call(ctx, "balance", &_args45, &_result46); err != nil { + return + } + return _result46.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartBackupApp(ctx context.Context, req *StartBackupAppRequest) (r *StartBackupAppResponse, err error) { + var _args47 AdminClientStartBackupAppArgs + _args47.Req = req + var _result48 AdminClientStartBackupAppResult + if err = p.Client_().Call(ctx, "start_backup_app", &_args47, &_result48); err != nil { + return + } + return _result48.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryBackupStatus(ctx context.Context, req *QueryBackupStatusRequest) (r *QueryBackupStatusResponse, err error) { + var _args49 AdminClientQueryBackupStatusArgs + _args49.Req = req + var _result50 AdminClientQueryBackupStatusResult + if err = p.Client_().Call(ctx, "query_backup_status", &_args49, &_result50); err != nil { + return + } + return _result50.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) RestoreApp(ctx context.Context, req *ConfigurationRestoreRequest) (r *ConfigurationCreateAppResponse, err error) { + var _args51 AdminClientRestoreAppArgs + _args51.Req = req + var _result52 AdminClientRestoreAppResult + if err = p.Client_().Call(ctx, "restore_app", &_args51, &_result52); err != nil { + return + } + return _result52.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartPartitionSplit(ctx context.Context, req *StartPartitionSplitRequest) (r *StartPartitionSplitResponse, err error) { + var _args53 AdminClientStartPartitionSplitArgs + _args53.Req = req + var _result54 AdminClientStartPartitionSplitResult + if err = p.Client_().Call(ctx, "start_partition_split", &_args53, &_result54); err != nil { + return + } + return _result54.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QuerySplitStatus(ctx context.Context, req *QuerySplitRequest) (r *QuerySplitResponse, err error) { + 
var _args55 AdminClientQuerySplitStatusArgs + _args55.Req = req + var _result56 AdminClientQuerySplitStatusResult + if err = p.Client_().Call(ctx, "query_split_status", &_args55, &_result56); err != nil { + return + } + return _result56.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ControlPartitionSplit(ctx context.Context, req *ControlSplitRequest) (r *ControlSplitResponse, err error) { + var _args57 AdminClientControlPartitionSplitArgs + _args57.Req = req + var _result58 AdminClientControlPartitionSplitResult + if err = p.Client_().Call(ctx, "control_partition_split", &_args57, &_result58); err != nil { + return + } + return _result58.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartBulkLoad(ctx context.Context, req *StartBulkLoadRequest) (r *StartBulkLoadResponse, err error) { + var _args59 AdminClientStartBulkLoadArgs + _args59.Req = req + var _result60 AdminClientStartBulkLoadResult + if err = p.Client_().Call(ctx, "start_bulk_load", &_args59, &_result60); err != nil { + return + } + return _result60.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryBulkLoadStatus(ctx context.Context, req *QueryBulkLoadRequest) (r *QueryBulkLoadResponse, err error) { + var _args61 AdminClientQueryBulkLoadStatusArgs + _args61.Req = req + var _result62 AdminClientQueryBulkLoadStatusResult + if err = p.Client_().Call(ctx, "query_bulk_load_status", &_args61, &_result62); err != nil { + return + } + return _result62.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ControlBulkLoad(ctx context.Context, req *ControlBulkLoadRequest) (r *ControlBulkLoadResponse, err error) { + var _args63 AdminClientControlBulkLoadArgs + _args63.Req = req + var _result64 AdminClientControlBulkLoadResult + if err = p.Client_().Call(ctx, "control_bulk_load", &_args63, &_result64); err != nil { + return + } + return _result64.GetSuccess(), nil +} + +// Parameters: +// - Req +func 
(p *AdminClientClient) ClearBulkLoad(ctx context.Context, req *ClearBulkLoadStateRequest) (r *ClearBulkLoadStateResponse, err error) { + var _args65 AdminClientClearBulkLoadArgs + _args65.Req = req + var _result66 AdminClientClearBulkLoadResult + if err = p.Client_().Call(ctx, "clear_bulk_load", &_args65, &_result66); err != nil { + return + } + return _result66.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartManualCompact(ctx context.Context, req *StartAppManualCompactRequest) (r *StartAppManualCompactResponse, err error) { + var _args67 AdminClientStartManualCompactArgs + _args67.Req = req + var _result68 AdminClientStartManualCompactResult + if err = p.Client_().Call(ctx, "start_manual_compact", &_args67, &_result68); err != nil { + return + } + return _result68.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryManualCompact(ctx context.Context, req *QueryAppManualCompactRequest) (r *QueryAppManualCompactResponse, err error) { + var _args69 AdminClientQueryManualCompactArgs + _args69.Req = req + var _result70 AdminClientQueryManualCompactResult + if err = p.Client_().Call(ctx, "query_manual_compact", &_args69, &_result70); err != nil { + return + } + return _result70.GetSuccess(), nil +} + +type AdminClientProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler AdminClient +} + +func (p *AdminClientProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AdminClientProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AdminClientProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAdminClientProcessor(handler AdminClient) *AdminClientProcessor { + + self71 := &AdminClientProcessor{handler: handler, processorMap: 
make(map[string]thrift.TProcessorFunction)} + self71.processorMap["create_app"] = &adminClientProcessorCreateApp{handler: handler} + self71.processorMap["drop_app"] = &adminClientProcessorDropApp{handler: handler} + self71.processorMap["recall_app"] = &adminClientProcessorRecallApp{handler: handler} + self71.processorMap["list_apps"] = &adminClientProcessorListApps{handler: handler} + self71.processorMap["add_duplication"] = &adminClientProcessorAddDuplication{handler: handler} + self71.processorMap["query_duplication"] = &adminClientProcessorQueryDuplication{handler: handler} + self71.processorMap["modify_duplication"] = &adminClientProcessorModifyDuplication{handler: handler} + self71.processorMap["query_app_info"] = &adminClientProcessorQueryAppInfo{handler: handler} + self71.processorMap["update_app_env"] = &adminClientProcessorUpdateAppEnv{handler: handler} + self71.processorMap["list_nodes"] = &adminClientProcessorListNodes{handler: handler} + self71.processorMap["query_cluster_info"] = &adminClientProcessorQueryClusterInfo{handler: handler} + self71.processorMap["meta_control"] = &adminClientProcessorMetaControl{handler: handler} + self71.processorMap["query_backup_policy"] = &adminClientProcessorQueryBackupPolicy{handler: handler} + self71.processorMap["balance"] = &adminClientProcessorBalance{handler: handler} + self71.processorMap["start_backup_app"] = &adminClientProcessorStartBackupApp{handler: handler} + self71.processorMap["query_backup_status"] = &adminClientProcessorQueryBackupStatus{handler: handler} + self71.processorMap["restore_app"] = &adminClientProcessorRestoreApp{handler: handler} + self71.processorMap["start_partition_split"] = &adminClientProcessorStartPartitionSplit{handler: handler} + self71.processorMap["query_split_status"] = &adminClientProcessorQuerySplitStatus{handler: handler} + self71.processorMap["control_partition_split"] = &adminClientProcessorControlPartitionSplit{handler: handler} + self71.processorMap["start_bulk_load"] = 
&adminClientProcessorStartBulkLoad{handler: handler} + self71.processorMap["query_bulk_load_status"] = &adminClientProcessorQueryBulkLoadStatus{handler: handler} + self71.processorMap["control_bulk_load"] = &adminClientProcessorControlBulkLoad{handler: handler} + self71.processorMap["clear_bulk_load"] = &adminClientProcessorClearBulkLoad{handler: handler} + self71.processorMap["start_manual_compact"] = &adminClientProcessorStartManualCompact{handler: handler} + self71.processorMap["query_manual_compact"] = &adminClientProcessorQueryManualCompact{handler: handler} + return self71 +} + +func (p *AdminClientProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x72 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x72.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x72 + +} + +type adminClientProcessorCreateApp struct { + handler AdminClient +} + +func (p *adminClientProcessorCreateApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientCreateAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("create_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientCreateAppResult{} + var retval *ConfigurationCreateAppResponse + var err2 error + if retval, err2 = p.handler.CreateApp(ctx, args.Req); err2 != nil { + x := 
thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing create_app: "+err2.Error()) + oprot.WriteMessageBegin("create_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("create_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorDropApp struct { + handler AdminClient +} + +func (p *adminClientProcessorDropApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientDropAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("drop_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientDropAppResult{} + var retval *ConfigurationDropAppResponse + var err2 error + if retval, err2 = p.handler.DropApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing drop_app: "+err2.Error()) + oprot.WriteMessageBegin("drop_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("drop_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if 
err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorRecallApp struct { + handler AdminClient +} + +func (p *adminClientProcessorRecallApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientRecallAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("recall_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientRecallAppResult{} + var retval *ConfigurationRecallAppResponse + var err2 error + if retval, err2 = p.handler.RecallApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing recall_app: "+err2.Error()) + oprot.WriteMessageBegin("recall_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("recall_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorListApps struct { + handler AdminClient +} + +func (p *adminClientProcessorListApps) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientListAppsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + 
oprot.WriteMessageBegin("list_apps", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientListAppsResult{} + var retval *ConfigurationListAppsResponse + var err2 error + if retval, err2 = p.handler.ListApps(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing list_apps: "+err2.Error()) + oprot.WriteMessageBegin("list_apps", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("list_apps", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorAddDuplication struct { + handler AdminClient +} + +func (p *adminClientProcessorAddDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientAddDuplicationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("add_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientAddDuplicationResult{} + var retval *DuplicationAddResponse + var err2 error + if retval, err2 = p.handler.AddDuplication(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing add_duplication: "+err2.Error()) + oprot.WriteMessageBegin("add_duplication", 
thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("add_duplication", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryDuplication struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryDuplicationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryDuplicationResult{} + var retval *DuplicationQueryResponse + var err2 error + if retval, err2 = p.handler.QueryDuplication(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_duplication: "+err2.Error()) + oprot.WriteMessageBegin("query_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_duplication", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + 
} + if err != nil { + return + } + return true, err +} + +type adminClientProcessorModifyDuplication struct { + handler AdminClient +} + +func (p *adminClientProcessorModifyDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientModifyDuplicationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("modify_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientModifyDuplicationResult{} + var retval *DuplicationModifyResponse + var err2 error + if retval, err2 = p.handler.ModifyDuplication(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing modify_duplication: "+err2.Error()) + oprot.WriteMessageBegin("modify_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("modify_duplication", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryAppInfo struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryAppInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryAppInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + 
oprot.WriteMessageBegin("query_app_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryAppInfoResult{} + var retval *QueryAppInfoResponse + var err2 error + if retval, err2 = p.handler.QueryAppInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_app_info: "+err2.Error()) + oprot.WriteMessageBegin("query_app_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_app_info", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorUpdateAppEnv struct { + handler AdminClient +} + +func (p *adminClientProcessorUpdateAppEnv) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientUpdateAppEnvArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("update_app_env", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientUpdateAppEnvResult{} + var retval *ConfigurationUpdateAppEnvResponse + var err2 error + if retval, err2 = p.handler.UpdateAppEnv(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing update_app_env: "+err2.Error()) + 
oprot.WriteMessageBegin("update_app_env", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("update_app_env", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorListNodes struct { + handler AdminClient +} + +func (p *adminClientProcessorListNodes) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientListNodesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("list_nodes", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientListNodesResult{} + var retval *ConfigurationListNodesResponse + var err2 error + if retval, err2 = p.handler.ListNodes(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing list_nodes: "+err2.Error()) + oprot.WriteMessageBegin("list_nodes", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("list_nodes", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != 
nil { + return + } + return true, err +} + +type adminClientProcessorQueryClusterInfo struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryClusterInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryClusterInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_cluster_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryClusterInfoResult{} + var retval *ConfigurationClusterInfoResponse + var err2 error + if retval, err2 = p.handler.QueryClusterInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_cluster_info: "+err2.Error()) + oprot.WriteMessageBegin("query_cluster_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_cluster_info", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorMetaControl struct { + handler AdminClient +} + +func (p *adminClientProcessorMetaControl) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientMetaControlArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + 
oprot.WriteMessageBegin("meta_control", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientMetaControlResult{} + var retval *ConfigurationMetaControlResponse + var err2 error + if retval, err2 = p.handler.MetaControl(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing meta_control: "+err2.Error()) + oprot.WriteMessageBegin("meta_control", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("meta_control", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryBackupPolicy struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryBackupPolicy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryBackupPolicyArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_backup_policy", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryBackupPolicyResult{} + var retval *ConfigurationQueryBackupPolicyResponse + var err2 error + if retval, err2 = p.handler.QueryBackupPolicy(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_backup_policy: "+err2.Error()) 
+ oprot.WriteMessageBegin("query_backup_policy", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_backup_policy", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorBalance struct { + handler AdminClient +} + +func (p *adminClientProcessorBalance) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientBalanceArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("balance", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientBalanceResult{} + var retval *ConfigurationBalancerResponse + var err2 error + if retval, err2 = p.handler.Balance(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing balance: "+err2.Error()) + oprot.WriteMessageBegin("balance", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("balance", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + 
return + } + return true, err +} + +type adminClientProcessorStartBackupApp struct { + handler AdminClient +} + +func (p *adminClientProcessorStartBackupApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartBackupAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_backup_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartBackupAppResult{} + var retval *StartBackupAppResponse + var err2 error + if retval, err2 = p.handler.StartBackupApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_backup_app: "+err2.Error()) + oprot.WriteMessageBegin("start_backup_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_backup_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryBackupStatus struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryBackupStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryBackupStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + 
oprot.WriteMessageBegin("query_backup_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryBackupStatusResult{} + var retval *QueryBackupStatusResponse + var err2 error + if retval, err2 = p.handler.QueryBackupStatus(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_backup_status: "+err2.Error()) + oprot.WriteMessageBegin("query_backup_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_backup_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorRestoreApp struct { + handler AdminClient +} + +func (p *adminClientProcessorRestoreApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientRestoreAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("restore_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientRestoreAppResult{} + var retval *ConfigurationCreateAppResponse + var err2 error + if retval, err2 = p.handler.RestoreApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing restore_app: "+err2.Error()) + 
oprot.WriteMessageBegin("restore_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("restore_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartPartitionSplit struct { + handler AdminClient +} + +func (p *adminClientProcessorStartPartitionSplit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartPartitionSplitArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartPartitionSplitResult{} + var retval *StartPartitionSplitResponse + var err2 error + if retval, err2 = p.handler.StartPartitionSplit(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_partition_split: "+err2.Error()) + oprot.WriteMessageBegin("start_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_partition_split", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + 
if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQuerySplitStatus struct { + handler AdminClient +} + +func (p *adminClientProcessorQuerySplitStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQuerySplitStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_split_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQuerySplitStatusResult{} + var retval *QuerySplitResponse + var err2 error + if retval, err2 = p.handler.QuerySplitStatus(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_split_status: "+err2.Error()) + oprot.WriteMessageBegin("query_split_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_split_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorControlPartitionSplit struct { + handler AdminClient +} + +func (p *adminClientProcessorControlPartitionSplit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientControlPartitionSplitArgs{} + if err = args.Read(iprot); err != nil { + 
iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("control_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientControlPartitionSplitResult{} + var retval *ControlSplitResponse + var err2 error + if retval, err2 = p.handler.ControlPartitionSplit(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing control_partition_split: "+err2.Error()) + oprot.WriteMessageBegin("control_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("control_partition_split", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartBulkLoad struct { + handler AdminClient +} + +func (p *adminClientProcessorStartBulkLoad) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartBulkLoadArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartBulkLoadResult{} + var retval *StartBulkLoadResponse + var err2 error + if retval, err2 = p.handler.StartBulkLoad(ctx, args.Req); err2 != nil { + x := 
thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_bulk_load: "+err2.Error()) + oprot.WriteMessageBegin("start_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_bulk_load", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryBulkLoadStatus struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryBulkLoadStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryBulkLoadStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_bulk_load_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryBulkLoadStatusResult{} + var retval *QueryBulkLoadResponse + var err2 error + if retval, err2 = p.handler.QueryBulkLoadStatus(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_bulk_load_status: "+err2.Error()) + oprot.WriteMessageBegin("query_bulk_load_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_bulk_load_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == 
nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorControlBulkLoad struct { + handler AdminClient +} + +func (p *adminClientProcessorControlBulkLoad) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientControlBulkLoadArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("control_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientControlBulkLoadResult{} + var retval *ControlBulkLoadResponse + var err2 error + if retval, err2 = p.handler.ControlBulkLoad(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing control_bulk_load: "+err2.Error()) + oprot.WriteMessageBegin("control_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("control_bulk_load", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorClearBulkLoad struct { + handler AdminClient +} + +func (p *adminClientProcessorClearBulkLoad) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := 
AdminClientClearBulkLoadArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("clear_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientClearBulkLoadResult{} + var retval *ClearBulkLoadStateResponse + var err2 error + if retval, err2 = p.handler.ClearBulkLoad(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing clear_bulk_load: "+err2.Error()) + oprot.WriteMessageBegin("clear_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("clear_bulk_load", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartManualCompact struct { + handler AdminClient +} + +func (p *adminClientProcessorStartManualCompact) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartManualCompactArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartManualCompactResult{} + var retval *StartAppManualCompactResponse + var err2 error + if retval, err2 = 
p.handler.StartManualCompact(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_manual_compact: "+err2.Error()) + oprot.WriteMessageBegin("start_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_manual_compact", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryManualCompact struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryManualCompact) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryManualCompactArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryManualCompactResult{} + var retval *QueryAppManualCompactResponse + var err2 error + if retval, err2 = p.handler.QueryManualCompact(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_manual_compact: "+err2.Error()) + oprot.WriteMessageBegin("query_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_manual_compact", thrift.REPLY, 
seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Req +type AdminClientCreateAppArgs struct { + Req *ConfigurationCreateAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientCreateAppArgs() *AdminClientCreateAppArgs { + return &AdminClientCreateAppArgs{} +} + +var AdminClientCreateAppArgs_Req_DEFAULT *ConfigurationCreateAppRequest + +func (p *AdminClientCreateAppArgs) GetReq() *ConfigurationCreateAppRequest { + if !p.IsSetReq() { + return AdminClientCreateAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientCreateAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientCreateAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientCreateAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationCreateAppRequest{} + if 
err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientCreateAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("create_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientCreateAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientCreateAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientCreateAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientCreateAppResult struct { + Success *ConfigurationCreateAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientCreateAppResult() *AdminClientCreateAppResult { + return &AdminClientCreateAppResult{} +} + +var AdminClientCreateAppResult_Success_DEFAULT *ConfigurationCreateAppResponse + +func (p *AdminClientCreateAppResult) GetSuccess() *ConfigurationCreateAppResponse { + if !p.IsSetSuccess() { + return AdminClientCreateAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientCreateAppResult) 
IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientCreateAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientCreateAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationCreateAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientCreateAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("create_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientCreateAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", 
thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientCreateAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientCreateAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientDropAppArgs struct { + Req *ConfigurationDropAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientDropAppArgs() *AdminClientDropAppArgs { + return &AdminClientDropAppArgs{} +} + +var AdminClientDropAppArgs_Req_DEFAULT *ConfigurationDropAppRequest + +func (p *AdminClientDropAppArgs) GetReq() *ConfigurationDropAppRequest { + if !p.IsSetReq() { + return AdminClientDropAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientDropAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientDropAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientDropAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationDropAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientDropAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("drop_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientDropAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientDropAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientDropAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientDropAppResult struct { + Success *ConfigurationDropAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientDropAppResult() *AdminClientDropAppResult { + return &AdminClientDropAppResult{} +} + +var AdminClientDropAppResult_Success_DEFAULT *ConfigurationDropAppResponse + +func (p *AdminClientDropAppResult) 
GetSuccess() *ConfigurationDropAppResponse { + if !p.IsSetSuccess() { + return AdminClientDropAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientDropAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientDropAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientDropAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationDropAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientDropAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("drop_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + 
return nil +} + +func (p *AdminClientDropAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientDropAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientDropAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientRecallAppArgs struct { + Req *ConfigurationRecallAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientRecallAppArgs() *AdminClientRecallAppArgs { + return &AdminClientRecallAppArgs{} +} + +var AdminClientRecallAppArgs_Req_DEFAULT *ConfigurationRecallAppRequest + +func (p *AdminClientRecallAppArgs) GetReq() *ConfigurationRecallAppRequest { + if !p.IsSetReq() { + return AdminClientRecallAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientRecallAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientRecallAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRecallAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationRecallAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientRecallAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("recall_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRecallAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientRecallAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRecallAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientRecallAppResult struct { + Success *ConfigurationRecallAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func 
NewAdminClientRecallAppResult() *AdminClientRecallAppResult { + return &AdminClientRecallAppResult{} +} + +var AdminClientRecallAppResult_Success_DEFAULT *ConfigurationRecallAppResponse + +func (p *AdminClientRecallAppResult) GetSuccess() *ConfigurationRecallAppResponse { + if !p.IsSetSuccess() { + return AdminClientRecallAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientRecallAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientRecallAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRecallAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationRecallAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientRecallAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("recall_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } 
+ } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRecallAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientRecallAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRecallAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientListAppsArgs struct { + Req *ConfigurationListAppsRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientListAppsArgs() *AdminClientListAppsArgs { + return &AdminClientListAppsArgs{} +} + +var AdminClientListAppsArgs_Req_DEFAULT *ConfigurationListAppsRequest + +func (p *AdminClientListAppsArgs) GetReq() *ConfigurationListAppsRequest { + if !p.IsSetReq() { + return AdminClientListAppsArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientListAppsArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientListAppsArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + 
case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListAppsArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationListAppsRequest{ + Status: 0, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientListAppsArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("list_apps_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListAppsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientListAppsArgs) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("AdminClientListAppsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientListAppsResult struct { + Success *ConfigurationListAppsResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientListAppsResult() *AdminClientListAppsResult { + return &AdminClientListAppsResult{} +} + +var AdminClientListAppsResult_Success_DEFAULT *ConfigurationListAppsResponse + +func (p *AdminClientListAppsResult) GetSuccess() *ConfigurationListAppsResponse { + if !p.IsSetSuccess() { + return AdminClientListAppsResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientListAppsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientListAppsResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListAppsResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationListAppsResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientListAppsResult) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("list_apps_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListAppsResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientListAppsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientListAppsResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientAddDuplicationArgs struct { + Req *DuplicationAddRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientAddDuplicationArgs() *AdminClientAddDuplicationArgs { + return &AdminClientAddDuplicationArgs{} +} + +var AdminClientAddDuplicationArgs_Req_DEFAULT *DuplicationAddRequest + +func (p *AdminClientAddDuplicationArgs) GetReq() *DuplicationAddRequest { + if !p.IsSetReq() { + return AdminClientAddDuplicationArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientAddDuplicationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientAddDuplicationArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", 
p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientAddDuplicationArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &DuplicationAddRequest{ + IsDuplicatingCheckpoint: true, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientAddDuplicationArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_duplication_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientAddDuplicationArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), 
err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientAddDuplicationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientAddDuplicationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientAddDuplicationResult struct { + Success *DuplicationAddResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientAddDuplicationResult() *AdminClientAddDuplicationResult { + return &AdminClientAddDuplicationResult{} +} + +var AdminClientAddDuplicationResult_Success_DEFAULT *DuplicationAddResponse + +func (p *AdminClientAddDuplicationResult) GetSuccess() *DuplicationAddResponse { + if !p.IsSetSuccess() { + return AdminClientAddDuplicationResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientAddDuplicationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientAddDuplicationResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientAddDuplicationResult) ReadField0(iprot 
thrift.TProtocol) error { + p.Success = &DuplicationAddResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientAddDuplicationResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_duplication_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientAddDuplicationResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientAddDuplicationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientAddDuplicationResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryDuplicationArgs struct { + Req *DuplicationQueryRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryDuplicationArgs() *AdminClientQueryDuplicationArgs { + return &AdminClientQueryDuplicationArgs{} +} + +var AdminClientQueryDuplicationArgs_Req_DEFAULT *DuplicationQueryRequest + +func (p *AdminClientQueryDuplicationArgs) GetReq() *DuplicationQueryRequest { + if 
!p.IsSetReq() { + return AdminClientQueryDuplicationArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryDuplicationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryDuplicationArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &DuplicationQueryRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_duplication_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p 
*AdminClientQueryDuplicationArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryDuplicationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryDuplicationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryDuplicationResult struct { + Success *DuplicationQueryResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryDuplicationResult() *AdminClientQueryDuplicationResult { + return &AdminClientQueryDuplicationResult{} +} + +var AdminClientQueryDuplicationResult_Success_DEFAULT *DuplicationQueryResponse + +func (p *AdminClientQueryDuplicationResult) GetSuccess() *DuplicationQueryResponse { + if !p.IsSetSuccess() { + return AdminClientQueryDuplicationResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryDuplicationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryDuplicationResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err 
!= nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &DuplicationQueryResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_duplication_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryDuplicationResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryDuplicationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryDuplicationResult(%+v)", *p) +} + +// Attributes: +// - Req +type 
AdminClientModifyDuplicationArgs struct { + Req *DuplicationModifyRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientModifyDuplicationArgs() *AdminClientModifyDuplicationArgs { + return &AdminClientModifyDuplicationArgs{} +} + +var AdminClientModifyDuplicationArgs_Req_DEFAULT *DuplicationModifyRequest + +func (p *AdminClientModifyDuplicationArgs) GetReq() *DuplicationModifyRequest { + if !p.IsSetReq() { + return AdminClientModifyDuplicationArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientModifyDuplicationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientModifyDuplicationArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &DuplicationModifyRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("modify_duplication_args"); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientModifyDuplicationArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientModifyDuplicationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientModifyDuplicationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientModifyDuplicationResult struct { + Success *DuplicationModifyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientModifyDuplicationResult() *AdminClientModifyDuplicationResult { + return &AdminClientModifyDuplicationResult{} +} + +var AdminClientModifyDuplicationResult_Success_DEFAULT *DuplicationModifyResponse + +func (p *AdminClientModifyDuplicationResult) GetSuccess() *DuplicationModifyResponse { + if !p.IsSetSuccess() { + return AdminClientModifyDuplicationResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientModifyDuplicationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientModifyDuplicationResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &DuplicationModifyResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("modify_duplication_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientModifyDuplicationResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientModifyDuplicationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientModifyDuplicationResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryAppInfoArgs struct { + Req *QueryAppInfoRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryAppInfoArgs() *AdminClientQueryAppInfoArgs { + return &AdminClientQueryAppInfoArgs{} +} + +var AdminClientQueryAppInfoArgs_Req_DEFAULT *QueryAppInfoRequest + +func (p *AdminClientQueryAppInfoArgs) GetReq() *QueryAppInfoRequest { + if !p.IsSetReq() { + return AdminClientQueryAppInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryAppInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryAppInfoArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoArgs) ReadField1(iprot 
thrift.TProtocol) error { + p.Req = &QueryAppInfoRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryAppInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryAppInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryAppInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryAppInfoResult struct { + Success *QueryAppInfoResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryAppInfoResult() *AdminClientQueryAppInfoResult { + return &AdminClientQueryAppInfoResult{} +} + +var AdminClientQueryAppInfoResult_Success_DEFAULT *QueryAppInfoResponse + +func (p *AdminClientQueryAppInfoResult) GetSuccess() *QueryAppInfoResponse { + if !p.IsSetSuccess() { + return AdminClientQueryAppInfoResult_Success_DEFAULT 
+ } + return p.Success +} +func (p *AdminClientQueryAppInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryAppInfoResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryAppInfoResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryAppInfoResult) writeField0(oprot thrift.TProtocol) (err error) { 
+ if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryAppInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryAppInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientUpdateAppEnvArgs struct { + Req *ConfigurationUpdateAppEnvRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientUpdateAppEnvArgs() *AdminClientUpdateAppEnvArgs { + return &AdminClientUpdateAppEnvArgs{} +} + +var AdminClientUpdateAppEnvArgs_Req_DEFAULT *ConfigurationUpdateAppEnvRequest + +func (p *AdminClientUpdateAppEnvArgs) GetReq() *ConfigurationUpdateAppEnvRequest { + if !p.IsSetReq() { + return AdminClientUpdateAppEnvArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientUpdateAppEnvArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientUpdateAppEnvArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := 
iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationUpdateAppEnvRequest{ + Op: 0, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_app_env_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientUpdateAppEnvArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientUpdateAppEnvArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientUpdateAppEnvResult struct { + Success *ConfigurationUpdateAppEnvResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientUpdateAppEnvResult() 
*AdminClientUpdateAppEnvResult { + return &AdminClientUpdateAppEnvResult{} +} + +var AdminClientUpdateAppEnvResult_Success_DEFAULT *ConfigurationUpdateAppEnvResponse + +func (p *AdminClientUpdateAppEnvResult) GetSuccess() *ConfigurationUpdateAppEnvResponse { + if !p.IsSetSuccess() { + return AdminClientUpdateAppEnvResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientUpdateAppEnvResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientUpdateAppEnvResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationUpdateAppEnvResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_app_env_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return 
err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientUpdateAppEnvResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientUpdateAppEnvResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientListNodesArgs struct { + Req *ConfigurationListNodesRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientListNodesArgs() *AdminClientListNodesArgs { + return &AdminClientListNodesArgs{} +} + +var AdminClientListNodesArgs_Req_DEFAULT *ConfigurationListNodesRequest + +func (p *AdminClientListNodesArgs) GetReq() *ConfigurationListNodesRequest { + if !p.IsSetReq() { + return AdminClientListNodesArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientListNodesArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientListNodesArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + 
break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListNodesArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationListNodesRequest{ + Status: 0, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientListNodesArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("list_nodes_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListNodesArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientListNodesArgs) String() string { + if p == nil { + return "" + } + 
return fmt.Sprintf("AdminClientListNodesArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientListNodesResult struct { + Success *ConfigurationListNodesResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientListNodesResult() *AdminClientListNodesResult { + return &AdminClientListNodesResult{} +} + +var AdminClientListNodesResult_Success_DEFAULT *ConfigurationListNodesResponse + +func (p *AdminClientListNodesResult) GetSuccess() *ConfigurationListNodesResponse { + if !p.IsSetSuccess() { + return AdminClientListNodesResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientListNodesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientListNodesResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListNodesResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationListNodesResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientListNodesResult) Write(oprot thrift.TProtocol) 
error { + if err := oprot.WriteStructBegin("list_nodes_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListNodesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientListNodesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientListNodesResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryClusterInfoArgs struct { + Req *ConfigurationClusterInfoRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryClusterInfoArgs() *AdminClientQueryClusterInfoArgs { + return &AdminClientQueryClusterInfoArgs{} +} + +var AdminClientQueryClusterInfoArgs_Req_DEFAULT *ConfigurationClusterInfoRequest + +func (p *AdminClientQueryClusterInfoArgs) GetReq() *ConfigurationClusterInfoRequest { + if !p.IsSetReq() { + return AdminClientQueryClusterInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryClusterInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryClusterInfoArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationClusterInfoRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cluster_info_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryClusterInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryClusterInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryClusterInfoResult struct { + Success *ConfigurationClusterInfoResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryClusterInfoResult() *AdminClientQueryClusterInfoResult { + return &AdminClientQueryClusterInfoResult{} +} + +var AdminClientQueryClusterInfoResult_Success_DEFAULT *ConfigurationClusterInfoResponse + +func (p *AdminClientQueryClusterInfoResult) GetSuccess() *ConfigurationClusterInfoResponse { + if !p.IsSetSuccess() { + return AdminClientQueryClusterInfoResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryClusterInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryClusterInfoResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read 
struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationClusterInfoResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cluster_info_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryClusterInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryClusterInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientMetaControlArgs struct { + Req *ConfigurationMetaControlRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientMetaControlArgs() *AdminClientMetaControlArgs { + return &AdminClientMetaControlArgs{} +} + +var AdminClientMetaControlArgs_Req_DEFAULT 
*ConfigurationMetaControlRequest + +func (p *AdminClientMetaControlArgs) GetReq() *ConfigurationMetaControlRequest { + if !p.IsSetReq() { + return AdminClientMetaControlArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientMetaControlArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientMetaControlArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientMetaControlArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationMetaControlRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientMetaControlArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("meta_control_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return 
thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientMetaControlArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientMetaControlArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientMetaControlArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientMetaControlResult struct { + Success *ConfigurationMetaControlResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientMetaControlResult() *AdminClientMetaControlResult { + return &AdminClientMetaControlResult{} +} + +var AdminClientMetaControlResult_Success_DEFAULT *ConfigurationMetaControlResponse + +func (p *AdminClientMetaControlResult) GetSuccess() *ConfigurationMetaControlResponse { + if !p.IsSetSuccess() { + return AdminClientMetaControlResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientMetaControlResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientMetaControlResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } 
else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientMetaControlResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationMetaControlResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientMetaControlResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("meta_control_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientMetaControlResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientMetaControlResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientMetaControlResult(%+v)", *p) +} + +// 
Attributes: +// - Req +type AdminClientQueryBackupPolicyArgs struct { + Req *ConfigurationQueryBackupPolicyRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryBackupPolicyArgs() *AdminClientQueryBackupPolicyArgs { + return &AdminClientQueryBackupPolicyArgs{} +} + +var AdminClientQueryBackupPolicyArgs_Req_DEFAULT *ConfigurationQueryBackupPolicyRequest + +func (p *AdminClientQueryBackupPolicyArgs) GetReq() *ConfigurationQueryBackupPolicyRequest { + if !p.IsSetReq() { + return AdminClientQueryBackupPolicyArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryBackupPolicyArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryBackupPolicyArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationQueryBackupPolicyRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyArgs) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("query_backup_policy_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryBackupPolicyArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupPolicyArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryBackupPolicyResult struct { + Success *ConfigurationQueryBackupPolicyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryBackupPolicyResult() *AdminClientQueryBackupPolicyResult { + return &AdminClientQueryBackupPolicyResult{} +} + +var AdminClientQueryBackupPolicyResult_Success_DEFAULT *ConfigurationQueryBackupPolicyResponse + +func (p *AdminClientQueryBackupPolicyResult) GetSuccess() *ConfigurationQueryBackupPolicyResponse { + if !p.IsSetSuccess() { + return AdminClientQueryBackupPolicyResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryBackupPolicyResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryBackupPolicyResult) Read(iprot 
thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationQueryBackupPolicyResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_policy_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryBackupPolicyResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupPolicyResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientBalanceArgs struct { + Req *ConfigurationBalancerRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientBalanceArgs() *AdminClientBalanceArgs { + return &AdminClientBalanceArgs{} +} + +var AdminClientBalanceArgs_Req_DEFAULT *ConfigurationBalancerRequest + +func (p *AdminClientBalanceArgs) GetReq() *ConfigurationBalancerRequest { + if !p.IsSetReq() { + return AdminClientBalanceArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientBalanceArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientBalanceArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct 
end error: ", p), err) + } + return nil +} + +func (p *AdminClientBalanceArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationBalancerRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientBalanceArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("balance_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientBalanceArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientBalanceArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientBalanceArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientBalanceResult struct { + Success *ConfigurationBalancerResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientBalanceResult() *AdminClientBalanceResult { + return &AdminClientBalanceResult{} +} + +var AdminClientBalanceResult_Success_DEFAULT *ConfigurationBalancerResponse + +func (p *AdminClientBalanceResult) GetSuccess() *ConfigurationBalancerResponse { + if 
!p.IsSetSuccess() { + return AdminClientBalanceResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientBalanceResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientBalanceResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientBalanceResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationBalancerResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientBalanceResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("balance_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientBalanceResult) 
writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientBalanceResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientBalanceResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartBackupAppArgs struct { + Req *StartBackupAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartBackupAppArgs() *AdminClientStartBackupAppArgs { + return &AdminClientStartBackupAppArgs{} +} + +var AdminClientStartBackupAppArgs_Req_DEFAULT *StartBackupAppRequest + +func (p *AdminClientStartBackupAppArgs) GetReq() *StartBackupAppRequest { + if !p.IsSetReq() { + return AdminClientStartBackupAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartBackupAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartBackupAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err 
+ } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBackupAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartBackupAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartBackupAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBackupAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartBackupAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBackupAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartBackupAppResult struct { + Success *StartBackupAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartBackupAppResult() 
*AdminClientStartBackupAppResult { + return &AdminClientStartBackupAppResult{} +} + +var AdminClientStartBackupAppResult_Success_DEFAULT *StartBackupAppResponse + +func (p *AdminClientStartBackupAppResult) GetSuccess() *StartBackupAppResponse { + if !p.IsSetSuccess() { + return AdminClientStartBackupAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartBackupAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartBackupAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBackupAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartBackupAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartBackupAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + 
if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBackupAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartBackupAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBackupAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryBackupStatusArgs struct { + Req *QueryBackupStatusRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryBackupStatusArgs() *AdminClientQueryBackupStatusArgs { + return &AdminClientQueryBackupStatusArgs{} +} + +var AdminClientQueryBackupStatusArgs_Req_DEFAULT *QueryBackupStatusRequest + +func (p *AdminClientQueryBackupStatusArgs) GetReq() *QueryBackupStatusRequest { + if !p.IsSetReq() { + return AdminClientQueryBackupStatusArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryBackupStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryBackupStatusArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, 
fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryBackupStatusRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p 
*AdminClientQueryBackupStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryBackupStatusResult struct { + Success *QueryBackupStatusResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryBackupStatusResult() *AdminClientQueryBackupStatusResult { + return &AdminClientQueryBackupStatusResult{} +} + +var AdminClientQueryBackupStatusResult_Success_DEFAULT *QueryBackupStatusResponse + +func (p *AdminClientQueryBackupStatusResult) GetSuccess() *QueryBackupStatusResponse { + if !p.IsSetSuccess() { + return AdminClientQueryBackupStatusResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryBackupStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryBackupStatusResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryBackupStatusResponse{} + if err := p.Success.Read(iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryBackupStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientRestoreAppArgs struct { + Req *ConfigurationRestoreRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientRestoreAppArgs() *AdminClientRestoreAppArgs { + return &AdminClientRestoreAppArgs{} +} + +var AdminClientRestoreAppArgs_Req_DEFAULT *ConfigurationRestoreRequest + +func (p *AdminClientRestoreAppArgs) GetReq() *ConfigurationRestoreRequest { + if !p.IsSetReq() { + return AdminClientRestoreAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientRestoreAppArgs) IsSetReq() bool { + 
return p.Req != nil +} + +func (p *AdminClientRestoreAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRestoreAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationRestoreRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientRestoreAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("restore_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRestoreAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientRestoreAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRestoreAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientRestoreAppResult struct { + Success *ConfigurationCreateAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientRestoreAppResult() *AdminClientRestoreAppResult { + return &AdminClientRestoreAppResult{} +} + +var AdminClientRestoreAppResult_Success_DEFAULT *ConfigurationCreateAppResponse + +func (p *AdminClientRestoreAppResult) GetSuccess() *ConfigurationCreateAppResponse { + if !p.IsSetSuccess() { + return AdminClientRestoreAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientRestoreAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientRestoreAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRestoreAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationCreateAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientRestoreAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("restore_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRestoreAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientRestoreAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRestoreAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartPartitionSplitArgs struct { + Req *StartPartitionSplitRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartPartitionSplitArgs() *AdminClientStartPartitionSplitArgs { + return &AdminClientStartPartitionSplitArgs{} +} + +var 
AdminClientStartPartitionSplitArgs_Req_DEFAULT *StartPartitionSplitRequest + +func (p *AdminClientStartPartitionSplitArgs) GetReq() *StartPartitionSplitRequest { + if !p.IsSetReq() { + return AdminClientStartPartitionSplitArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartPartitionSplitArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartPartitionSplitArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartPartitionSplitRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field 
stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartPartitionSplitArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartPartitionSplitArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartPartitionSplitResult struct { + Success *StartPartitionSplitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartPartitionSplitResult() *AdminClientStartPartitionSplitResult { + return &AdminClientStartPartitionSplitResult{} +} + +var AdminClientStartPartitionSplitResult_Success_DEFAULT *StartPartitionSplitResponse + +func (p *AdminClientStartPartitionSplitResult) GetSuccess() *StartPartitionSplitResponse { + if !p.IsSetSuccess() { + return AdminClientStartPartitionSplitResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartPartitionSplitResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartPartitionSplitResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if 
fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartPartitionSplitResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), 
err) + } + } + return err +} + +func (p *AdminClientStartPartitionSplitResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartPartitionSplitResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQuerySplitStatusArgs struct { + Req *QuerySplitRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQuerySplitStatusArgs() *AdminClientQuerySplitStatusArgs { + return &AdminClientQuerySplitStatusArgs{} +} + +var AdminClientQuerySplitStatusArgs_Req_DEFAULT *QuerySplitRequest + +func (p *AdminClientQuerySplitStatusArgs) GetReq() *QuerySplitRequest { + if !p.IsSetReq() { + return AdminClientQuerySplitStatusArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQuerySplitStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQuerySplitStatusArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QuerySplitRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} 
+ +func (p *AdminClientQuerySplitStatusArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_status_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQuerySplitStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQuerySplitStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQuerySplitStatusResult struct { + Success *QuerySplitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQuerySplitStatusResult() *AdminClientQuerySplitStatusResult { + return &AdminClientQuerySplitStatusResult{} +} + +var AdminClientQuerySplitStatusResult_Success_DEFAULT *QuerySplitResponse + +func (p *AdminClientQuerySplitStatusResult) GetSuccess() *QuerySplitResponse { + if !p.IsSetSuccess() { + return AdminClientQuerySplitStatusResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQuerySplitStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p 
*AdminClientQuerySplitStatusResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QuerySplitResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_status_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQuerySplitStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQuerySplitStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientControlPartitionSplitArgs struct { + Req *ControlSplitRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientControlPartitionSplitArgs() *AdminClientControlPartitionSplitArgs { + return &AdminClientControlPartitionSplitArgs{} +} + +var AdminClientControlPartitionSplitArgs_Req_DEFAULT *ControlSplitRequest + +func (p *AdminClientControlPartitionSplitArgs) GetReq() *ControlSplitRequest { + if !p.IsSetReq() { + return AdminClientControlPartitionSplitArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientControlPartitionSplitArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientControlPartitionSplitArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err 
:= iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ControlSplitRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_partition_split_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientControlPartitionSplitArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlPartitionSplitArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientControlPartitionSplitResult struct { + Success *ControlSplitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientControlPartitionSplitResult() *AdminClientControlPartitionSplitResult { + 
return &AdminClientControlPartitionSplitResult{} +} + +var AdminClientControlPartitionSplitResult_Success_DEFAULT *ControlSplitResponse + +func (p *AdminClientControlPartitionSplitResult) GetSuccess() *ControlSplitResponse { + if !p.IsSetSuccess() { + return AdminClientControlPartitionSplitResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientControlPartitionSplitResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientControlPartitionSplitResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ControlSplitResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_partition_split_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + 
return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientControlPartitionSplitResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlPartitionSplitResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartBulkLoadArgs struct { + Req *StartBulkLoadRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartBulkLoadArgs() *AdminClientStartBulkLoadArgs { + return &AdminClientStartBulkLoadArgs{} +} + +var AdminClientStartBulkLoadArgs_Req_DEFAULT *StartBulkLoadRequest + +func (p *AdminClientStartBulkLoadArgs) GetReq() *StartBulkLoadRequest { + if !p.IsSetReq() { + return AdminClientStartBulkLoadArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartBulkLoadArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartBulkLoadArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) 
+ } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartBulkLoadRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBulkLoadArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartBulkLoadArgs) String() string { + 
if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBulkLoadArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartBulkLoadResult struct { + Success *StartBulkLoadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartBulkLoadResult() *AdminClientStartBulkLoadResult { + return &AdminClientStartBulkLoadResult{} +} + +var AdminClientStartBulkLoadResult_Success_DEFAULT *StartBulkLoadResponse + +func (p *AdminClientStartBulkLoadResult) GetSuccess() *StartBulkLoadResponse { + if !p.IsSetSuccess() { + return AdminClientStartBulkLoadResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartBulkLoadResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartBulkLoadResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartBulkLoadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p 
*AdminClientStartBulkLoadResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBulkLoadResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartBulkLoadResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBulkLoadResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryBulkLoadStatusArgs struct { + Req *QueryBulkLoadRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryBulkLoadStatusArgs() *AdminClientQueryBulkLoadStatusArgs { + return &AdminClientQueryBulkLoadStatusArgs{} +} + +var AdminClientQueryBulkLoadStatusArgs_Req_DEFAULT *QueryBulkLoadRequest + +func (p *AdminClientQueryBulkLoadStatusArgs) GetReq() *QueryBulkLoadRequest { + if !p.IsSetReq() { + return AdminClientQueryBulkLoadStatusArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryBulkLoadStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) 
Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryBulkLoadRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_status_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", 
p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryBulkLoadStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBulkLoadStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryBulkLoadStatusResult struct { + Success *QueryBulkLoadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryBulkLoadStatusResult() *AdminClientQueryBulkLoadStatusResult { + return &AdminClientQueryBulkLoadStatusResult{} +} + +var AdminClientQueryBulkLoadStatusResult_Success_DEFAULT *QueryBulkLoadResponse + +func (p *AdminClientQueryBulkLoadStatusResult) GetSuccess() *QueryBulkLoadResponse { + if !p.IsSetSuccess() { + return AdminClientQueryBulkLoadStatusResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryBulkLoadStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := 
iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryBulkLoadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_status_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryBulkLoadStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBulkLoadStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientControlBulkLoadArgs struct { + Req *ControlBulkLoadRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientControlBulkLoadArgs() 
*AdminClientControlBulkLoadArgs { + return &AdminClientControlBulkLoadArgs{} +} + +var AdminClientControlBulkLoadArgs_Req_DEFAULT *ControlBulkLoadRequest + +func (p *AdminClientControlBulkLoadArgs) GetReq() *ControlBulkLoadRequest { + if !p.IsSetReq() { + return AdminClientControlBulkLoadArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientControlBulkLoadArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientControlBulkLoadArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ControlBulkLoadRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + 
return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlBulkLoadArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientControlBulkLoadArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlBulkLoadArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientControlBulkLoadResult struct { + Success *ControlBulkLoadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientControlBulkLoadResult() *AdminClientControlBulkLoadResult { + return &AdminClientControlBulkLoadResult{} +} + +var AdminClientControlBulkLoadResult_Success_DEFAULT *ControlBulkLoadResponse + +func (p *AdminClientControlBulkLoadResult) GetSuccess() *ControlBulkLoadResponse { + if !p.IsSetSuccess() { + return AdminClientControlBulkLoadResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientControlBulkLoadResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientControlBulkLoadResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { 
+ break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ControlBulkLoadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlBulkLoadResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p 
*AdminClientControlBulkLoadResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlBulkLoadResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientClearBulkLoadArgs struct { + Req *ClearBulkLoadStateRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientClearBulkLoadArgs() *AdminClientClearBulkLoadArgs { + return &AdminClientClearBulkLoadArgs{} +} + +var AdminClientClearBulkLoadArgs_Req_DEFAULT *ClearBulkLoadStateRequest + +func (p *AdminClientClearBulkLoadArgs) GetReq() *ClearBulkLoadStateRequest { + if !p.IsSetReq() { + return AdminClientClearBulkLoadArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientClearBulkLoadArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientClearBulkLoadArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ClearBulkLoadStateRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadArgs) 
Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientClearBulkLoadArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientClearBulkLoadArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientClearBulkLoadArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientClearBulkLoadResult struct { + Success *ClearBulkLoadStateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientClearBulkLoadResult() *AdminClientClearBulkLoadResult { + return &AdminClientClearBulkLoadResult{} +} + +var AdminClientClearBulkLoadResult_Success_DEFAULT *ClearBulkLoadStateResponse + +func (p *AdminClientClearBulkLoadResult) GetSuccess() *ClearBulkLoadStateResponse { + if !p.IsSetSuccess() { + return AdminClientClearBulkLoadResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientClearBulkLoadResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientClearBulkLoadResult) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ClearBulkLoadStateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientClearBulkLoadResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if 
err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientClearBulkLoadResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientClearBulkLoadResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartManualCompactArgs struct { + Req *StartAppManualCompactRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartManualCompactArgs() *AdminClientStartManualCompactArgs { + return &AdminClientStartManualCompactArgs{} +} + +var AdminClientStartManualCompactArgs_Req_DEFAULT *StartAppManualCompactRequest + +func (p *AdminClientStartManualCompactArgs) GetReq() *StartAppManualCompactRequest { + if !p.IsSetReq() { + return AdminClientStartManualCompactArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartManualCompactArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartManualCompactArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct 
end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartManualCompactArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartAppManualCompactRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartManualCompactArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_manual_compact_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartManualCompactArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartManualCompactArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartManualCompactArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartManualCompactResult struct { + Success *StartAppManualCompactResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartManualCompactResult() *AdminClientStartManualCompactResult { + return &AdminClientStartManualCompactResult{} +} + +var 
AdminClientStartManualCompactResult_Success_DEFAULT *StartAppManualCompactResponse + +func (p *AdminClientStartManualCompactResult) GetSuccess() *StartAppManualCompactResponse { + if !p.IsSetSuccess() { + return AdminClientStartManualCompactResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartManualCompactResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartManualCompactResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartManualCompactResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartAppManualCompactResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartManualCompactResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_manual_compact_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); 
err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartManualCompactResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartManualCompactResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartManualCompactResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryManualCompactArgs struct { + Req *QueryAppManualCompactRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryManualCompactArgs() *AdminClientQueryManualCompactArgs { + return &AdminClientQueryManualCompactArgs{} +} + +var AdminClientQueryManualCompactArgs_Req_DEFAULT *QueryAppManualCompactRequest + +func (p *AdminClientQueryManualCompactArgs) GetReq() *QueryAppManualCompactRequest { + if !p.IsSetReq() { + return AdminClientQueryManualCompactArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryManualCompactArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryManualCompactArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), 
err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryAppManualCompactRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_manual_compact_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryManualCompactArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p 
*AdminClientQueryManualCompactArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryManualCompactArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryManualCompactResult struct { + Success *QueryAppManualCompactResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryManualCompactResult() *AdminClientQueryManualCompactResult { + return &AdminClientQueryManualCompactResult{} +} + +var AdminClientQueryManualCompactResult_Success_DEFAULT *QueryAppManualCompactResponse + +func (p *AdminClientQueryManualCompactResult) GetSuccess() *QueryAppManualCompactResponse { + if !p.IsSetSuccess() { + return AdminClientQueryManualCompactResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryManualCompactResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryManualCompactResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryAppManualCompactResponse{} + if err := p.Success.Read(iprot); err != 
nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_manual_compact_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryManualCompactResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryManualCompactResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryManualCompactResult(%+v)", *p) +} diff --git a/go-client/idl/admin/metadata-consts.go b/go-client/idl/admin/metadata-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/metadata-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + 
"github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/metadata.go b/go-client/idl/admin/metadata.go new file mode 100644 index 0000000000..4a3c5592c5 --- /dev/null +++ b/go-client/idl/admin/metadata.go @@ -0,0 +1,1373 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type PartitionStatus int64 + +const ( + PartitionStatus_PS_INVALID PartitionStatus = 0 + PartitionStatus_PS_INACTIVE PartitionStatus = 1 + PartitionStatus_PS_ERROR PartitionStatus = 2 + PartitionStatus_PS_PRIMARY PartitionStatus = 3 + PartitionStatus_PS_SECONDARY PartitionStatus = 4 + PartitionStatus_PS_POTENTIAL_SECONDARY PartitionStatus = 5 + PartitionStatus_PS_PARTITION_SPLIT PartitionStatus = 6 +) + +func (p PartitionStatus) String() string { + switch p { + case PartitionStatus_PS_INVALID: + return "PS_INVALID" + case PartitionStatus_PS_INACTIVE: + return "PS_INACTIVE" + case PartitionStatus_PS_ERROR: + return "PS_ERROR" + case PartitionStatus_PS_PRIMARY: + return "PS_PRIMARY" + case PartitionStatus_PS_SECONDARY: + return "PS_SECONDARY" + case PartitionStatus_PS_POTENTIAL_SECONDARY: + return "PS_POTENTIAL_SECONDARY" + case PartitionStatus_PS_PARTITION_SPLIT: + return "PS_PARTITION_SPLIT" + } + return "" +} + +func PartitionStatusFromString(s string) (PartitionStatus, error) { + switch s { + case "PS_INVALID": + return PartitionStatus_PS_INVALID, nil + case "PS_INACTIVE": + return PartitionStatus_PS_INACTIVE, nil + case "PS_ERROR": + return PartitionStatus_PS_ERROR, nil + case "PS_PRIMARY": + return PartitionStatus_PS_PRIMARY, nil + case "PS_SECONDARY": + return PartitionStatus_PS_SECONDARY, nil + case "PS_POTENTIAL_SECONDARY": + return PartitionStatus_PS_POTENTIAL_SECONDARY, nil + case "PS_PARTITION_SPLIT": + return PartitionStatus_PS_PARTITION_SPLIT, nil + } + return PartitionStatus(0), fmt.Errorf("not a valid PartitionStatus string") +} + +func PartitionStatusPtr(v PartitionStatus) *PartitionStatus { return &v } + +func (p PartitionStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *PartitionStatus) UnmarshalText(text []byte) 
error { + q, err := PartitionStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *PartitionStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = PartitionStatus(v) + return nil +} + +func (p *PartitionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type SplitStatus int64 + +const ( + SplitStatus_NOT_SPLIT SplitStatus = 0 + SplitStatus_SPLITTING SplitStatus = 1 + SplitStatus_PAUSING SplitStatus = 2 + SplitStatus_PAUSED SplitStatus = 3 + SplitStatus_CANCELING SplitStatus = 4 +) + +func (p SplitStatus) String() string { + switch p { + case SplitStatus_NOT_SPLIT: + return "NOT_SPLIT" + case SplitStatus_SPLITTING: + return "SPLITTING" + case SplitStatus_PAUSING: + return "PAUSING" + case SplitStatus_PAUSED: + return "PAUSED" + case SplitStatus_CANCELING: + return "CANCELING" + } + return "" +} + +func SplitStatusFromString(s string) (SplitStatus, error) { + switch s { + case "NOT_SPLIT": + return SplitStatus_NOT_SPLIT, nil + case "SPLITTING": + return SplitStatus_SPLITTING, nil + case "PAUSING": + return SplitStatus_PAUSING, nil + case "PAUSED": + return SplitStatus_PAUSED, nil + case "CANCELING": + return SplitStatus_CANCELING, nil + } + return SplitStatus(0), fmt.Errorf("not a valid SplitStatus string") +} + +func SplitStatusPtr(v SplitStatus) *SplitStatus { return &v } + +func (p SplitStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SplitStatus) UnmarshalText(text []byte) error { + q, err := SplitStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *SplitStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = SplitStatus(v) + return nil +} + +func (p *SplitStatus) Value() (driver.Value, error) { + if p == nil { + 
return nil, nil + } + return int64(*p), nil +} + +type DiskStatus int64 + +const ( + DiskStatus_NORMAL DiskStatus = 0 + DiskStatus_SPACE_INSUFFICIENT DiskStatus = 1 + DiskStatus_IO_ERROR DiskStatus = 2 +) + +func (p DiskStatus) String() string { + switch p { + case DiskStatus_NORMAL: + return "NORMAL" + case DiskStatus_SPACE_INSUFFICIENT: + return "SPACE_INSUFFICIENT" + case DiskStatus_IO_ERROR: + return "IO_ERROR" + } + return "" +} + +func DiskStatusFromString(s string) (DiskStatus, error) { + switch s { + case "NORMAL": + return DiskStatus_NORMAL, nil + case "SPACE_INSUFFICIENT": + return DiskStatus_SPACE_INSUFFICIENT, nil + case "IO_ERROR": + return DiskStatus_IO_ERROR, nil + } + return DiskStatus(0), fmt.Errorf("not a valid DiskStatus string") +} + +func DiskStatusPtr(v DiskStatus) *DiskStatus { return &v } + +func (p DiskStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DiskStatus) UnmarshalText(text []byte) error { + q, err := DiskStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DiskStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DiskStatus(v) + return nil +} + +func (p *DiskStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type ManualCompactionStatus int64 + +const ( + ManualCompactionStatus_IDLE ManualCompactionStatus = 0 + ManualCompactionStatus_QUEUING ManualCompactionStatus = 1 + ManualCompactionStatus_RUNNING ManualCompactionStatus = 2 + ManualCompactionStatus_FINISHED ManualCompactionStatus = 3 +) + +func (p ManualCompactionStatus) String() string { + switch p { + case ManualCompactionStatus_IDLE: + return "IDLE" + case ManualCompactionStatus_QUEUING: + return "QUEUING" + case ManualCompactionStatus_RUNNING: + return "RUNNING" + case ManualCompactionStatus_FINISHED: + return "FINISHED" + } + return "" +} + +func 
ManualCompactionStatusFromString(s string) (ManualCompactionStatus, error) { + switch s { + case "IDLE": + return ManualCompactionStatus_IDLE, nil + case "QUEUING": + return ManualCompactionStatus_QUEUING, nil + case "RUNNING": + return ManualCompactionStatus_RUNNING, nil + case "FINISHED": + return ManualCompactionStatus_FINISHED, nil + } + return ManualCompactionStatus(0), fmt.Errorf("not a valid ManualCompactionStatus string") +} + +func ManualCompactionStatusPtr(v ManualCompactionStatus) *ManualCompactionStatus { return &v } + +func (p ManualCompactionStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *ManualCompactionStatus) UnmarshalText(text []byte) error { + q, err := ManualCompactionStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *ManualCompactionStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = ManualCompactionStatus(v) + return nil +} + +func (p *ManualCompactionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Name +// - Size +// - Md5 +type FileMeta struct { + Name string `thrift:"name,1" db:"name" json:"name"` + Size int64 `thrift:"size,2" db:"size" json:"size"` + Md5 string `thrift:"md5,3" db:"md5" json:"md5"` +} + +func NewFileMeta() *FileMeta { + return &FileMeta{} +} + +func (p *FileMeta) GetName() string { + return p.Name +} + +func (p *FileMeta) GetSize() int64 { + return p.Size +} + +func (p *FileMeta) GetMd5() string { + return p.Md5 +} +func (p *FileMeta) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if 
fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *FileMeta) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *FileMeta) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Size = v + } + return nil +} + +func (p *FileMeta) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Md5 = v + } + return nil +} + +func (p *FileMeta) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("file_meta"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } 
+ if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *FileMeta) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err) + } + if err := oprot.WriteString(string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err) + } + return err +} + +func (p *FileMeta) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("size", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:size: ", p), err) + } + if err := oprot.WriteI64(int64(p.Size)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.size (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:size: ", p), err) + } + return err +} + +func (p *FileMeta) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("md5", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:md5: ", p), err) + } + if err := oprot.WriteString(string(p.Md5)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.md5 (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:md5: ", p), err) + } + return err +} + +func (p *FileMeta) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FileMeta(%+v)", *p) +} + +// Attributes: 
+// - Pid +// - Ballot +// - Primary +// - Status +// - LearnerSignature +// - PopAll +// - SplitSyncToChild +// - HpPrimary +type ReplicaConfiguration struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Ballot int64 `thrift:"ballot,2" db:"ballot" json:"ballot"` + Primary *base.RPCAddress `thrift:"primary,3" db:"primary" json:"primary"` + Status PartitionStatus `thrift:"status,4" db:"status" json:"status"` + LearnerSignature int64 `thrift:"learner_signature,5" db:"learner_signature" json:"learner_signature"` + PopAll bool `thrift:"pop_all,6" db:"pop_all" json:"pop_all"` + SplitSyncToChild bool `thrift:"split_sync_to_child,7" db:"split_sync_to_child" json:"split_sync_to_child"` + HpPrimary *base.HostPort `thrift:"hp_primary,8" db:"hp_primary" json:"hp_primary,omitempty"` +} + +func NewReplicaConfiguration() *ReplicaConfiguration { + return &ReplicaConfiguration{ + Status: 0, + } +} + +var ReplicaConfiguration_Pid_DEFAULT *base.Gpid + +func (p *ReplicaConfiguration) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ReplicaConfiguration_Pid_DEFAULT + } + return p.Pid +} + +func (p *ReplicaConfiguration) GetBallot() int64 { + return p.Ballot +} + +var ReplicaConfiguration_Primary_DEFAULT *base.RPCAddress + +func (p *ReplicaConfiguration) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return ReplicaConfiguration_Primary_DEFAULT + } + return p.Primary +} + +func (p *ReplicaConfiguration) GetStatus() PartitionStatus { + return p.Status +} + +func (p *ReplicaConfiguration) GetLearnerSignature() int64 { + return p.LearnerSignature +} + +var ReplicaConfiguration_PopAll_DEFAULT bool = false + +func (p *ReplicaConfiguration) GetPopAll() bool { + return p.PopAll +} + +var ReplicaConfiguration_SplitSyncToChild_DEFAULT bool = false + +func (p *ReplicaConfiguration) GetSplitSyncToChild() bool { + return p.SplitSyncToChild +} + +var ReplicaConfiguration_HpPrimary_DEFAULT *base.HostPort + +func (p *ReplicaConfiguration) GetHpPrimary() *base.HostPort { + if 
!p.IsSetHpPrimary() { + return ReplicaConfiguration_HpPrimary_DEFAULT + } + return p.HpPrimary +} +func (p *ReplicaConfiguration) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ReplicaConfiguration) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *ReplicaConfiguration) IsSetPopAll() bool { + return p.PopAll != ReplicaConfiguration_PopAll_DEFAULT +} + +func (p *ReplicaConfiguration) IsSetSplitSyncToChild() bool { + return p.SplitSyncToChild != ReplicaConfiguration_SplitSyncToChild_DEFAULT +} + +func (p *ReplicaConfiguration) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *ReplicaConfiguration) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 
6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaConfiguration) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ReplicaConfiguration) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField3(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *ReplicaConfiguration) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := PartitionStatus(v) + p.Status = temp + } + return nil +} + +func (p *ReplicaConfiguration) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return 
thrift.PrependError("error reading field 5: ", err) + } else { + p.LearnerSignature = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.PopAll = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.SplitSyncToChild = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField8(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *ReplicaConfiguration) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_configuration"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaConfiguration) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 
1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ballot: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:primary: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 4:status: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("learner_signature", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:learner_signature: ", p), err) + } + if err := oprot.WriteI64(int64(p.LearnerSignature)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.learner_signature (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:learner_signature: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPopAll() { + if err := oprot.WriteFieldBegin("pop_all", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:pop_all: ", p), err) + } + if err := oprot.WriteBool(bool(p.PopAll)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.pop_all (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:pop_all: ", p), err) + } + } + return err +} + +func (p *ReplicaConfiguration) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetSplitSyncToChild() { + if err := oprot.WriteFieldBegin("split_sync_to_child", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:split_sync_to_child: ", p), err) + } + if err := oprot.WriteBool(bool(p.SplitSyncToChild)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.split_sync_to_child (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:split_sync_to_child: ", p), err) + } + } + return err +} + +func (p *ReplicaConfiguration) 
writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hp_primary: ", p), err) + } + } + return err +} + +func (p *ReplicaConfiguration) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaConfiguration(%+v)", *p) +} + +// Attributes: +// - Pid +// - Ballot +// - Status +// - LastCommittedDecree +// - LastPreparedDecree +// - LastDurableDecree +// - AppType +// - DiskTag +// - ManualCompactStatus +type ReplicaInfo struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Ballot int64 `thrift:"ballot,2" db:"ballot" json:"ballot"` + Status PartitionStatus `thrift:"status,3" db:"status" json:"status"` + LastCommittedDecree int64 `thrift:"last_committed_decree,4" db:"last_committed_decree" json:"last_committed_decree"` + LastPreparedDecree int64 `thrift:"last_prepared_decree,5" db:"last_prepared_decree" json:"last_prepared_decree"` + LastDurableDecree int64 `thrift:"last_durable_decree,6" db:"last_durable_decree" json:"last_durable_decree"` + AppType string `thrift:"app_type,7" db:"app_type" json:"app_type"` + DiskTag string `thrift:"disk_tag,8" db:"disk_tag" json:"disk_tag"` + ManualCompactStatus *ManualCompactionStatus `thrift:"manual_compact_status,9" db:"manual_compact_status" json:"manual_compact_status,omitempty"` +} + +func NewReplicaInfo() *ReplicaInfo { + return &ReplicaInfo{} +} + +var ReplicaInfo_Pid_DEFAULT *base.Gpid + +func (p *ReplicaInfo) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ReplicaInfo_Pid_DEFAULT + } + return p.Pid +} + +func (p *ReplicaInfo) 
GetBallot() int64 { + return p.Ballot +} + +func (p *ReplicaInfo) GetStatus() PartitionStatus { + return p.Status +} + +func (p *ReplicaInfo) GetLastCommittedDecree() int64 { + return p.LastCommittedDecree +} + +func (p *ReplicaInfo) GetLastPreparedDecree() int64 { + return p.LastPreparedDecree +} + +func (p *ReplicaInfo) GetLastDurableDecree() int64 { + return p.LastDurableDecree +} + +func (p *ReplicaInfo) GetAppType() string { + return p.AppType +} + +func (p *ReplicaInfo) GetDiskTag() string { + return p.DiskTag +} + +var ReplicaInfo_ManualCompactStatus_DEFAULT ManualCompactionStatus + +func (p *ReplicaInfo) GetManualCompactStatus() ManualCompactionStatus { + if !p.IsSetManualCompactStatus() { + return ReplicaInfo_ManualCompactStatus_DEFAULT + } + return *p.ManualCompactStatus +} +func (p *ReplicaInfo) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ReplicaInfo) IsSetManualCompactStatus() bool { + return p.ManualCompactStatus != nil +} + +func (p *ReplicaInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { 
+ if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaInfo) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ReplicaInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *ReplicaInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error 
reading field 3: ", err) + } else { + temp := PartitionStatus(v) + p.Status = temp + } + return nil +} + +func (p *ReplicaInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.LastCommittedDecree = v + } + return nil +} + +func (p *ReplicaInfo) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.LastPreparedDecree = v + } + return nil +} + +func (p *ReplicaInfo) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.LastDurableDecree = v + } + return nil +} + +func (p *ReplicaInfo) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.AppType = v + } + return nil +} + +func (p *ReplicaInfo) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.DiskTag = v + } + return nil +} + +func (p *ReplicaInfo) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + temp := ManualCompactionStatus(v) + p.ManualCompactStatus = &temp + } + return nil +} + +func (p *ReplicaInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return 
err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ballot: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_committed_decree", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:last_committed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastCommittedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_committed_decree (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:last_committed_decree: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_prepared_decree", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:last_prepared_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastPreparedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_prepared_decree (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:last_prepared_decree: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_durable_decree", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:last_durable_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastDurableDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_durable_decree (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 6:last_durable_decree: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:app_type: ", p), err) + } + if err := oprot.WriteString(string(p.AppType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_type (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:app_type: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_tag", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:disk_tag: ", p), err) + } + if err := oprot.WriteString(string(p.DiskTag)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_tag (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:disk_tag: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetManualCompactStatus() { + if err := oprot.WriteFieldBegin("manual_compact_status", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:manual_compact_status: ", p), err) + } + if err := oprot.WriteI32(int32(*p.ManualCompactStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.manual_compact_status (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:manual_compact_status: ", p), err) + } + } + return err +} + +func (p *ReplicaInfo) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("ReplicaInfo(%+v)", *p) +} diff --git a/go-client/idl/admin/partition_split-consts.go b/go-client/idl/admin/partition_split-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/partition_split-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/partition_split.go b/go-client/idl/admin/partition_split.go new file mode 100644 index 0000000000..45cc544d62 --- /dev/null +++ b/go-client/idl/admin/partition_split.go @@ -0,0 +1,3245 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type SplitControlType int64 + +const ( + SplitControlType_PAUSE SplitControlType = 0 + SplitControlType_RESTART SplitControlType = 1 + SplitControlType_CANCEL SplitControlType = 2 +) + +func (p SplitControlType) String() string { + switch p { + case SplitControlType_PAUSE: + return "PAUSE" + case SplitControlType_RESTART: + return "RESTART" + case SplitControlType_CANCEL: + return "CANCEL" + } + return "" +} + +func SplitControlTypeFromString(s string) (SplitControlType, error) { + switch s { + case "PAUSE": + return SplitControlType_PAUSE, nil + case "RESTART": + return SplitControlType_RESTART, nil + case "CANCEL": + return SplitControlType_CANCEL, nil + } + return SplitControlType(0), fmt.Errorf("not a valid SplitControlType string") +} + +func SplitControlTypePtr(v SplitControlType) *SplitControlType { return &v } + +func (p SplitControlType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SplitControlType) UnmarshalText(text []byte) error { + q, err := SplitControlTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *SplitControlType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = SplitControlType(v) + return nil +} + +func (p *SplitControlType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - AppName +// - NewPartitionCount_ +type StartPartitionSplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + NewPartitionCount_ int32 `thrift:"new_partition_count,2" db:"new_partition_count" json:"new_partition_count"` +} + +func NewStartPartitionSplitRequest() *StartPartitionSplitRequest { + return 
&StartPartitionSplitRequest{} +} + +func (p *StartPartitionSplitRequest) GetAppName() string { + return p.AppName +} + +func (p *StartPartitionSplitRequest) GetNewPartitionCount_() int32 { + return p.NewPartitionCount_ +} +func (p *StartPartitionSplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartPartitionSplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *StartPartitionSplitRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewPartitionCount_ = v + } + return nil +} + +func (p *StartPartitionSplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_request"); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartPartitionSplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *StartPartitionSplitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.NewPartitionCount_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_partition_count: ", p), err) + } + return err +} + +func (p *StartPartitionSplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartPartitionSplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type StartPartitionSplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string 
`thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewStartPartitionSplitResponse() *StartPartitionSplitResponse { + return &StartPartitionSplitResponse{} +} + +var StartPartitionSplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartPartitionSplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartPartitionSplitResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartPartitionSplitResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *StartPartitionSplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartPartitionSplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartPartitionSplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartPartitionSplitResponse) 
ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *StartPartitionSplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartPartitionSplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartPartitionSplitResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p 
*StartPartitionSplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartPartitionSplitResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - ControlType +// - ParentPidx +// - OldPartitionCount +type ControlSplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + ControlType SplitControlType `thrift:"control_type,2" db:"control_type" json:"control_type"` + ParentPidx int32 `thrift:"parent_pidx,3" db:"parent_pidx" json:"parent_pidx"` + OldPartitionCount *int32 `thrift:"old_partition_count,4" db:"old_partition_count" json:"old_partition_count,omitempty"` +} + +func NewControlSplitRequest() *ControlSplitRequest { + return &ControlSplitRequest{} +} + +func (p *ControlSplitRequest) GetAppName() string { + return p.AppName +} + +func (p *ControlSplitRequest) GetControlType() SplitControlType { + return p.ControlType +} + +func (p *ControlSplitRequest) GetParentPidx() int32 { + return p.ParentPidx +} + +var ControlSplitRequest_OldPartitionCount_DEFAULT int32 + +func (p *ControlSplitRequest) GetOldPartitionCount() int32 { + if !p.IsSetOldPartitionCount() { + return ControlSplitRequest_OldPartitionCount_DEFAULT + } + return *p.OldPartitionCount +} +func (p *ControlSplitRequest) IsSetOldPartitionCount() bool { + return p.OldPartitionCount != nil +} + +func (p *ControlSplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == 
thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlSplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ControlSplitRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := SplitControlType(v) + p.ControlType = temp + } + return nil +} + +func (p *ControlSplitRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ParentPidx = v + } + return nil +} + +func (p *ControlSplitRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.OldPartitionCount = &v + } + return nil +} + +func (p *ControlSplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write 
struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlSplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ControlSplitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("control_type", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:control_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.ControlType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.control_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:control_type: ", p), err) + } + return err +} + +func (p *ControlSplitRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_pidx", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:parent_pidx: ", p), err) + } + if err := 
oprot.WriteI32(int32(p.ParentPidx)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_pidx (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:parent_pidx: ", p), err) + } + return err +} + +func (p *ControlSplitRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetOldPartitionCount() { + if err := oprot.WriteFieldBegin("old_partition_count", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:old_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.OldPartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_partition_count (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:old_partition_count: ", p), err) + } + } + return err +} + +func (p *ControlSplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlSplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type ControlSplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg *string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewControlSplitResponse() *ControlSplitResponse { + return &ControlSplitResponse{} +} + +var ControlSplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *ControlSplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ControlSplitResponse_Err_DEFAULT + } + return p.Err +} + +var ControlSplitResponse_HintMsg_DEFAULT string + +func (p *ControlSplitResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return ControlSplitResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *ControlSplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ControlSplitResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p 
*ControlSplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlSplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ControlSplitResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *ControlSplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := 
oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlSplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ControlSplitResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + } + return err +} + +func (p *ControlSplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlSplitResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type QuerySplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewQuerySplitRequest() *QuerySplitRequest { + return &QuerySplitRequest{} +} + +func (p *QuerySplitRequest) GetAppName() string { + return p.AppName +} +func (p *QuerySplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) 
+ } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QuerySplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QuerySplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QuerySplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QuerySplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QuerySplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - NewPartitionCount_ +// - Status +// - HintMsg +type QuerySplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + NewPartitionCount_ int32 `thrift:"new_partition_count,2" db:"new_partition_count" json:"new_partition_count"` + Status map[int32]SplitStatus `thrift:"status,3" db:"status" json:"status"` + HintMsg *string `thrift:"hint_msg,4" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewQuerySplitResponse() *QuerySplitResponse { + return &QuerySplitResponse{} +} + +var QuerySplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *QuerySplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QuerySplitResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QuerySplitResponse) GetNewPartitionCount_() int32 { + return p.NewPartitionCount_ +} + +func (p *QuerySplitResponse) GetStatus() map[int32]SplitStatus { + return p.Status +} + +var QuerySplitResponse_HintMsg_DEFAULT string + +func (p *QuerySplitResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return QuerySplitResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *QuerySplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QuerySplitResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *QuerySplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err 
:= p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.MAP { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QuerySplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QuerySplitResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewPartitionCount_ = v + } + return nil +} + +func (p *QuerySplitResponse) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]SplitStatus, size) + p.Status = tMap + for i := 0; i < size; i++ { + var _key0 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 SplitStatus + if v, err := iprot.ReadI32(); err != 
nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + temp := SplitStatus(v) + _val1 = temp + } + p.Status[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *QuerySplitResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *QuerySplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QuerySplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QuerySplitResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_partition_count", thrift.I32, 2); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.NewPartitionCount_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_partition_count: ", p), err) + } + return err +} + +func (p *QuerySplitResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.MAP, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.Status)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Status { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) + } + return err +} + +func (p *QuerySplitResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint_msg: ", p), err) + } + } + return err +} + +func (p *QuerySplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QuerySplitResponse(%+v)", *p) +} + +// Attributes: +// - ParentGpid +// - ChildGpid +// - ChildBallot +// - Child +// - HpChild +type NotifyCatchUpRequest struct { + ParentGpid *base.Gpid `thrift:"parent_gpid,1" db:"parent_gpid" json:"parent_gpid"` + ChildGpid *base.Gpid `thrift:"child_gpid,2" db:"child_gpid" json:"child_gpid"` + ChildBallot int64 `thrift:"child_ballot,3" db:"child_ballot" json:"child_ballot"` + Child *base.RPCAddress `thrift:"child,4" db:"child" json:"child"` + HpChild *base.HostPort `thrift:"hp_child,5" db:"hp_child" json:"hp_child,omitempty"` +} + +func NewNotifyCatchUpRequest() *NotifyCatchUpRequest { + return &NotifyCatchUpRequest{} +} + +var NotifyCatchUpRequest_ParentGpid_DEFAULT *base.Gpid + +func (p *NotifyCatchUpRequest) GetParentGpid() *base.Gpid { + if !p.IsSetParentGpid() { + return NotifyCatchUpRequest_ParentGpid_DEFAULT + } + return p.ParentGpid +} + +var NotifyCatchUpRequest_ChildGpid_DEFAULT *base.Gpid + +func (p *NotifyCatchUpRequest) 
GetChildGpid() *base.Gpid { + if !p.IsSetChildGpid() { + return NotifyCatchUpRequest_ChildGpid_DEFAULT + } + return p.ChildGpid +} + +func (p *NotifyCatchUpRequest) GetChildBallot() int64 { + return p.ChildBallot +} + +var NotifyCatchUpRequest_Child_DEFAULT *base.RPCAddress + +func (p *NotifyCatchUpRequest) GetChild() *base.RPCAddress { + if !p.IsSetChild() { + return NotifyCatchUpRequest_Child_DEFAULT + } + return p.Child +} + +var NotifyCatchUpRequest_HpChild_DEFAULT *base.HostPort + +func (p *NotifyCatchUpRequest) GetHpChild() *base.HostPort { + if !p.IsSetHpChild() { + return NotifyCatchUpRequest_HpChild_DEFAULT + } + return p.HpChild +} +func (p *NotifyCatchUpRequest) IsSetParentGpid() bool { + return p.ParentGpid != nil +} + +func (p *NotifyCatchUpRequest) IsSetChildGpid() bool { + return p.ChildGpid != nil +} + +func (p *NotifyCatchUpRequest) IsSetChild() bool { + return p.Child != nil +} + +func (p *NotifyCatchUpRequest) IsSetHpChild() bool { + return p.HpChild != nil +} + +func (p *NotifyCatchUpRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + 
} + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField1(iprot thrift.TProtocol) error { + p.ParentGpid = &base.Gpid{} + if err := p.ParentGpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentGpid), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField2(iprot thrift.TProtocol) error { + p.ChildGpid = &base.Gpid{} + if err := p.ChildGpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildGpid), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ChildBallot = v + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField4(iprot thrift.TProtocol) error { + p.Child = &base.RPCAddress{} + if err := p.Child.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Child), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField5(iprot thrift.TProtocol) error { + p.HpChild = &base.HostPort{} + if err := p.HpChild.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpChild), err) + } + return nil +} + +func (p 
*NotifyCatchUpRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_catch_up_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyCatchUpRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_gpid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:parent_gpid: ", p), err) + } + if err := p.ParentGpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentGpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:parent_gpid: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_gpid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:child_gpid: ", p), err) + } + if err := p.ChildGpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildGpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:child_gpid: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) 
writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_ballot", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.ChildBallot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.child_ballot (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_ballot: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:child: ", p), err) + } + if err := p.Child.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Child), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:child: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpChild() { + if err := oprot.WriteFieldBegin("hp_child", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_child: ", p), err) + } + if err := p.HpChild.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpChild), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_child: ", p), err) + } + } + return err +} + +func (p *NotifyCatchUpRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyCatchUpRequest(%+v)", *p) +} + +// Attributes: +// - Err +type NotifyCacthUpResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewNotifyCacthUpResponse() 
*NotifyCacthUpResponse { + return &NotifyCacthUpResponse{} +} + +var NotifyCacthUpResponse_Err_DEFAULT *base.ErrorCode + +func (p *NotifyCacthUpResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return NotifyCacthUpResponse_Err_DEFAULT + } + return p.Err +} +func (p *NotifyCacthUpResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *NotifyCacthUpResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyCacthUpResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *NotifyCacthUpResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_cacth_up_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err 
!= nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyCacthUpResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *NotifyCacthUpResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyCacthUpResponse(%+v)", *p) +} + +// Attributes: +// - Target +// - NewPartitionCount_ +// - ChildPid +// - Ballot +// - HpTarget +type UpdateChildGroupPartitionCountRequest struct { + Target *base.RPCAddress `thrift:"target,1" db:"target" json:"target"` + NewPartitionCount_ int32 `thrift:"new_partition_count,2" db:"new_partition_count" json:"new_partition_count"` + ChildPid *base.Gpid `thrift:"child_pid,3" db:"child_pid" json:"child_pid"` + Ballot int64 `thrift:"ballot,4" db:"ballot" json:"ballot"` + HpTarget *base.HostPort `thrift:"hp_target,5" db:"hp_target" json:"hp_target,omitempty"` +} + +func NewUpdateChildGroupPartitionCountRequest() *UpdateChildGroupPartitionCountRequest { + return &UpdateChildGroupPartitionCountRequest{} +} + +var UpdateChildGroupPartitionCountRequest_Target_DEFAULT *base.RPCAddress + +func (p *UpdateChildGroupPartitionCountRequest) GetTarget() *base.RPCAddress { + if !p.IsSetTarget() { + return UpdateChildGroupPartitionCountRequest_Target_DEFAULT + } + return p.Target +} + +func (p *UpdateChildGroupPartitionCountRequest) GetNewPartitionCount_() int32 { + return p.NewPartitionCount_ +} + +var UpdateChildGroupPartitionCountRequest_ChildPid_DEFAULT *base.Gpid + +func (p *UpdateChildGroupPartitionCountRequest) 
GetChildPid() *base.Gpid { + if !p.IsSetChildPid() { + return UpdateChildGroupPartitionCountRequest_ChildPid_DEFAULT + } + return p.ChildPid +} + +func (p *UpdateChildGroupPartitionCountRequest) GetBallot() int64 { + return p.Ballot +} + +var UpdateChildGroupPartitionCountRequest_HpTarget_DEFAULT *base.HostPort + +func (p *UpdateChildGroupPartitionCountRequest) GetHpTarget() *base.HostPort { + if !p.IsSetHpTarget() { + return UpdateChildGroupPartitionCountRequest_HpTarget_DEFAULT + } + return p.HpTarget +} +func (p *UpdateChildGroupPartitionCountRequest) IsSetTarget() bool { + return p.Target != nil +} + +func (p *UpdateChildGroupPartitionCountRequest) IsSetChildPid() bool { + return p.ChildPid != nil +} + +func (p *UpdateChildGroupPartitionCountRequest) IsSetHpTarget() bool { + return p.HpTarget != nil +} + +func (p *UpdateChildGroupPartitionCountRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); 
err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField1(iprot thrift.TProtocol) error { + p.Target = &base.RPCAddress{} + if err := p.Target.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Target), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewPartitionCount_ = v + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField3(iprot thrift.TProtocol) error { + p.ChildPid = &base.Gpid{} + if err := p.ChildPid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildPid), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField5(iprot thrift.TProtocol) error { + p.HpTarget = &base.HostPort{} + if err := p.HpTarget.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpTarget), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("update_child_group_partition_count_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:target: ", p), err) + } + if err := p.Target.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:target: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.NewPartitionCount_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_partition_count: ", p), err) + } + return err +} + +func (p 
*UpdateChildGroupPartitionCountRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_pid", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_pid: ", p), err) + } + if err := p.ChildPid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildPid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_pid: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ballot: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpTarget() { + if err := oprot.WriteFieldBegin("hp_target", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_target: ", p), err) + } + if err := p.HpTarget.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpTarget), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_target: ", p), err) + } + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateChildGroupPartitionCountRequest(%+v)", *p) +} + +// Attributes: +// - Err +type 
UpdateChildGroupPartitionCountResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewUpdateChildGroupPartitionCountResponse() *UpdateChildGroupPartitionCountResponse { + return &UpdateChildGroupPartitionCountResponse{} +} + +var UpdateChildGroupPartitionCountResponse_Err_DEFAULT *base.ErrorCode + +func (p *UpdateChildGroupPartitionCountResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return UpdateChildGroupPartitionCountResponse_Err_DEFAULT + } + return p.Err +} +func (p *UpdateChildGroupPartitionCountResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *UpdateChildGroupPartitionCountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_child_group_partition_count_response"); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateChildGroupPartitionCountResponse(%+v)", *p) +} + +// Attributes: +// - App +// - ParentConfig +// - ChildConfig +// - Primary +// - HpPrimary +type RegisterChildRequest struct { + App *replication.AppInfo `thrift:"app,1" db:"app" json:"app"` + ParentConfig *replication.PartitionConfiguration `thrift:"parent_config,2" db:"parent_config" json:"parent_config"` + ChildConfig *replication.PartitionConfiguration `thrift:"child_config,3" db:"child_config" json:"child_config"` + Primary *base.RPCAddress `thrift:"primary,4" db:"primary" json:"primary"` + HpPrimary *base.HostPort `thrift:"hp_primary,5" db:"hp_primary" json:"hp_primary,omitempty"` +} + +func NewRegisterChildRequest() *RegisterChildRequest { + return &RegisterChildRequest{} +} + +var RegisterChildRequest_App_DEFAULT *replication.AppInfo + +func (p *RegisterChildRequest) GetApp() *replication.AppInfo { + if !p.IsSetApp() { 
+ return RegisterChildRequest_App_DEFAULT + } + return p.App +} + +var RegisterChildRequest_ParentConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildRequest) GetParentConfig() *replication.PartitionConfiguration { + if !p.IsSetParentConfig() { + return RegisterChildRequest_ParentConfig_DEFAULT + } + return p.ParentConfig +} + +var RegisterChildRequest_ChildConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildRequest) GetChildConfig() *replication.PartitionConfiguration { + if !p.IsSetChildConfig() { + return RegisterChildRequest_ChildConfig_DEFAULT + } + return p.ChildConfig +} + +var RegisterChildRequest_Primary_DEFAULT *base.RPCAddress + +func (p *RegisterChildRequest) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return RegisterChildRequest_Primary_DEFAULT + } + return p.Primary +} + +var RegisterChildRequest_HpPrimary_DEFAULT *base.HostPort + +func (p *RegisterChildRequest) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return RegisterChildRequest_HpPrimary_DEFAULT + } + return p.HpPrimary +} +func (p *RegisterChildRequest) IsSetApp() bool { + return p.App != nil +} + +func (p *RegisterChildRequest) IsSetParentConfig() bool { + return p.ParentConfig != nil +} + +func (p *RegisterChildRequest) IsSetChildConfig() bool { + return p.ChildConfig != nil +} + +func (p *RegisterChildRequest) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *RegisterChildRequest) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *RegisterChildRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == 
thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField1(iprot thrift.TProtocol) error { + p.App = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.App.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.App), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField2(iprot thrift.TProtocol) error { + p.ParentConfig = &replication.PartitionConfiguration{} + if err := p.ParentConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentConfig), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField3(iprot thrift.TProtocol) error { + p.ChildConfig = &replication.PartitionConfiguration{} + if err := p.ChildConfig.Read(iprot); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildConfig), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField4(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField5(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *RegisterChildRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("register_child_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RegisterChildRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app: ", p), err) + } + if err := p.App.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.App), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app: ", p), err) + } + 
return err +} + +func (p *RegisterChildRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_config", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:parent_config: ", p), err) + } + if err := p.ParentConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:parent_config: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_config: ", p), err) + } + if err := p.ChildConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_config: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:primary: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
5:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_primary: ", p), err) + } + } + return err +} + +func (p *RegisterChildRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RegisterChildRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - App +// - ParentConfig +// - ChildConfig +type RegisterChildResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + App *replication.AppInfo `thrift:"app,2" db:"app" json:"app"` + ParentConfig *replication.PartitionConfiguration `thrift:"parent_config,3" db:"parent_config" json:"parent_config"` + ChildConfig *replication.PartitionConfiguration `thrift:"child_config,4" db:"child_config" json:"child_config"` +} + +func NewRegisterChildResponse() *RegisterChildResponse { + return &RegisterChildResponse{} +} + +var RegisterChildResponse_Err_DEFAULT *base.ErrorCode + +func (p *RegisterChildResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return RegisterChildResponse_Err_DEFAULT + } + return p.Err +} + +var RegisterChildResponse_App_DEFAULT *replication.AppInfo + +func (p *RegisterChildResponse) GetApp() *replication.AppInfo { + if !p.IsSetApp() { + return RegisterChildResponse_App_DEFAULT + } + return p.App +} + +var RegisterChildResponse_ParentConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildResponse) GetParentConfig() *replication.PartitionConfiguration { + if !p.IsSetParentConfig() { + return RegisterChildResponse_ParentConfig_DEFAULT + } + return p.ParentConfig +} + +var RegisterChildResponse_ChildConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildResponse) GetChildConfig() *replication.PartitionConfiguration { + if !p.IsSetChildConfig() { + return 
RegisterChildResponse_ChildConfig_DEFAULT + } + return p.ChildConfig +} +func (p *RegisterChildResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *RegisterChildResponse) IsSetApp() bool { + return p.App != nil +} + +func (p *RegisterChildResponse) IsSetParentConfig() bool { + return p.ParentConfig != nil +} + +func (p *RegisterChildResponse) IsSetChildConfig() bool { + return p.ChildConfig != nil +} + +func (p *RegisterChildResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField1(iprot thrift.TProtocol) error { + 
p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField2(iprot thrift.TProtocol) error { + p.App = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.App.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.App), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField3(iprot thrift.TProtocol) error { + p.ParentConfig = &replication.PartitionConfiguration{} + if err := p.ParentConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentConfig), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField4(iprot thrift.TProtocol) error { + p.ChildConfig = &replication.PartitionConfiguration{} + if err := p.ChildConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildConfig), err) + } + return nil +} + +func (p *RegisterChildResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("register_child_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RegisterChildResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app: ", p), err) + } + if err := p.App.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.App), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:parent_config: ", p), err) + } + if err := p.ParentConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:parent_config: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_config", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:child_config: ", p), err) + } + if err := p.ChildConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field end error 4:child_config: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RegisterChildResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - ParentGpid +// - MetaSplitStatus +// - PartitionCount +type NotifyStopSplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + ParentGpid *base.Gpid `thrift:"parent_gpid,2" db:"parent_gpid" json:"parent_gpid"` + MetaSplitStatus SplitStatus `thrift:"meta_split_status,3" db:"meta_split_status" json:"meta_split_status"` + PartitionCount int32 `thrift:"partition_count,4" db:"partition_count" json:"partition_count"` +} + +func NewNotifyStopSplitRequest() *NotifyStopSplitRequest { + return &NotifyStopSplitRequest{} +} + +func (p *NotifyStopSplitRequest) GetAppName() string { + return p.AppName +} + +var NotifyStopSplitRequest_ParentGpid_DEFAULT *base.Gpid + +func (p *NotifyStopSplitRequest) GetParentGpid() *base.Gpid { + if !p.IsSetParentGpid() { + return NotifyStopSplitRequest_ParentGpid_DEFAULT + } + return p.ParentGpid +} + +func (p *NotifyStopSplitRequest) GetMetaSplitStatus() SplitStatus { + return p.MetaSplitStatus +} + +func (p *NotifyStopSplitRequest) GetPartitionCount() int32 { + return p.PartitionCount +} +func (p *NotifyStopSplitRequest) IsSetParentGpid() bool { + return p.ParentGpid != nil +} + +func (p *NotifyStopSplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); 
err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField2(iprot thrift.TProtocol) error { + p.ParentGpid = &base.Gpid{} + if err := p.ParentGpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentGpid), err) + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := SplitStatus(v) + p.MetaSplitStatus = temp + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *NotifyStopSplitRequest) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("notify_stop_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyStopSplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_gpid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:parent_gpid: ", p), err) + } + if err := p.ParentGpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentGpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:parent_gpid: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_split_status", thrift.I32, 3); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:meta_split_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetaSplitStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_split_status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:meta_split_status: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_count: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyStopSplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +type NotifyStopSplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewNotifyStopSplitResponse() *NotifyStopSplitResponse { + return &NotifyStopSplitResponse{} +} + +var NotifyStopSplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *NotifyStopSplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return NotifyStopSplitResponse_Err_DEFAULT + } + return p.Err +} +func (p *NotifyStopSplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *NotifyStopSplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyStopSplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *NotifyStopSplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_stop_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyStopSplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + 
return err +} + +func (p *NotifyStopSplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyStopSplitResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Pid +// - PartitionCount +type QueryChildStateRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Pid *base.Gpid `thrift:"pid,2" db:"pid" json:"pid"` + PartitionCount int32 `thrift:"partition_count,3" db:"partition_count" json:"partition_count"` +} + +func NewQueryChildStateRequest() *QueryChildStateRequest { + return &QueryChildStateRequest{} +} + +func (p *QueryChildStateRequest) GetAppName() string { + return p.AppName +} + +var QueryChildStateRequest_Pid_DEFAULT *base.Gpid + +func (p *QueryChildStateRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return QueryChildStateRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *QueryChildStateRequest) GetPartitionCount() int32 { + return p.PartitionCount +} +func (p *QueryChildStateRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *QueryChildStateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryChildStateRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryChildStateRequest) ReadField2(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *QueryChildStateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *QueryChildStateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_child_state_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryChildStateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field 
begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryChildStateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:pid: ", p), err) + } + return err +} + +func (p *QueryChildStateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:partition_count: ", p), err) + } + return err +} + +func (p *QueryChildStateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryChildStateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - PartitionCount +// - ChildConfig +type QueryChildStateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + PartitionCount *int32 `thrift:"partition_count,2" db:"partition_count" json:"partition_count,omitempty"` + ChildConfig *replication.PartitionConfiguration `thrift:"child_config,3" 
db:"child_config" json:"child_config,omitempty"` +} + +func NewQueryChildStateResponse() *QueryChildStateResponse { + return &QueryChildStateResponse{} +} + +var QueryChildStateResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryChildStateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryChildStateResponse_Err_DEFAULT + } + return p.Err +} + +var QueryChildStateResponse_PartitionCount_DEFAULT int32 + +func (p *QueryChildStateResponse) GetPartitionCount() int32 { + if !p.IsSetPartitionCount() { + return QueryChildStateResponse_PartitionCount_DEFAULT + } + return *p.PartitionCount +} + +var QueryChildStateResponse_ChildConfig_DEFAULT *replication.PartitionConfiguration + +func (p *QueryChildStateResponse) GetChildConfig() *replication.PartitionConfiguration { + if !p.IsSetChildConfig() { + return QueryChildStateResponse_ChildConfig_DEFAULT + } + return p.ChildConfig +} +func (p *QueryChildStateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryChildStateResponse) IsSetPartitionCount() bool { + return p.PartitionCount != nil +} + +func (p *QueryChildStateResponse) IsSetChildConfig() bool { + return p.ChildConfig != nil +} + +func (p *QueryChildStateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } 
+ case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryChildStateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryChildStateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PartitionCount = &v + } + return nil +} + +func (p *QueryChildStateResponse) ReadField3(iprot thrift.TProtocol) error { + p.ChildConfig = &replication.PartitionConfiguration{} + if err := p.ChildConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildConfig), err) + } + return nil +} + +func (p *QueryChildStateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_child_state_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p 
*QueryChildStateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryChildStateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionCount() { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_count: ", p), err) + } + } + return err +} + +func (p *QueryChildStateResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetChildConfig() { + if err := oprot.WriteFieldBegin("child_config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_config: ", p), err) + } + if err := p.ChildConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_config: ", p), err) + } + } + return err +} + +func (p *QueryChildStateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryChildStateResponse(%+v)", *p) +} diff --git a/go-client/idl/cmd/GoUnusedProtection__.go 
b/go-client/idl/cmd/GoUnusedProtection__.go new file mode 100644 index 0000000000..b15aabc4ad --- /dev/null +++ b/go-client/idl/cmd/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package cmd + +var GoUnusedProtection__ int diff --git a/go-client/idl/cmd/command-consts.go b/go-client/idl/cmd/command-consts.go new file mode 100644 index 0000000000..7dba9de517 --- /dev/null +++ b/go-client/idl/cmd/command-consts.go @@ -0,0 +1,22 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package cmd + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +func init() { +} diff --git a/go-client/idl/cmd/command.go b/go-client/idl/cmd/command.go new file mode 100644 index 0000000000..edeb8b1da4 --- /dev/null +++ b/go-client/idl/cmd/command.go @@ -0,0 +1,535 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package cmd + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +// Attributes: +// - Cmd +// - Arguments +type Command struct { + Cmd string `thrift:"cmd,1" db:"cmd" json:"cmd"` + Arguments []string `thrift:"arguments,2" db:"arguments" json:"arguments"` +} + +func NewCommand() *Command { + return &Command{} +} + +func (p *Command) GetCmd() string { + return p.Cmd +} + +func (p *Command) GetArguments() []string { + return p.Arguments +} +func (p *Command) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Command) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Cmd = v + } + return nil +} + +func (p *Command) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) 
+ } + tSlice := make([]string, 0, size) + p.Arguments = tSlice + for i := 0; i < size; i++ { + var _elem0 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem0 = v + } + p.Arguments = append(p.Arguments, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Command) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("command"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Command) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cmd", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cmd: ", p), err) + } + if err := oprot.WriteString(string(p.Cmd)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cmd (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cmd: ", p), err) + } + return err +} + +func (p *Command) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("arguments", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:arguments: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Arguments)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Arguments { + if 
err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:arguments: ", p), err) + } + return err +} + +func (p *Command) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Command(%+v)", *p) +} + +type RemoteCmdService interface { + // Parameters: + // - Cmd + CallCommand(ctx context.Context, cmd *Command) (r string, err error) +} + +type RemoteCmdServiceClient struct { + c thrift.TClient +} + +func NewRemoteCmdServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *RemoteCmdServiceClient { + return &RemoteCmdServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewRemoteCmdServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *RemoteCmdServiceClient { + return &RemoteCmdServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewRemoteCmdServiceClient(c thrift.TClient) *RemoteCmdServiceClient { + return &RemoteCmdServiceClient{ + c: c, + } +} + +func (p *RemoteCmdServiceClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Cmd +func (p *RemoteCmdServiceClient) CallCommand(ctx context.Context, cmd *Command) (r string, err error) { + var _args1 RemoteCmdServiceCallCommandArgs + _args1.Cmd = cmd + var _result2 RemoteCmdServiceCallCommandResult + if err = p.Client_().Call(ctx, "callCommand", &_args1, &_result2); err != nil { + return + } + return _result2.GetSuccess(), nil +} + +type RemoteCmdServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler RemoteCmdService +} + +func (p *RemoteCmdServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + 
p.processorMap[key] = processor +} + +func (p *RemoteCmdServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *RemoteCmdServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewRemoteCmdServiceProcessor(handler RemoteCmdService) *RemoteCmdServiceProcessor { + + self3 := &RemoteCmdServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self3.processorMap["callCommand"] = &remoteCmdServiceProcessorCallCommand{handler: handler} + return self3 +} + +func (p *RemoteCmdServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x4.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x4 + +} + +type remoteCmdServiceProcessorCallCommand struct { + handler RemoteCmdService +} + +func (p *remoteCmdServiceProcessorCallCommand) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RemoteCmdServiceCallCommandArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("callCommand", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RemoteCmdServiceCallCommandResult{} + var retval string + var err2 error + if 
retval, err2 = p.handler.CallCommand(ctx, args.Cmd); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing callCommand: "+err2.Error()) + oprot.WriteMessageBegin("callCommand", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = &retval + } + if err2 = oprot.WriteMessageBegin("callCommand", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Cmd +type RemoteCmdServiceCallCommandArgs struct { + Cmd *Command `thrift:"cmd,1" db:"cmd" json:"cmd"` +} + +func NewRemoteCmdServiceCallCommandArgs() *RemoteCmdServiceCallCommandArgs { + return &RemoteCmdServiceCallCommandArgs{} +} + +var RemoteCmdServiceCallCommandArgs_Cmd_DEFAULT *Command + +func (p *RemoteCmdServiceCallCommandArgs) GetCmd() *Command { + if !p.IsSetCmd() { + return RemoteCmdServiceCallCommandArgs_Cmd_DEFAULT + } + return p.Cmd +} +func (p *RemoteCmdServiceCallCommandArgs) IsSetCmd() bool { + return p.Cmd != nil +} + +func (p *RemoteCmdServiceCallCommandArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return 
err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandArgs) ReadField1(iprot thrift.TProtocol) error { + p.Cmd = &Command{} + if err := p.Cmd.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Cmd), err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("callCommand_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cmd", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cmd: ", p), err) + } + if err := p.Cmd.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Cmd), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cmd: ", p), err) + } + return err +} + +func (p *RemoteCmdServiceCallCommandArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RemoteCmdServiceCallCommandArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RemoteCmdServiceCallCommandResult struct { + Success *string `thrift:"success,0" db:"success" 
json:"success,omitempty"` +} + +func NewRemoteCmdServiceCallCommandResult() *RemoteCmdServiceCallCommandResult { + return &RemoteCmdServiceCallCommandResult{} +} + +var RemoteCmdServiceCallCommandResult_Success_DEFAULT string + +func (p *RemoteCmdServiceCallCommandResult) GetSuccess() string { + if !p.IsSetSuccess() { + return RemoteCmdServiceCallCommandResult_Success_DEFAULT + } + return *p.Success +} +func (p *RemoteCmdServiceCallCommandResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RemoteCmdServiceCallCommandResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRING { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandResult) ReadField0(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + p.Success = &v + } + return nil +} + +func (p *RemoteCmdServiceCallCommandResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("callCommand_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + 
return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRING, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteString(string(*p.Success)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RemoteCmdServiceCallCommandResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RemoteCmdServiceCallCommandResult(%+v)", *p) +} diff --git a/go-client/idl/radmin/GoUnusedProtection__.go b/go-client/idl/radmin/GoUnusedProtection__.go new file mode 100644 index 0000000000..c460900104 --- /dev/null +++ b/go-client/idl/radmin/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package radmin + +var GoUnusedProtection__ int diff --git a/go-client/idl/radmin/replica_admin-consts.go b/go-client/idl/radmin/replica_admin-consts.go new file mode 100644 index 0000000000..6e0cdbb21d --- /dev/null +++ b/go-client/idl/radmin/replica_admin-consts.go @@ -0,0 +1,29 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package radmin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/admin" + "github.com/apache/incubator-pegasus/go-client/idl/base" + 
"github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ +var _ = admin.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/radmin/replica_admin.go b/go-client/idl/radmin/replica_admin.go new file mode 100644 index 0000000000..0ad97a8977 --- /dev/null +++ b/go-client/idl/radmin/replica_admin.go @@ -0,0 +1,3681 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package radmin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/admin" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ +var _ = admin.GoUnusedProtection__ + +type DiskMigrationStatus int64 + +const ( + DiskMigrationStatus_IDLE DiskMigrationStatus = 0 + DiskMigrationStatus_MOVING DiskMigrationStatus = 1 + DiskMigrationStatus_MOVED DiskMigrationStatus = 2 + DiskMigrationStatus_CLOSED DiskMigrationStatus = 3 +) + +func (p DiskMigrationStatus) String() string { + switch p { + case DiskMigrationStatus_IDLE: + return "IDLE" + case DiskMigrationStatus_MOVING: + return "MOVING" + case DiskMigrationStatus_MOVED: + return "MOVED" + case DiskMigrationStatus_CLOSED: + return "CLOSED" + } + return "" +} + +func DiskMigrationStatusFromString(s string) (DiskMigrationStatus, error) { + switch s { + case "IDLE": + return DiskMigrationStatus_IDLE, nil + case "MOVING": + return DiskMigrationStatus_MOVING, nil + case "MOVED": + return DiskMigrationStatus_MOVED, nil + case "CLOSED": + return DiskMigrationStatus_CLOSED, nil + } + return DiskMigrationStatus(0), fmt.Errorf("not a valid DiskMigrationStatus string") +} + +func DiskMigrationStatusPtr(v DiskMigrationStatus) *DiskMigrationStatus { return &v } + +func (p DiskMigrationStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DiskMigrationStatus) UnmarshalText(text []byte) error { + q, err := DiskMigrationStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DiskMigrationStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DiskMigrationStatus(v) + return nil +} + +func (p *DiskMigrationStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type HotkeyType int64 + +const ( + HotkeyType_READ HotkeyType = 0 + HotkeyType_WRITE HotkeyType = 1 +) + 
+func (p HotkeyType) String() string { + switch p { + case HotkeyType_READ: + return "READ" + case HotkeyType_WRITE: + return "WRITE" + } + return "" +} + +func HotkeyTypeFromString(s string) (HotkeyType, error) { + switch s { + case "READ": + return HotkeyType_READ, nil + case "WRITE": + return HotkeyType_WRITE, nil + } + return HotkeyType(0), fmt.Errorf("not a valid HotkeyType string") +} + +func HotkeyTypePtr(v HotkeyType) *HotkeyType { return &v } + +func (p HotkeyType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *HotkeyType) UnmarshalText(text []byte) error { + q, err := HotkeyTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *HotkeyType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = HotkeyType(v) + return nil +} + +func (p *HotkeyType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type DetectAction int64 + +const ( + DetectAction_START DetectAction = 0 + DetectAction_STOP DetectAction = 1 + DetectAction_QUERY DetectAction = 2 +) + +func (p DetectAction) String() string { + switch p { + case DetectAction_START: + return "START" + case DetectAction_STOP: + return "STOP" + case DetectAction_QUERY: + return "QUERY" + } + return "" +} + +func DetectActionFromString(s string) (DetectAction, error) { + switch s { + case "START": + return DetectAction_START, nil + case "STOP": + return DetectAction_STOP, nil + case "QUERY": + return DetectAction_QUERY, nil + } + return DetectAction(0), fmt.Errorf("not a valid DetectAction string") +} + +func DetectActionPtr(v DetectAction) *DetectAction { return &v } + +func (p DetectAction) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DetectAction) UnmarshalText(text []byte) error { + q, err := DetectActionFromString(string(text)) + if err != nil { + return err + } + *p = q + return 
nil +} + +func (p *DetectAction) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DetectAction(v) + return nil +} + +func (p *DetectAction) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Pid +// - Node1 +// - HpNode1 +type QueryReplicaDecreeRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Node1 *base.RPCAddress `thrift:"node1,2" db:"node1" json:"node1"` + HpNode1 *base.HostPort `thrift:"hp_node1,3" db:"hp_node1" json:"hp_node1,omitempty"` +} + +func NewQueryReplicaDecreeRequest() *QueryReplicaDecreeRequest { + return &QueryReplicaDecreeRequest{} +} + +var QueryReplicaDecreeRequest_Pid_DEFAULT *base.Gpid + +func (p *QueryReplicaDecreeRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return QueryReplicaDecreeRequest_Pid_DEFAULT + } + return p.Pid +} + +var QueryReplicaDecreeRequest_Node1_DEFAULT *base.RPCAddress + +func (p *QueryReplicaDecreeRequest) GetNode1() *base.RPCAddress { + if !p.IsSetNode1() { + return QueryReplicaDecreeRequest_Node1_DEFAULT + } + return p.Node1 +} + +var QueryReplicaDecreeRequest_HpNode1_DEFAULT *base.HostPort + +func (p *QueryReplicaDecreeRequest) GetHpNode1() *base.HostPort { + if !p.IsSetHpNode1() { + return QueryReplicaDecreeRequest_HpNode1_DEFAULT + } + return p.HpNode1 +} +func (p *QueryReplicaDecreeRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *QueryReplicaDecreeRequest) IsSetNode1() bool { + return p.Node1 != nil +} + +func (p *QueryReplicaDecreeRequest) IsSetHpNode1() bool { + return p.HpNode1 != nil +} + +func (p *QueryReplicaDecreeRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d 
read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) ReadField2(iprot thrift.TProtocol) error { + p.Node1 = &base.RPCAddress{} + if err := p.Node1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node1), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) ReadField3(iprot thrift.TProtocol) error { + p.HpNode1 = &base.HostPort{} + if err := p.HpNode1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode1), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_decree_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + 
} + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node1", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node1: ", p), err) + } + if err := p.Node1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node1: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode1() { + if err := oprot.WriteFieldBegin("hp_node1", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node1: ", p), err) + } + if err := p.HpNode1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode1), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node1: ", p), err) + } + } + return err +} + +func (p *QueryReplicaDecreeRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaDecreeRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - LastDecree +type QueryReplicaDecreeResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + LastDecree int64 `thrift:"last_decree,2" db:"last_decree" json:"last_decree"` +} + +func NewQueryReplicaDecreeResponse() *QueryReplicaDecreeResponse { + return &QueryReplicaDecreeResponse{} +} + +var QueryReplicaDecreeResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryReplicaDecreeResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryReplicaDecreeResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryReplicaDecreeResponse) GetLastDecree() int64 { + return p.LastDecree +} +func (p *QueryReplicaDecreeResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryReplicaDecreeResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + 
} + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaDecreeResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryReplicaDecreeResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.LastDecree = v + } + return nil +} + +func (p *QueryReplicaDecreeResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_decree_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaDecreeResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_decree", thrift.I64, 2); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:last_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_decree (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:last_decree: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaDecreeResponse(%+v)", *p) +} + +// Attributes: +// - Node1 +// - HpNode1 +type QueryReplicaInfoRequest struct { + Node1 *base.RPCAddress `thrift:"node1,1" db:"node1" json:"node1"` + HpNode1 *base.HostPort `thrift:"hp_node1,2" db:"hp_node1" json:"hp_node1,omitempty"` +} + +func NewQueryReplicaInfoRequest() *QueryReplicaInfoRequest { + return &QueryReplicaInfoRequest{} +} + +var QueryReplicaInfoRequest_Node1_DEFAULT *base.RPCAddress + +func (p *QueryReplicaInfoRequest) GetNode1() *base.RPCAddress { + if !p.IsSetNode1() { + return QueryReplicaInfoRequest_Node1_DEFAULT + } + return p.Node1 +} + +var QueryReplicaInfoRequest_HpNode1_DEFAULT *base.HostPort + +func (p *QueryReplicaInfoRequest) GetHpNode1() *base.HostPort { + if !p.IsSetHpNode1() { + return QueryReplicaInfoRequest_HpNode1_DEFAULT + } + return p.HpNode1 +} +func (p *QueryReplicaInfoRequest) IsSetNode1() bool { + return p.Node1 != nil +} + +func (p *QueryReplicaInfoRequest) IsSetHpNode1() bool { + return p.HpNode1 != nil +} + +func (p *QueryReplicaInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if 
fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node1 = &base.RPCAddress{} + if err := p.Node1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node1), err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) ReadField2(iprot thrift.TProtocol) error { + p.HpNode1 = &base.HostPort{} + if err := p.HpNode1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode1), err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node1", thrift.STRUCT, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node1: ", p), err) + } + if err := p.Node1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node1: ", p), err) + } + return err +} + +func (p *QueryReplicaInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode1() { + if err := oprot.WriteFieldBegin("hp_node1", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hp_node1: ", p), err) + } + if err := p.HpNode1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hp_node1: ", p), err) + } + } + return err +} + +func (p *QueryReplicaInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Replicas +type QueryReplicaInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Replicas []*admin.ReplicaInfo `thrift:"replicas,2" db:"replicas" json:"replicas"` +} + +func NewQueryReplicaInfoResponse() *QueryReplicaInfoResponse { + return &QueryReplicaInfoResponse{} +} + +var QueryReplicaInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryReplicaInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryReplicaInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryReplicaInfoResponse) GetReplicas() []*admin.ReplicaInfo { + return p.Replicas +} +func (p *QueryReplicaInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryReplicaInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*admin.ReplicaInfo, 0, size) + p.Replicas = tSlice + for i := 0; i < size; i++ { + _elem0 := &admin.ReplicaInfo{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Replicas = append(p.Replicas, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("query_replica_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryReplicaInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("replicas", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:replicas: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Replicas)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Replicas { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:replicas: ", p), err) + } + return err +} + +func (p *QueryReplicaInfoResponse) String() string { + if p == nil 
{ + return "" + } + return fmt.Sprintf("QueryReplicaInfoResponse(%+v)", *p) +} + +// Attributes: +// - Tag +// - FullDir +// - DiskCapacityMb +// - DiskAvailableMb +// - HoldingPrimaryReplicas +// - HoldingSecondaryReplicas +type DiskInfo struct { + Tag string `thrift:"tag,1" db:"tag" json:"tag"` + FullDir string `thrift:"full_dir,2" db:"full_dir" json:"full_dir"` + DiskCapacityMb int64 `thrift:"disk_capacity_mb,3" db:"disk_capacity_mb" json:"disk_capacity_mb"` + DiskAvailableMb int64 `thrift:"disk_available_mb,4" db:"disk_available_mb" json:"disk_available_mb"` + HoldingPrimaryReplicas map[int32][]*base.Gpid `thrift:"holding_primary_replicas,5" db:"holding_primary_replicas" json:"holding_primary_replicas"` + HoldingSecondaryReplicas map[int32][]*base.Gpid `thrift:"holding_secondary_replicas,6" db:"holding_secondary_replicas" json:"holding_secondary_replicas"` +} + +func NewDiskInfo() *DiskInfo { + return &DiskInfo{} +} + +func (p *DiskInfo) GetTag() string { + return p.Tag +} + +func (p *DiskInfo) GetFullDir() string { + return p.FullDir +} + +func (p *DiskInfo) GetDiskCapacityMb() int64 { + return p.DiskCapacityMb +} + +func (p *DiskInfo) GetDiskAvailableMb() int64 { + return p.DiskAvailableMb +} + +func (p *DiskInfo) GetHoldingPrimaryReplicas() map[int32][]*base.Gpid { + return p.HoldingPrimaryReplicas +} + +func (p *DiskInfo) GetHoldingSecondaryReplicas() map[int32][]*base.Gpid { + return p.HoldingSecondaryReplicas +} +func (p *DiskInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err 
:= iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.MAP { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.MAP { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DiskInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Tag = v + } + return nil +} + +func (p *DiskInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.FullDir = v + } + return nil +} + +func (p *DiskInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.DiskCapacityMb = v + } + return nil +} + +func (p *DiskInfo) 
ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.DiskAvailableMb = v + } + return nil +} + +func (p *DiskInfo) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32][]*base.Gpid, size) + p.HoldingPrimaryReplicas = tMap + for i := 0; i < size; i++ { + var _key1 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key1 = v + } + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]*base.Gpid, 0, size) + _val2 := tSet + for i := 0; i < size; i++ { + _elem3 := &base.Gpid{} + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + _val2 = append(_val2, _elem3) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + p.HoldingPrimaryReplicas[_key1] = _val2 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DiskInfo) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32][]*base.Gpid, size) + p.HoldingSecondaryReplicas = tMap + for i := 0; i < size; i++ { + var _key4 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key4 = v + } + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]*base.Gpid, 0, size) + _val5 := tSet + for i := 0; i < size; i++ 
{ + _elem6 := &base.Gpid{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + _val5 = append(_val5, _elem6) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + p.HoldingSecondaryReplicas[_key4] = _val5 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DiskInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("disk_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DiskInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("tag", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:tag: ", p), err) + } + if err := oprot.WriteString(string(p.Tag)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tag (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:tag: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("full_dir", 
thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:full_dir: ", p), err) + } + if err := oprot.WriteString(string(p.FullDir)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.full_dir (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:full_dir: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_capacity_mb", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:disk_capacity_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.DiskCapacityMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_capacity_mb (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:disk_capacity_mb: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_available_mb", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:disk_available_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.DiskAvailableMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_available_mb (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:disk_available_mb: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("holding_primary_replicas", thrift.MAP, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:holding_primary_replicas: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.SET, 
len(p.HoldingPrimaryReplicas)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.HoldingPrimaryReplicas { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(v); i++ { + for j := i + 1; j < len(v); j++ { + if reflect.DeepEqual(v[i], v[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", v[i])) + } + } + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:holding_primary_replicas: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("holding_secondary_replicas", thrift.MAP, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:holding_secondary_replicas: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.SET, len(p.HoldingSecondaryReplicas)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.HoldingSecondaryReplicas { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(v); i++ { + for j := i + 1; j < len(v); j++ { + if reflect.DeepEqual(v[i], v[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", v[i])) + } + } + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:holding_secondary_replicas: ", p), err) + } + return err +} + +func (p *DiskInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DiskInfo(%+v)", *p) +} + +// Attributes: +// - Node1 +// - AppName +// - HpNode1 +type QueryDiskInfoRequest struct { + Node1 *base.RPCAddress `thrift:"node1,1" db:"node1" json:"node1"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + HpNode1 *base.HostPort `thrift:"hp_node1,3" db:"hp_node1" json:"hp_node1,omitempty"` +} + +func NewQueryDiskInfoRequest() *QueryDiskInfoRequest { + return &QueryDiskInfoRequest{} +} + +var QueryDiskInfoRequest_Node1_DEFAULT *base.RPCAddress + +func (p *QueryDiskInfoRequest) GetNode1() *base.RPCAddress { + if !p.IsSetNode1() { + return QueryDiskInfoRequest_Node1_DEFAULT + } + return p.Node1 +} + +func (p *QueryDiskInfoRequest) GetAppName() string { + return p.AppName +} + +var QueryDiskInfoRequest_HpNode1_DEFAULT *base.HostPort + +func (p *QueryDiskInfoRequest) GetHpNode1() *base.HostPort { + if !p.IsSetHpNode1() { + return QueryDiskInfoRequest_HpNode1_DEFAULT + } + return p.HpNode1 +} +func 
(p *QueryDiskInfoRequest) IsSetNode1() bool { + return p.Node1 != nil +} + +func (p *QueryDiskInfoRequest) IsSetHpNode1() bool { + return p.HpNode1 != nil +} + +func (p *QueryDiskInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryDiskInfoRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node1 = &base.RPCAddress{} + if err := p.Node1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node1), err) + } + return nil +} + +func (p *QueryDiskInfoRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryDiskInfoRequest) 
ReadField3(iprot thrift.TProtocol) error { + p.HpNode1 = &base.HostPort{} + if err := p.HpNode1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode1), err) + } + return nil +} + +func (p *QueryDiskInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryDiskInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node1", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node1: ", p), err) + } + if err := p.Node1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node1: ", p), err) + } + return err +} + +func (p *QueryDiskInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 
2:app_name: ", p), err) + } + return err +} + +func (p *QueryDiskInfoRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode1() { + if err := oprot.WriteFieldBegin("hp_node1", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node1: ", p), err) + } + if err := p.HpNode1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node1: ", p), err) + } + } + return err +} + +func (p *QueryDiskInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryDiskInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - TotalCapacityMb +// - TotalAvailableMb +// - DiskInfos +type QueryDiskInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + TotalCapacityMb int64 `thrift:"total_capacity_mb,2" db:"total_capacity_mb" json:"total_capacity_mb"` + TotalAvailableMb int64 `thrift:"total_available_mb,3" db:"total_available_mb" json:"total_available_mb"` + DiskInfos []*DiskInfo `thrift:"disk_infos,4" db:"disk_infos" json:"disk_infos"` +} + +func NewQueryDiskInfoResponse() *QueryDiskInfoResponse { + return &QueryDiskInfoResponse{} +} + +var QueryDiskInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryDiskInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryDiskInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryDiskInfoResponse) GetTotalCapacityMb() int64 { + return p.TotalCapacityMb +} + +func (p *QueryDiskInfoResponse) GetTotalAvailableMb() int64 { + return p.TotalAvailableMb +} + +func (p *QueryDiskInfoResponse) GetDiskInfos() []*DiskInfo { + return p.DiskInfos +} +func (p *QueryDiskInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryDiskInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TotalCapacityMb = v + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TotalAvailableMb = v + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DiskInfo, 0, size) + p.DiskInfos = tSlice + for i := 0; i < size; i++ { + _elem7 := &DiskInfo{} + if err := _elem7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) + } + p.DiskInfos = append(p.DiskInfos, _elem7) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryDiskInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryDiskInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("total_capacity_mb", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_capacity_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.TotalCapacityMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_capacity_mb (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:total_capacity_mb: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("total_available_mb", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:total_available_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.TotalAvailableMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_available_mb (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:total_available_mb: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_infos", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:disk_infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DiskInfos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.DiskInfos { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: 
", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:disk_infos: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryDiskInfoResponse(%+v)", *p) +} + +// Attributes: +// - Pid +// - OriginDisk +// - TargetDisk +type ReplicaDiskMigrateRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + OriginDisk string `thrift:"origin_disk,2" db:"origin_disk" json:"origin_disk"` + TargetDisk string `thrift:"target_disk,3" db:"target_disk" json:"target_disk"` +} + +func NewReplicaDiskMigrateRequest() *ReplicaDiskMigrateRequest { + return &ReplicaDiskMigrateRequest{} +} + +var ReplicaDiskMigrateRequest_Pid_DEFAULT *base.Gpid + +func (p *ReplicaDiskMigrateRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ReplicaDiskMigrateRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *ReplicaDiskMigrateRequest) GetOriginDisk() string { + return p.OriginDisk +} + +func (p *ReplicaDiskMigrateRequest) GetTargetDisk() string { + return p.TargetDisk +} +func (p *ReplicaDiskMigrateRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ReplicaDiskMigrateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.OriginDisk = v + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TargetDisk = v + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_disk_migrate_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p 
*ReplicaDiskMigrateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("origin_disk", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:origin_disk: ", p), err) + } + if err := oprot.WriteString(string(p.OriginDisk)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.origin_disk (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:origin_disk: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target_disk", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:target_disk: ", p), err) + } + if err := oprot.WriteString(string(p.TargetDisk)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.target_disk (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:target_disk: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaDiskMigrateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Hint +type ReplicaDiskMigrateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" 
json:"err"` + Hint *string `thrift:"hint,2" db:"hint" json:"hint,omitempty"` +} + +func NewReplicaDiskMigrateResponse() *ReplicaDiskMigrateResponse { + return &ReplicaDiskMigrateResponse{} +} + +var ReplicaDiskMigrateResponse_Err_DEFAULT *base.ErrorCode + +func (p *ReplicaDiskMigrateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ReplicaDiskMigrateResponse_Err_DEFAULT + } + return p.Err +} + +var ReplicaDiskMigrateResponse_Hint_DEFAULT string + +func (p *ReplicaDiskMigrateResponse) GetHint() string { + if !p.IsSetHint() { + return ReplicaDiskMigrateResponse_Hint_DEFAULT + } + return *p.Hint +} +func (p *ReplicaDiskMigrateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ReplicaDiskMigrateResponse) IsSetHint() bool { + return p.Hint != nil +} + +func (p *ReplicaDiskMigrateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) ReadField1(iprot thrift.TProtocol) error { + 
p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Hint = &v + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_disk_migrate_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHint() { + if err := oprot.WriteFieldBegin("hint", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint: ", p), err) + } + if err := oprot.WriteString(string(*p.Hint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint (2) field write 
error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint: ", p), err) + } + } + return err +} + +func (p *ReplicaDiskMigrateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaDiskMigrateResponse(%+v)", *p) +} + +// Attributes: +// - Type +// - Action +// - Pid +type DetectHotkeyRequest struct { + Type HotkeyType `thrift:"type,1" db:"type" json:"type"` + Action DetectAction `thrift:"action,2" db:"action" json:"action"` + Pid *base.Gpid `thrift:"pid,3" db:"pid" json:"pid"` +} + +func NewDetectHotkeyRequest() *DetectHotkeyRequest { + return &DetectHotkeyRequest{} +} + +func (p *DetectHotkeyRequest) GetType() HotkeyType { + return p.Type +} + +func (p *DetectHotkeyRequest) GetAction() DetectAction { + return p.Action +} + +var DetectHotkeyRequest_Pid_DEFAULT *base.Gpid + +func (p *DetectHotkeyRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return DetectHotkeyRequest_Pid_DEFAULT + } + return p.Pid +} +func (p *DetectHotkeyRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *DetectHotkeyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := 
p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DetectHotkeyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := HotkeyType(v) + p.Type = temp + } + return nil +} + +func (p *DetectHotkeyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := DetectAction(v) + p.Action = temp + } + return nil +} + +func (p *DetectHotkeyRequest) ReadField3(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *DetectHotkeyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("detect_hotkey_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DetectHotkeyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) + } + return err +} + +func (p *DetectHotkeyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("action", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:action: ", p), err) + } + if err := oprot.WriteI32(int32(p.Action)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.action (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:action: ", p), err) + } + return err +} + +func (p *DetectHotkeyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:pid: ", p), err) + } + return err +} + +func (p *DetectHotkeyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DetectHotkeyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - ErrHint +// - HotkeyResult_ +type DetectHotkeyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + ErrHint *string `thrift:"err_hint,2" db:"err_hint" json:"err_hint,omitempty"` + HotkeyResult_ *string `thrift:"hotkey_result,3" db:"hotkey_result" 
json:"hotkey_result,omitempty"` +} + +func NewDetectHotkeyResponse() *DetectHotkeyResponse { + return &DetectHotkeyResponse{} +} + +var DetectHotkeyResponse_Err_DEFAULT *base.ErrorCode + +func (p *DetectHotkeyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DetectHotkeyResponse_Err_DEFAULT + } + return p.Err +} + +var DetectHotkeyResponse_ErrHint_DEFAULT string + +func (p *DetectHotkeyResponse) GetErrHint() string { + if !p.IsSetErrHint() { + return DetectHotkeyResponse_ErrHint_DEFAULT + } + return *p.ErrHint +} + +var DetectHotkeyResponse_HotkeyResult__DEFAULT string + +func (p *DetectHotkeyResponse) GetHotkeyResult_() string { + if !p.IsSetHotkeyResult_() { + return DetectHotkeyResponse_HotkeyResult__DEFAULT + } + return *p.HotkeyResult_ +} +func (p *DetectHotkeyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DetectHotkeyResponse) IsSetErrHint() bool { + return p.ErrHint != nil +} + +func (p *DetectHotkeyResponse) IsSetHotkeyResult_() bool { + return p.HotkeyResult_ != nil +} + +func (p *DetectHotkeyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DetectHotkeyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DetectHotkeyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ErrHint = &v + } + return nil +} + +func (p *DetectHotkeyResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.HotkeyResult_ = &v + } + return nil +} + +func (p *DetectHotkeyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("detect_hotkey_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DetectHotkeyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin 
error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DetectHotkeyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetErrHint() { + if err := oprot.WriteFieldBegin("err_hint", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err_hint: ", p), err) + } + if err := oprot.WriteString(string(*p.ErrHint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.err_hint (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err_hint: ", p), err) + } + } + return err +} + +func (p *DetectHotkeyResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHotkeyResult_() { + if err := oprot.WriteFieldBegin("hotkey_result", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hotkey_result: ", p), err) + } + if err := oprot.WriteString(string(*p.HotkeyResult_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hotkey_result (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hotkey_result: ", p), err) + } + } + return err +} + +func (p *DetectHotkeyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DetectHotkeyResponse(%+v)", *p) +} + +// Attributes: +// - DiskStr +type AddNewDiskRequest struct { + DiskStr string `thrift:"disk_str,1" db:"disk_str" json:"disk_str"` +} + +func NewAddNewDiskRequest() *AddNewDiskRequest { + return &AddNewDiskRequest{} +} + +func (p *AddNewDiskRequest) GetDiskStr() string { + return p.DiskStr +} +func (p 
*AddNewDiskRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddNewDiskRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.DiskStr = v + } + return nil +} + +func (p *AddNewDiskRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_new_disk_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AddNewDiskRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_str", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:disk_str: ", p), err) + } + if err := 
oprot.WriteString(string(p.DiskStr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_str (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:disk_str: ", p), err) + } + return err +} + +func (p *AddNewDiskRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddNewDiskRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - ErrHint +type AddNewDiskResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + ErrHint *string `thrift:"err_hint,2" db:"err_hint" json:"err_hint,omitempty"` +} + +func NewAddNewDiskResponse() *AddNewDiskResponse { + return &AddNewDiskResponse{} +} + +var AddNewDiskResponse_Err_DEFAULT *base.ErrorCode + +func (p *AddNewDiskResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return AddNewDiskResponse_Err_DEFAULT + } + return p.Err +} + +var AddNewDiskResponse_ErrHint_DEFAULT string + +func (p *AddNewDiskResponse) GetErrHint() string { + if !p.IsSetErrHint() { + return AddNewDiskResponse_ErrHint_DEFAULT + } + return *p.ErrHint +} +func (p *AddNewDiskResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *AddNewDiskResponse) IsSetErrHint() bool { + return p.ErrHint != nil +} + +func (p *AddNewDiskResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := 
p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddNewDiskResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *AddNewDiskResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ErrHint = &v + } + return nil +} + +func (p *AddNewDiskResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_new_disk_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AddNewDiskResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *AddNewDiskResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetErrHint() { + if err := oprot.WriteFieldBegin("err_hint", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err_hint: ", p), err) + } + if err := oprot.WriteString(string(*p.ErrHint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.err_hint (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err_hint: ", p), err) + } + } + return err +} + +func (p *AddNewDiskResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddNewDiskResponse(%+v)", *p) +} + +type ReplicaClient interface { + // Parameters: + // - Req + QueryDiskInfo(ctx context.Context, req *QueryDiskInfoRequest) (r *QueryDiskInfoResponse, err error) + // Parameters: + // - Req + DiskMigrate(ctx context.Context, req *ReplicaDiskMigrateRequest) (r *ReplicaDiskMigrateResponse, err error) + // Parameters: + // - Req + AddDisk(ctx context.Context, req *AddNewDiskRequest) (r *AddNewDiskResponse, err error) +} + +type ReplicaClientClient struct { + c thrift.TClient +} + +func NewReplicaClientClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ReplicaClientClient { + return &ReplicaClientClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewReplicaClientClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ReplicaClientClient { + return &ReplicaClientClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewReplicaClientClient(c thrift.TClient) *ReplicaClientClient { + return &ReplicaClientClient{ + c: c, + } +} + +func (p *ReplicaClientClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Req +func (p *ReplicaClientClient) 
QueryDiskInfo(ctx context.Context, req *QueryDiskInfoRequest) (r *QueryDiskInfoResponse, err error) { + var _args8 ReplicaClientQueryDiskInfoArgs + _args8.Req = req + var _result9 ReplicaClientQueryDiskInfoResult + if err = p.Client_().Call(ctx, "query_disk_info", &_args8, &_result9); err != nil { + return + } + return _result9.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *ReplicaClientClient) DiskMigrate(ctx context.Context, req *ReplicaDiskMigrateRequest) (r *ReplicaDiskMigrateResponse, err error) { + var _args10 ReplicaClientDiskMigrateArgs + _args10.Req = req + var _result11 ReplicaClientDiskMigrateResult + if err = p.Client_().Call(ctx, "disk_migrate", &_args10, &_result11); err != nil { + return + } + return _result11.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *ReplicaClientClient) AddDisk(ctx context.Context, req *AddNewDiskRequest) (r *AddNewDiskResponse, err error) { + var _args12 ReplicaClientAddDiskArgs + _args12.Req = req + var _result13 ReplicaClientAddDiskResult + if err = p.Client_().Call(ctx, "add_disk", &_args12, &_result13); err != nil { + return + } + return _result13.GetSuccess(), nil +} + +type ReplicaClientProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ReplicaClient +} + +func (p *ReplicaClientProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ReplicaClientProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ReplicaClientProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewReplicaClientProcessor(handler ReplicaClient) *ReplicaClientProcessor { + + self14 := &ReplicaClientProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self14.processorMap["query_disk_info"] = &replicaClientProcessorQueryDiskInfo{handler: 
handler} + self14.processorMap["disk_migrate"] = &replicaClientProcessorDiskMigrate{handler: handler} + self14.processorMap["add_disk"] = &replicaClientProcessorAddDisk{handler: handler} + return self14 +} + +func (p *ReplicaClientProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x15 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x15.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x15 + +} + +type replicaClientProcessorQueryDiskInfo struct { + handler ReplicaClient +} + +func (p *replicaClientProcessorQueryDiskInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ReplicaClientQueryDiskInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_disk_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := ReplicaClientQueryDiskInfoResult{} + var retval *QueryDiskInfoResponse + var err2 error + if retval, err2 = p.handler.QueryDiskInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_disk_info: "+err2.Error()) + oprot.WriteMessageBegin("query_disk_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_disk_info", 
thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type replicaClientProcessorDiskMigrate struct { + handler ReplicaClient +} + +func (p *replicaClientProcessorDiskMigrate) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ReplicaClientDiskMigrateArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("disk_migrate", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := ReplicaClientDiskMigrateResult{} + var retval *ReplicaDiskMigrateResponse + var err2 error + if retval, err2 = p.handler.DiskMigrate(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing disk_migrate: "+err2.Error()) + oprot.WriteMessageBegin("disk_migrate", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("disk_migrate", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type replicaClientProcessorAddDisk struct { + handler ReplicaClient +} + +func (p *replicaClientProcessorAddDisk) Process(ctx context.Context, seqId int32, iprot, oprot 
thrift.TProtocol) (success bool, err thrift.TException) { + args := ReplicaClientAddDiskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("add_disk", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := ReplicaClientAddDiskResult{} + var retval *AddNewDiskResponse + var err2 error + if retval, err2 = p.handler.AddDisk(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing add_disk: "+err2.Error()) + oprot.WriteMessageBegin("add_disk", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("add_disk", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Req +type ReplicaClientQueryDiskInfoArgs struct { + Req *QueryDiskInfoRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewReplicaClientQueryDiskInfoArgs() *ReplicaClientQueryDiskInfoArgs { + return &ReplicaClientQueryDiskInfoArgs{} +} + +var ReplicaClientQueryDiskInfoArgs_Req_DEFAULT *QueryDiskInfoRequest + +func (p *ReplicaClientQueryDiskInfoArgs) GetReq() *QueryDiskInfoRequest { + if !p.IsSetReq() { + return ReplicaClientQueryDiskInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *ReplicaClientQueryDiskInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryDiskInfoRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *ReplicaClientQueryDiskInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientQueryDiskInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ReplicaClientQueryDiskInfoResult struct { + Success *QueryDiskInfoResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewReplicaClientQueryDiskInfoResult() *ReplicaClientQueryDiskInfoResult { + return &ReplicaClientQueryDiskInfoResult{} +} + +var ReplicaClientQueryDiskInfoResult_Success_DEFAULT *QueryDiskInfoResponse + +func (p *ReplicaClientQueryDiskInfoResult) GetSuccess() *QueryDiskInfoResponse { + if !p.IsSetSuccess() { + return ReplicaClientQueryDiskInfoResult_Success_DEFAULT + } + return p.Success +} +func (p *ReplicaClientQueryDiskInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ReplicaClientQueryDiskInfoResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return 
nil +} + +func (p *ReplicaClientQueryDiskInfoResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryDiskInfoResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ReplicaClientQueryDiskInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientQueryDiskInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type ReplicaClientDiskMigrateArgs struct { + Req *ReplicaDiskMigrateRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewReplicaClientDiskMigrateArgs() *ReplicaClientDiskMigrateArgs { + return &ReplicaClientDiskMigrateArgs{} +} + +var ReplicaClientDiskMigrateArgs_Req_DEFAULT *ReplicaDiskMigrateRequest + +func (p 
*ReplicaClientDiskMigrateArgs) GetReq() *ReplicaDiskMigrateRequest { + if !p.IsSetReq() { + return ReplicaClientDiskMigrateArgs_Req_DEFAULT + } + return p.Req +} +func (p *ReplicaClientDiskMigrateArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *ReplicaClientDiskMigrateArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ReplicaDiskMigrateRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("disk_migrate_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) 
+ } + return nil +} + +func (p *ReplicaClientDiskMigrateArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *ReplicaClientDiskMigrateArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientDiskMigrateArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ReplicaClientDiskMigrateResult struct { + Success *ReplicaDiskMigrateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewReplicaClientDiskMigrateResult() *ReplicaClientDiskMigrateResult { + return &ReplicaClientDiskMigrateResult{} +} + +var ReplicaClientDiskMigrateResult_Success_DEFAULT *ReplicaDiskMigrateResponse + +func (p *ReplicaClientDiskMigrateResult) GetSuccess() *ReplicaDiskMigrateResponse { + if !p.IsSetSuccess() { + return ReplicaClientDiskMigrateResult_Success_DEFAULT + } + return p.Success +} +func (p *ReplicaClientDiskMigrateResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ReplicaClientDiskMigrateResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err 
!= nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ReplicaDiskMigrateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("disk_migrate_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ReplicaClientDiskMigrateResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientDiskMigrateResult(%+v)", *p) +} + +// Attributes: +// - Req +type ReplicaClientAddDiskArgs 
struct { + Req *AddNewDiskRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewReplicaClientAddDiskArgs() *ReplicaClientAddDiskArgs { + return &ReplicaClientAddDiskArgs{} +} + +var ReplicaClientAddDiskArgs_Req_DEFAULT *AddNewDiskRequest + +func (p *ReplicaClientAddDiskArgs) GetReq() *AddNewDiskRequest { + if !p.IsSetReq() { + return ReplicaClientAddDiskArgs_Req_DEFAULT + } + return p.Req +} +func (p *ReplicaClientAddDiskArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *ReplicaClientAddDiskArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientAddDiskArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &AddNewDiskRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *ReplicaClientAddDiskArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_disk_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := 
oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientAddDiskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *ReplicaClientAddDiskArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientAddDiskArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ReplicaClientAddDiskResult struct { + Success *AddNewDiskResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewReplicaClientAddDiskResult() *ReplicaClientAddDiskResult { + return &ReplicaClientAddDiskResult{} +} + +var ReplicaClientAddDiskResult_Success_DEFAULT *AddNewDiskResponse + +func (p *ReplicaClientAddDiskResult) GetSuccess() *AddNewDiskResponse { + if !p.IsSetSuccess() { + return ReplicaClientAddDiskResult_Success_DEFAULT + } + return p.Success +} +func (p *ReplicaClientAddDiskResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ReplicaClientAddDiskResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if 
fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientAddDiskResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &AddNewDiskResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *ReplicaClientAddDiskResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_disk_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientAddDiskResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ReplicaClientAddDiskResult) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("ReplicaClientAddDiskResult(%+v)", *p) +} diff --git a/go-client/idl/replication/GoUnusedProtection__.go b/go-client/idl/replication/GoUnusedProtection__.go new file mode 100644 index 0000000000..20c1f2d56a --- /dev/null +++ b/go-client/idl/replication/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package replication + +var GoUnusedProtection__ int diff --git a/go-client/idl/replication/dsn.layer2-consts.go b/go-client/idl/replication/dsn.layer2-consts.go new file mode 100644 index 0000000000..da6f576153 --- /dev/null +++ b/go-client/idl/replication/dsn.layer2-consts.go @@ -0,0 +1,25 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package replication + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/replication/dsn.layer2.go b/go-client/idl/replication/dsn.layer2.go new file mode 100644 index 0000000000..0ee6ec2e90 --- /dev/null +++ b/go-client/idl/replication/dsn.layer2.go @@ -0,0 +1,2136 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package replication + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ + +type AppStatus int64 + +const ( + AppStatus_AS_INVALID AppStatus = 0 + AppStatus_AS_AVAILABLE AppStatus = 1 + AppStatus_AS_CREATING AppStatus = 2 + AppStatus_AS_CREATE_FAILED AppStatus = 3 + AppStatus_AS_DROPPING AppStatus = 4 + AppStatus_AS_DROP_FAILED AppStatus = 5 + AppStatus_AS_DROPPED AppStatus = 6 + AppStatus_AS_RECALLING AppStatus = 7 +) + +func (p AppStatus) String() string { + switch p { + case AppStatus_AS_INVALID: + return "AS_INVALID" + case AppStatus_AS_AVAILABLE: + return "AS_AVAILABLE" + case AppStatus_AS_CREATING: + return "AS_CREATING" + case AppStatus_AS_CREATE_FAILED: + return "AS_CREATE_FAILED" + case AppStatus_AS_DROPPING: + return "AS_DROPPING" + case AppStatus_AS_DROP_FAILED: + return "AS_DROP_FAILED" + case AppStatus_AS_DROPPED: + return "AS_DROPPED" + case AppStatus_AS_RECALLING: + return "AS_RECALLING" + } + return "" +} + +func AppStatusFromString(s string) (AppStatus, error) { + switch s { + case "AS_INVALID": + return AppStatus_AS_INVALID, nil + case "AS_AVAILABLE": + return AppStatus_AS_AVAILABLE, nil + case "AS_CREATING": + return AppStatus_AS_CREATING, nil + case "AS_CREATE_FAILED": + return AppStatus_AS_CREATE_FAILED, nil + case "AS_DROPPING": + return AppStatus_AS_DROPPING, nil + case "AS_DROP_FAILED": + return AppStatus_AS_DROP_FAILED, nil + case "AS_DROPPED": + return AppStatus_AS_DROPPED, nil + case "AS_RECALLING": + return AppStatus_AS_RECALLING, nil + } + return AppStatus(0), fmt.Errorf("not a valid AppStatus string") +} + +func AppStatusPtr(v AppStatus) *AppStatus { return &v } + +func (p AppStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AppStatus) UnmarshalText(text []byte) error { + q, err := AppStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AppStatus) Scan(value 
interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AppStatus(v) + return nil +} + +func (p *AppStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Pid +// - Ballot +// - MaxReplicaCount +// - Primary +// - Secondaries +// - LastDrops +// - LastCommittedDecree +// - PartitionFlags +// - HpPrimary +// - HpSecondaries +// - HpLastDrops +type PartitionConfiguration struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Ballot int64 `thrift:"ballot,2" db:"ballot" json:"ballot"` + MaxReplicaCount int32 `thrift:"max_replica_count,3" db:"max_replica_count" json:"max_replica_count"` + Primary *base.RPCAddress `thrift:"primary,4" db:"primary" json:"primary"` + Secondaries []*base.RPCAddress `thrift:"secondaries,5" db:"secondaries" json:"secondaries"` + LastDrops []*base.RPCAddress `thrift:"last_drops,6" db:"last_drops" json:"last_drops"` + LastCommittedDecree int64 `thrift:"last_committed_decree,7" db:"last_committed_decree" json:"last_committed_decree"` + PartitionFlags int32 `thrift:"partition_flags,8" db:"partition_flags" json:"partition_flags"` + HpPrimary *base.HostPort `thrift:"hp_primary,9" db:"hp_primary" json:"hp_primary,omitempty"` + HpSecondaries []*base.HostPort `thrift:"hp_secondaries,10" db:"hp_secondaries" json:"hp_secondaries,omitempty"` + HpLastDrops []*base.HostPort `thrift:"hp_last_drops,11" db:"hp_last_drops" json:"hp_last_drops,omitempty"` +} + +func NewPartitionConfiguration() *PartitionConfiguration { + return &PartitionConfiguration{} +} + +var PartitionConfiguration_Pid_DEFAULT *base.Gpid + +func (p *PartitionConfiguration) GetPid() *base.Gpid { + if !p.IsSetPid() { + return PartitionConfiguration_Pid_DEFAULT + } + return p.Pid +} + +func (p *PartitionConfiguration) GetBallot() int64 { + return p.Ballot +} + +func (p *PartitionConfiguration) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + 
+var PartitionConfiguration_Primary_DEFAULT *base.RPCAddress + +func (p *PartitionConfiguration) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return PartitionConfiguration_Primary_DEFAULT + } + return p.Primary +} + +func (p *PartitionConfiguration) GetSecondaries() []*base.RPCAddress { + return p.Secondaries +} + +func (p *PartitionConfiguration) GetLastDrops() []*base.RPCAddress { + return p.LastDrops +} + +func (p *PartitionConfiguration) GetLastCommittedDecree() int64 { + return p.LastCommittedDecree +} + +func (p *PartitionConfiguration) GetPartitionFlags() int32 { + return p.PartitionFlags +} + +var PartitionConfiguration_HpPrimary_DEFAULT *base.HostPort + +func (p *PartitionConfiguration) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return PartitionConfiguration_HpPrimary_DEFAULT + } + return p.HpPrimary +} + +var PartitionConfiguration_HpSecondaries_DEFAULT []*base.HostPort + +func (p *PartitionConfiguration) GetHpSecondaries() []*base.HostPort { + return p.HpSecondaries +} + +var PartitionConfiguration_HpLastDrops_DEFAULT []*base.HostPort + +func (p *PartitionConfiguration) GetHpLastDrops() []*base.HostPort { + return p.HpLastDrops +} +func (p *PartitionConfiguration) IsSetPid() bool { + return p.Pid != nil +} + +func (p *PartitionConfiguration) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *PartitionConfiguration) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *PartitionConfiguration) IsSetHpSecondaries() bool { + return p.HpSecondaries != nil +} + +func (p *PartitionConfiguration) IsSetHpLastDrops() bool { + return p.HpLastDrops != nil +} + +func (p *PartitionConfiguration) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, 
fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I32 { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.LIST { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if 
fieldTypeId == thrift.LIST { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField4(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.RPCAddress, 0, size) + p.Secondaries = tSlice + for i := 0; i < size; i++ { + _elem0 := &base.RPCAddress{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Secondaries = append(p.Secondaries, _elem0) + } + if err := 
iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.RPCAddress, 0, size) + p.LastDrops = tSlice + for i := 0; i < size; i++ { + _elem1 := &base.RPCAddress{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.LastDrops = append(p.LastDrops, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.LastCommittedDecree = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.PartitionFlags = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField9(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.HostPort, 0, size) + p.HpSecondaries = tSlice + for i := 0; i < size; i++ { + _elem2 := &base.HostPort{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + 
p.HpSecondaries = append(p.HpSecondaries, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField11(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.HostPort, 0, size) + p.HpLastDrops = tSlice + for i := 0; i < size; i++ { + _elem3 := &base.HostPort{} + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.HpLastDrops = append(p.HpLastDrops, _elem3) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("partition_configuration"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct 
stop error: ", err) + } + return nil +} + +func (p *PartitionConfiguration) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ballot: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_replica_count: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:primary: ", p), 
err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:primary: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("secondaries", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:secondaries: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Secondaries)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Secondaries { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:secondaries: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_drops", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:last_drops: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.LastDrops)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.LastDrops { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:last_drops: ", p), err) + } + 
return err +} + +func (p *PartitionConfiguration) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_committed_decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:last_committed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastCommittedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_committed_decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:last_committed_decree: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_flags", thrift.I32, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:partition_flags: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionFlags)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_flags (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:partition_flags: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:hp_primary: ", p), err) + } + } + return err +} + +func (p *PartitionConfiguration) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetHpSecondaries() { + if err := 
oprot.WriteFieldBegin("hp_secondaries", thrift.LIST, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:hp_secondaries: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HpSecondaries)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpSecondaries { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:hp_secondaries: ", p), err) + } + } + return err +} + +func (p *PartitionConfiguration) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetHpLastDrops() { + if err := oprot.WriteFieldBegin("hp_last_drops", thrift.LIST, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:hp_last_drops: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HpLastDrops)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpLastDrops { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:hp_last_drops: ", p), err) + } + } + return err +} + +func (p *PartitionConfiguration) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PartitionConfiguration(%+v)", *p) +} + +// Attributes: +// - AppName +// - PartitionIndices +type QueryCfgRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + PartitionIndices []int32 
`thrift:"partition_indices,2" db:"partition_indices" json:"partition_indices"` +} + +func NewQueryCfgRequest() *QueryCfgRequest { + return &QueryCfgRequest{} +} + +func (p *QueryCfgRequest) GetAppName() string { + return p.AppName +} + +func (p *QueryCfgRequest) GetPartitionIndices() []int32 { + return p.PartitionIndices +} +func (p *QueryCfgRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryCfgRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryCfgRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.PartitionIndices = tSlice + for i := 0; i < size; i++ { + var _elem4 int32 + if v, err := 
iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem4 = v + } + p.PartitionIndices = append(p.PartitionIndices, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryCfgRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryCfgRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryCfgRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_indices", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_indices: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.PartitionIndices)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PartitionIndices { + if err := oprot.WriteI32(int32(v)); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_indices: ", p), err) + } + return err +} + +func (p *QueryCfgRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryCfgRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - AppID +// - PartitionCount +// - IsStateful +// - Partitions +type QueryCfgResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` + PartitionCount int32 `thrift:"partition_count,3" db:"partition_count" json:"partition_count"` + IsStateful bool `thrift:"is_stateful,4" db:"is_stateful" json:"is_stateful"` + Partitions []*PartitionConfiguration `thrift:"partitions,5" db:"partitions" json:"partitions"` +} + +func NewQueryCfgResponse() *QueryCfgResponse { + return &QueryCfgResponse{} +} + +var QueryCfgResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryCfgResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryCfgResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryCfgResponse) GetAppID() int32 { + return p.AppID +} + +func (p *QueryCfgResponse) GetPartitionCount() int32 { + return p.PartitionCount +} + +func (p *QueryCfgResponse) GetIsStateful() bool { + return p.IsStateful +} + +func (p *QueryCfgResponse) GetPartitions() []*PartitionConfiguration { + return p.Partitions +} +func (p *QueryCfgResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryCfgResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryCfgResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryCfgResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *QueryCfgResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return 
thrift.PrependError("error reading field 3: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *QueryCfgResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.IsStateful = v + } + return nil +} + +func (p *QueryCfgResponse) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*PartitionConfiguration, 0, size) + p.Partitions = tSlice + for i := 0; i < size; i++ { + _elem5 := &PartitionConfiguration{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.Partitions = append(p.Partitions, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryCfgResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryCfgResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:partition_count: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:is_stateful: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_stateful (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 4:is_stateful: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partitions: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partitions: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryCfgResponse(%+v)", *p) +} + +// Attributes: +// - AppID +// - PartitionIndex +// - ClientTimeout +// - PartitionHash +// - IsBackupRequest +type RequestMeta struct { + AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,2" db:"partition_index" json:"partition_index"` + ClientTimeout int32 `thrift:"client_timeout,3" db:"client_timeout" json:"client_timeout"` + PartitionHash int64 `thrift:"partition_hash,4" db:"partition_hash" json:"partition_hash"` + IsBackupRequest bool `thrift:"is_backup_request,5" db:"is_backup_request" json:"is_backup_request"` +} + +func NewRequestMeta() *RequestMeta { + return &RequestMeta{} +} + +func (p *RequestMeta) GetAppID() int32 { + return p.AppID +} + +func (p *RequestMeta) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *RequestMeta) GetClientTimeout() int32 { + return p.ClientTimeout +} + +func (p *RequestMeta) 
GetPartitionHash() int64 { + return p.PartitionHash +} + +func (p *RequestMeta) GetIsBackupRequest() bool { + return p.IsBackupRequest +} +func (p *RequestMeta) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RequestMeta) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppID 
= v + } + return nil +} + +func (p *RequestMeta) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *RequestMeta) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ClientTimeout = v + } + return nil +} + +func (p *RequestMeta) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionHash = v + } + return nil +} + +func (p *RequestMeta) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IsBackupRequest = v + } + return nil +} + +func (p *RequestMeta) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("request_meta"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RequestMeta) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) + } + if err := 
oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_index: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("client_timeout", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:client_timeout: ", p), err) + } + if err := oprot.WriteI32(int32(p.ClientTimeout)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.client_timeout (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:client_timeout: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_hash", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_hash: ", p), err) + } + if err := oprot.WriteI64(int64(p.PartitionHash)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_hash (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write 
field end error 4:partition_hash: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_backup_request", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_backup_request: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsBackupRequest)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_backup_request (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_backup_request: ", p), err) + } + return err +} + +func (p *RequestMeta) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RequestMeta(%+v)", *p) +} + +// Attributes: +// - Status +// - AppType +// - AppName +// - AppID +// - PartitionCount +// - Envs +// - IsStateful +// - MaxReplicaCount +// - ExpireSecond +// - CreateSecond +// - DropSecond +// - Duplicating +// - InitPartitionCount +// - IsBulkLoading +type AppInfo struct { + Status AppStatus `thrift:"status,1" db:"status" json:"status"` + AppType string `thrift:"app_type,2" db:"app_type" json:"app_type"` + AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` + AppID int32 `thrift:"app_id,4" db:"app_id" json:"app_id"` + PartitionCount int32 `thrift:"partition_count,5" db:"partition_count" json:"partition_count"` + Envs map[string]string `thrift:"envs,6" db:"envs" json:"envs"` + IsStateful bool `thrift:"is_stateful,7" db:"is_stateful" json:"is_stateful"` + MaxReplicaCount int32 `thrift:"max_replica_count,8" db:"max_replica_count" json:"max_replica_count"` + ExpireSecond int64 `thrift:"expire_second,9" db:"expire_second" json:"expire_second"` + CreateSecond int64 `thrift:"create_second,10" db:"create_second" json:"create_second"` + DropSecond int64 `thrift:"drop_second,11" db:"drop_second" json:"drop_second"` + Duplicating bool `thrift:"duplicating,12" db:"duplicating" 
json:"duplicating"` + InitPartitionCount int32 `thrift:"init_partition_count,13" db:"init_partition_count" json:"init_partition_count"` + IsBulkLoading bool `thrift:"is_bulk_loading,14" db:"is_bulk_loading" json:"is_bulk_loading"` +} + +func NewAppInfo() *AppInfo { + return &AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } +} + +func (p *AppInfo) GetStatus() AppStatus { + return p.Status +} + +func (p *AppInfo) GetAppType() string { + return p.AppType +} + +func (p *AppInfo) GetAppName() string { + return p.AppName +} + +func (p *AppInfo) GetAppID() int32 { + return p.AppID +} + +func (p *AppInfo) GetPartitionCount() int32 { + return p.PartitionCount +} + +func (p *AppInfo) GetEnvs() map[string]string { + return p.Envs +} + +func (p *AppInfo) GetIsStateful() bool { + return p.IsStateful +} + +func (p *AppInfo) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +func (p *AppInfo) GetExpireSecond() int64 { + return p.ExpireSecond +} + +func (p *AppInfo) GetCreateSecond() int64 { + return p.CreateSecond +} + +func (p *AppInfo) GetDropSecond() int64 { + return p.DropSecond +} + +var AppInfo_Duplicating_DEFAULT bool = false + +func (p *AppInfo) GetDuplicating() bool { + return p.Duplicating +} + +func (p *AppInfo) GetInitPartitionCount() int32 { + return p.InitPartitionCount +} + +var AppInfo_IsBulkLoading_DEFAULT bool = false + +func (p *AppInfo) GetIsBulkLoading() bool { + return p.IsBulkLoading +} +func (p *AppInfo) IsSetDuplicating() bool { + return p.Duplicating != AppInfo_Duplicating_DEFAULT +} + +func (p *AppInfo) IsSetIsBulkLoading() bool { + return p.IsBulkLoading != AppInfo_IsBulkLoading_DEFAULT +} + +func (p *AppInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), 
err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.MAP { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I32 { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I64 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == 
thrift.I64 { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField12(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 13: + if fieldTypeId == thrift.I32 { + if err := p.ReadField13(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 14: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField14(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AppInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := AppStatus(v) + p.Status = temp + } + return nil +} + +func (p *AppInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppType = v + } + return nil +} + +func (p *AppInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *AppInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *AppInfo) ReadField5(iprot thrift.TProtocol) error { + if v, 
err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *AppInfo) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Envs = tMap + for i := 0; i < size; i++ { + var _key6 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key6 = v + } + var _val7 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val7 = v + } + p.Envs[_key6] = _val7 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *AppInfo) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.IsStateful = v + } + return nil +} + +func (p *AppInfo) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *AppInfo) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.ExpireSecond = v + } + return nil +} + +func (p *AppInfo) ReadField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.CreateSecond = v + } + return nil +} + +func (p *AppInfo) ReadField11(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.DropSecond = v + } + return 
nil +} + +func (p *AppInfo) ReadField12(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.Duplicating = v + } + return nil +} + +func (p *AppInfo) ReadField13(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 13: ", err) + } else { + p.InitPartitionCount = v + } + return nil +} + +func (p *AppInfo) ReadField14(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 14: ", err) + } else { + p.IsBulkLoading = v + } + return nil +} + +func (p *AppInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("app_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := p.writeField12(oprot); err != nil { + return err + } + if err := p.writeField13(oprot); err != nil { + return err + } + if err := p.writeField14(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return 
thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AppInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *AppInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_type: ", p), err) + } + if err := oprot.WriteString(string(p.AppType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_type: ", p), err) + } + return err +} + +func (p *AppInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) + } + return err +} + +func (p *AppInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_id: ", p), err) + } + if err := 
oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_id: ", p), err) + } + return err +} + +func (p *AppInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partition_count: ", p), err) + } + return err +} + +func (p *AppInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("envs", thrift.MAP, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:envs: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Envs)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Envs { + if err := oprot.WriteString(string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:envs: ", p), err) + } + return err +} + +func (p *AppInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_stateful: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_stateful (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_stateful: ", p), err) + } + return err +} + +func (p *AppInfo) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:max_replica_count: ", p), err) + } + return err +} + +func (p *AppInfo) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_second", thrift.I64, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:expire_second: ", p), err) + } + if err := oprot.WriteI64(int64(p.ExpireSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_second (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field 
end error 9:expire_second: ", p), err) + } + return err +} + +func (p *AppInfo) writeField10(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("create_second", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:create_second: ", p), err) + } + if err := oprot.WriteI64(int64(p.CreateSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.create_second (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:create_second: ", p), err) + } + return err +} + +func (p *AppInfo) writeField11(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("drop_second", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:drop_second: ", p), err) + } + if err := oprot.WriteI64(int64(p.DropSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.drop_second (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:drop_second: ", p), err) + } + return err +} + +func (p *AppInfo) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDuplicating() { + if err := oprot.WriteFieldBegin("duplicating", thrift.BOOL, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:duplicating: ", p), err) + } + if err := oprot.WriteBool(bool(p.Duplicating)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duplicating (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:duplicating: ", p), err) + } + } + return err +} + +func (p *AppInfo) writeField13(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("init_partition_count", thrift.I32, 13); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 13:init_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.InitPartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.init_partition_count (13) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 13:init_partition_count: ", p), err) + } + return err +} + +func (p *AppInfo) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetIsBulkLoading() { + if err := oprot.WriteFieldBegin("is_bulk_loading", thrift.BOOL, 14); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:is_bulk_loading: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsBulkLoading)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_bulk_loading (14) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 14:is_bulk_loading: ", p), err) + } + } + return err +} + +func (p *AppInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AppInfo(%+v)", *p) +} diff --git a/go-client/idl/rrdb/GoUnusedProtection__.go b/go-client/idl/rrdb/GoUnusedProtection__.go new file mode 100644 index 0000000000..ba179697d3 --- /dev/null +++ b/go-client/idl/rrdb/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package rrdb + +var GoUnusedProtection__ int diff --git a/go-client/idl/rrdb/meta-remote/meta-remote.go b/go-client/idl/rrdb/meta-remote/meta-remote.go new file mode 100755 index 0000000000..af4fe0b133 --- /dev/null +++ b/go-client/idl/rrdb/meta-remote/meta-remote.go @@ -0,0 +1,183 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package main + +import ( + "context" + "flag" + "fmt" + 
"github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/incubator-pegasus/go-client/idl/rrdb" + "github.com/apache/thrift/lib/go/thrift" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" +) + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ +var _ = rrdb.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " query_cfg_response query_cfg(query_cfg_request query)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. 
-H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := rrdb.NewMetaClient(thrift.NewTStandardClient(iprot, oprot)) + 
if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "query_cfg": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "QueryCfg requires 1 args") + flag.Usage() + } + arg128 := flag.Arg(1) + mbTrans129 := thrift.NewTMemoryBufferLen(len(arg128)) + defer mbTrans129.Close() + _, err130 := mbTrans129.WriteString(arg128) + if err130 != nil { + Usage() + return + } + factory131 := thrift.NewTJSONProtocolFactory() + jsProt132 := factory131.GetProtocol(mbTrans129) + argvalue0 := replication.NewQueryCfgRequest() + err133 := argvalue0.Read(jsProt132) + if err133 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.QueryCfg(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/go-client/idl/rrdb/rrdb-consts.go b/go-client/idl/rrdb/rrdb-consts.go new file mode 100644 index 0000000000..a888b636ff --- /dev/null +++ b/go-client/idl/rrdb/rrdb-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package rrdb + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/rrdb/rrdb-remote/rrdb-remote.go b/go-client/idl/rrdb/rrdb-remote/rrdb-remote.go new file mode 100755 index 0000000000..a74084a45a --- /dev/null +++ b/go-client/idl/rrdb/rrdb-remote/rrdb-remote.go @@ -0,0 +1,536 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package main + +import ( + "context" + "flag" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/incubator-pegasus/go-client/idl/rrdb" + "github.com/apache/thrift/lib/go/thrift" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" +) + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ +var _ = rrdb.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " update_response put(update_request update)") + fmt.Fprintln(os.Stderr, " update_response multi_put(multi_put_request request)") + fmt.Fprintln(os.Stderr, " update_response remove(blob key)") + fmt.Fprintln(os.Stderr, " multi_remove_response multi_remove(multi_remove_request request)") + fmt.Fprintln(os.Stderr, " incr_response incr(incr_request request)") + fmt.Fprintln(os.Stderr, " check_and_set_response check_and_set(check_and_set_request request)") + fmt.Fprintln(os.Stderr, " check_and_mutate_response check_and_mutate(check_and_mutate_request request)") + fmt.Fprintln(os.Stderr, " read_response get(blob key)") + fmt.Fprintln(os.Stderr, " multi_get_response multi_get(multi_get_request request)") + fmt.Fprintln(os.Stderr, " 
batch_get_response batch_get(batch_get_request request)") + fmt.Fprintln(os.Stderr, " count_response sortkey_count(blob hash_key)") + fmt.Fprintln(os.Stderr, " ttl_response ttl(blob key)") + fmt.Fprintln(os.Stderr, " scan_response get_scanner(get_scanner_request request)") + fmt.Fprintln(os.Stderr, " scan_response scan(scan_request request)") + fmt.Fprintln(os.Stderr, " void clear_scanner(i64 context_id)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. 
-H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := rrdb.NewRrdbClient(thrift.NewTStandardClient(iprot, oprot)) + 
if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "put": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Put requires 1 args") + flag.Usage() + } + arg39 := flag.Arg(1) + mbTrans40 := thrift.NewTMemoryBufferLen(len(arg39)) + defer mbTrans40.Close() + _, err41 := mbTrans40.WriteString(arg39) + if err41 != nil { + Usage() + return + } + factory42 := thrift.NewTJSONProtocolFactory() + jsProt43 := factory42.GetProtocol(mbTrans40) + argvalue0 := rrdb.NewUpdateRequest() + err44 := argvalue0.Read(jsProt43) + if err44 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Put(context.Background(), value0)) + fmt.Print("\n") + break + case "multi_put": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "MultiPut requires 1 args") + flag.Usage() + } + arg45 := flag.Arg(1) + mbTrans46 := thrift.NewTMemoryBufferLen(len(arg45)) + defer mbTrans46.Close() + _, err47 := mbTrans46.WriteString(arg45) + if err47 != nil { + Usage() + return + } + factory48 := thrift.NewTJSONProtocolFactory() + jsProt49 := factory48.GetProtocol(mbTrans46) + argvalue0 := rrdb.NewMultiPutRequest() + err50 := argvalue0.Read(jsProt49) + if err50 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.MultiPut(context.Background(), value0)) + fmt.Print("\n") + break + case "remove": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Remove requires 1 args") + flag.Usage() + } + arg51 := flag.Arg(1) + mbTrans52 := thrift.NewTMemoryBufferLen(len(arg51)) + defer mbTrans52.Close() + _, err53 := mbTrans52.WriteString(arg51) + if err53 != nil { + Usage() + return + } + factory54 := thrift.NewTJSONProtocolFactory() + jsProt55 := factory54.GetProtocol(mbTrans52) + argvalue0 := base.NewBlob() + err56 := argvalue0.Read(jsProt55) + if err56 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Remove(context.Background(), value0)) + fmt.Print("\n") + 
break + case "multi_remove": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "MultiRemove requires 1 args") + flag.Usage() + } + arg57 := flag.Arg(1) + mbTrans58 := thrift.NewTMemoryBufferLen(len(arg57)) + defer mbTrans58.Close() + _, err59 := mbTrans58.WriteString(arg57) + if err59 != nil { + Usage() + return + } + factory60 := thrift.NewTJSONProtocolFactory() + jsProt61 := factory60.GetProtocol(mbTrans58) + argvalue0 := rrdb.NewMultiRemoveRequest() + err62 := argvalue0.Read(jsProt61) + if err62 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.MultiRemove(context.Background(), value0)) + fmt.Print("\n") + break + case "incr": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Incr requires 1 args") + flag.Usage() + } + arg63 := flag.Arg(1) + mbTrans64 := thrift.NewTMemoryBufferLen(len(arg63)) + defer mbTrans64.Close() + _, err65 := mbTrans64.WriteString(arg63) + if err65 != nil { + Usage() + return + } + factory66 := thrift.NewTJSONProtocolFactory() + jsProt67 := factory66.GetProtocol(mbTrans64) + argvalue0 := rrdb.NewIncrRequest() + err68 := argvalue0.Read(jsProt67) + if err68 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Incr(context.Background(), value0)) + fmt.Print("\n") + break + case "check_and_set": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "CheckAndSet requires 1 args") + flag.Usage() + } + arg69 := flag.Arg(1) + mbTrans70 := thrift.NewTMemoryBufferLen(len(arg69)) + defer mbTrans70.Close() + _, err71 := mbTrans70.WriteString(arg69) + if err71 != nil { + Usage() + return + } + factory72 := thrift.NewTJSONProtocolFactory() + jsProt73 := factory72.GetProtocol(mbTrans70) + argvalue0 := rrdb.NewCheckAndSetRequest() + err74 := argvalue0.Read(jsProt73) + if err74 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.CheckAndSet(context.Background(), value0)) + fmt.Print("\n") + break + case "check_and_mutate": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "CheckAndMutate 
requires 1 args") + flag.Usage() + } + arg75 := flag.Arg(1) + mbTrans76 := thrift.NewTMemoryBufferLen(len(arg75)) + defer mbTrans76.Close() + _, err77 := mbTrans76.WriteString(arg75) + if err77 != nil { + Usage() + return + } + factory78 := thrift.NewTJSONProtocolFactory() + jsProt79 := factory78.GetProtocol(mbTrans76) + argvalue0 := rrdb.NewCheckAndMutateRequest() + err80 := argvalue0.Read(jsProt79) + if err80 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.CheckAndMutate(context.Background(), value0)) + fmt.Print("\n") + break + case "get": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Get requires 1 args") + flag.Usage() + } + arg81 := flag.Arg(1) + mbTrans82 := thrift.NewTMemoryBufferLen(len(arg81)) + defer mbTrans82.Close() + _, err83 := mbTrans82.WriteString(arg81) + if err83 != nil { + Usage() + return + } + factory84 := thrift.NewTJSONProtocolFactory() + jsProt85 := factory84.GetProtocol(mbTrans82) + argvalue0 := base.NewBlob() + err86 := argvalue0.Read(jsProt85) + if err86 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Get(context.Background(), value0)) + fmt.Print("\n") + break + case "multi_get": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "MultiGet requires 1 args") + flag.Usage() + } + arg87 := flag.Arg(1) + mbTrans88 := thrift.NewTMemoryBufferLen(len(arg87)) + defer mbTrans88.Close() + _, err89 := mbTrans88.WriteString(arg87) + if err89 != nil { + Usage() + return + } + factory90 := thrift.NewTJSONProtocolFactory() + jsProt91 := factory90.GetProtocol(mbTrans88) + argvalue0 := rrdb.NewMultiGetRequest() + err92 := argvalue0.Read(jsProt91) + if err92 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.MultiGet(context.Background(), value0)) + fmt.Print("\n") + break + case "batch_get": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "BatchGet requires 1 args") + flag.Usage() + } + arg93 := flag.Arg(1) + mbTrans94 := thrift.NewTMemoryBufferLen(len(arg93)) + defer 
mbTrans94.Close() + _, err95 := mbTrans94.WriteString(arg93) + if err95 != nil { + Usage() + return + } + factory96 := thrift.NewTJSONProtocolFactory() + jsProt97 := factory96.GetProtocol(mbTrans94) + argvalue0 := rrdb.NewBatchGetRequest() + err98 := argvalue0.Read(jsProt97) + if err98 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.BatchGet(context.Background(), value0)) + fmt.Print("\n") + break + case "sortkey_count": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "SortkeyCount requires 1 args") + flag.Usage() + } + arg99 := flag.Arg(1) + mbTrans100 := thrift.NewTMemoryBufferLen(len(arg99)) + defer mbTrans100.Close() + _, err101 := mbTrans100.WriteString(arg99) + if err101 != nil { + Usage() + return + } + factory102 := thrift.NewTJSONProtocolFactory() + jsProt103 := factory102.GetProtocol(mbTrans100) + argvalue0 := base.NewBlob() + err104 := argvalue0.Read(jsProt103) + if err104 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.SortkeyCount(context.Background(), value0)) + fmt.Print("\n") + break + case "ttl": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "TTL requires 1 args") + flag.Usage() + } + arg105 := flag.Arg(1) + mbTrans106 := thrift.NewTMemoryBufferLen(len(arg105)) + defer mbTrans106.Close() + _, err107 := mbTrans106.WriteString(arg105) + if err107 != nil { + Usage() + return + } + factory108 := thrift.NewTJSONProtocolFactory() + jsProt109 := factory108.GetProtocol(mbTrans106) + argvalue0 := base.NewBlob() + err110 := argvalue0.Read(jsProt109) + if err110 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.TTL(context.Background(), value0)) + fmt.Print("\n") + break + case "get_scanner": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "GetScanner requires 1 args") + flag.Usage() + } + arg111 := flag.Arg(1) + mbTrans112 := thrift.NewTMemoryBufferLen(len(arg111)) + defer mbTrans112.Close() + _, err113 := mbTrans112.WriteString(arg111) + if err113 != nil { + Usage() + 
return + } + factory114 := thrift.NewTJSONProtocolFactory() + jsProt115 := factory114.GetProtocol(mbTrans112) + argvalue0 := rrdb.NewGetScannerRequest() + err116 := argvalue0.Read(jsProt115) + if err116 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.GetScanner(context.Background(), value0)) + fmt.Print("\n") + break + case "scan": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Scan requires 1 args") + flag.Usage() + } + arg117 := flag.Arg(1) + mbTrans118 := thrift.NewTMemoryBufferLen(len(arg117)) + defer mbTrans118.Close() + _, err119 := mbTrans118.WriteString(arg117) + if err119 != nil { + Usage() + return + } + factory120 := thrift.NewTJSONProtocolFactory() + jsProt121 := factory120.GetProtocol(mbTrans118) + argvalue0 := rrdb.NewScanRequest() + err122 := argvalue0.Read(jsProt121) + if err122 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Scan(context.Background(), value0)) + fmt.Print("\n") + break + case "clear_scanner": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "ClearScanner requires 1 args") + flag.Usage() + } + argvalue0, err123 := (strconv.ParseInt(flag.Arg(1), 10, 64)) + if err123 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.ClearScanner(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/go-client/idl/rrdb/rrdb.go b/go-client/idl/rrdb/rrdb.go new file mode 100644 index 0000000000..31782bc3be --- /dev/null +++ b/go-client/idl/rrdb/rrdb.go @@ -0,0 +1,12123 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package rrdb + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// 
(needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ + +type FilterType int64 + +const ( + FilterType_FT_NO_FILTER FilterType = 0 + FilterType_FT_MATCH_ANYWHERE FilterType = 1 + FilterType_FT_MATCH_PREFIX FilterType = 2 + FilterType_FT_MATCH_POSTFIX FilterType = 3 +) + +func (p FilterType) String() string { + switch p { + case FilterType_FT_NO_FILTER: + return "FT_NO_FILTER" + case FilterType_FT_MATCH_ANYWHERE: + return "FT_MATCH_ANYWHERE" + case FilterType_FT_MATCH_PREFIX: + return "FT_MATCH_PREFIX" + case FilterType_FT_MATCH_POSTFIX: + return "FT_MATCH_POSTFIX" + } + return "" +} + +func FilterTypeFromString(s string) (FilterType, error) { + switch s { + case "FT_NO_FILTER": + return FilterType_FT_NO_FILTER, nil + case "FT_MATCH_ANYWHERE": + return FilterType_FT_MATCH_ANYWHERE, nil + case "FT_MATCH_PREFIX": + return FilterType_FT_MATCH_PREFIX, nil + case "FT_MATCH_POSTFIX": + return FilterType_FT_MATCH_POSTFIX, nil + } + return FilterType(0), fmt.Errorf("not a valid FilterType string") +} + +func FilterTypePtr(v FilterType) *FilterType { return &v } + +func (p FilterType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *FilterType) UnmarshalText(text []byte) error { + q, err := FilterTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *FilterType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = FilterType(v) + return nil +} + +func (p *FilterType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type CasCheckType int64 + +const ( + CasCheckType_CT_NO_CHECK CasCheckType = 0 + CasCheckType_CT_VALUE_NOT_EXIST CasCheckType = 1 + 
CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY CasCheckType = 2 + CasCheckType_CT_VALUE_EXIST CasCheckType = 3 + CasCheckType_CT_VALUE_NOT_EMPTY CasCheckType = 4 + CasCheckType_CT_VALUE_MATCH_ANYWHERE CasCheckType = 5 + CasCheckType_CT_VALUE_MATCH_PREFIX CasCheckType = 6 + CasCheckType_CT_VALUE_MATCH_POSTFIX CasCheckType = 7 + CasCheckType_CT_VALUE_BYTES_LESS CasCheckType = 8 + CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL CasCheckType = 9 + CasCheckType_CT_VALUE_BYTES_EQUAL CasCheckType = 10 + CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL CasCheckType = 11 + CasCheckType_CT_VALUE_BYTES_GREATER CasCheckType = 12 + CasCheckType_CT_VALUE_INT_LESS CasCheckType = 13 + CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL CasCheckType = 14 + CasCheckType_CT_VALUE_INT_EQUAL CasCheckType = 15 + CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL CasCheckType = 16 + CasCheckType_CT_VALUE_INT_GREATER CasCheckType = 17 +) + +func (p CasCheckType) String() string { + switch p { + case CasCheckType_CT_NO_CHECK: + return "CT_NO_CHECK" + case CasCheckType_CT_VALUE_NOT_EXIST: + return "CT_VALUE_NOT_EXIST" + case CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY: + return "CT_VALUE_NOT_EXIST_OR_EMPTY" + case CasCheckType_CT_VALUE_EXIST: + return "CT_VALUE_EXIST" + case CasCheckType_CT_VALUE_NOT_EMPTY: + return "CT_VALUE_NOT_EMPTY" + case CasCheckType_CT_VALUE_MATCH_ANYWHERE: + return "CT_VALUE_MATCH_ANYWHERE" + case CasCheckType_CT_VALUE_MATCH_PREFIX: + return "CT_VALUE_MATCH_PREFIX" + case CasCheckType_CT_VALUE_MATCH_POSTFIX: + return "CT_VALUE_MATCH_POSTFIX" + case CasCheckType_CT_VALUE_BYTES_LESS: + return "CT_VALUE_BYTES_LESS" + case CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL: + return "CT_VALUE_BYTES_LESS_OR_EQUAL" + case CasCheckType_CT_VALUE_BYTES_EQUAL: + return "CT_VALUE_BYTES_EQUAL" + case CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL: + return "CT_VALUE_BYTES_GREATER_OR_EQUAL" + case CasCheckType_CT_VALUE_BYTES_GREATER: + return "CT_VALUE_BYTES_GREATER" + case CasCheckType_CT_VALUE_INT_LESS: + return 
"CT_VALUE_INT_LESS" + case CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL: + return "CT_VALUE_INT_LESS_OR_EQUAL" + case CasCheckType_CT_VALUE_INT_EQUAL: + return "CT_VALUE_INT_EQUAL" + case CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL: + return "CT_VALUE_INT_GREATER_OR_EQUAL" + case CasCheckType_CT_VALUE_INT_GREATER: + return "CT_VALUE_INT_GREATER" + } + return "" +} + +func CasCheckTypeFromString(s string) (CasCheckType, error) { + switch s { + case "CT_NO_CHECK": + return CasCheckType_CT_NO_CHECK, nil + case "CT_VALUE_NOT_EXIST": + return CasCheckType_CT_VALUE_NOT_EXIST, nil + case "CT_VALUE_NOT_EXIST_OR_EMPTY": + return CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY, nil + case "CT_VALUE_EXIST": + return CasCheckType_CT_VALUE_EXIST, nil + case "CT_VALUE_NOT_EMPTY": + return CasCheckType_CT_VALUE_NOT_EMPTY, nil + case "CT_VALUE_MATCH_ANYWHERE": + return CasCheckType_CT_VALUE_MATCH_ANYWHERE, nil + case "CT_VALUE_MATCH_PREFIX": + return CasCheckType_CT_VALUE_MATCH_PREFIX, nil + case "CT_VALUE_MATCH_POSTFIX": + return CasCheckType_CT_VALUE_MATCH_POSTFIX, nil + case "CT_VALUE_BYTES_LESS": + return CasCheckType_CT_VALUE_BYTES_LESS, nil + case "CT_VALUE_BYTES_LESS_OR_EQUAL": + return CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL, nil + case "CT_VALUE_BYTES_EQUAL": + return CasCheckType_CT_VALUE_BYTES_EQUAL, nil + case "CT_VALUE_BYTES_GREATER_OR_EQUAL": + return CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL, nil + case "CT_VALUE_BYTES_GREATER": + return CasCheckType_CT_VALUE_BYTES_GREATER, nil + case "CT_VALUE_INT_LESS": + return CasCheckType_CT_VALUE_INT_LESS, nil + case "CT_VALUE_INT_LESS_OR_EQUAL": + return CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL, nil + case "CT_VALUE_INT_EQUAL": + return CasCheckType_CT_VALUE_INT_EQUAL, nil + case "CT_VALUE_INT_GREATER_OR_EQUAL": + return CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL, nil + case "CT_VALUE_INT_GREATER": + return CasCheckType_CT_VALUE_INT_GREATER, nil + } + return CasCheckType(0), fmt.Errorf("not a valid CasCheckType string") +} + +func 
CasCheckTypePtr(v CasCheckType) *CasCheckType { return &v } + +func (p CasCheckType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *CasCheckType) UnmarshalText(text []byte) error { + q, err := CasCheckTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *CasCheckType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = CasCheckType(v) + return nil +} + +func (p *CasCheckType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type MutateOperation int64 + +const ( + MutateOperation_MO_PUT MutateOperation = 0 + MutateOperation_MO_DELETE MutateOperation = 1 +) + +func (p MutateOperation) String() string { + switch p { + case MutateOperation_MO_PUT: + return "MO_PUT" + case MutateOperation_MO_DELETE: + return "MO_DELETE" + } + return "" +} + +func MutateOperationFromString(s string) (MutateOperation, error) { + switch s { + case "MO_PUT": + return MutateOperation_MO_PUT, nil + case "MO_DELETE": + return MutateOperation_MO_DELETE, nil + } + return MutateOperation(0), fmt.Errorf("not a valid MutateOperation string") +} + +func MutateOperationPtr(v MutateOperation) *MutateOperation { return &v } + +func (p MutateOperation) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *MutateOperation) UnmarshalText(text []byte) error { + q, err := MutateOperationFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *MutateOperation) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = MutateOperation(v) + return nil +} + +func (p *MutateOperation) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Key +// - Value +// - ExpireTsSeconds +type UpdateRequest struct { + Key 
*base.Blob `thrift:"key,1" db:"key" json:"key"` + Value *base.Blob `thrift:"value,2" db:"value" json:"value"` + ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` +} + +func NewUpdateRequest() *UpdateRequest { + return &UpdateRequest{} +} + +var UpdateRequest_Key_DEFAULT *base.Blob + +func (p *UpdateRequest) GetKey() *base.Blob { + if !p.IsSetKey() { + return UpdateRequest_Key_DEFAULT + } + return p.Key +} + +var UpdateRequest_Value_DEFAULT *base.Blob + +func (p *UpdateRequest) GetValue() *base.Blob { + if !p.IsSetValue() { + return UpdateRequest_Value_DEFAULT + } + return p.Value +} + +func (p *UpdateRequest) GetExpireTsSeconds() int32 { + return p.ExpireTsSeconds +} +func (p *UpdateRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *UpdateRequest) IsSetValue() bool { + return p.Value != nil +} + +func (p *UpdateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { 
+ return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateRequest) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *UpdateRequest) ReadField2(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *UpdateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = v + } + return nil +} + +func (p *UpdateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *UpdateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *UpdateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + return err +} + +func (p *UpdateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - AppID +// - PartitionIndex +// - Decree +// - Server +type UpdateResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,3" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,4" db:"decree" json:"decree"` + Server string `thrift:"server,5" db:"server" json:"server"` +} + +func NewUpdateResponse() *UpdateResponse { + return &UpdateResponse{} +} + +func (p *UpdateResponse) GetError() int32 { + return p.Error +} + +func (p *UpdateResponse) 
GetAppID() int32 { + return p.AppID +} + +func (p *UpdateResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *UpdateResponse) GetDecree() int64 { + return p.Decree +} + +func (p *UpdateResponse) GetServer() string { + return p.Server +} +func (p *UpdateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateResponse) ReadField1(iprot thrift.TProtocol) 
error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *UpdateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *UpdateResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *UpdateResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *UpdateResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *UpdateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", 
thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:partition_index: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (4) field write error: ", p), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:decree: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:server: ", p), err) + } + return err +} + +func (p *UpdateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateResponse(%+v)", *p) +} + +// Attributes: +// - Error +// - Value +// - AppID +// - PartitionIndex +// - Server +type ReadResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Value *base.Blob `thrift:"value,2" db:"value" json:"value"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewReadResponse() *ReadResponse { + return &ReadResponse{} +} + +func (p *ReadResponse) GetError() int32 { + return p.Error +} + +var ReadResponse_Value_DEFAULT *base.Blob + +func (p *ReadResponse) GetValue() *base.Blob { + if !p.IsSetValue() { + return ReadResponse_Value_DEFAULT + } + return p.Value +} + +func (p *ReadResponse) GetAppID() int32 { + return p.AppID +} + +func (p *ReadResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *ReadResponse) GetServer() string { + return p.Server +} +func (p *ReadResponse) IsSetValue() bool { + return p.Value != nil +} + +func (p *ReadResponse) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReadResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *ReadResponse) ReadField2(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *ReadResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ReadResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *ReadResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *ReadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("read_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *ReadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReadResponse(%+v)", *p) +} + +// Attributes: +// - Error +// - TTLSeconds +// - AppID +// - PartitionIndex +// - Server +type TTLResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + TTLSeconds int32 `thrift:"ttl_seconds,2" db:"ttl_seconds" json:"ttl_seconds"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewTTLResponse() *TTLResponse { + return &TTLResponse{} +} + +func (p *TTLResponse) GetError() int32 { + return p.Error +} + +func (p *TTLResponse) GetTTLSeconds() int32 { + return p.TTLSeconds +} + +func (p *TTLResponse) GetAppID() int32 { + return p.AppID +} + +func (p *TTLResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *TTLResponse) GetServer() string { + return p.Server +} +func (p *TTLResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + 
return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TTLResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *TTLResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TTLSeconds = v + } + return nil +} + +func (p *TTLResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *TTLResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *TTLResponse) 
ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *TTLResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ttl_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *TTLResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ttl_seconds", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ttl_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.TTLSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ttl_seconds (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != 
nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ttl_seconds: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *TTLResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTLResponse(%+v)", *p) +} + +// Attributes: +// - Error +// - Count +// - AppID +// - 
PartitionIndex +// - Server +type CountResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Count int64 `thrift:"count,2" db:"count" json:"count"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewCountResponse() *CountResponse { + return &CountResponse{} +} + +func (p *CountResponse) GetError() int32 { + return p.Error +} + +func (p *CountResponse) GetCount() int64 { + return p.Count +} + +func (p *CountResponse) GetAppID() int32 { + return p.AppID +} + +func (p *CountResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *CountResponse) GetServer() string { + return p.Server +} +func (p *CountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + 
return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CountResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *CountResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Count = v + } + return nil +} + +func (p *CountResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *CountResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *CountResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *CountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err 
:= p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *CountResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("count", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:count: ", p), err) + } + if err := oprot.WriteI64(int64(p.Count)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:count: ", p), err) + } + return err +} + +func (p *CountResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *CountResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *CountResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *CountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CountResponse(%+v)", *p) +} + +// Attributes: +// - Key +// - Value +// - ExpireTsSeconds +type KeyValue struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` + Value *base.Blob `thrift:"value,2" db:"value" json:"value"` + ExpireTsSeconds *int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds,omitempty"` +} + +func NewKeyValue() *KeyValue { + return &KeyValue{} +} + +var KeyValue_Key_DEFAULT *base.Blob + +func (p *KeyValue) GetKey() *base.Blob { + if !p.IsSetKey() { + return KeyValue_Key_DEFAULT + } + return p.Key +} + +var KeyValue_Value_DEFAULT *base.Blob + +func (p *KeyValue) GetValue() *base.Blob { 
+ if !p.IsSetValue() { + return KeyValue_Value_DEFAULT + } + return p.Value +} + +var KeyValue_ExpireTsSeconds_DEFAULT int32 + +func (p *KeyValue) GetExpireTsSeconds() int32 { + if !p.IsSetExpireTsSeconds() { + return KeyValue_ExpireTsSeconds_DEFAULT + } + return *p.ExpireTsSeconds +} +func (p *KeyValue) IsSetKey() bool { + return p.Key != nil +} + +func (p *KeyValue) IsSetValue() bool { + return p.Value != nil +} + +func (p *KeyValue) IsSetExpireTsSeconds() bool { + return p.ExpireTsSeconds != nil +} + +func (p *KeyValue) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *KeyValue) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: 
", p.Key), err) + } + return nil +} + +func (p *KeyValue) ReadField2(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *KeyValue) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = &v + } + return nil +} + +func (p *KeyValue) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("key_value"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *KeyValue) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *KeyValue) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *KeyValue) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetExpireTsSeconds() { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(*p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + } + return err +} + +func (p *KeyValue) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("KeyValue(%+v)", *p) +} + +// Attributes: +// - HashKey +// - Kvs +// - ExpireTsSeconds +type MultiPutRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` + ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` +} + +func NewMultiPutRequest() *MultiPutRequest { + return &MultiPutRequest{} +} + +var MultiPutRequest_HashKey_DEFAULT *base.Blob + +func (p *MultiPutRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return MultiPutRequest_HashKey_DEFAULT + } + return p.HashKey +} + +func (p *MultiPutRequest) GetKvs() []*KeyValue { + return p.Kvs +} + +func (p *MultiPutRequest) GetExpireTsSeconds() int32 { + return p.ExpireTsSeconds +} +func (p *MultiPutRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *MultiPutRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read 
error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MultiPutRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *MultiPutRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*KeyValue, 0, size) + p.Kvs = tSlice + for i := 0; i < size; i++ { + _elem0 := &KeyValue{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Kvs = append(p.Kvs, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p 
*MultiPutRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = v + } + return nil +} + +func (p *MultiPutRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_put_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MultiPutRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *MultiPutRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Kvs { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + 
if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) + } + return err +} + +func (p *MultiPutRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + return err +} + +func (p *MultiPutRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MultiPutRequest(%+v)", *p) +} + +// Attributes: +// - HashKey +// - SortKeys +// - MaxCount +type MultiRemoveRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + SortKeys []*base.Blob `thrift:"sort_keys,2" db:"sort_keys" json:"sort_keys"` + MaxCount int64 `thrift:"max_count,3" db:"max_count" json:"max_count"` +} + +func NewMultiRemoveRequest() *MultiRemoveRequest { + return &MultiRemoveRequest{} +} + +var MultiRemoveRequest_HashKey_DEFAULT *base.Blob + +func (p *MultiRemoveRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return MultiRemoveRequest_HashKey_DEFAULT + } + return p.HashKey +} + +func (p *MultiRemoveRequest) GetSortKeys() []*base.Blob { + return p.SortKeys +} + +func (p *MultiRemoveRequest) GetMaxCount() int64 { + return p.MaxCount +} +func (p *MultiRemoveRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *MultiRemoveRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
    }

    for {
        _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
        if err != nil {
            return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
        }
        if fieldTypeId == thrift.STOP {
            break
        }
        // Dispatch on field id; fields with an unexpected wire type and
        // unknown field ids are skipped for protocol compatibility.
        switch fieldId {
        case 1:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField1(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 2:
            if fieldTypeId == thrift.LIST {
                if err := p.ReadField2(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 3:
            if fieldTypeId == thrift.I64 {
                if err := p.ReadField3(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        default:
            if err := iprot.Skip(fieldTypeId); err != nil {
                return err
            }
        }
        if err := iprot.ReadFieldEnd(); err != nil {
            return err
        }
    }
    if err := iprot.ReadStructEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
    }
    return nil
}

// NOTE(review): this appears to be Apache-Thrift-generated serialization code;
// prefer regenerating from the .thrift IDL over hand-editing.

// ReadField1 reads field 1 (hash_key) as a base.Blob struct.
func (p *MultiRemoveRequest) ReadField1(iprot thrift.TProtocol) error {
    p.HashKey = &base.Blob{}
    if err := p.HashKey.Read(iprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err)
    }
    return nil
}

// ReadField2 reads field 2 (sort_keys) as a list of base.Blob structs.
func (p *MultiRemoveRequest) ReadField2(iprot thrift.TProtocol) error {
    _, size, err := iprot.ReadListBegin()
    if err != nil {
        return thrift.PrependError("error reading list begin: ", err)
    }
    tSlice := make([]*base.Blob, 0, size)
    p.SortKeys = tSlice
    for i := 0; i < size; i++ {
        _elem1 := &base.Blob{}
        if err := _elem1.Read(iprot); err != nil {
            return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
        }
        p.SortKeys = append(p.SortKeys, _elem1)
    }
    if err := iprot.ReadListEnd(); err != nil {
        return thrift.PrependError("error reading list end: ", err)
    }
    return nil
}

// ReadField3 reads field 3 (max_count) as an i64.
func (p *MultiRemoveRequest) ReadField3(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI64(); err != nil {
        return thrift.PrependError("error reading field 3: ", err)
    } else {
        p.MaxCount = v
    }
    return nil
}

// Write serializes the request: fields 1..3 followed by the field-stop marker.
func (p *MultiRemoveRequest) Write(oprot thrift.TProtocol) error {
    if err := oprot.WriteStructBegin("multi_remove_request"); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
    }
    if p != nil {
        if err := p.writeField1(oprot); err != nil {
            return err
        }
        if err := p.writeField2(oprot); err != nil {
            return err
        }
        if err := p.writeField3(oprot); err != nil {
            return err
        }
    }
    if err := oprot.WriteFieldStop(); err != nil {
        return thrift.PrependError("write field stop error: ", err)
    }
    if err := oprot.WriteStructEnd(); err != nil {
        return thrift.PrependError("write struct stop error: ", err)
    }
    return nil
}

// writeField1 writes hash_key (struct, id 1).
func (p *MultiRemoveRequest) writeField1(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err)
    }
    if err := p.HashKey.Write(oprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err)
    }
    return err
}

// writeField2 writes sort_keys (list<struct>, id 2).
func (p *MultiRemoveRequest) writeField2(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("sort_keys", thrift.LIST, 2); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_keys: ", p), err)
    }
    if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SortKeys)); err != nil {
        return thrift.PrependError("error writing list begin: ", err)
    }
    for _, v := range p.SortKeys {
        if err := v.Write(oprot); err != nil {
            return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
        }
    }
    if err := oprot.WriteListEnd(); err != nil {
        return thrift.PrependError("error writing list end: ", err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_keys: ", p), err)
    }
    return err
}

// writeField3 writes max_count (i64, id 3).
func (p *MultiRemoveRequest) writeField3(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("max_count", thrift.I64, 3); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_count: ", p), err)
    }
    if err := oprot.WriteI64(int64(p.MaxCount)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.max_count (3) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_count: ", p), err)
    }
    return err
}

// String renders the request for debugging; safe on a nil receiver.
func (p *MultiRemoveRequest) String() string {
    if p == nil {
        return ""
    }
    return fmt.Sprintf("MultiRemoveRequest(%+v)", *p)
}

// Attributes:
// - Error
// - Count
// - AppID
// - PartitionIndex
// - Decree
// - Server
type MultiRemoveResponse struct {
    Error          int32  `thrift:"error,1" db:"error" json:"error"`
    Count          int64  `thrift:"count,2" db:"count" json:"count"`
    AppID          int32  `thrift:"app_id,3" db:"app_id" json:"app_id"`
    PartitionIndex int32  `thrift:"partition_index,4" db:"partition_index" json:"partition_index"`
    Decree         int64  `thrift:"decree,5" db:"decree" json:"decree"`
    Server         string `thrift:"server,6" db:"server" json:"server"`
}

func NewMultiRemoveResponse() *MultiRemoveResponse {
    return &MultiRemoveResponse{}
}

func (p *MultiRemoveResponse) GetError() int32 {
    return p.Error
}

func (p *MultiRemoveResponse) GetCount() int64 {
    return p.Count
}

func (p *MultiRemoveResponse) GetAppID() int32 {
    return p.AppID
}

func (p *MultiRemoveResponse) GetPartitionIndex() int32 {
return p.PartitionIndex
}

func (p *MultiRemoveResponse) GetDecree() int64 {
    return p.Decree
}

func (p *MultiRemoveResponse) GetServer() string {
    return p.Server
}

// Read deserializes the response, dispatching on field id and skipping
// unknown or mistyped fields for protocol compatibility.
// NOTE(review): this appears to be Apache-Thrift-generated serialization code;
// prefer regenerating from the .thrift IDL over hand-editing.
func (p *MultiRemoveResponse) Read(iprot thrift.TProtocol) error {
    if _, err := iprot.ReadStructBegin(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
    }

    for {
        _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
        if err != nil {
            return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
        }
        if fieldTypeId == thrift.STOP {
            break
        }
        switch fieldId {
        case 1:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField1(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 2:
            if fieldTypeId == thrift.I64 {
                if err := p.ReadField2(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 3:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField3(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 4:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField4(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 5:
            if fieldTypeId == thrift.I64 {
                if err := p.ReadField5(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 6:
            if fieldTypeId == thrift.STRING {
                if err := p.ReadField6(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        default:
            if err := iprot.Skip(fieldTypeId); err != nil {
                return err
            }
        }
        if err := iprot.ReadFieldEnd(); err != nil {
            return err
        }
    }
    if err := iprot.ReadStructEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
    }
    return nil
}

// ReadField1 reads field 1 (error) as an i32.
func (p *MultiRemoveResponse) ReadField1(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 1: ", err)
    } else {
        p.Error = v
    }
    return nil
}

// ReadField2 reads field 2 (count) as an i64.
func (p *MultiRemoveResponse) ReadField2(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI64(); err != nil {
        return thrift.PrependError("error reading field 2: ", err)
    } else {
        p.Count = v
    }
    return nil
}

// ReadField3 reads field 3 (app_id) as an i32.
func (p *MultiRemoveResponse) ReadField3(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 3: ", err)
    } else {
        p.AppID = v
    }
    return nil
}

// ReadField4 reads field 4 (partition_index) as an i32.
func (p *MultiRemoveResponse) ReadField4(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 4: ", err)
    } else {
        p.PartitionIndex = v
    }
    return nil
}

// ReadField5 reads field 5 (decree) as an i64.
func (p *MultiRemoveResponse) ReadField5(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI64(); err != nil {
        return thrift.PrependError("error reading field 5: ", err)
    } else {
        p.Decree = v
    }
    return nil
}

// ReadField6 reads field 6 (server) as a string.
func (p *MultiRemoveResponse) ReadField6(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadString(); err != nil {
        return thrift.PrependError("error reading field 6: ", err)
    } else {
        p.Server = v
    }
    return nil
}

// Write serializes the response: fields 1..6 followed by the field-stop marker.
func (p *MultiRemoveResponse) Write(oprot thrift.TProtocol) error {
    if err := oprot.WriteStructBegin("multi_remove_response"); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
    }
    if p != nil {
        if err := p.writeField1(oprot); err != nil {
            return err
        }
        if err := p.writeField2(oprot); err != nil {
            return err
        }
        if err := p.writeField3(oprot); err != nil {
            return err
        }
        if err := p.writeField4(oprot); err != nil {
            return err
        }
        if err := p.writeField5(oprot); err != nil {
            return err
        }
        if err := p.writeField6(oprot); err != nil {
            return err
        }
    }
    if err := oprot.WriteFieldStop(); err != nil {
        return thrift.PrependError("write field stop error: ", err)
    }
    if err := oprot.WriteStructEnd(); err != nil {
        return thrift.PrependError("write struct stop error: ", err)
    }
    return nil
}

// writeField1 writes error (i32, id 1).
func (p *MultiRemoveResponse) writeField1(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.Error)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err)
    }
    return err
}

// writeField2 writes count (i64, id 2).
func (p *MultiRemoveResponse) writeField2(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("count", thrift.I64, 2); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:count: ", p), err)
    }
    if err := oprot.WriteI64(int64(p.Count)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.count (2) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 2:count: ", p), err)
    }
    return err
}

// writeField3 writes app_id (i32, id 3).
func (p *MultiRemoveResponse) writeField3(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.AppID)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err)
    }
    return err
}

// writeField4 writes partition_index (i32, id 4).
func (p *MultiRemoveResponse) writeField4(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err)
    }
    return err
}

// writeField5 writes decree (i64, id 5).
func (p *MultiRemoveResponse) writeField5(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("decree", thrift.I64, 5); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:decree: ", p), err)
    }
    if err := oprot.WriteI64(int64(p.Decree)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.decree (5) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 5:decree: ", p), err)
    }
    return err
}

// writeField6 writes server (string, id 6).
func (p *MultiRemoveResponse) writeField6(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err)
    }
    if err := oprot.WriteString(string(p.Server)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err)
    }
    return err
}

// String renders the response for debugging; safe on a nil receiver.
func (p *MultiRemoveResponse) String() string {
    if p == nil {
        return ""
    }
    return fmt.Sprintf("MultiRemoveResponse(%+v)", *p)
}

// Attributes:
// - HashKey
// - SortKeys
// - MaxKvCount
// - MaxKvSize
// - NoValue
// - StartSortkey
// - StopSortkey
// - StartInclusive
// - StopInclusive
// - SortKeyFilterType
// - SortKeyFilterPattern
// - Reverse
type MultiGetRequest struct {
    HashKey              *base.Blob   `thrift:"hash_key,1" db:"hash_key" json:"hash_key"`
    SortKeys             []*base.Blob `thrift:"sort_keys,2" db:"sort_keys" json:"sort_keys"`
    MaxKvCount           int32        `thrift:"max_kv_count,3" db:"max_kv_count" json:"max_kv_count"`
    MaxKvSize            int32        `thrift:"max_kv_size,4" db:"max_kv_size" json:"max_kv_size"`
    NoValue              bool         `thrift:"no_value,5" db:"no_value" json:"no_value"`
    StartSortkey         *base.Blob   `thrift:"start_sortkey,6" db:"start_sortkey" json:"start_sortkey"`
    StopSortkey          *base.Blob   `thrift:"stop_sortkey,7" db:"stop_sortkey" json:"stop_sortkey"`
    StartInclusive       bool         `thrift:"start_inclusive,8" db:"start_inclusive" json:"start_inclusive"`
    StopInclusive        bool         `thrift:"stop_inclusive,9" db:"stop_inclusive" json:"stop_inclusive"`
    SortKeyFilterType    FilterType   `thrift:"sort_key_filter_type,10" db:"sort_key_filter_type" json:"sort_key_filter_type"`
    SortKeyFilterPattern *base.Blob   `thrift:"sort_key_filter_pattern,11" db:"sort_key_filter_pattern" json:"sort_key_filter_pattern"`
    Reverse              bool         `thrift:"reverse,12" db:"reverse" json:"reverse"`
}

func NewMultiGetRequest() *MultiGetRequest {
    return &MultiGetRequest{}
}

var MultiGetRequest_HashKey_DEFAULT *base.Blob

// GetHashKey returns the default (nil) when the field is unset.
func (p *MultiGetRequest) GetHashKey() *base.Blob {
    if !p.IsSetHashKey() {
        return MultiGetRequest_HashKey_DEFAULT
    }
    return p.HashKey
}

func (p *MultiGetRequest) GetSortKeys() []*base.Blob {
    return p.SortKeys
}

func (p *MultiGetRequest) GetMaxKvCount() int32 {
    return p.MaxKvCount
}

func (p *MultiGetRequest) GetMaxKvSize() int32 {
    return p.MaxKvSize
}

func (p *MultiGetRequest) GetNoValue() bool {
    return p.NoValue
}

var MultiGetRequest_StartSortkey_DEFAULT *base.Blob

func (p *MultiGetRequest) GetStartSortkey() *base.Blob {
    if !p.IsSetStartSortkey() {
        return MultiGetRequest_StartSortkey_DEFAULT
    }
    return p.StartSortkey
}

var MultiGetRequest_StopSortkey_DEFAULT *base.Blob

func (p *MultiGetRequest) GetStopSortkey() *base.Blob {
    if !p.IsSetStopSortkey() {
        return MultiGetRequest_StopSortkey_DEFAULT
    }
    return p.StopSortkey
}

func (p *MultiGetRequest) GetStartInclusive() bool {
    return p.StartInclusive
}

func (p *MultiGetRequest) GetStopInclusive() bool {
    return p.StopInclusive
}

func (p *MultiGetRequest) GetSortKeyFilterType() FilterType {
    return p.SortKeyFilterType
}

var MultiGetRequest_SortKeyFilterPattern_DEFAULT *base.Blob

func (p *MultiGetRequest) GetSortKeyFilterPattern() *base.Blob {
    if !p.IsSetSortKeyFilterPattern() {
        return MultiGetRequest_SortKeyFilterPattern_DEFAULT
    }
    return p.SortKeyFilterPattern
}

func (p *MultiGetRequest) GetReverse() bool {
    return p.Reverse
}

// IsSet* report whether the corresponding optional struct field was populated.
func (p *MultiGetRequest) IsSetHashKey() bool {
    return p.HashKey != nil
}

func (p *MultiGetRequest) IsSetStartSortkey() bool {
    return p.StartSortkey != nil
}

func (p *MultiGetRequest) IsSetStopSortkey() bool {
    return p.StopSortkey != nil
}

func (p *MultiGetRequest) IsSetSortKeyFilterPattern() bool {
    return p.SortKeyFilterPattern != nil
}

// Read deserializes the request, dispatching on field id and skipping
// unknown or mistyped fields for protocol compatibility.
// NOTE(review): this appears to be Apache-Thrift-generated serialization code;
// prefer regenerating from the .thrift IDL over hand-editing.
func (p *MultiGetRequest) Read(iprot thrift.TProtocol) error {
    if _, err := iprot.ReadStructBegin(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
    }

    for {
        _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
        if err != nil {
            return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
        }
        if fieldTypeId == thrift.STOP {
            break
        }
        switch fieldId {
        case 1:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField1(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 2:
            if fieldTypeId == thrift.LIST {
                if err := p.ReadField2(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 3:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField3(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 4:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField4(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 5:
            if fieldTypeId == thrift.BOOL {
                if err := p.ReadField5(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 6:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField6(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 7:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField7(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 8:
            if fieldTypeId == thrift.BOOL {
                if err := p.ReadField8(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 9:
            if fieldTypeId == thrift.BOOL {
                if err := p.ReadField9(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 10:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField10(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 11:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField11(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 12:
            if fieldTypeId == thrift.BOOL {
                if err := p.ReadField12(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        default:
            if err := iprot.Skip(fieldTypeId); err != nil {
                return err
            }
        }
        if err := iprot.ReadFieldEnd(); err != nil {
            return err
        }
    }
    if err := iprot.ReadStructEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
    }
    return nil
}

// ReadField1 reads field 1 (hash_key) as a base.Blob struct.
func (p *MultiGetRequest) ReadField1(iprot thrift.TProtocol) error {
    p.HashKey = &base.Blob{}
    if err := p.HashKey.Read(iprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err)
    }
    return nil
}

// ReadField2 reads field 2 (sort_keys) as a list of base.Blob structs.
func (p *MultiGetRequest) ReadField2(iprot thrift.TProtocol) error {
    _, size, err := iprot.ReadListBegin()
    if err != nil {
        return thrift.PrependError("error reading list begin: ", err)
    }
    tSlice := make([]*base.Blob, 0, size)
    p.SortKeys = tSlice
    for i := 0; i < size; i++ {
        _elem2 := &base.Blob{}
        if err := _elem2.Read(iprot); err != nil {
            return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
        }
        p.SortKeys = append(p.SortKeys, _elem2)
    }
    if err := iprot.ReadListEnd(); err != nil {
        return thrift.PrependError("error reading list end: ", err)
    }
    return nil
}

// ReadField3 reads field 3 (max_kv_count) as an i32.
func (p *MultiGetRequest) ReadField3(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 3: ", err)
    } else {
        p.MaxKvCount = v
    }
    return nil
}

// ReadField4 reads field 4 (max_kv_size) as an i32.
func (p *MultiGetRequest) ReadField4(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 4: ", err)
    } else {
        p.MaxKvSize = v
    }
    return nil
}

// ReadField5 reads field 5 (no_value) as a bool.
func (p *MultiGetRequest) ReadField5(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadBool(); err != nil {
        return thrift.PrependError("error reading field 5: ", err)
    } else {
        p.NoValue = v
    }
    return nil
}

// ReadField6 reads field 6 (start_sortkey) as a base.Blob struct.
func (p *MultiGetRequest) ReadField6(iprot thrift.TProtocol) error {
    p.StartSortkey = &base.Blob{}
    if err := p.StartSortkey.Read(iprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StartSortkey), err)
    }
    return nil
}

// ReadField7 reads field 7 (stop_sortkey) as a base.Blob struct.
func (p *MultiGetRequest) ReadField7(iprot thrift.TProtocol) error {
    p.StopSortkey = &base.Blob{}
    if err := p.StopSortkey.Read(iprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StopSortkey), err)
    }
    return nil
}

// ReadField8 reads field 8 (start_inclusive) as a bool.
func (p *MultiGetRequest) ReadField8(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadBool(); err != nil {
        return thrift.PrependError("error reading field 8: ", err)
    } else {
        p.StartInclusive = v
    }
    return nil
}

// ReadField9 reads field 9 (stop_inclusive) as a bool.
func (p *MultiGetRequest) ReadField9(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadBool(); err != nil {
        return thrift.PrependError("error reading field 9: ", err)
    } else {
        p.StopInclusive = v
    }
    return nil
}

// ReadField10 reads field 10 (sort_key_filter_type) as an i32 enum value.
func (p *MultiGetRequest) ReadField10(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 10: ", err)
    } else {
        temp := FilterType(v)
        p.SortKeyFilterType = temp
    }
    return nil
}

// ReadField11 reads field 11 (sort_key_filter_pattern) as a base.Blob struct.
func (p *MultiGetRequest) ReadField11(iprot thrift.TProtocol) error {
    p.SortKeyFilterPattern = &base.Blob{}
    if err := p.SortKeyFilterPattern.Read(iprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKeyFilterPattern), err)
    }
    return nil
}

// ReadField12 reads field 12 (reverse) as a bool.
func (p *MultiGetRequest) ReadField12(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadBool(); err != nil {
        return thrift.PrependError("error reading field 12: ", err)
    } else {
        p.Reverse = v
    }
    return nil
}

// Write serializes the request: fields 1..12 followed by the field-stop marker.
func (p *MultiGetRequest) Write(oprot thrift.TProtocol) error {
    if err := oprot.WriteStructBegin("multi_get_request"); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
    }
    if p != nil {
        if err := p.writeField1(oprot); err != nil {
            return err
        }
        if err := p.writeField2(oprot); err != nil {
            return err
        }
        if err := p.writeField3(oprot); err != nil {
            return err
        }
        if err := p.writeField4(oprot); 
err != nil {
            return err
        }
        if err := p.writeField5(oprot); err != nil {
            return err
        }
        if err := p.writeField6(oprot); err != nil {
            return err
        }
        if err := p.writeField7(oprot); err != nil {
            return err
        }
        if err := p.writeField8(oprot); err != nil {
            return err
        }
        if err := p.writeField9(oprot); err != nil {
            return err
        }
        if err := p.writeField10(oprot); err != nil {
            return err
        }
        if err := p.writeField11(oprot); err != nil {
            return err
        }
        if err := p.writeField12(oprot); err != nil {
            return err
        }
    }
    if err := oprot.WriteFieldStop(); err != nil {
        return thrift.PrependError("write field stop error: ", err)
    }
    if err := oprot.WriteStructEnd(); err != nil {
        return thrift.PrependError("write struct stop error: ", err)
    }
    return nil
}

// NOTE(review): appears to be Apache-Thrift-generated code — prefer
// regenerating from the .thrift IDL over hand-editing.

// writeField1 writes hash_key (struct, id 1).
func (p *MultiGetRequest) writeField1(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err)
    }
    if err := p.HashKey.Write(oprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err)
    }
    return err
}

// writeField2 writes sort_keys (list<struct>, id 2).
func (p *MultiGetRequest) writeField2(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("sort_keys", thrift.LIST, 2); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_keys: ", p), err)
    }
    if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SortKeys)); err != nil {
        return thrift.PrependError("error writing list begin: ", err)
    }
    for _, v := range p.SortKeys {
        if err := v.Write(oprot); err != nil {
            return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
        }
    }
    if err := oprot.WriteListEnd(); err != nil {
        return thrift.PrependError("error writing list end: ", err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_keys: ", p), err)
    }
    return err
}

// writeField3 writes max_kv_count (i32, id 3).
func (p *MultiGetRequest) writeField3(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("max_kv_count", thrift.I32, 3); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_kv_count: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.MaxKvCount)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.max_kv_count (3) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_kv_count: ", p), err)
    }
    return err
}

// writeField4 writes max_kv_size (i32, id 4).
func (p *MultiGetRequest) writeField4(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("max_kv_size", thrift.I32, 4); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:max_kv_size: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.MaxKvSize)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.max_kv_size (4) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 4:max_kv_size: ", p), err)
    }
    return err
}

// writeField5 writes no_value (bool, id 5).
func (p *MultiGetRequest) writeField5(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("no_value", thrift.BOOL, 5); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:no_value: ", p), err)
    }
    if err := oprot.WriteBool(bool(p.NoValue)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.no_value (5) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 5:no_value: ", p), err)
    }
    return err
}

// writeField6 writes start_sortkey (struct, id 6).
func (p *MultiGetRequest) writeField6(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("start_sortkey", thrift.STRUCT, 6); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_sortkey: ", p), err)
    }
    if err := p.StartSortkey.Write(oprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StartSortkey), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_sortkey: ", p), err)
    }
    return err
}

// writeField7 writes stop_sortkey (struct, id 7).
func (p *MultiGetRequest) writeField7(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("stop_sortkey", thrift.STRUCT, 7); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:stop_sortkey: ", p), err)
    }
    if err := p.StopSortkey.Write(oprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StopSortkey), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 7:stop_sortkey: ", p), err)
    }
    return err
}

// writeField8 writes start_inclusive (bool, id 8).
func (p *MultiGetRequest) writeField8(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("start_inclusive", thrift.BOOL, 8); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:start_inclusive: ", p), err)
    }
    if err := oprot.WriteBool(bool(p.StartInclusive)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.start_inclusive (8) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 8:start_inclusive: ", p), err)
    }
    return err
}

// writeField9 writes stop_inclusive (bool, id 9).
func (p *MultiGetRequest) writeField9(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("stop_inclusive", thrift.BOOL, 9); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:stop_inclusive: ", p), err)
    }
    if err := oprot.WriteBool(bool(p.StopInclusive)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.stop_inclusive (9) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 9:stop_inclusive: ", p), err)
    }
    return err
}

// writeField10 writes sort_key_filter_type (i32 enum, id 10).
func (p *MultiGetRequest) writeField10(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("sort_key_filter_type", thrift.I32, 10); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:sort_key_filter_type: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.SortKeyFilterType)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.sort_key_filter_type (10) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 10:sort_key_filter_type: ", p), err)
    }
    return err
}

// writeField11 writes sort_key_filter_pattern (struct, id 11).
func (p *MultiGetRequest) writeField11(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("sort_key_filter_pattern", thrift.STRUCT, 11); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:sort_key_filter_pattern: ", p), err)
    }
    if err := p.SortKeyFilterPattern.Write(oprot); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKeyFilterPattern), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 11:sort_key_filter_pattern: ", p), err)
    }
    return err
}

// writeField12 writes reverse (bool, id 12).
func (p *MultiGetRequest) writeField12(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("reverse", thrift.BOOL, 12); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:reverse: ", p), err)
    }
    if err := oprot.WriteBool(bool(p.Reverse)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.reverse (12) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 12:reverse: ", p), err)
    }
    return err
}

// String renders the request for debugging; safe on a nil receiver.
func (p *MultiGetRequest) String() string {
    if p == nil {
        return ""
    }
    return fmt.Sprintf("MultiGetRequest(%+v)", *p)
}

// Attributes:
// - Error
// - Kvs
// - AppID
// - PartitionIndex
// - Server
type MultiGetResponse struct {
    Error          int32       `thrift:"error,1" db:"error" json:"error"`
    Kvs            []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"`
    AppID          int32       `thrift:"app_id,3" db:"app_id" json:"app_id"`
    PartitionIndex int32       `thrift:"partition_index,4" db:"partition_index" json:"partition_index"`
    // unused field # 5
    Server string `thrift:"server,6" db:"server" json:"server"`
}

func NewMultiGetResponse() *MultiGetResponse {
    return &MultiGetResponse{}
}

func (p *MultiGetResponse) GetError() int32 {
    return p.Error
}

func (p *MultiGetResponse) GetKvs() []*KeyValue {
    return p.Kvs
}

func (p *MultiGetResponse) GetAppID() int32 {
    return p.AppID
}

func (p *MultiGetResponse) GetPartitionIndex() int32 {
    return p.PartitionIndex
}

func (p *MultiGetResponse) GetServer() string {
    return p.Server
}

// Read deserializes the response, dispatching on field id (field 5 is unused)
// and skipping unknown or mistyped fields for protocol compatibility.
func (p *MultiGetResponse) Read(iprot thrift.TProtocol) error {
    if _, err := iprot.ReadStructBegin(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
    }

    for {
        _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
        if err != nil {
            return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
        }
        if fieldTypeId == thrift.STOP {
            break
        }
        switch fieldId {
        case 1:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField1(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 2:
            if fieldTypeId == thrift.LIST {
                if err := p.ReadField2(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 3:
            if fieldTypeId == 
thrift.I32 {
                if err := p.ReadField3(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 4:
            if fieldTypeId == thrift.I32 {
                if err := p.ReadField4(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 6:
            if fieldTypeId == thrift.STRING {
                if err := p.ReadField6(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        default:
            if err := iprot.Skip(fieldTypeId); err != nil {
                return err
            }
        }
        if err := iprot.ReadFieldEnd(); err != nil {
            return err
        }
    }
    if err := iprot.ReadStructEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
    }
    return nil
}

// NOTE(review): appears to be Apache-Thrift-generated code — prefer
// regenerating from the .thrift IDL over hand-editing.

// ReadField1 reads field 1 (error) as an i32.
func (p *MultiGetResponse) ReadField1(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 1: ", err)
    } else {
        p.Error = v
    }
    return nil
}

// ReadField2 reads field 2 (kvs) as a list of KeyValue structs.
func (p *MultiGetResponse) ReadField2(iprot thrift.TProtocol) error {
    _, size, err := iprot.ReadListBegin()
    if err != nil {
        return thrift.PrependError("error reading list begin: ", err)
    }
    tSlice := make([]*KeyValue, 0, size)
    p.Kvs = tSlice
    for i := 0; i < size; i++ {
        _elem3 := &KeyValue{}
        if err := _elem3.Read(iprot); err != nil {
            return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
        }
        p.Kvs = append(p.Kvs, _elem3)
    }
    if err := iprot.ReadListEnd(); err != nil {
        return thrift.PrependError("error reading list end: ", err)
    }
    return nil
}

// ReadField3 reads field 3 (app_id) as an i32.
func (p *MultiGetResponse) ReadField3(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 3: ", err)
    } else {
        p.AppID = v
    }
    return nil
}

// ReadField4 reads field 4 (partition_index) as an i32.
func (p *MultiGetResponse) ReadField4(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadI32(); err != nil {
        return thrift.PrependError("error reading field 4: ", err)
    } else {
        p.PartitionIndex = v
    }
    return nil
}

// ReadField6 reads field 6 (server) as a string.
func (p *MultiGetResponse) ReadField6(iprot thrift.TProtocol) error {
    if v, err := iprot.ReadString(); err != nil {
        return thrift.PrependError("error reading field 6: ", err)
    } else {
        p.Server = v
    }
    return nil
}

// Write serializes the response: fields 1..4 and 6 (5 is unused) followed by
// the field-stop marker.
func (p *MultiGetResponse) Write(oprot thrift.TProtocol) error {
    if err := oprot.WriteStructBegin("multi_get_response"); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
    }
    if p != nil {
        if err := p.writeField1(oprot); err != nil {
            return err
        }
        if err := p.writeField2(oprot); err != nil {
            return err
        }
        if err := p.writeField3(oprot); err != nil {
            return err
        }
        if err := p.writeField4(oprot); err != nil {
            return err
        }
        if err := p.writeField6(oprot); err != nil {
            return err
        }
    }
    if err := oprot.WriteFieldStop(); err != nil {
        return thrift.PrependError("write field stop error: ", err)
    }
    if err := oprot.WriteStructEnd(); err != nil {
        return thrift.PrependError("write struct stop error: ", err)
    }
    return nil
}

// writeField1 writes error (i32, id 1).
func (p *MultiGetResponse) writeField1(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.Error)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err)
    }
    return err
}

// writeField2 writes kvs (list<struct>, id 2).
func (p *MultiGetResponse) writeField2(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err)
    }
    if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil {
        return thrift.PrependError("error writing list begin: ", err)
    }
    for _, v := range p.Kvs {
        if err := v.Write(oprot); err != nil {
            return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
        }
    }
    if err := oprot.WriteListEnd(); err != nil {
        return thrift.PrependError("error writing list end: ", err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err)
    }
    return err
}

// writeField3 writes app_id (i32, id 3).
func (p *MultiGetResponse) writeField3(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.AppID)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err)
    }
    return err
}

// writeField4 writes partition_index (i32, id 4).
func (p *MultiGetResponse) writeField4(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err)
    }
    if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err)
    }
    return err
}

// writeField6 writes server (string, id 6).
func (p *MultiGetResponse) writeField6(oprot thrift.TProtocol) (err error) {
    if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err)
    }
    if err := oprot.WriteString(string(p.Server)); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err)
    }
    if err := oprot.WriteFieldEnd(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err)
    }
    return err
}

// String renders the response for debugging; safe on a nil receiver.
func (p *MultiGetResponse) String() string {
    if p == nil {
        return ""
    }
    return fmt.Sprintf("MultiGetResponse(%+v)", *p)
}

// Attributes:
// - HashKey
// - SortKey
type FullKey struct {
    HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"`
    SortKey *base.Blob `thrift:"sort_key,2" db:"sort_key" json:"sort_key"`
}

func NewFullKey() *FullKey {
    return &FullKey{}
}

var FullKey_HashKey_DEFAULT *base.Blob

// GetHashKey returns the default (nil) when the field is unset.
func (p *FullKey) GetHashKey() *base.Blob {
    if !p.IsSetHashKey() {
        return FullKey_HashKey_DEFAULT
    }
    return p.HashKey
}

var FullKey_SortKey_DEFAULT *base.Blob

func (p *FullKey) GetSortKey() *base.Blob {
    if !p.IsSetSortKey() {
        return FullKey_SortKey_DEFAULT
    }
    return p.SortKey
}

func (p *FullKey) IsSetHashKey() bool {
    return p.HashKey != nil
}

func (p *FullKey) IsSetSortKey() bool {
    return p.SortKey != nil
}

// Read deserializes the key, dispatching on field id and skipping unknown or
// mistyped fields for protocol compatibility.
func (p *FullKey) Read(iprot thrift.TProtocol) error {
    if _, err := iprot.ReadStructBegin(); err != nil {
        return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
    }

    for {
        _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
        if err != nil {
            return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
        }
        if fieldTypeId == thrift.STOP {
            break
        }
        switch fieldId {
        case 1:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField1(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
        case 2:
            if fieldTypeId == thrift.STRUCT {
                if err := p.ReadField2(iprot); err != nil {
                    return err
                }
            } else {
                if err := iprot.Skip(fieldTypeId); err != nil {
                    return err
                }
            }
+ default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *FullKey) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *FullKey) ReadField2(iprot thrift.TProtocol) error { + p.SortKey = &base.Blob{} + if err := p.SortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKey), err) + } + return nil +} + +func (p *FullKey) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("full_key"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *FullKey) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *FullKey) writeField2(oprot thrift.TProtocol) (err 
error) { + if err := oprot.WriteFieldBegin("sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_key: ", p), err) + } + if err := p.SortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_key: ", p), err) + } + return err +} + +func (p *FullKey) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FullKey(%+v)", *p) +} + +// Attributes: +// - Keys +type BatchGetRequest struct { + Keys []*FullKey `thrift:"keys,1" db:"keys" json:"keys"` +} + +func NewBatchGetRequest() *BatchGetRequest { + return &BatchGetRequest{} +} + +func (p *BatchGetRequest) GetKeys() []*FullKey { + return p.Keys +} +func (p *BatchGetRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BatchGetRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := 
make([]*FullKey, 0, size) + p.Keys = tSlice + for i := 0; i < size; i++ { + _elem4 := &FullKey{} + if err := _elem4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Keys = append(p.Keys, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BatchGetRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("batch_get_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchGetRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("keys", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Keys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Keys { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:keys: ", p), err) + } + return err +} + +func (p *BatchGetRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchGetRequest(%+v)", *p) +} + +// Attributes: +// - HashKey +// - SortKey +// - Value +type FullData struct { + HashKey *base.Blob 
`thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + SortKey *base.Blob `thrift:"sort_key,2" db:"sort_key" json:"sort_key"` + Value *base.Blob `thrift:"value,3" db:"value" json:"value"` +} + +func NewFullData() *FullData { + return &FullData{} +} + +var FullData_HashKey_DEFAULT *base.Blob + +func (p *FullData) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return FullData_HashKey_DEFAULT + } + return p.HashKey +} + +var FullData_SortKey_DEFAULT *base.Blob + +func (p *FullData) GetSortKey() *base.Blob { + if !p.IsSetSortKey() { + return FullData_SortKey_DEFAULT + } + return p.SortKey +} + +var FullData_Value_DEFAULT *base.Blob + +func (p *FullData) GetValue() *base.Blob { + if !p.IsSetValue() { + return FullData_Value_DEFAULT + } + return p.Value +} +func (p *FullData) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *FullData) IsSetSortKey() bool { + return p.SortKey != nil +} + +func (p *FullData) IsSetValue() bool { + return p.Value != nil +} + +func (p *FullData) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err 
:= iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *FullData) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *FullData) ReadField2(iprot thrift.TProtocol) error { + p.SortKey = &base.Blob{} + if err := p.SortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKey), err) + } + return nil +} + +func (p *FullData) ReadField3(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *FullData) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("full_data"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *FullData) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *FullData) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_key: ", p), err) + } + if err := p.SortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_key: ", p), err) + } + return err +} + +func (p *FullData) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:value: ", p), err) + } + return err +} + +func (p *FullData) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FullData(%+v)", *p) +} + +// Attributes: +// - Error +// - Data +// - AppID +// - PartitionIndex +// - Server +type BatchGetResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Data []*FullData `thrift:"data,2" db:"data" json:"data"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewBatchGetResponse() *BatchGetResponse { + return &BatchGetResponse{} +} + 
+func (p *BatchGetResponse) GetError() int32 { + return p.Error +} + +func (p *BatchGetResponse) GetData() []*FullData { + return p.Data +} + +func (p *BatchGetResponse) GetAppID() int32 { + return p.AppID +} + +func (p *BatchGetResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *BatchGetResponse) GetServer() string { + return p.Server +} +func (p *BatchGetResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct 
end error: ", p), err) + } + return nil +} + +func (p *BatchGetResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *BatchGetResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*FullData, 0, size) + p.Data = tSlice + for i := 0; i < size; i++ { + _elem5 := &FullData{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.Data = append(p.Data, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BatchGetResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *BatchGetResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *BatchGetResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *BatchGetResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("batch_get_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if 
err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchGetResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("data", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:data: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Data)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Data { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:data: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := 
oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *BatchGetResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchGetResponse(%+v)", *p) +} + +// Attributes: +// - Key +// - Increment +// - ExpireTsSeconds +type IncrRequest struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` + Increment int64 `thrift:"increment,2" db:"increment" json:"increment"` + ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` +} + +func NewIncrRequest() *IncrRequest { + return &IncrRequest{} +} + +var 
IncrRequest_Key_DEFAULT *base.Blob + +func (p *IncrRequest) GetKey() *base.Blob { + if !p.IsSetKey() { + return IncrRequest_Key_DEFAULT + } + return p.Key +} + +func (p *IncrRequest) GetIncrement() int64 { + return p.Increment +} + +func (p *IncrRequest) GetExpireTsSeconds() int32 { + return p.ExpireTsSeconds +} +func (p *IncrRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *IncrRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IncrRequest) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *IncrRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Increment = v + } + return nil +} + +func (p *IncrRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = v + } + return nil +} + +func (p *IncrRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IncrRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *IncrRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("increment", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:increment: ", p), err) + } + if err := oprot.WriteI64(int64(p.Increment)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.increment (2) field write error: ", p), err) + } + if err := 
oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:increment: ", p), err) + } + return err +} + +func (p *IncrRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + return err +} + +func (p *IncrRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IncrRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - NewValue_ +// - AppID +// - PartitionIndex +// - Decree +// - Server +type IncrResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + NewValue_ int64 `thrift:"new_value,2" db:"new_value" json:"new_value"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,5" db:"decree" json:"decree"` + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewIncrResponse() *IncrResponse { + return &IncrResponse{} +} + +func (p *IncrResponse) GetError() int32 { + return p.Error +} + +func (p *IncrResponse) GetNewValue_() int64 { + return p.NewValue_ +} + +func (p *IncrResponse) GetAppID() int32 { + return p.AppID +} + +func (p *IncrResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *IncrResponse) GetDecree() int64 { + return p.Decree +} + +func (p *IncrResponse) GetServer() string { + return p.Server +} +func (p *IncrResponse) Read(iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IncrResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil 
// NOTE(review): Apache Thrift auto-generated serialization code for the
// Pegasus rrdb structs. Do not hand-edit the logic — regenerate from the
// .thrift IDL instead. Comments below were added for readability only.

}

// ReadField2 decodes field 2 (new_value, i64) into p.NewValue_.
func (p *IncrResponse) ReadField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.NewValue_ = v
	}
	return nil
}

// ReadField3 decodes field 3 (app_id, i32).
func (p *IncrResponse) ReadField3(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 3: ", err)
	} else {
		p.AppID = v
	}
	return nil
}

// ReadField4 decodes field 4 (partition_index, i32).
func (p *IncrResponse) ReadField4(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 4: ", err)
	} else {
		p.PartitionIndex = v
	}
	return nil
}

// ReadField5 decodes field 5 (decree, i64).
func (p *IncrResponse) ReadField5(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 5: ", err)
	} else {
		p.Decree = v
	}
	return nil
}

// ReadField6 decodes field 6 (server, string).
func (p *IncrResponse) ReadField6(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 6: ", err)
	} else {
		p.Server = v
	}
	return nil
}

// Write serializes p as thrift struct "incr_response". A nil receiver
// emits an empty struct (begin, field-stop, end only).
func (p *IncrResponse) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("incr_response"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
		if err := p.writeField2(oprot); err != nil {
			return err
		}
		if err := p.writeField3(oprot); err != nil {
			return err
		}
		if err := p.writeField4(oprot); err != nil {
			return err
		}
		if err := p.writeField5(oprot); err != nil {
			return err
		}
		if err := p.writeField6(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (error, i32).
func (p *IncrResponse) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.Error)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err)
	}
	return err
}

// writeField2 encodes field 2 (new_value, i64).
func (p *IncrResponse) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("new_value", thrift.I64, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_value: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.NewValue_)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.new_value (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_value: ", p), err)
	}
	return err
}

// writeField3 encodes field 3 (app_id, i32).
func (p *IncrResponse) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.AppID)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err)
	}
	return err
}

// writeField4 encodes field 4 (partition_index, i32).
func (p *IncrResponse) writeField4(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err)
	}
	return err
}

// writeField5 encodes field 5 (decree, i64).
func (p *IncrResponse) writeField5(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("decree", thrift.I64, 5); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:decree: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.Decree)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.decree (5) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:decree: ", p), err)
	}
	return err
}

// writeField6 encodes field 6 (server, string).
func (p *IncrResponse) writeField6(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err)
	}
	if err := oprot.WriteString(string(p.Server)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err)
	}
	return err
}

// String renders p for debugging; safe on a nil receiver.
func (p *IncrResponse) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("IncrResponse(%+v)", *p)
}

// Attributes:
//  - HashKey
//  - CheckSortKey
//  - CheckType
//  - CheckOperand
//  - SetDiffSortKey
//  - SetSortKey
//  - SetValue
//  - SetExpireTsSeconds
//  - ReturnCheckValue
type CheckAndSetRequest struct {
	HashKey            *base.Blob   `thrift:"hash_key,1" db:"hash_key" json:"hash_key"`
	CheckSortKey       *base.Blob   `thrift:"check_sort_key,2" db:"check_sort_key" json:"check_sort_key"`
	CheckType          CasCheckType `thrift:"check_type,3" db:"check_type" json:"check_type"`
	CheckOperand       *base.Blob   `thrift:"check_operand,4" db:"check_operand" json:"check_operand"`
	SetDiffSortKey     bool         `thrift:"set_diff_sort_key,5" db:"set_diff_sort_key" json:"set_diff_sort_key"`
	SetSortKey         *base.Blob   `thrift:"set_sort_key,6" db:"set_sort_key" json:"set_sort_key"`
	SetValue           *base.Blob   `thrift:"set_value,7" db:"set_value" json:"set_value"`
	SetExpireTsSeconds int32        `thrift:"set_expire_ts_seconds,8" db:"set_expire_ts_seconds" json:"set_expire_ts_seconds"`
	ReturnCheckValue   bool         `thrift:"return_check_value,9" db:"return_check_value" json:"return_check_value"`
}

// NewCheckAndSetRequest returns a zero-valued request.
func NewCheckAndSetRequest() *CheckAndSetRequest {
	return &CheckAndSetRequest{}
}

var CheckAndSetRequest_HashKey_DEFAULT *base.Blob

// GetHashKey returns HashKey, or the (nil) default when unset.
func (p *CheckAndSetRequest) GetHashKey() *base.Blob {
	if !p.IsSetHashKey() {
		return CheckAndSetRequest_HashKey_DEFAULT
	}
	return p.HashKey
}

var CheckAndSetRequest_CheckSortKey_DEFAULT *base.Blob

// GetCheckSortKey returns CheckSortKey, or the (nil) default when unset.
func (p *CheckAndSetRequest) GetCheckSortKey() *base.Blob {
	if !p.IsSetCheckSortKey() {
		return CheckAndSetRequest_CheckSortKey_DEFAULT
	}
	return p.CheckSortKey
}

func (p *CheckAndSetRequest) GetCheckType() CasCheckType {
	return p.CheckType
}

var CheckAndSetRequest_CheckOperand_DEFAULT *base.Blob

// GetCheckOperand returns CheckOperand, or the (nil) default when unset.
func (p *CheckAndSetRequest) GetCheckOperand() *base.Blob {
	if !p.IsSetCheckOperand() {
		return CheckAndSetRequest_CheckOperand_DEFAULT
	}
	return p.CheckOperand
}

func (p *CheckAndSetRequest) GetSetDiffSortKey() bool {
	return p.SetDiffSortKey
}

var CheckAndSetRequest_SetSortKey_DEFAULT *base.Blob

// GetSetSortKey returns SetSortKey, or the (nil) default when unset.
func (p *CheckAndSetRequest) GetSetSortKey() *base.Blob {
	if !p.IsSetSetSortKey() {
		return CheckAndSetRequest_SetSortKey_DEFAULT
	}
	return p.SetSortKey
}

var CheckAndSetRequest_SetValue_DEFAULT *base.Blob

// GetSetValue returns SetValue, or the (nil) default when unset.
func (p *CheckAndSetRequest) GetSetValue() *base.Blob {
	if !p.IsSetSetValue() {
		return CheckAndSetRequest_SetValue_DEFAULT
	}
	return p.SetValue
}

func (p *CheckAndSetRequest) GetSetExpireTsSeconds() int32 {
	return p.SetExpireTsSeconds
}

func (p *CheckAndSetRequest) GetReturnCheckValue() bool {
	return p.ReturnCheckValue
}

// IsSet* report whether the optional pointer fields are non-nil.
func (p *CheckAndSetRequest) IsSetHashKey() bool {
	return p.HashKey != nil
}

func (p *CheckAndSetRequest) IsSetCheckSortKey() bool {
	return p.CheckSortKey != nil
}

func (p *CheckAndSetRequest) IsSetCheckOperand() bool {
	return p.CheckOperand != nil
}

func (p *CheckAndSetRequest) IsSetSetSortKey() bool {
	return p.SetSortKey != nil
}

func (p *CheckAndSetRequest) IsSetSetValue() bool {
	return p.SetValue != nil
}

// Read deserializes p from iprot; unknown or wrongly-typed fields are skipped.
func (p *CheckAndSetRequest) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 2:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField2(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 3:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField3(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 4:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField4(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 5:
			if fieldTypeId == thrift.BOOL {
				if err := p.ReadField5(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 6:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField6(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 7:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField7(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 8:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField8(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 9:
			if fieldTypeId == thrift.BOOL {
				if err := p.ReadField9(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// ReadField1 decodes field 1 (hash_key, struct base.Blob).
func (p *CheckAndSetRequest) ReadField1(iprot thrift.TProtocol) error {
	p.HashKey = &base.Blob{}
	if err := p.HashKey.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err)
	}
	return nil
}

// ReadField2 decodes field 2 (check_sort_key, struct base.Blob).
func (p *CheckAndSetRequest) ReadField2(iprot thrift.TProtocol) error {
	p.CheckSortKey = &base.Blob{}
	if err := p.CheckSortKey.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckSortKey), err)
	}
	return nil
}

// ReadField3 decodes field 3 (check_type, i32 enum CasCheckType).
func (p *CheckAndSetRequest) ReadField3(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 3: ", err)
	} else {
		temp := CasCheckType(v)
		p.CheckType = temp
	}
	return nil
}

// ReadField4 decodes field 4 (check_operand, struct base.Blob).
func (p *CheckAndSetRequest) ReadField4(iprot thrift.TProtocol) error {
	p.CheckOperand = &base.Blob{}
	if err := p.CheckOperand.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckOperand), err)
	}
	return nil
}

// ReadField5 decodes field 5 (set_diff_sort_key, bool).
func (p *CheckAndSetRequest) ReadField5(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadBool(); err != nil {
		return thrift.PrependError("error reading field 5: ", err)
	} else {
		p.SetDiffSortKey = v
	}
	return nil
}

// ReadField6 decodes field 6 (set_sort_key, struct base.Blob).
func (p *CheckAndSetRequest) ReadField6(iprot thrift.TProtocol) error {
	p.SetSortKey = &base.Blob{}
	if err := p.SetSortKey.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SetSortKey), err)
	}
	return nil
}

// ReadField7 decodes field 7 (set_value, struct base.Blob).
func (p *CheckAndSetRequest) ReadField7(iprot thrift.TProtocol) error {
	p.SetValue = &base.Blob{}
	if err := p.SetValue.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SetValue), err)
	}
	return nil
}

// ReadField8 decodes field 8 (set_expire_ts_seconds, i32).
func (p *CheckAndSetRequest) ReadField8(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 8: ", err)
	} else {
		p.SetExpireTsSeconds = v
	}
	return nil
}

// ReadField9 decodes field 9 (return_check_value, bool).
func (p *CheckAndSetRequest) ReadField9(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadBool(); err != nil {
		return thrift.PrependError("error reading field 9: ", err)
	} else {
		p.ReturnCheckValue = v
	}
	return nil
}

// Write serializes p as thrift struct "check_and_set_request". A nil
// receiver emits an empty struct.
func (p *CheckAndSetRequest) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("check_and_set_request"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
		if err := p.writeField2(oprot); err != nil {
			return err
		}
		if err := p.writeField3(oprot); err != nil {
			return err
		}
		if err := p.writeField4(oprot); err != nil {
			return err
		}
		if err := p.writeField5(oprot); err != nil {
			return err
		}
		if err := p.writeField6(oprot); err != nil {
			return err
		}
		if err := p.writeField7(oprot); err != nil {
			return err
		}
		if err := p.writeField8(oprot); err != nil {
			return err
		}
		if err := p.writeField9(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (hash_key, struct).
func (p *CheckAndSetRequest) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err)
	}
	if err := p.HashKey.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err)
	}
	return err
}

// writeField2 encodes field 2 (check_sort_key, struct).
func (p *CheckAndSetRequest) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("check_sort_key", thrift.STRUCT, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_sort_key: ", p), err)
	}
	if err := p.CheckSortKey.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckSortKey), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_sort_key: ", p), err)
	}
	return err
}

// writeField3 encodes field 3 (check_type, i32 enum).
func (p *CheckAndSetRequest) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("check_type", thrift.I32, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_type: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.CheckType)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.check_type (3) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_type: ", p), err)
	}
	return err
}

// writeField4 encodes field 4 (check_operand, struct).
func (p *CheckAndSetRequest) writeField4(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("check_operand", thrift.STRUCT, 4); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_operand: ", p), err)
	}
	if err := p.CheckOperand.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckOperand), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_operand: ", p), err)
	}
	return err
}

// writeField5 encodes field 5 (set_diff_sort_key, bool).
func (p *CheckAndSetRequest) writeField5(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("set_diff_sort_key", thrift.BOOL, 5); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:set_diff_sort_key: ", p), err)
	}
	if err := oprot.WriteBool(bool(p.SetDiffSortKey)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.set_diff_sort_key (5) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:set_diff_sort_key: ", p), err)
	}
	return err
}

// writeField6 encodes field 6 (set_sort_key, struct).
func (p *CheckAndSetRequest) writeField6(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("set_sort_key", thrift.STRUCT, 6); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:set_sort_key: ", p), err)
	}
	if err := p.SetSortKey.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SetSortKey), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:set_sort_key: ", p), err)
	}
	return err
}

// writeField7 encodes field 7 (set_value, struct).
func (p *CheckAndSetRequest) writeField7(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("set_value", thrift.STRUCT, 7); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:set_value: ", p), err)
	}
	if err := p.SetValue.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SetValue), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 7:set_value: ", p), err)
	}
	return err
}

// writeField8 encodes field 8 (set_expire_ts_seconds, i32).
func (p *CheckAndSetRequest) writeField8(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("set_expire_ts_seconds", thrift.I32, 8); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:set_expire_ts_seconds: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.SetExpireTsSeconds)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.set_expire_ts_seconds (8) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:set_expire_ts_seconds: ", p), err)
	}
	return err
}

// writeField9 encodes field 9 (return_check_value, bool).
func (p *CheckAndSetRequest) writeField9(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("return_check_value", thrift.BOOL, 9); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:return_check_value: ", p), err)
	}
	if err := oprot.WriteBool(bool(p.ReturnCheckValue)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.return_check_value (9) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 9:return_check_value: ", p), err)
	}
	return err
}

// String renders p for debugging; safe on a nil receiver.
func (p *CheckAndSetRequest) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("CheckAndSetRequest(%+v)", *p)
}

// Attributes:
//  - Error
//  - CheckValueReturned
//  - CheckValueExist
//  - CheckValue
//  - AppID
//  - PartitionIndex
//  - Decree
//  - Server
type CheckAndSetResponse struct {
	Error              int32      `thrift:"error,1" db:"error" json:"error"`
	CheckValueReturned bool       `thrift:"check_value_returned,2" db:"check_value_returned" json:"check_value_returned"`
	CheckValueExist    bool       `thrift:"check_value_exist,3" db:"check_value_exist" json:"check_value_exist"`
	CheckValue         *base.Blob `thrift:"check_value,4" db:"check_value" json:"check_value"`
	AppID              int32      `thrift:"app_id,5" db:"app_id" json:"app_id"`
	PartitionIndex     int32      `thrift:"partition_index,6" db:"partition_index" json:"partition_index"`
	Decree             int64      `thrift:"decree,7" db:"decree" json:"decree"`
	Server             string     `thrift:"server,8" db:"server" json:"server"`
}

// NewCheckAndSetResponse returns a zero-valued response.
func NewCheckAndSetResponse() *CheckAndSetResponse {
	return &CheckAndSetResponse{}
}

func (p *CheckAndSetResponse) GetError() int32 {
	return p.Error
}

func (p *CheckAndSetResponse) GetCheckValueReturned() bool {
	return p.CheckValueReturned
}

func (p *CheckAndSetResponse) GetCheckValueExist() bool {
	return p.CheckValueExist
}

var CheckAndSetResponse_CheckValue_DEFAULT *base.Blob

// GetCheckValue returns CheckValue, or the (nil) default when unset.
func (p *CheckAndSetResponse) GetCheckValue() *base.Blob {
	if !p.IsSetCheckValue() {
		return CheckAndSetResponse_CheckValue_DEFAULT
	}
	return p.CheckValue
}

func (p *CheckAndSetResponse) GetAppID() int32 {
	return p.AppID
}

func (p *CheckAndSetResponse) GetPartitionIndex() int32 {
	return p.PartitionIndex
}

func (p *CheckAndSetResponse) GetDecree() int64 {
	return p.Decree
}

func (p *CheckAndSetResponse) GetServer() string {
	return p.Server
}

// IsSetCheckValue reports whether the optional CheckValue field is non-nil.
func (p *CheckAndSetResponse) IsSetCheckValue() bool {
	return p.CheckValue != nil
}

// Read deserializes p from iprot; unknown or wrongly-typed fields are skipped.
func (p *CheckAndSetResponse) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 2:
			if fieldTypeId == thrift.BOOL {
				if err := p.ReadField2(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 3:
			if fieldTypeId == thrift.BOOL {
				if err := p.ReadField3(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 4:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField4(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 5:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField5(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 6:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField6(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 7:
			if fieldTypeId == thrift.I64 {
				if err := p.ReadField7(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 8:
			if fieldTypeId == thrift.STRING {
				if err := p.ReadField8(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// ReadField1 decodes field 1 (error, i32).
func (p *CheckAndSetResponse) ReadField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		p.Error = v
	}
	return nil
}

// ReadField2 decodes field 2 (check_value_returned, bool).
func (p *CheckAndSetResponse) ReadField2(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadBool(); err != nil {
		return thrift.PrependError("error reading field 2: ", err)
	} else {
		p.CheckValueReturned = v
	}
	return nil
}

// ReadField3 decodes field 3 (check_value_exist, bool).
func (p *CheckAndSetResponse) ReadField3(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadBool(); err != nil {
		return thrift.PrependError("error reading field 3: ", err)
	} else {
		p.CheckValueExist = v
	}
	return nil
}

// ReadField4 decodes field 4 (check_value, struct base.Blob).
func (p *CheckAndSetResponse) ReadField4(iprot thrift.TProtocol) error {
	p.CheckValue = &base.Blob{}
	if err := p.CheckValue.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckValue), err)
	}
	return nil
}

// ReadField5 decodes field 5 (app_id, i32).
func (p *CheckAndSetResponse) ReadField5(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 5: ", err)
	} else {
		p.AppID = v
	}
	return nil
}

// ReadField6 decodes field 6 (partition_index, i32).
func (p *CheckAndSetResponse) ReadField6(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 6: ", err)
	} else {
		p.PartitionIndex = v
	}
	return nil
}

// ReadField7 decodes field 7 (decree, i64).
func (p *CheckAndSetResponse) ReadField7(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI64(); err != nil {
		return thrift.PrependError("error reading field 7: ", err)
	} else {
		p.Decree = v
	}
	return nil
}

// ReadField8 decodes field 8 (server, string).
func (p *CheckAndSetResponse) ReadField8(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadString(); err != nil {
		return thrift.PrependError("error reading field 8: ", err)
	} else {
		p.Server = v
	}
	return nil
}

// Write serializes p as thrift struct "check_and_set_response". A nil
// receiver emits an empty struct.
func (p *CheckAndSetResponse) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("check_and_set_response"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
		if err := p.writeField2(oprot); err != nil {
			return err
		}
		if err := p.writeField3(oprot); err != nil {
			return err
		}
		if err := p.writeField4(oprot); err != nil {
			return err
		}
		if err := p.writeField5(oprot); err != nil {
			return err
		}
		if err := p.writeField6(oprot); err != nil {
			return err
		}
		if err := p.writeField7(oprot); err != nil {
			return err
		}
		if err := p.writeField8(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (error, i32).
func (p *CheckAndSetResponse) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.Error)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err)
	}
	return err
}

// writeField2 encodes field 2 (check_value_returned, bool).
func (p *CheckAndSetResponse) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("check_value_returned", thrift.BOOL, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_value_returned: ", p), err)
	}
	if err := oprot.WriteBool(bool(p.CheckValueReturned)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.check_value_returned (2) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_value_returned: ", p), err)
	}
	return err
}

// writeField3 encodes field 3 (check_value_exist, bool).
func (p *CheckAndSetResponse) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("check_value_exist", thrift.BOOL, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_value_exist: ", p), err)
	}
	if err := oprot.WriteBool(bool(p.CheckValueExist)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.check_value_exist (3) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_value_exist: ", p), err)
	}
	return err
}

// writeField4 encodes field 4 (check_value, struct).
func (p *CheckAndSetResponse) writeField4(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("check_value", thrift.STRUCT, 4); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_value: ", p), err)
	}
	if err := p.CheckValue.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckValue), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_value: ", p), err)
	}
	return err
}

// writeField5 encodes field 5 (app_id, i32).
func (p *CheckAndSetResponse) writeField5(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.AppID)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err)
	}
	return err
}

// writeField6 encodes field 6 (partition_index, i32).
func (p *CheckAndSetResponse) writeField6(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 6); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:partition_index: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.partition_index (6) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:partition_index: ", p), err)
	}
	return err
}

// writeField7 encodes field 7 (decree, i64).
func (p *CheckAndSetResponse) writeField7(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("decree", thrift.I64, 7); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:decree: ", p), err)
	}
	if err := oprot.WriteI64(int64(p.Decree)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.decree (7) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 7:decree: ", p), err)
	}
	return err
}

// writeField8 encodes field 8 (server, string).
func (p *CheckAndSetResponse) writeField8(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("server", thrift.STRING, 8); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:server: ", p), err)
	}
	if err := oprot.WriteString(string(p.Server)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.server (8) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:server: ", p), err)
	}
	return err
}

// String renders p for debugging; safe on a nil receiver.
func (p *CheckAndSetResponse) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("CheckAndSetResponse(%+v)", *p)
}

// Attributes:
//  - Operation
//  - SortKey
//  - Value
//  - SetExpireTsSeconds
type Mutate struct {
	Operation          MutateOperation `thrift:"operation,1" db:"operation" json:"operation"`
	SortKey            *base.Blob      `thrift:"sort_key,2" db:"sort_key" json:"sort_key"`
	Value              *base.Blob      `thrift:"value,3" db:"value" json:"value"`
	SetExpireTsSeconds int32           `thrift:"set_expire_ts_seconds,4" db:"set_expire_ts_seconds" json:"set_expire_ts_seconds"`
}

// NewMutate returns a zero-valued mutation.
func NewMutate() *Mutate {
	return &Mutate{}
}

func (p *Mutate) GetOperation() MutateOperation {
	return p.Operation
}

var Mutate_SortKey_DEFAULT *base.Blob

// GetSortKey returns SortKey, or the (nil) default when unset.
func (p *Mutate) GetSortKey() *base.Blob {
	if !p.IsSetSortKey() {
		return Mutate_SortKey_DEFAULT
	}
	return p.SortKey
}

var Mutate_Value_DEFAULT *base.Blob

// GetValue returns Value, or the (nil) default when unset.
func (p *Mutate) GetValue() *base.Blob {
	if !p.IsSetValue() {
		return Mutate_Value_DEFAULT
	}
	return p.Value
}

func (p *Mutate) GetSetExpireTsSeconds() int32 {
	return p.SetExpireTsSeconds
}

// IsSet* report whether the optional pointer fields are non-nil.
func (p *Mutate) IsSetSortKey() bool {
	return p.SortKey != nil
}

func (p *Mutate) IsSetValue() bool {
	return p.Value != nil
}

// Read deserializes p from iprot; unknown or wrongly-typed fields are skipped.
func (p *Mutate) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 2:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField2(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 3:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField3(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		case 4:
			if fieldTypeId == thrift.I32 {
				if err := p.ReadField4(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// ReadField1 decodes field 1 (operation, i32 enum MutateOperation).
func (p *Mutate) ReadField1(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 1: ", err)
	} else {
		temp := MutateOperation(v)
		p.Operation = temp
	}
	return nil
}

// ReadField2 decodes field 2 (sort_key, struct base.Blob).
func (p *Mutate) ReadField2(iprot thrift.TProtocol) error {
	p.SortKey = &base.Blob{}
	if err := p.SortKey.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKey), err)
	}
	return nil
}

// ReadField3 decodes field 3 (value, struct base.Blob).
func (p *Mutate) ReadField3(iprot thrift.TProtocol) error {
	p.Value = &base.Blob{}
	if err := p.Value.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err)
	}
	return nil
}

// ReadField4 decodes field 4 (set_expire_ts_seconds, i32).
func (p *Mutate) ReadField4(iprot thrift.TProtocol) error {
	if v, err := iprot.ReadI32(); err != nil {
		return thrift.PrependError("error reading field 4: ", err)
	} else {
		p.SetExpireTsSeconds = v
	}
	return nil
}

// Write serializes p as thrift struct "mutate". A nil receiver emits an
// empty struct.
func (p *Mutate) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("mutate"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
		if err := p.writeField2(oprot); err != nil {
			return err
		}
		if err := p.writeField3(oprot); err != nil {
			return err
		}
		if err := p.writeField4(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 encodes field 1 (operation, i32 enum).
func (p *Mutate) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("operation", thrift.I32, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.Operation)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err)
	}
	return err
}

// writeField2 encodes field 2 (sort_key, struct).
func (p *Mutate) writeField2(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("sort_key", thrift.STRUCT, 2); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_key: ", p), err)
	}
	if err := p.SortKey.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKey), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_key: ", p), err)
	}
	return err
}

// writeField3 encodes field 3 (value, struct).
func (p *Mutate) writeField3(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 3); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:value: ", p), err)
	}
	if err := p.Value.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:value: ", p), err)
	}
	return err
}

// writeField4 encodes field 4 (set_expire_ts_seconds, i32).
func (p *Mutate) writeField4(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("set_expire_ts_seconds", thrift.I32, 4); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:set_expire_ts_seconds: ", p), err)
	}
	if err := oprot.WriteI32(int32(p.SetExpireTsSeconds)); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T.set_expire_ts_seconds (4) field write error: ", p), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:set_expire_ts_seconds: ", p), err)
	}
	return err
}

// String renders p for debugging; safe on a nil receiver.
func (p *Mutate) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("Mutate(%+v)", *p)
}

// Attributes:
//  - HashKey
//  - CheckSortKey
//  - CheckType
//  - CheckOperand
//  - MutateList
//  - ReturnCheckValue
type CheckAndMutateRequest struct {
	HashKey          *base.Blob   `thrift:"hash_key,1" db:"hash_key" json:"hash_key"`
	CheckSortKey     *base.Blob   `thrift:"check_sort_key,2" db:"check_sort_key" json:"check_sort_key"`
	CheckType        CasCheckType `thrift:"check_type,3" db:"check_type" json:"check_type"`
	CheckOperand     *base.Blob   `thrift:"check_operand,4" db:"check_operand" json:"check_operand"`
	MutateList       []*Mutate    `thrift:"mutate_list,5" db:"mutate_list" json:"mutate_list"`
	ReturnCheckValue bool         `thrift:"return_check_value,6" db:"return_check_value" json:"return_check_value"`
}

// NewCheckAndMutateRequest returns a zero-valued request.
func NewCheckAndMutateRequest() *CheckAndMutateRequest {
	return &CheckAndMutateRequest{}
}

var CheckAndMutateRequest_HashKey_DEFAULT *base.Blob

// GetHashKey returns HashKey, or the (nil) default when unset.
func (p *CheckAndMutateRequest) GetHashKey() *base.Blob {
	if !p.IsSetHashKey() {
		return CheckAndMutateRequest_HashKey_DEFAULT
	}
	return p.HashKey
}

var CheckAndMutateRequest_CheckSortKey_DEFAULT *base.Blob

// GetCheckSortKey returns CheckSortKey, or the (nil) default when unset.
func (p *CheckAndMutateRequest) GetCheckSortKey() *base.Blob {
	if !p.IsSetCheckSortKey() {
		return CheckAndMutateRequest_CheckSortKey_DEFAULT
	}
	return p.CheckSortKey
}

func (p *CheckAndMutateRequest) GetCheckType() CasCheckType {
	return p.CheckType
}

var CheckAndMutateRequest_CheckOperand_DEFAULT *base.Blob

// GetCheckOperand returns CheckOperand, or the (nil) default when unset.
func (p *CheckAndMutateRequest) GetCheckOperand() *base.Blob {
	if !p.IsSetCheckOperand() {
		return CheckAndMutateRequest_CheckOperand_DEFAULT
	}
	return p.CheckOperand
}

func (p *CheckAndMutateRequest) GetMutateList() []*Mutate {
	return p.MutateList
}

func (p *CheckAndMutateRequest) 
GetReturnCheckValue() bool { + return p.ReturnCheckValue +} +func (p *CheckAndMutateRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *CheckAndMutateRequest) IsSetCheckSortKey() bool { + return p.CheckSortKey != nil +} + +func (p *CheckAndMutateRequest) IsSetCheckOperand() bool { + return p.CheckOperand != nil +} + +func (p *CheckAndMutateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + 
if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField2(iprot thrift.TProtocol) error { + p.CheckSortKey = &base.Blob{} + if err := p.CheckSortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckSortKey), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := CasCheckType(v) + p.CheckType = temp + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField4(iprot thrift.TProtocol) error { + p.CheckOperand = &base.Blob{} + if err := p.CheckOperand.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckOperand), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Mutate, 0, size) + p.MutateList = tSlice + for i := 0; i < size; i++ { + _elem6 := &Mutate{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.MutateList = append(p.MutateList, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField6(iprot thrift.TProtocol) error { + if v, 
err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.ReturnCheckValue = v + } + return nil +} + +func (p *CheckAndMutateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CheckAndMutateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_sort_key: ", p), err) + } + if err := p.CheckSortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
p.CheckSortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_sort_key: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.CheckType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_type: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_operand", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_operand: ", p), err) + } + if err := p.CheckOperand.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckOperand), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_operand: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("mutate_list", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:mutate_list: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.MutateList)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.MutateList { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return 
thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:mutate_list: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("return_check_value", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:return_check_value: ", p), err) + } + if err := oprot.WriteBool(bool(p.ReturnCheckValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.return_check_value (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:return_check_value: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckAndMutateRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - CheckValueReturned +// - CheckValueExist +// - CheckValue +// - AppID +// - PartitionIndex +// - Decree +// - Server +type CheckAndMutateResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + CheckValueReturned bool `thrift:"check_value_returned,2" db:"check_value_returned" json:"check_value_returned"` + CheckValueExist bool `thrift:"check_value_exist,3" db:"check_value_exist" json:"check_value_exist"` + CheckValue *base.Blob `thrift:"check_value,4" db:"check_value" json:"check_value"` + AppID int32 `thrift:"app_id,5" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,6" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,7" db:"decree" json:"decree"` + Server string `thrift:"server,8" db:"server" json:"server"` +} + +func NewCheckAndMutateResponse() *CheckAndMutateResponse { + return &CheckAndMutateResponse{} +} + +func (p *CheckAndMutateResponse) GetError() int32 { + return p.Error +} + +func (p 
*CheckAndMutateResponse) GetCheckValueReturned() bool { + return p.CheckValueReturned +} + +func (p *CheckAndMutateResponse) GetCheckValueExist() bool { + return p.CheckValueExist +} + +var CheckAndMutateResponse_CheckValue_DEFAULT *base.Blob + +func (p *CheckAndMutateResponse) GetCheckValue() *base.Blob { + if !p.IsSetCheckValue() { + return CheckAndMutateResponse_CheckValue_DEFAULT + } + return p.CheckValue +} + +func (p *CheckAndMutateResponse) GetAppID() int32 { + return p.AppID +} + +func (p *CheckAndMutateResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *CheckAndMutateResponse) GetDecree() int64 { + return p.Decree +} + +func (p *CheckAndMutateResponse) GetServer() string { + return p.Server +} +func (p *CheckAndMutateResponse) IsSetCheckValue() bool { + return p.CheckValue != nil +} + +func (p *CheckAndMutateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != 
nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.CheckValueReturned = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.CheckValueExist = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField4(iprot thrift.TProtocol) error { + p.CheckValue = &base.Blob{} + if err := p.CheckValue.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
error reading struct: ", p.CheckValue), err) + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *CheckAndMutateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + 
} + return nil +} + +func (p *CheckAndMutateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value_returned", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_value_returned: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckValueReturned)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_value_returned (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_value_returned: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value_exist", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_value_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckValueExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_value_exist (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_value_exist: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value", thrift.STRUCT, 4); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_value: ", p), err) + } + if err := p.CheckValue.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckValue), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_value: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:partition_index: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:decree: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:server: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckAndMutateResponse(%+v)", *p) +} + +// Attributes: +// - StartKey +// - StopKey +// - StartInclusive +// - StopInclusive +// - BatchSize +// - NoValue +// - HashKeyFilterType +// - HashKeyFilterPattern +// - SortKeyFilterType +// - SortKeyFilterPattern +// - ValidatePartitionHash +// - ReturnExpireTs +// - FullScan +// - OnlyReturnCount +type GetScannerRequest struct { + StartKey *base.Blob `thrift:"start_key,1" db:"start_key" json:"start_key"` + StopKey *base.Blob `thrift:"stop_key,2" db:"stop_key" json:"stop_key"` + StartInclusive bool `thrift:"start_inclusive,3" db:"start_inclusive" json:"start_inclusive"` + StopInclusive bool `thrift:"stop_inclusive,4" db:"stop_inclusive" json:"stop_inclusive"` + BatchSize int32 `thrift:"batch_size,5" db:"batch_size" json:"batch_size"` + NoValue bool `thrift:"no_value,6" db:"no_value" json:"no_value"` + HashKeyFilterType FilterType `thrift:"hash_key_filter_type,7" db:"hash_key_filter_type" json:"hash_key_filter_type"` + HashKeyFilterPattern *base.Blob `thrift:"hash_key_filter_pattern,8" db:"hash_key_filter_pattern" json:"hash_key_filter_pattern"` + SortKeyFilterType FilterType `thrift:"sort_key_filter_type,9" db:"sort_key_filter_type" 
json:"sort_key_filter_type"` + SortKeyFilterPattern *base.Blob `thrift:"sort_key_filter_pattern,10" db:"sort_key_filter_pattern" json:"sort_key_filter_pattern"` + ValidatePartitionHash *bool `thrift:"validate_partition_hash,11" db:"validate_partition_hash" json:"validate_partition_hash,omitempty"` + ReturnExpireTs *bool `thrift:"return_expire_ts,12" db:"return_expire_ts" json:"return_expire_ts,omitempty"` + FullScan *bool `thrift:"full_scan,13" db:"full_scan" json:"full_scan,omitempty"` + OnlyReturnCount bool `thrift:"only_return_count,14" db:"only_return_count" json:"only_return_count"` +} + +func NewGetScannerRequest() *GetScannerRequest { + return &GetScannerRequest{} +} + +var GetScannerRequest_StartKey_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetStartKey() *base.Blob { + if !p.IsSetStartKey() { + return GetScannerRequest_StartKey_DEFAULT + } + return p.StartKey +} + +var GetScannerRequest_StopKey_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetStopKey() *base.Blob { + if !p.IsSetStopKey() { + return GetScannerRequest_StopKey_DEFAULT + } + return p.StopKey +} + +func (p *GetScannerRequest) GetStartInclusive() bool { + return p.StartInclusive +} + +func (p *GetScannerRequest) GetStopInclusive() bool { + return p.StopInclusive +} + +func (p *GetScannerRequest) GetBatchSize() int32 { + return p.BatchSize +} + +func (p *GetScannerRequest) GetNoValue() bool { + return p.NoValue +} + +func (p *GetScannerRequest) GetHashKeyFilterType() FilterType { + return p.HashKeyFilterType +} + +var GetScannerRequest_HashKeyFilterPattern_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetHashKeyFilterPattern() *base.Blob { + if !p.IsSetHashKeyFilterPattern() { + return GetScannerRequest_HashKeyFilterPattern_DEFAULT + } + return p.HashKeyFilterPattern +} + +func (p *GetScannerRequest) GetSortKeyFilterType() FilterType { + return p.SortKeyFilterType +} + +var GetScannerRequest_SortKeyFilterPattern_DEFAULT *base.Blob + +func (p *GetScannerRequest) 
GetSortKeyFilterPattern() *base.Blob { + if !p.IsSetSortKeyFilterPattern() { + return GetScannerRequest_SortKeyFilterPattern_DEFAULT + } + return p.SortKeyFilterPattern +} + +var GetScannerRequest_ValidatePartitionHash_DEFAULT bool + +func (p *GetScannerRequest) GetValidatePartitionHash() bool { + if !p.IsSetValidatePartitionHash() { + return GetScannerRequest_ValidatePartitionHash_DEFAULT + } + return *p.ValidatePartitionHash +} + +var GetScannerRequest_ReturnExpireTs_DEFAULT bool + +func (p *GetScannerRequest) GetReturnExpireTs() bool { + if !p.IsSetReturnExpireTs() { + return GetScannerRequest_ReturnExpireTs_DEFAULT + } + return *p.ReturnExpireTs +} + +var GetScannerRequest_FullScan_DEFAULT bool + +func (p *GetScannerRequest) GetFullScan() bool { + if !p.IsSetFullScan() { + return GetScannerRequest_FullScan_DEFAULT + } + return *p.FullScan +} + +var GetScannerRequest_OnlyReturnCount_DEFAULT bool = false + +func (p *GetScannerRequest) GetOnlyReturnCount() bool { + return p.OnlyReturnCount +} +func (p *GetScannerRequest) IsSetStartKey() bool { + return p.StartKey != nil +} + +func (p *GetScannerRequest) IsSetStopKey() bool { + return p.StopKey != nil +} + +func (p *GetScannerRequest) IsSetHashKeyFilterPattern() bool { + return p.HashKeyFilterPattern != nil +} + +func (p *GetScannerRequest) IsSetSortKeyFilterPattern() bool { + return p.SortKeyFilterPattern != nil +} + +func (p *GetScannerRequest) IsSetValidatePartitionHash() bool { + return p.ValidatePartitionHash != nil +} + +func (p *GetScannerRequest) IsSetReturnExpireTs() bool { + return p.ReturnExpireTs != nil +} + +func (p *GetScannerRequest) IsSetFullScan() bool { + return p.FullScan != nil +} + +func (p *GetScannerRequest) IsSetOnlyReturnCount() bool { + return p.OnlyReturnCount != GetScannerRequest_OnlyReturnCount_DEFAULT +} + +func (p *GetScannerRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), 
err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT 
{ + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField12(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 13: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField13(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 14: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField14(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField1(iprot thrift.TProtocol) error { + p.StartKey = &base.Blob{} + if err := p.StartKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StartKey), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField2(iprot thrift.TProtocol) error { + p.StopKey = &base.Blob{} + if err := p.StopKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StopKey), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.StartInclusive = v + } + return nil +} + +func (p *GetScannerRequest) 
ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.StopInclusive = v + } + return nil +} + +func (p *GetScannerRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BatchSize = v + } + return nil +} + +func (p *GetScannerRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.NoValue = v + } + return nil +} + +func (p *GetScannerRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + temp := FilterType(v) + p.HashKeyFilterType = temp + } + return nil +} + +func (p *GetScannerRequest) ReadField8(iprot thrift.TProtocol) error { + p.HashKeyFilterPattern = &base.Blob{} + if err := p.HashKeyFilterPattern.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKeyFilterPattern), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + temp := FilterType(v) + p.SortKeyFilterType = temp + } + return nil +} + +func (p *GetScannerRequest) ReadField10(iprot thrift.TProtocol) error { + p.SortKeyFilterPattern = &base.Blob{} + if err := p.SortKeyFilterPattern.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKeyFilterPattern), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField11(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.ValidatePartitionHash 
= &v + } + return nil +} + +func (p *GetScannerRequest) ReadField12(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.ReturnExpireTs = &v + } + return nil +} + +func (p *GetScannerRequest) ReadField13(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 13: ", err) + } else { + p.FullScan = &v + } + return nil +} + +func (p *GetScannerRequest) ReadField14(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 14: ", err) + } else { + p.OnlyReturnCount = v + } + return nil +} + +func (p *GetScannerRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_scanner_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := p.writeField12(oprot); err != nil { + return err + } + if err := p.writeField13(oprot); err != nil { + return err + } + if err := p.writeField14(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + 
if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *GetScannerRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:start_key: ", p), err) + } + if err := p.StartKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StartKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:start_key: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stop_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stop_key: ", p), err) + } + if err := p.StopKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StopKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stop_key: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_inclusive", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:start_inclusive: ", p), err) + } + if err := oprot.WriteBool(bool(p.StartInclusive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_inclusive (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:start_inclusive: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stop_inclusive", thrift.BOOL, 4); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stop_inclusive: ", p), err) + } + if err := oprot.WriteBool(bool(p.StopInclusive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.stop_inclusive (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stop_inclusive: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("batch_size", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:batch_size: ", p), err) + } + if err := oprot.WriteI32(int32(p.BatchSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.batch_size (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:batch_size: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("no_value", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:no_value: ", p), err) + } + if err := oprot.WriteBool(bool(p.NoValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.no_value (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:no_value: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key_filter_type", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hash_key_filter_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.HashKeyFilterType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hash_key_filter_type (7) 
field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hash_key_filter_type: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key_filter_pattern", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hash_key_filter_pattern: ", p), err) + } + if err := p.HashKeyFilterPattern.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKeyFilterPattern), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hash_key_filter_pattern: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key_filter_type", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:sort_key_filter_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.SortKeyFilterType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.sort_key_filter_type (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:sort_key_filter_type: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField10(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key_filter_pattern", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:sort_key_filter_pattern: ", p), err) + } + if err := p.SortKeyFilterPattern.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKeyFilterPattern), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 10:sort_key_filter_pattern: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetValidatePartitionHash() { + if err := oprot.WriteFieldBegin("validate_partition_hash", thrift.BOOL, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:validate_partition_hash: ", p), err) + } + if err := oprot.WriteBool(bool(*p.ValidatePartitionHash)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.validate_partition_hash (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:validate_partition_hash: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetReturnExpireTs() { + if err := oprot.WriteFieldBegin("return_expire_ts", thrift.BOOL, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:return_expire_ts: ", p), err) + } + if err := oprot.WriteBool(bool(*p.ReturnExpireTs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.return_expire_ts (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:return_expire_ts: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetFullScan() { + if err := oprot.WriteFieldBegin("full_scan", thrift.BOOL, 13); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:full_scan: ", p), err) + } + if err := oprot.WriteBool(bool(*p.FullScan)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.full_scan (13) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end 
error 13:full_scan: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetOnlyReturnCount() { + if err := oprot.WriteFieldBegin("only_return_count", thrift.BOOL, 14); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:only_return_count: ", p), err) + } + if err := oprot.WriteBool(bool(p.OnlyReturnCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.only_return_count (14) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 14:only_return_count: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GetScannerRequest(%+v)", *p) +} + +// Attributes: +// - ContextID +type ScanRequest struct { + ContextID int64 `thrift:"context_id,1" db:"context_id" json:"context_id"` +} + +func NewScanRequest() *ScanRequest { + return &ScanRequest{} +} + +func (p *ScanRequest) GetContextID() int64 { + return p.ContextID +} +func (p *ScanRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end 
error: ", p), err) + } + return nil +} + +func (p *ScanRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ContextID = v + } + return nil +} + +func (p *ScanRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ScanRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("context_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:context_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ContextID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.context_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:context_id: ", p), err) + } + return err +} + +func (p *ScanRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ScanRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - Kvs +// - ContextID +// - AppID +// - PartitionIndex +// - Server +// - KvCount +type ScanResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` + ContextID int64 `thrift:"context_id,3" db:"context_id" json:"context_id"` + AppID int32 `thrift:"app_id,4" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,5" db:"partition_index" json:"partition_index"` + Server 
string `thrift:"server,6" db:"server" json:"server"` + KvCount *int32 `thrift:"kv_count,7" db:"kv_count" json:"kv_count,omitempty"` +} + +func NewScanResponse() *ScanResponse { + return &ScanResponse{} +} + +func (p *ScanResponse) GetError() int32 { + return p.Error +} + +func (p *ScanResponse) GetKvs() []*KeyValue { + return p.Kvs +} + +func (p *ScanResponse) GetContextID() int64 { + return p.ContextID +} + +func (p *ScanResponse) GetAppID() int32 { + return p.AppID +} + +func (p *ScanResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *ScanResponse) GetServer() string { + return p.Server +} + +var ScanResponse_KvCount_DEFAULT int32 + +func (p *ScanResponse) GetKvCount() int32 { + if !p.IsSetKvCount() { + return ScanResponse_KvCount_DEFAULT + } + return *p.KvCount +} +func (p *ScanResponse) IsSetKvCount() bool { + return p.KvCount != nil +} + +func (p *ScanResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if 
err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ScanResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *ScanResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*KeyValue, 0, size) + p.Kvs = tSlice + for i := 0; i < size; i++ { + _elem7 := &KeyValue{} + if err := _elem7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) + } + p.Kvs = append(p.Kvs, _elem7) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ScanResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ContextID = v + } + return nil +} + +func (p 
*ScanResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ScanResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *ScanResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *ScanResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.KvCount = &v + } + return nil +} + +func (p *ScanResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ScanResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Kvs { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("context_id", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:context_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ContextID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.context_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:context_id: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_id: ", p), 
err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_id: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partition_index: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetKvCount() { + if err := oprot.WriteFieldBegin("kv_count", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:kv_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.KvCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.kv_count (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end 
error 7:kv_count: ", p), err) + } + } + return err +} + +func (p *ScanResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ScanResponse(%+v)", *p) +} + +type Rrdb interface { + // Parameters: + // - Update + Put(ctx context.Context, update *UpdateRequest) (r *UpdateResponse, err error) + // Parameters: + // - Request + MultiPut(ctx context.Context, request *MultiPutRequest) (r *UpdateResponse, err error) + // Parameters: + // - Key + Remove(ctx context.Context, key *base.Blob) (r *UpdateResponse, err error) + // Parameters: + // - Request + MultiRemove(ctx context.Context, request *MultiRemoveRequest) (r *MultiRemoveResponse, err error) + // Parameters: + // - Request + Incr(ctx context.Context, request *IncrRequest) (r *IncrResponse, err error) + // Parameters: + // - Request + CheckAndSet(ctx context.Context, request *CheckAndSetRequest) (r *CheckAndSetResponse, err error) + // Parameters: + // - Request + CheckAndMutate(ctx context.Context, request *CheckAndMutateRequest) (r *CheckAndMutateResponse, err error) + // Parameters: + // - Key + Get(ctx context.Context, key *base.Blob) (r *ReadResponse, err error) + // Parameters: + // - Request + MultiGet(ctx context.Context, request *MultiGetRequest) (r *MultiGetResponse, err error) + // Parameters: + // - Request + BatchGet(ctx context.Context, request *BatchGetRequest) (r *BatchGetResponse, err error) + // Parameters: + // - HashKey + SortkeyCount(ctx context.Context, hash_key *base.Blob) (r *CountResponse, err error) + // Parameters: + // - Key + TTL(ctx context.Context, key *base.Blob) (r *TTLResponse, err error) + // Parameters: + // - Request + GetScanner(ctx context.Context, request *GetScannerRequest) (r *ScanResponse, err error) + // Parameters: + // - Request + Scan(ctx context.Context, request *ScanRequest) (r *ScanResponse, err error) + // Parameters: + // - ContextID + ClearScanner(ctx context.Context, context_id int64) (err error) +} + +type RrdbClient struct { + c 
thrift.TClient +} + +func NewRrdbClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *RrdbClient { + return &RrdbClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewRrdbClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *RrdbClient { + return &RrdbClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewRrdbClient(c thrift.TClient) *RrdbClient { + return &RrdbClient{ + c: c, + } +} + +func (p *RrdbClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Update +func (p *RrdbClient) Put(ctx context.Context, update *UpdateRequest) (r *UpdateResponse, err error) { + var _args8 RrdbPutArgs + _args8.Update = update + var _result9 RrdbPutResult + if err = p.Client_().Call(ctx, "put", &_args8, &_result9); err != nil { + return + } + return _result9.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) MultiPut(ctx context.Context, request *MultiPutRequest) (r *UpdateResponse, err error) { + var _args10 RrdbMultiPutArgs + _args10.Request = request + var _result11 RrdbMultiPutResult + if err = p.Client_().Call(ctx, "multi_put", &_args10, &_result11); err != nil { + return + } + return _result11.GetSuccess(), nil +} + +// Parameters: +// - Key +func (p *RrdbClient) Remove(ctx context.Context, key *base.Blob) (r *UpdateResponse, err error) { + var _args12 RrdbRemoveArgs + _args12.Key = key + var _result13 RrdbRemoveResult + if err = p.Client_().Call(ctx, "remove", &_args12, &_result13); err != nil { + return + } + return _result13.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) MultiRemove(ctx context.Context, request *MultiRemoveRequest) (r *MultiRemoveResponse, err error) { + var _args14 RrdbMultiRemoveArgs + _args14.Request = request + var _result15 RrdbMultiRemoveResult + if err = p.Client_().Call(ctx, "multi_remove", &_args14, &_result15); err != nil { + return + } + return _result15.GetSuccess(), nil +} + 
// NOTE(review): this code is evidently machine-generated by the Apache Thrift
// compiler (rrdb service bindings — see the Rrdb*Args/Rrdb*Result naming and
// thrift.TProtocol plumbing). Prefer regenerating from the .thrift IDL over
// editing these functions by hand.

// Client stubs: each method packs its argument into the generated *Args
// struct, performs a synchronous RPC via Client_().Call, and unwraps the
// *Result struct's success field.

// Parameters:
// - Request
func (p *RrdbClient) Incr(ctx context.Context, request *IncrRequest) (r *IncrResponse, err error) {
	var _args16 RrdbIncrArgs
	_args16.Request = request
	var _result17 RrdbIncrResult
	if err = p.Client_().Call(ctx, "incr", &_args16, &_result17); err != nil {
		return
	}
	return _result17.GetSuccess(), nil
}

// Parameters:
// - Request
func (p *RrdbClient) CheckAndSet(ctx context.Context, request *CheckAndSetRequest) (r *CheckAndSetResponse, err error) {
	var _args18 RrdbCheckAndSetArgs
	_args18.Request = request
	var _result19 RrdbCheckAndSetResult
	if err = p.Client_().Call(ctx, "check_and_set", &_args18, &_result19); err != nil {
		return
	}
	return _result19.GetSuccess(), nil
}

// Parameters:
// - Request
func (p *RrdbClient) CheckAndMutate(ctx context.Context, request *CheckAndMutateRequest) (r *CheckAndMutateResponse, err error) {
	var _args20 RrdbCheckAndMutateArgs
	_args20.Request = request
	var _result21 RrdbCheckAndMutateResult
	if err = p.Client_().Call(ctx, "check_and_mutate", &_args20, &_result21); err != nil {
		return
	}
	return _result21.GetSuccess(), nil
}

// Parameters:
// - Key
func (p *RrdbClient) Get(ctx context.Context, key *base.Blob) (r *ReadResponse, err error) {
	var _args22 RrdbGetArgs
	_args22.Key = key
	var _result23 RrdbGetResult
	if err = p.Client_().Call(ctx, "get", &_args22, &_result23); err != nil {
		return
	}
	return _result23.GetSuccess(), nil
}

// Parameters:
// - Request
func (p *RrdbClient) MultiGet(ctx context.Context, request *MultiGetRequest) (r *MultiGetResponse, err error) {
	var _args24 RrdbMultiGetArgs
	_args24.Request = request
	var _result25 RrdbMultiGetResult
	if err = p.Client_().Call(ctx, "multi_get", &_args24, &_result25); err != nil {
		return
	}
	return _result25.GetSuccess(), nil
}

// Parameters:
// - Request
func (p *RrdbClient) BatchGet(ctx context.Context, request *BatchGetRequest) (r *BatchGetResponse, err error) {

	var _args26 RrdbBatchGetArgs
	_args26.Request = request
	var _result27 RrdbBatchGetResult
	if err = p.Client_().Call(ctx, "batch_get", &_args26, &_result27); err != nil {
		return
	}
	return _result27.GetSuccess(), nil
}

// Parameters:
// - HashKey
func (p *RrdbClient) SortkeyCount(ctx context.Context, hash_key *base.Blob) (r *CountResponse, err error) {
	var _args28 RrdbSortkeyCountArgs
	_args28.HashKey = hash_key
	var _result29 RrdbSortkeyCountResult
	if err = p.Client_().Call(ctx, "sortkey_count", &_args28, &_result29); err != nil {
		return
	}
	return _result29.GetSuccess(), nil
}

// Parameters:
// - Key
func (p *RrdbClient) TTL(ctx context.Context, key *base.Blob) (r *TTLResponse, err error) {
	var _args30 RrdbTTLArgs
	_args30.Key = key
	var _result31 RrdbTTLResult
	if err = p.Client_().Call(ctx, "ttl", &_args30, &_result31); err != nil {
		return
	}
	return _result31.GetSuccess(), nil
}

// Parameters:
// - Request
func (p *RrdbClient) GetScanner(ctx context.Context, request *GetScannerRequest) (r *ScanResponse, err error) {
	var _args32 RrdbGetScannerArgs
	_args32.Request = request
	var _result33 RrdbGetScannerResult
	if err = p.Client_().Call(ctx, "get_scanner", &_args32, &_result33); err != nil {
		return
	}
	return _result33.GetSuccess(), nil
}

// Parameters:
// - Request
func (p *RrdbClient) Scan(ctx context.Context, request *ScanRequest) (r *ScanResponse, err error) {
	var _args34 RrdbScanArgs
	_args34.Request = request
	var _result35 RrdbScanResult
	if err = p.Client_().Call(ctx, "scan", &_args34, &_result35); err != nil {
		return
	}
	return _result35.GetSuccess(), nil
}

// Parameters:
// - ContextID
// One-way-style call: the result struct is nil, so no success payload is
// unwrapped — only the transport error is propagated.
func (p *RrdbClient) ClearScanner(ctx context.Context, context_id int64) (err error) {
	var _args36 RrdbClearScannerArgs
	_args36.ContextID = context_id
	if err := p.Client_().Call(ctx, "clear_scanner", &_args36, nil); err != nil {
		return err
	}
	return nil
}

// RrdbProcessor dispatches incoming thrift messages to the per-method
// processor registered under the wire-level method name.
type RrdbProcessor struct {
	processorMap map[string]thrift.TProcessorFunction
	handler      Rrdb
}

func (p *RrdbProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
	p.processorMap[key] = processor
}

func (p *RrdbProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
	processor, ok = p.processorMap[key]
	return processor, ok
}

func (p *RrdbProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
	return p.processorMap
}

// NewRrdbProcessor registers one processor per service method, keyed by the
// exact method name sent on the wire.
func NewRrdbProcessor(handler Rrdb) *RrdbProcessor {

	self37 := &RrdbProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
	self37.processorMap["put"] = &rrdbProcessorPut{handler: handler}
	self37.processorMap["multi_put"] = &rrdbProcessorMultiPut{handler: handler}
	self37.processorMap["remove"] = &rrdbProcessorRemove{handler: handler}
	self37.processorMap["multi_remove"] = &rrdbProcessorMultiRemove{handler: handler}
	self37.processorMap["incr"] = &rrdbProcessorIncr{handler: handler}
	self37.processorMap["check_and_set"] = &rrdbProcessorCheckAndSet{handler: handler}
	self37.processorMap["check_and_mutate"] = &rrdbProcessorCheckAndMutate{handler: handler}
	self37.processorMap["get"] = &rrdbProcessorGet{handler: handler}
	self37.processorMap["multi_get"] = &rrdbProcessorMultiGet{handler: handler}
	self37.processorMap["batch_get"] = &rrdbProcessorBatchGet{handler: handler}
	self37.processorMap["sortkey_count"] = &rrdbProcessorSortkeyCount{handler: handler}
	self37.processorMap["ttl"] = &rrdbProcessorTTL{handler: handler}
	self37.processorMap["get_scanner"] = &rrdbProcessorGetScanner{handler: handler}
	self37.processorMap["scan"] = &rrdbProcessorScan{handler: handler}
	return self37
}

// Process reads one message, routes it to the registered processor, and
// replies with UNKNOWN_METHOD (after skipping the payload) for names that
// have no registered handler.
func (p *RrdbProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	name, _, seqId, err := iprot.ReadMessageBegin()
	if err != nil {
		return false, err
	}
	if processor, ok := p.GetProcessorFunction(name); ok {
		return processor.Process(ctx, seqId, iprot, oprot)
	}
	iprot.Skip(thrift.STRUCT)
	iprot.ReadMessageEnd()
	x38 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
	oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
	x38.Write(oprot)
	oprot.WriteMessageEnd()
	oprot.Flush(ctx)
	return false, x38

}

// Per-method processors below all follow the same generated template:
// read args (PROTOCOL_ERROR exception reply on failure), invoke the handler
// (INTERNAL_ERROR exception reply on failure), then write the REPLY message.
// The err/err2 dance records only the FIRST write error while still
// attempting the remaining protocol calls — do not reorder it.
type rrdbProcessorPut struct {
	handler Rrdb
}

func (p *rrdbProcessorPut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbPutArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("put", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbPutResult{}
	var retval *UpdateResponse
	var err2 error
	if retval, err2 = p.handler.Put(ctx, args.Update); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing put: "+err2.Error())
		oprot.WriteMessageBegin("put", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("put", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorMultiPut struct {
	handler Rrdb
}

func (p *rrdbProcessorMultiPut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbMultiPutArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("multi_put", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbMultiPutResult{}
	var retval *UpdateResponse
	var err2 error
	if retval, err2 = p.handler.MultiPut(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing multi_put: "+err2.Error())
		oprot.WriteMessageBegin("multi_put", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("multi_put", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorRemove struct {
	handler Rrdb
}

func (p *rrdbProcessorRemove) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbRemoveArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("remove", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbRemoveResult{}
	var retval *UpdateResponse
	var err2 error
	if retval, err2 = p.handler.Remove(ctx, args.Key); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing remove: "+err2.Error())
		oprot.WriteMessageBegin("remove", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("remove", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorMultiRemove struct {
	handler Rrdb
}

func (p *rrdbProcessorMultiRemove) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbMultiRemoveArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("multi_remove", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbMultiRemoveResult{}
	var retval *MultiRemoveResponse
	var err2 error
	if retval, err2 = p.handler.MultiRemove(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing multi_remove: "+err2.Error())
		oprot.WriteMessageBegin("multi_remove", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("multi_remove", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorIncr struct {
	handler Rrdb
}

func (p *rrdbProcessorIncr) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbIncrArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("incr", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbIncrResult{}
	var retval *IncrResponse
	var err2 error
	if retval, err2 = p.handler.Incr(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing incr: "+err2.Error())
		oprot.WriteMessageBegin("incr", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("incr", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorCheckAndSet struct {
	handler Rrdb
}

func (p *rrdbProcessorCheckAndSet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbCheckAndSetArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("check_and_set", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbCheckAndSetResult{}
	var retval *CheckAndSetResponse
	var err2 error
	if retval, err2 = p.handler.CheckAndSet(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_and_set: "+err2.Error())
		oprot.WriteMessageBegin("check_and_set", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("check_and_set", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorCheckAndMutate struct {
	handler Rrdb
}

func (p *rrdbProcessorCheckAndMutate) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbCheckAndMutateArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("check_and_mutate", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbCheckAndMutateResult{}
	var retval *CheckAndMutateResponse
	var err2 error
	if retval, err2 = p.handler.CheckAndMutate(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_and_mutate: "+err2.Error())
		oprot.WriteMessageBegin("check_and_mutate", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("check_and_mutate", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorGet struct {
	handler Rrdb
}

func (p *rrdbProcessorGet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbGetArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("get", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbGetResult{}
	var retval *ReadResponse
	var err2 error
	if retval, err2 = p.handler.Get(ctx, args.Key); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get: "+err2.Error())
		oprot.WriteMessageBegin("get", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("get", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorMultiGet struct {
	handler Rrdb
}

func (p *rrdbProcessorMultiGet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbMultiGetArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("multi_get", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbMultiGetResult{}
	var retval *MultiGetResponse
	var err2 error
	if retval, err2 = p.handler.MultiGet(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing multi_get: "+err2.Error())
		oprot.WriteMessageBegin("multi_get", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("multi_get", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorBatchGet struct {
	handler Rrdb
}

func (p *rrdbProcessorBatchGet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbBatchGetArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("batch_get", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbBatchGetResult{}
	var retval *BatchGetResponse
	var err2 error
	if retval, err2 = p.handler.BatchGet(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing batch_get: "+err2.Error())
		oprot.WriteMessageBegin("batch_get", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("batch_get", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorSortkeyCount struct {
	handler Rrdb
}

func (p *rrdbProcessorSortkeyCount) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbSortkeyCountArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("sortkey_count", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbSortkeyCountResult{}
	var retval *CountResponse
	var err2 error
	if retval, err2 = p.handler.SortkeyCount(ctx, args.HashKey); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing sortkey_count: "+err2.Error())
		oprot.WriteMessageBegin("sortkey_count", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("sortkey_count", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorTTL struct {
	handler Rrdb
}

func (p *rrdbProcessorTTL) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbTTLArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("ttl", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbTTLResult{}
	var retval *TTLResponse
	var err2 error
	if retval, err2 = p.handler.TTL(ctx, args.Key); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ttl: "+err2.Error())
		oprot.WriteMessageBegin("ttl", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("ttl", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorGetScanner struct {
	handler Rrdb
}

func (p *rrdbProcessorGetScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbGetScannerArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("get_scanner", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbGetScannerResult{}
	var retval *ScanResponse
	var err2 error
	if retval, err2 = p.handler.GetScanner(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_scanner: "+err2.Error())
		oprot.WriteMessageBegin("get_scanner", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("get_scanner", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

type rrdbProcessorScan struct {
	handler Rrdb
}

func (p *rrdbProcessorScan) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbScanArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
		oprot.WriteMessageBegin("scan", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return false, err
	}

	iprot.ReadMessageEnd()
	result := RrdbScanResult{}
	var retval *ScanResponse
	var err2 error
	if retval, err2 = p.handler.Scan(ctx, args.Request); err2 != nil {
		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing scan: "+err2.Error())
		oprot.WriteMessageBegin("scan", thrift.EXCEPTION, seqId)
		x.Write(oprot)
		oprot.WriteMessageEnd()
		oprot.Flush(ctx)
		return true, err2
	} else {
		result.Success = retval
	}
	if err2 = oprot.WriteMessageBegin("scan", thrift.REPLY, seqId); err2 != nil {
		err = err2
	}
	if err2 = result.Write(oprot); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
		err = err2
	}
	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
		err = err2
	}
	if err != nil {
		return
	}
	return true, err
}

// clear_scanner has no result struct on the wire, so this processor sends no
// reply message: it reads the args, invokes the handler, and reports status
// only through the returned (success, err) pair.
type rrdbProcessorClearScanner struct {
	handler Rrdb
}

func (p *rrdbProcessorClearScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
	args := RrdbClearScannerArgs{}
	if err = args.Read(iprot); err != nil {
		iprot.ReadMessageEnd()
		return false, err
	}

	iprot.ReadMessageEnd()
	var err2 error
	if err2 = p.handler.ClearScanner(ctx, args.ContextID); err2 != nil {
		return true, err2
	}
	return true, nil
}

// HELPER FUNCTIONS AND STRUCTURES

// Generated argument/result wrappers: each *Args struct serializes a method's
// parameters as thrift struct fields, each *Result wraps the optional
// success payload (field id 0). Read/Write follow the standard generated
// field-loop template.

// Attributes:
// - Update
type RrdbPutArgs struct {
	Update *UpdateRequest `thrift:"update,1" db:"update" json:"update"`
}

func NewRrdbPutArgs() *RrdbPutArgs {
	return &RrdbPutArgs{}
}

var RrdbPutArgs_Update_DEFAULT *UpdateRequest

func (p *RrdbPutArgs) GetUpdate() *UpdateRequest {
	if !p.IsSetUpdate() {
		return RrdbPutArgs_Update_DEFAULT
	}
	return p.Update
}
func (p *RrdbPutArgs) IsSetUpdate() bool {
	return p.Update != nil
}

func (p *RrdbPutArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				// Type mismatch for a known field id: skip the value rather
				// than fail, per thrift forward-compatibility convention.
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

func (p *RrdbPutArgs) ReadField1(iprot thrift.TProtocol) error {
	p.Update = &UpdateRequest{}
	if err := p.Update.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Update), err)
	}
	return nil
}

func (p *RrdbPutArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("put_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *RrdbPutArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("update", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:update: ", p), err)
	}
	if err := p.Update.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Update), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:update: ", p), err)
	}
	return err
}

func (p *RrdbPutArgs) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("RrdbPutArgs(%+v)", *p)
}

// Attributes:
// - Success
type RrdbPutResult struct {
	Success *UpdateResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
}

func NewRrdbPutResult() *RrdbPutResult {
	return &RrdbPutResult{}
}

var RrdbPutResult_Success_DEFAULT *UpdateResponse

func (p *RrdbPutResult) GetSuccess() *UpdateResponse {
	if !p.IsSetSuccess() {
		return RrdbPutResult_Success_DEFAULT
	}
	return p.Success
}
func (p *RrdbPutResult) IsSetSuccess() bool {
	return p.Success != nil
}

func (p *RrdbPutResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField0(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

func (p *RrdbPutResult) ReadField0(iprot thrift.TProtocol) error {
	p.Success = &UpdateResponse{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

func (p *RrdbPutResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("put_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField0(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *RrdbPutResult) writeField0(oprot thrift.TProtocol) (err error) {
	// Success is optional (set only when no exception reply was sent).
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
		}
	}
	return err
}

func (p *RrdbPutResult) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("RrdbPutResult(%+v)", *p)
}

// Attributes:
// - Request
type RrdbMultiPutArgs struct {
	Request *MultiPutRequest `thrift:"request,1" db:"request" json:"request"`
}

func NewRrdbMultiPutArgs() *RrdbMultiPutArgs {
	return &RrdbMultiPutArgs{}
}

var RrdbMultiPutArgs_Request_DEFAULT *MultiPutRequest

func (p *RrdbMultiPutArgs) GetRequest() *MultiPutRequest {
	if !p.IsSetRequest() {
		return RrdbMultiPutArgs_Request_DEFAULT
	}
	return p.Request
}
func (p *RrdbMultiPutArgs) IsSetRequest() bool {
	return p.Request != nil
}

func (p *RrdbMultiPutArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

func (p *RrdbMultiPutArgs) ReadField1(iprot thrift.TProtocol) error {
	p.Request = &MultiPutRequest{}
	if err := p.Request.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err)
	}
	return nil
}

func (p *RrdbMultiPutArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("multi_put_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *RrdbMultiPutArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err)
	}
	if err := p.Request.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err)
	}
	return err
}

func (p *RrdbMultiPutArgs) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("RrdbMultiPutArgs(%+v)", *p)
}

// Attributes:
// - Success
type RrdbMultiPutResult struct {
	Success *UpdateResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
}

func NewRrdbMultiPutResult() *RrdbMultiPutResult {
	return &RrdbMultiPutResult{}
}

var RrdbMultiPutResult_Success_DEFAULT *UpdateResponse

func (p *RrdbMultiPutResult) GetSuccess() *UpdateResponse {
	if !p.IsSetSuccess() {
		return RrdbMultiPutResult_Success_DEFAULT
	}
	return p.Success
}
func (p *RrdbMultiPutResult) IsSetSuccess() bool {
	return p.Success != nil
}

func (p *RrdbMultiPutResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField0(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

func (p *RrdbMultiPutResult) ReadField0(iprot thrift.TProtocol) error {
	p.Success = &UpdateResponse{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

func (p *RrdbMultiPutResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("multi_put_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField0(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

func (p *RrdbMultiPutResult) writeField0(oprot thrift.TProtocol) (err error) {
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil
{ + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbMultiPutResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiPutResult(%+v)", *p) +} + +// Attributes: +// - Key +type RrdbRemoveArgs struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` +} + +func NewRrdbRemoveArgs() *RrdbRemoveArgs { + return &RrdbRemoveArgs{} +} + +var RrdbRemoveArgs_Key_DEFAULT *base.Blob + +func (p *RrdbRemoveArgs) GetKey() *base.Blob { + if !p.IsSetKey() { + return RrdbRemoveArgs_Key_DEFAULT + } + return p.Key +} +func (p *RrdbRemoveArgs) IsSetKey() bool { + return p.Key != nil +} + +func (p *RrdbRemoveArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbRemoveArgs) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *RrdbRemoveArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("remove_args"); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbRemoveArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *RrdbRemoveArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbRemoveArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbRemoveResult struct { + Success *UpdateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbRemoveResult() *RrdbRemoveResult { + return &RrdbRemoveResult{} +} + +var RrdbRemoveResult_Success_DEFAULT *UpdateResponse + +func (p *RrdbRemoveResult) GetSuccess() *UpdateResponse { + if !p.IsSetSuccess() { + return RrdbRemoveResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbRemoveResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbRemoveResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + 
break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbRemoveResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &UpdateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbRemoveResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("remove_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbRemoveResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbRemoveResult) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("RrdbRemoveResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbMultiRemoveArgs struct { + Request *MultiRemoveRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbMultiRemoveArgs() *RrdbMultiRemoveArgs { + return &RrdbMultiRemoveArgs{} +} + +var RrdbMultiRemoveArgs_Request_DEFAULT *MultiRemoveRequest + +func (p *RrdbMultiRemoveArgs) GetRequest() *MultiRemoveRequest { + if !p.IsSetRequest() { + return RrdbMultiRemoveArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbMultiRemoveArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbMultiRemoveArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiRemoveArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &MultiRemoveRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbMultiRemoveArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_remove_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: 
", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiRemoveArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbMultiRemoveArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiRemoveArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbMultiRemoveResult struct { + Success *MultiRemoveResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbMultiRemoveResult() *RrdbMultiRemoveResult { + return &RrdbMultiRemoveResult{} +} + +var RrdbMultiRemoveResult_Success_DEFAULT *MultiRemoveResponse + +func (p *RrdbMultiRemoveResult) GetSuccess() *MultiRemoveResponse { + if !p.IsSetSuccess() { + return RrdbMultiRemoveResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbMultiRemoveResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbMultiRemoveResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if 
fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiRemoveResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &MultiRemoveResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbMultiRemoveResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_remove_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiRemoveResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p 
*RrdbMultiRemoveResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiRemoveResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbIncrArgs struct { + Request *IncrRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbIncrArgs() *RrdbIncrArgs { + return &RrdbIncrArgs{} +} + +var RrdbIncrArgs_Request_DEFAULT *IncrRequest + +func (p *RrdbIncrArgs) GetRequest() *IncrRequest { + if !p.IsSetRequest() { + return RrdbIncrArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbIncrArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbIncrArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbIncrArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &IncrRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbIncrArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != 
nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbIncrArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbIncrArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbIncrArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbIncrResult struct { + Success *IncrResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbIncrResult() *RrdbIncrResult { + return &RrdbIncrResult{} +} + +var RrdbIncrResult_Success_DEFAULT *IncrResponse + +func (p *RrdbIncrResult) GetSuccess() *IncrResponse { + if !p.IsSetSuccess() { + return RrdbIncrResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbIncrResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbIncrResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := 
p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbIncrResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &IncrResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbIncrResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbIncrResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbIncrResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbIncrResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbCheckAndSetArgs 
struct { + Request *CheckAndSetRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbCheckAndSetArgs() *RrdbCheckAndSetArgs { + return &RrdbCheckAndSetArgs{} +} + +var RrdbCheckAndSetArgs_Request_DEFAULT *CheckAndSetRequest + +func (p *RrdbCheckAndSetArgs) GetRequest() *CheckAndSetRequest { + if !p.IsSetRequest() { + return RrdbCheckAndSetArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbCheckAndSetArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbCheckAndSetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndSetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &CheckAndSetRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbCheckAndSetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_set_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + 
if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndSetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbCheckAndSetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndSetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbCheckAndSetResult struct { + Success *CheckAndSetResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbCheckAndSetResult() *RrdbCheckAndSetResult { + return &RrdbCheckAndSetResult{} +} + +var RrdbCheckAndSetResult_Success_DEFAULT *CheckAndSetResponse + +func (p *RrdbCheckAndSetResult) GetSuccess() *CheckAndSetResponse { + if !p.IsSetSuccess() { + return RrdbCheckAndSetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbCheckAndSetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbCheckAndSetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT 
{ + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndSetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &CheckAndSetResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbCheckAndSetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_set_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndSetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbCheckAndSetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndSetResult(%+v)", 
*p) +} + +// Attributes: +// - Request +type RrdbCheckAndMutateArgs struct { + Request *CheckAndMutateRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbCheckAndMutateArgs() *RrdbCheckAndMutateArgs { + return &RrdbCheckAndMutateArgs{} +} + +var RrdbCheckAndMutateArgs_Request_DEFAULT *CheckAndMutateRequest + +func (p *RrdbCheckAndMutateArgs) GetRequest() *CheckAndMutateRequest { + if !p.IsSetRequest() { + return RrdbCheckAndMutateArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbCheckAndMutateArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbCheckAndMutateArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndMutateArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &CheckAndMutateRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbCheckAndMutateArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct 
begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndMutateArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbCheckAndMutateArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndMutateArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbCheckAndMutateResult struct { + Success *CheckAndMutateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbCheckAndMutateResult() *RrdbCheckAndMutateResult { + return &RrdbCheckAndMutateResult{} +} + +var RrdbCheckAndMutateResult_Success_DEFAULT *CheckAndMutateResponse + +func (p *RrdbCheckAndMutateResult) GetSuccess() *CheckAndMutateResponse { + if !p.IsSetSuccess() { + return RrdbCheckAndMutateResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbCheckAndMutateResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbCheckAndMutateResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndMutateResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &CheckAndMutateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbCheckAndMutateResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndMutateResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbCheckAndMutateResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndMutateResult(%+v)", *p) +} + +// Attributes: +// - Key +type RrdbGetArgs struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` +} + +func NewRrdbGetArgs() *RrdbGetArgs { + return &RrdbGetArgs{} +} + +var RrdbGetArgs_Key_DEFAULT *base.Blob + +func (p *RrdbGetArgs) GetKey() *base.Blob { + if !p.IsSetKey() { + return RrdbGetArgs_Key_DEFAULT + } + return p.Key +} +func (p *RrdbGetArgs) IsSetKey() bool { + return p.Key != nil +} + +func (p *RrdbGetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *RrdbGetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if 
p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *RrdbGetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbGetResult struct { + Success *ReadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbGetResult() *RrdbGetResult { + return &RrdbGetResult{} +} + +var RrdbGetResult_Success_DEFAULT *ReadResponse + +func (p *RrdbGetResult) GetSuccess() *ReadResponse { + if !p.IsSetSuccess() { + return RrdbGetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbGetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbGetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return 
err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ReadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbGetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbGetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbMultiGetArgs struct { + Request *MultiGetRequest 
`thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbMultiGetArgs() *RrdbMultiGetArgs { + return &RrdbMultiGetArgs{} +} + +var RrdbMultiGetArgs_Request_DEFAULT *MultiGetRequest + +func (p *RrdbMultiGetArgs) GetRequest() *MultiGetRequest { + if !p.IsSetRequest() { + return RrdbMultiGetArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbMultiGetArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbMultiGetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiGetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &MultiGetRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbMultiGetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_get_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write 
field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiGetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbMultiGetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiGetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbMultiGetResult struct { + Success *MultiGetResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbMultiGetResult() *RrdbMultiGetResult { + return &RrdbMultiGetResult{} +} + +var RrdbMultiGetResult_Success_DEFAULT *MultiGetResponse + +func (p *RrdbMultiGetResult) GetSuccess() *MultiGetResponse { + if !p.IsSetSuccess() { + return RrdbMultiGetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbMultiGetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbMultiGetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + 
return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiGetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &MultiGetResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbMultiGetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_get_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiGetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbMultiGetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiGetResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbBatchGetArgs struct { + Request *BatchGetRequest `thrift:"request,1" db:"request" json:"request"` +} 
// NewRrdbBatchGetArgs returns an empty args wrapper for the rrdb batch_get RPC
// (thrift struct name "batch_get_args", see Write below).
// NOTE(review): this file is thrift-compiler-generated code embedded in a patch;
// do not hand-edit logic — regenerate from the .thrift IDL instead.
func NewRrdbBatchGetArgs() *RrdbBatchGetArgs {
	return &RrdbBatchGetArgs{}
}

// Default returned by GetRequest when the field is unset (nil pointer).
var RrdbBatchGetArgs_Request_DEFAULT *BatchGetRequest

// GetRequest returns the request field, or the (nil) default when unset.
func (p *RrdbBatchGetArgs) GetRequest() *BatchGetRequest {
	if !p.IsSetRequest() {
		return RrdbBatchGetArgs_Request_DEFAULT
	}
	return p.Request
}

// IsSetRequest reports whether the request field has been populated.
func (p *RrdbBatchGetArgs) IsSetRequest() bool {
	return p.Request != nil
}

// Read deserializes the struct from iprot. Field id 1 with wire type STRUCT is
// decoded via ReadField1; field 1 with any other wire type, and every other
// field id, is skipped (forward-compatibility with unknown fields).
func (p *RrdbBatchGetArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				// Field 1 present but with an unexpected wire type: skip it.
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			// Unknown field id: skip.
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// ReadField1 allocates a fresh BatchGetRequest and delegates decoding to it.
func (p *RrdbBatchGetArgs) ReadField1(iprot thrift.TProtocol) error {
	p.Request = &BatchGetRequest{}
	if err := p.Request.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err)
	}
	return nil
}

// Write serializes the struct to oprot as "batch_get_args". A nil receiver
// writes only the field-stop marker and struct end (the p != nil guard below).
func (p *RrdbBatchGetArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("batch_get_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField1 writes field id 1 ("request") as STRUCT. It is unconditional —
// unlike result writeField0 methods it does not check IsSetRequest, so it
// assumes Request is non-nil when Write is called. TODO(review): confirm this
// matches the thrift generator's contract for required-style args fields.
func (p *RrdbBatchGetArgs) writeField1(oprot thrift.TProtocol) (err error) {
	if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err)
	}
	if err := p.Request.Write(oprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err)
	}
	if err := oprot.WriteFieldEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err)
	}
	return err
}

// String renders the struct for debugging.
// NOTE(review): stock thrift-generated Go returns "<nil>" for a nil receiver;
// this copy returns "" — possibly lost in transcription of the patch; verify
// against the generator output before relying on it.
func (p *RrdbBatchGetArgs) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("RrdbBatchGetArgs(%+v)", *p)
}

// RrdbBatchGetResult wraps the batch_get reply. Success is nil when unset
// (json omitempty; see IsSetSuccess).
// Attributes:
//  - Success
type RrdbBatchGetResult struct {
	Success *BatchGetResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
}

// NewRrdbBatchGetResult returns an empty result wrapper.
func NewRrdbBatchGetResult() *RrdbBatchGetResult {
	return &RrdbBatchGetResult{}
}

// Default returned by GetSuccess when the field is unset (nil pointer).
var RrdbBatchGetResult_Success_DEFAULT *BatchGetResponse

// GetSuccess returns the success field, or the (nil) default when unset.
func (p *RrdbBatchGetResult) GetSuccess() *BatchGetResponse {
	if !p.IsSetSuccess() {
		return RrdbBatchGetResult_Success_DEFAULT
	}
	return p.Success
}

// IsSetSuccess reports whether the success field has been populated.
func (p *RrdbBatchGetResult) IsSetSuccess() bool {
	return p.Success != nil
}

// Read deserializes the result from iprot. Field id 0 ("success") with wire
// type STRUCT is decoded via ReadField0; everything else is skipped.
// (Definition continues on the following lines of the file.)
func (p *RrdbBatchGetResult) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 0:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField0(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := 
iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := p.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// ReadField0 allocates a fresh BatchGetResponse and delegates decoding to it.
func (p *RrdbBatchGetResult) ReadField0(iprot thrift.TProtocol) error {
	p.Success = &BatchGetResponse{}
	if err := p.Success.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
	}
	return nil
}

// Write serializes the result to oprot as "batch_get_result". A nil receiver
// writes only the field-stop marker and struct end (the p != nil guard below).
func (p *RrdbBatchGetResult) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("batch_get_result"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField0(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := oprot.WriteStructEnd(); err != nil {
		return thrift.PrependError("write struct stop error: ", err)
	}
	return nil
}

// writeField0 writes field id 0 ("success") as STRUCT, but only when the
// field is set — unset optional result fields are omitted from the wire.
func (p *RrdbBatchGetResult) writeField0(oprot thrift.TProtocol) (err error) {
	if p.IsSetSuccess() {
		if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
		}
		if err := p.Success.Write(oprot); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
		}
		if err := oprot.WriteFieldEnd(); err != nil {
			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
		}
	}
	return err
}

// String renders the struct for debugging.
// NOTE(review): stock thrift-generated Go returns "<nil>" for a nil receiver;
// this copy returns "" — possibly lost in transcription of the patch; verify
// against the generator output before relying on it.
func (p *RrdbBatchGetResult) String() string {
	if p == nil {
		return ""
	}
	return fmt.Sprintf("RrdbBatchGetResult(%+v)", *p)
}

// RrdbSortkeyCountArgs wraps the single argument of the sortkey_count RPC
// (thrift struct name "sortkey_count_args", see Write below).
// Attributes:
//  - HashKey
type RrdbSortkeyCountArgs struct {
	HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"`
}

// NewRrdbSortkeyCountArgs returns an empty args wrapper.
func NewRrdbSortkeyCountArgs() *RrdbSortkeyCountArgs {
	return &RrdbSortkeyCountArgs{}
}

// Default returned by GetHashKey when the field is unset (nil pointer).
var RrdbSortkeyCountArgs_HashKey_DEFAULT *base.Blob

// GetHashKey returns the hash_key field, or the (nil) default when unset.
func (p *RrdbSortkeyCountArgs) GetHashKey() *base.Blob {
	if !p.IsSetHashKey() {
		return RrdbSortkeyCountArgs_HashKey_DEFAULT
	}
	return p.HashKey
}

// IsSetHashKey reports whether the hash_key field has been populated.
func (p *RrdbSortkeyCountArgs) IsSetHashKey() bool {
	return p.HashKey != nil
}

// Read deserializes the struct from iprot. Field id 1 with wire type STRUCT
// is decoded via ReadField1; every other field (or wire type) is skipped.
func (p *RrdbSortkeyCountArgs) Read(iprot thrift.TProtocol) error {
	if _, err := iprot.ReadStructBegin(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
	}

	for {
		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
		if err != nil {
			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
		}
		if fieldTypeId == thrift.STOP {
			break
		}
		switch fieldId {
		case 1:
			if fieldTypeId == thrift.STRUCT {
				if err := p.ReadField1(iprot); err != nil {
					return err
				}
			} else {
				if err := iprot.Skip(fieldTypeId); err != nil {
					return err
				}
			}
		default:
			if err := iprot.Skip(fieldTypeId); err != nil {
				return err
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
	}
	return nil
}

// ReadField1 allocates a fresh base.Blob and delegates decoding to it.
func (p *RrdbSortkeyCountArgs) ReadField1(iprot thrift.TProtocol) error {
	p.HashKey = &base.Blob{}
	if err := p.HashKey.Read(iprot); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err)
	}
	return nil
}

// Write serializes the struct to oprot as "sortkey_count_args". A nil receiver
// writes only the field-stop marker and struct end.
// (Definition continues on the following lines of the file.)
func (p *RrdbSortkeyCountArgs) Write(oprot thrift.TProtocol) error {
	if err := oprot.WriteStructBegin("sortkey_count_args"); err != nil {
		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
	}
	if p != nil {
		if err := p.writeField1(oprot); err != nil {
			return err
		}
	}
	if err := oprot.WriteFieldStop(); err != nil {
		return thrift.PrependError("write field stop error: ", err)
	}
	if err := 
oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbSortkeyCountArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *RrdbSortkeyCountArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbSortkeyCountArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbSortkeyCountResult struct { + Success *CountResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbSortkeyCountResult() *RrdbSortkeyCountResult { + return &RrdbSortkeyCountResult{} +} + +var RrdbSortkeyCountResult_Success_DEFAULT *CountResponse + +func (p *RrdbSortkeyCountResult) GetSuccess() *CountResponse { + if !p.IsSetSuccess() { + return RrdbSortkeyCountResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbSortkeyCountResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbSortkeyCountResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + 
return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbSortkeyCountResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &CountResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbSortkeyCountResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("sortkey_count_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbSortkeyCountResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbSortkeyCountResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbSortkeyCountResult(%+v)", *p) +} + +// Attributes: +// - Key +type RrdbTTLArgs struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` +} + +func 
NewRrdbTTLArgs() *RrdbTTLArgs { + return &RrdbTTLArgs{} +} + +var RrdbTTLArgs_Key_DEFAULT *base.Blob + +func (p *RrdbTTLArgs) GetKey() *base.Blob { + if !p.IsSetKey() { + return RrdbTTLArgs_Key_DEFAULT + } + return p.Key +} +func (p *RrdbTTLArgs) IsSetKey() bool { + return p.Key != nil +} + +func (p *RrdbTTLArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbTTLArgs) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *RrdbTTLArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ttl_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p 
*RrdbTTLArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *RrdbTTLArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbTTLArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbTTLResult struct { + Success *TTLResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbTTLResult() *RrdbTTLResult { + return &RrdbTTLResult{} +} + +var RrdbTTLResult_Success_DEFAULT *TTLResponse + +func (p *RrdbTTLResult) GetSuccess() *TTLResponse { + if !p.IsSetSuccess() { + return RrdbTTLResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbTTLResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbTTLResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read 
struct end error: ", p), err) + } + return nil +} + +func (p *RrdbTTLResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &TTLResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbTTLResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ttl_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbTTLResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbTTLResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbTTLResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbGetScannerArgs struct { + Request *GetScannerRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbGetScannerArgs() *RrdbGetScannerArgs { + return &RrdbGetScannerArgs{} +} + +var RrdbGetScannerArgs_Request_DEFAULT *GetScannerRequest + +func (p *RrdbGetScannerArgs) GetRequest() *GetScannerRequest { + if !p.IsSetRequest() { + return RrdbGetScannerArgs_Request_DEFAULT + } + return 
p.Request +} +func (p *RrdbGetScannerArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbGetScannerArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetScannerArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &GetScannerRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbGetScannerArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_scanner_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { 
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbGetScannerArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetScannerArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbGetScannerResult struct { + Success *ScanResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbGetScannerResult() *RrdbGetScannerResult { + return &RrdbGetScannerResult{} +} + +var RrdbGetScannerResult_Success_DEFAULT *ScanResponse + +func (p *RrdbGetScannerResult) GetSuccess() *ScanResponse { + if !p.IsSetSuccess() { + return RrdbGetScannerResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbGetScannerResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbGetScannerResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return 
nil +} + +func (p *RrdbGetScannerResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ScanResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbGetScannerResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_scanner_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetScannerResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbGetScannerResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetScannerResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbScanArgs struct { + Request *ScanRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbScanArgs() *RrdbScanArgs { + return &RrdbScanArgs{} +} + +var RrdbScanArgs_Request_DEFAULT *ScanRequest + +func (p *RrdbScanArgs) GetRequest() *ScanRequest { + if !p.IsSetRequest() { + return RrdbScanArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbScanArgs) IsSetRequest() bool { + 
return p.Request != nil +} + +func (p *RrdbScanArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbScanArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &ScanRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbScanArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbScanArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if 
err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbScanArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbScanArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbScanResult struct { + Success *ScanResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbScanResult() *RrdbScanResult { + return &RrdbScanResult{} +} + +var RrdbScanResult_Success_DEFAULT *ScanResponse + +func (p *RrdbScanResult) GetSuccess() *ScanResponse { + if !p.IsSetSuccess() { + return RrdbScanResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbScanResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbScanResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbScanResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ScanResponse{} + if err := p.Success.Read(iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbScanResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbScanResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbScanResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbScanResult(%+v)", *p) +} + +// Attributes: +// - ContextID +type RrdbClearScannerArgs struct { + ContextID int64 `thrift:"context_id,1" db:"context_id" json:"context_id"` +} + +func NewRrdbClearScannerArgs() *RrdbClearScannerArgs { + return &RrdbClearScannerArgs{} +} + +func (p *RrdbClearScannerArgs) GetContextID() int64 { + return p.ContextID +} +func (p *RrdbClearScannerArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbClearScannerArgs) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ContextID = v + } + return nil +} + +func (p *RrdbClearScannerArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_scanner_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbClearScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("context_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:context_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ContextID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.context_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:context_id: ", p), err) + } + return err 
+} + +func (p *RrdbClearScannerArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbClearScannerArgs(%+v)", *p) +} + +type Meta interface { + // Parameters: + // - Query + QueryCfg(ctx context.Context, query *replication.QueryCfgRequest) (r *replication.QueryCfgResponse, err error) +} + +type MetaClient struct { + c thrift.TClient +} + +func NewMetaClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *MetaClient { + return &MetaClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewMetaClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *MetaClient { + return &MetaClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewMetaClient(c thrift.TClient) *MetaClient { + return &MetaClient{ + c: c, + } +} + +func (p *MetaClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Query +func (p *MetaClient) QueryCfg(ctx context.Context, query *replication.QueryCfgRequest) (r *replication.QueryCfgResponse, err error) { + var _args124 MetaQueryCfgArgs + _args124.Query = query + var _result125 MetaQueryCfgResult + if err = p.Client_().Call(ctx, "query_cfg", &_args124, &_result125); err != nil { + return + } + return _result125.GetSuccess(), nil +} + +type MetaProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Meta +} + +func (p *MetaProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *MetaProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *MetaProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewMetaProcessor(handler Meta) *MetaProcessor { + + self126 := &MetaProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + 
self126.processorMap["query_cfg"] = &metaProcessorQueryCfg{handler: handler} + return self126 +} + +func (p *MetaProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x127 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x127.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x127 + +} + +type metaProcessorQueryCfg struct { + handler Meta +} + +func (p *metaProcessorQueryCfg) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := MetaQueryCfgArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_cfg", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := MetaQueryCfgResult{} + var retval *replication.QueryCfgResponse + var err2 error + if retval, err2 = p.handler.QueryCfg(ctx, args.Query); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_cfg: "+err2.Error()) + oprot.WriteMessageBegin("query_cfg", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_cfg", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 
+ } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Query +type MetaQueryCfgArgs struct { + Query *replication.QueryCfgRequest `thrift:"query,1" db:"query" json:"query"` +} + +func NewMetaQueryCfgArgs() *MetaQueryCfgArgs { + return &MetaQueryCfgArgs{} +} + +var MetaQueryCfgArgs_Query_DEFAULT *replication.QueryCfgRequest + +func (p *MetaQueryCfgArgs) GetQuery() *replication.QueryCfgRequest { + if !p.IsSetQuery() { + return MetaQueryCfgArgs_Query_DEFAULT + } + return p.Query +} +func (p *MetaQueryCfgArgs) IsSetQuery() bool { + return p.Query != nil +} + +func (p *MetaQueryCfgArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MetaQueryCfgArgs) ReadField1(iprot thrift.TProtocol) error { + p.Query = &replication.QueryCfgRequest{} + if err := p.Query.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Query), err) + } + return nil +} + +func (p *MetaQueryCfgArgs) Write(oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin("query_cfg_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MetaQueryCfgArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("query", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:query: ", p), err) + } + if err := p.Query.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Query), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:query: ", p), err) + } + return err +} + +func (p *MetaQueryCfgArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MetaQueryCfgArgs(%+v)", *p) +} + +// Attributes: +// - Success +type MetaQueryCfgResult struct { + Success *replication.QueryCfgResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewMetaQueryCfgResult() *MetaQueryCfgResult { + return &MetaQueryCfgResult{} +} + +var MetaQueryCfgResult_Success_DEFAULT *replication.QueryCfgResponse + +func (p *MetaQueryCfgResult) GetSuccess() *replication.QueryCfgResponse { + if !p.IsSetSuccess() { + return MetaQueryCfgResult_Success_DEFAULT + } + return p.Success +} +func (p *MetaQueryCfgResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *MetaQueryCfgResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MetaQueryCfgResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &replication.QueryCfgResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *MetaQueryCfgResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MetaQueryCfgResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *MetaQueryCfgResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MetaQueryCfgResult(%+v)", *p) +} From eb4425cfaec3c5eeb72edf89d4a90b755fbb0c4e Mon Sep 17 00:00:00 2001 From: lengyuexuexuan <46274877+lengyuexuexuan@users.noreply.github.com> Date: Thu, 20 Jun 2024 11:24:57 +0800 Subject: [PATCH 07/29] fix(go-client): update config once replica server failed and forward to primary meta server if it was changed (#1916) https://github.com/apache/incubator-pegasus/issues/1880 https://github.com/apache/incubator-pegasus/issues/1856 As for https://github.com/apache/incubator-pegasus/issues/1856: when the go client is writing to one partition and the replica node core dumps, the go client will finish after a timeout without updating the configuration. In this case, the go client can only restart to solve the problem. In this PR, the client updates the table's configuration automatically when some replica core dumps. After testing, we found that the replica error is "context.DeadlineExceeded" (incubator-pegasus/go-client/pegasus/table_connector.go) when the replica core dumps. Therefore, when the client meets this error, the go client will update the configuration automatically. Besides, this request will not be retried: the configuration is automatically updated only in the case of a timeout, so retrying before then would still fail, and there is also the risk of infinite retries. Therefore, it is better to directly return the request error to the user and let the user try again. As for https://github.com/apache/incubator-pegasus/issues/1880: When the client sends an RPC message "RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX" to the meta server, if the meta server isn't the primary, a response that forwards to the primary meta server will be returned.
According to the above description, assuming that the client does not have a primary meta server configured, we can connect to the primary meta server in this way. About tests: 1. Start onebox, and the primary meta server is not added to the go client configuration. 2. The go client writes data to a certain partition and then kills the replica process. --- go-client/pegasus/table_connector.go | 1 + go-client/pegasus/table_connector_test.go | 8 ++- go-client/session/meta_call.go | 70 +++++++++++++++++++---- go-client/session/meta_session.go | 18 +++++- go-client/session/meta_session_test.go | 18 +++++- 5 files changed, 100 insertions(+), 15 deletions(-) diff --git a/go-client/pegasus/table_connector.go b/go-client/pegasus/table_connector.go index d1074a7bf3..c83461e393 100644 --- a/go-client/pegasus/table_connector.go +++ b/go-client/pegasus/table_connector.go @@ -703,6 +703,7 @@ func (p *pegasusTableConnector) handleReplicaError(err error, replica *session.R case base.ERR_TIMEOUT: case context.DeadlineExceeded: + confUpdate = true case context.Canceled: // timeout will not trigger a configuration update diff --git a/go-client/pegasus/table_connector_test.go b/go-client/pegasus/table_connector_test.go index 1b28747655..b4016748ea 100644 --- a/go-client/pegasus/table_connector_test.go +++ b/go-client/pegasus/table_connector_test.go @@ -269,8 +269,14 @@ func TestPegasusTableConnector_TriggerSelfUpdate(t *testing.T) { assert.True(t, confUpdate) assert.False(t, retry) + confUpdate, retry, err = ptb.handleReplicaError(context.DeadlineExceeded, nil) + <-ptb.confUpdateCh + assert.Error(t, err) + assert.True(t, confUpdate) + assert.False(t, retry) + { // Ensure: The following errors should not trigger configuration update - errorTypes := []error{base.ERR_TIMEOUT, context.DeadlineExceeded, base.ERR_CAPACITY_EXCEEDED, base.ERR_NOT_ENOUGH_MEMBER, base.ERR_BUSY, base.ERR_SPLITTING, base.ERR_DISK_INSUFFICIENT} + errorTypes := []error{base.ERR_TIMEOUT, base.ERR_CAPACITY_EXCEEDED, 
base.ERR_NOT_ENOUGH_MEMBER, base.ERR_BUSY, base.ERR_SPLITTING, base.ERR_DISK_INSUFFICIENT} for _, err := range errorTypes { channelEmpty := false diff --git a/go-client/session/meta_call.go b/go-client/session/meta_call.go index 2db6179ab1..d846aa09b8 100644 --- a/go-client/session/meta_call.go +++ b/go-client/session/meta_call.go @@ -26,6 +26,8 @@ import ( "time" "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/incubator-pegasus/go-client/pegalog" ) type metaCallFunc func(context.Context, *metaSession) (metaResponse, error) @@ -42,21 +44,24 @@ type metaCall struct { backupCh chan interface{} callFunc metaCallFunc - metas []*metaSession - lead int + metaIPAddrs []string + metas []*metaSession + lead int // After a Run successfully ends, the current leader will be set in this field. // If there is no meta failover, `newLead` equals to `lead`. newLead uint32 + lock sync.RWMutex } -func newMetaCall(lead int, metas []*metaSession, callFunc metaCallFunc) *metaCall { +func newMetaCall(lead int, metas []*metaSession, callFunc metaCallFunc, meatIPAddr []string) *metaCall { return &metaCall{ - metas: metas, - lead: lead, - newLead: uint32(lead), - respCh: make(chan metaResponse), - callFunc: callFunc, - backupCh: make(chan interface{}), + metas: metas, + metaIPAddrs: meatIPAddr, + lead: lead, + newLead: uint32(lead), + respCh: make(chan metaResponse), + callFunc: callFunc, + backupCh: make(chan interface{}), } } @@ -106,14 +111,44 @@ func (c *metaCall) Run(ctx context.Context) (metaResponse, error) { } // issueSingleMeta returns false if we should try another meta -func (c *metaCall) issueSingleMeta(ctx context.Context, i int) bool { - meta := c.metas[i] +func (c *metaCall) issueSingleMeta(ctx context.Context, curLeader int) bool { + meta := c.metas[curLeader] resp, err := c.callFunc(ctx, meta) + + if err == nil && resp.GetErr().Errno == base.ERR_FORWARD_TO_OTHERS.String() { + 
forwardAddr := c.getMetaServiceForwardAddress(resp) + if forwardAddr == nil { + return false + } + addr := forwardAddr.GetAddress() + found := false + c.lock.Lock() + for i := range c.metaIPAddrs { + if addr == c.metaIPAddrs[i] { + found = true + break + } + } + c.lock.Unlock() + if !found { + c.lock.Lock() + c.metaIPAddrs = append(c.metaIPAddrs, addr) + c.metas = append(c.metas, &metaSession{ + NodeSession: newNodeSession(addr, NodeTypeMeta), + logger: pegalog.GetLogger(), + }) + c.lock.Unlock() + curLeader = len(c.metas) - 1 + c.metas[curLeader].logger.Printf("add forward address %s as meta server", addr) + resp, err = c.callFunc(ctx, c.metas[curLeader]) + } + } + if err != nil || resp.GetErr().Errno == base.ERR_FORWARD_TO_OTHERS.String() { return false } // the RPC succeeds, this meta becomes the new leader now. - atomic.StoreUint32(&c.newLead, uint32(i)) + atomic.StoreUint32(&c.newLead, uint32(curLeader)) select { case <-ctx.Done(): case c.respCh <- resp: @@ -133,3 +168,14 @@ func (c *metaCall) issueBackupMetas(ctx context.Context) { }(i) } } + +func (c *metaCall) getMetaServiceForwardAddress(resp metaResponse) *base.RPCAddress { + rep, ok := resp.(*replication.QueryCfgResponse) + if !ok || rep.GetErr().Errno != base.ERR_FORWARD_TO_OTHERS.String() { + return nil + } else if rep.GetPartitions() == nil || len(rep.GetPartitions()) == 0 { + return nil + } else { + return rep.Partitions[0].Primary + } +} diff --git a/go-client/session/meta_session.go b/go-client/session/meta_session.go index c209cb8488..b0e962d1d9 100644 --- a/go-client/session/meta_session.go +++ b/go-client/session/meta_session.go @@ -94,10 +94,12 @@ func NewMetaManager(addrs []string, creator NodeSessionCreator) *MetaManager { func (m *MetaManager) call(ctx context.Context, callFunc metaCallFunc) (metaResponse, error) { lead := m.getCurrentLeader() - call := newMetaCall(lead, m.metas, callFunc) + call := newMetaCall(lead, m.metas, callFunc, m.metaIPAddrs) resp, err := call.Run(ctx) if err == nil 
{ m.setCurrentLeader(int(call.newLead)) + m.setNewMetas(call.metas) + m.setMetaIPAddrs(call.metaIPAddrs) } return resp, err } @@ -131,6 +133,20 @@ func (m *MetaManager) setCurrentLeader(lead int) { m.currentLeader = lead } +func (m *MetaManager) setNewMetas(metas []*metaSession) { + m.mu.Lock() + defer m.mu.Unlock() + + m.metas = metas +} + +func (m *MetaManager) setMetaIPAddrs(metaIPAddrs []string) { + m.mu.Lock() + defer m.mu.Unlock() + + m.metaIPAddrs = metaIPAddrs +} + // Close the sessions. func (m *MetaManager) Close() error { funcs := make([]func() error, len(m.metas)) diff --git a/go-client/session/meta_session_test.go b/go-client/session/meta_session_test.go index d2cbf6cc3d..5014a4680e 100644 --- a/go-client/session/meta_session_test.go +++ b/go-client/session/meta_session_test.go @@ -118,7 +118,7 @@ func TestMetaManager_FirstMetaDead(t *testing.T) { for i := 0; i < 3; i++ { call := newMetaCall(mm.currentLeader, mm.metas, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { return ms.queryConfig(rpcCtx, "temp") - }) + }, []string{"0.0.0.0:12345", "0.0.0.0:34603", "0.0.0.0:34602", "0.0.0.0:34601"}) // This a trick for testing. If metaCall issue to other meta, not only to the leader, this nil channel will cause panic. call.backupCh = nil metaResp, err := call.Run(context.Background()) @@ -126,3 +126,19 @@ func TestMetaManager_FirstMetaDead(t *testing.T) { assert.Equal(t, metaResp.GetErr().Errno, base.ERR_OK.String()) } } + +// This case mocks the case that the server primary meta is not in the client metalist. +// And the client will forward to the primary meta automatically. 
+func TestNodeSession_ForwardToPrimaryMeta(t *testing.T) { + defer leaktest.Check(t)() + + metaList := []string{"0.0.0.0:34601", "0.0.0.0:34602", "0.0.0.0:34603"} + + for i := 0; i < 3; i++ { + mm := NewMetaManager(metaList[i:i+1], NewNodeSession) + defer mm.Close() + resp, err := mm.QueryConfig(context.Background(), "temp") + assert.Nil(t, err) + assert.Equal(t, resp.Err.Errno, base.ERR_OK.String()) + } +} From 347a8271d077131a114fe1a77030da191c26749c Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Fri, 21 Jun 2024 14:49:38 +0800 Subject: [PATCH 08/29] fix(FQDN): Fix the bug of crash caused by un-resolved IP address (#2044) https://github.com/apache/incubator-pegasus/issues/2007 In servers, we assume that the remote IPs may not be reverse resolvable; in this case, warning or error messages are logged instead of crashing. But in tests, we assume that all the IPs can be reverse resolved. --- src/client/replication_ddl_client.cpp | 2 +- .../test/failure_detector.cpp | 1 + src/meta/meta_bulk_load_service.cpp | 24 +++++++++---------- src/meta/server_state.cpp | 17 +++++++++---- src/redis_protocol/proxy_lib/proxy_layer.cpp | 11 ++++----- src/redis_protocol/proxy_lib/proxy_layer.h | 11 +++++---- src/replica/storage/simple_kv/test/case.cpp | 4 +++- src/runtime/rpc/asio_net_provider.cpp | 2 ++ src/runtime/rpc/asio_net_provider.h | 1 + src/runtime/rpc/group_host_port.h | 10 ++++++-- src/runtime/rpc/network.cpp | 5 +++- src/runtime/rpc/network.sim.cpp | 2 ++ src/runtime/rpc/rpc_engine.cpp | 11 +++++---- src/runtime/rpc/rpc_host_port.cpp | 10 ++++---- src/runtime/rpc/rpc_message.h | 8 +++++-- src/runtime/service_api_c.cpp | 4 ++-- src/runtime/test/host_port_test.cpp | 6 ++--- 17 files changed, 81 insertions(+), 48 deletions(-) diff --git a/src/client/replication_ddl_client.cpp b/src/client/replication_ddl_client.cpp index 13a2e5181a..d2086e8fbd 100644 --- a/src/client/replication_ddl_client.cpp +++ b/src/client/replication_ddl_client.cpp @@ -93,7 +93,7 @@ 
replication_ddl_client::replication_ddl_client(const std::vector _meta_server.assign_group("meta-servers"); for (const auto &m : meta_servers) { if (!_meta_server.group_host_port()->add(m)) { - LOG_WARNING("duplicate adress {}", m); + LOG_WARNING("duplicate address {}", m); } } } diff --git a/src/failure_detector/test/failure_detector.cpp b/src/failure_detector/test/failure_detector.cpp index 1e14ebece0..cfa8e81826 100644 --- a/src/failure_detector/test/failure_detector.cpp +++ b/src/failure_detector/test/failure_detector.cpp @@ -241,6 +241,7 @@ class test_master : public service_app for (auto &port : ports) { rpc_address addr(network::get_local_ipv4(), std::stoi(port)); const auto hp = ::dsn::host_port::from_address(addr); + CHECK(hp, "'{}' can not be reverse resolved", addr); _master_fd->add_allow_list(hp); } use_allow_list = true; diff --git a/src/meta/meta_bulk_load_service.cpp b/src/meta/meta_bulk_load_service.cpp index 9dd97141e3..7317d57ea1 100644 --- a/src/meta/meta_bulk_load_service.cpp +++ b/src/meta/meta_bulk_load_service.cpp @@ -451,19 +451,17 @@ void bulk_load_service::partition_bulk_load(const std::string &app_name, const g req->remote_root_path); bulk_load_rpc rpc(std::move(req), RPC_BULK_LOAD, 0_ms, 0, pid.thread_hash()); - rpc.call(pconfig.primary, _meta_svc->tracker(), [this, rpc](error_code err) mutable { - // fill host_port struct if needed - // remote server maybe not supported host_post, just have address - auto &bulk_load_resp = rpc.response(); - if (!bulk_load_resp.__isset.hp_group_bulk_load_state) { - bulk_load_resp.__set_hp_group_bulk_load_state({}); - for (const auto & [ addr, pbls ] : bulk_load_resp.group_bulk_load_state) { - bulk_load_resp.hp_group_bulk_load_state[host_port::from_address(addr)] = pbls; - } - } - - on_partition_bulk_load_reply(err, rpc.request(), rpc.response()); - }); + rpc.call( + pconfig.primary, _meta_svc->tracker(), [this, pid, rpc, pconfig](error_code err) mutable { + // The remote server may not support FQDN, but 
do not try to reverse resolve the + // IP addresses because they may be unresolved. Just warning and ignore this. + LOG_WARNING_IF(!rpc.response().__isset.hp_group_bulk_load_state, + "The {} primary {} doesn't support FQDN, the response " + "hp_group_bulk_load_state field is not set", + pid, + FMT_HOST_PORT_AND_IP(pconfig, primary)); + on_partition_bulk_load_reply(err, rpc.request(), rpc.response()); + }); } // ThreadPool: THREAD_POOL_META_STATE diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp index 09dc7781be..e1cf2b26ac 100644 --- a/src/meta/server_state.cpp +++ b/src/meta/server_state.cpp @@ -1587,7 +1587,7 @@ void server_state::update_configuration_locally( break; case config_type::CT_REGISTER_CHILD: { ns->put_partition(gpid, true); - // TODO(yingchun): optimize this + // TODO(yingchun): optimize the duplicate loops. if (config_request->config.__isset.hp_secondaries) { for (const auto &secondary : config_request->config.hp_secondaries) { auto secondary_node = get_node_state(_nodes, secondary, false); @@ -1595,8 +1595,16 @@ void server_state::update_configuration_locally( } } else { for (const auto &secondary : config_request->config.secondaries) { - auto secondary_node = - get_node_state(_nodes, host_port::from_address(secondary), false); + const auto hp = host_port::from_address(secondary); + if (!hp) { + LOG_ERROR("The registering secondary {} for pid {} can no be reverse " + "resolved, skip registering it, please check the network " + "configuration", + secondary, + config_request->config.pid); + continue; + } + auto secondary_node = get_node_state(_nodes, hp, false); secondary_node->put_partition(gpid, false); } } @@ -1608,8 +1616,9 @@ void server_state::update_configuration_locally( } } else { CHECK_EQ(old_cfg.ballot, new_cfg.ballot); - const auto host_node = host_port::from_address(config_request->host_node); + // The non-stateful app is just for testing, so just check the host_node is resolvable. 
+ CHECK(host_node, "'{}' can not be reverse resolved", config_request->host_node); new_cfg = old_cfg; partition_configuration_stateless pcs(new_cfg); if (config_request->type == config_type::type::CT_ADD_SECONDARY) { diff --git a/src/redis_protocol/proxy_lib/proxy_layer.cpp b/src/redis_protocol/proxy_lib/proxy_layer.cpp index f3f15e6d5a..b8eb1666d2 100644 --- a/src/redis_protocol/proxy_lib/proxy_layer.cpp +++ b/src/redis_protocol/proxy_lib/proxy_layer.cpp @@ -62,7 +62,7 @@ proxy_stub::proxy_stub(const proxy_session::factory &f, void proxy_stub::on_rpc_request(dsn::message_ex *request) { - auto source = ::dsn::host_port::from_address(request->header->from_address); + const auto &source = request->header->from_address; std::shared_ptr session; { ::dsn::zauto_read_lock l(_lock); @@ -87,11 +87,10 @@ void proxy_stub::on_rpc_request(dsn::message_ex *request) void proxy_stub::on_recv_remove_session_request(dsn::message_ex *request) { - auto source = ::dsn::host_port::from_address(request->header->from_address); - remove_session(source); + remove_session(request->header->from_address); } -void proxy_stub::remove_session(dsn::host_port remote) +void proxy_stub::remove_session(dsn::rpc_address remote) { std::shared_ptr session; { @@ -114,9 +113,9 @@ proxy_session::proxy_session(proxy_stub *op, dsn::message_ex *first_msg) CHECK_NOTNULL(first_msg, "null msg when create session"); _backup_one_request->add_ref(); - _session_remote = ::dsn::host_port::from_address(_backup_one_request->header->from_address); + _session_remote = _backup_one_request->header->from_address; _session_remote_str = _session_remote.to_string(); - CHECK_EQ_MSG(_session_remote.type(), HOST_TYPE_IPV4, "invalid host_port type"); + CHECK_EQ_MSG(_session_remote.type(), HOST_TYPE_IPV4, "invalid rpc_address type"); } proxy_session::~proxy_session() diff --git a/src/redis_protocol/proxy_lib/proxy_layer.h b/src/redis_protocol/proxy_lib/proxy_layer.h index 99884074ca..385c99d20d 100644 --- 
a/src/redis_protocol/proxy_lib/proxy_layer.h +++ b/src/redis_protocol/proxy_lib/proxy_layer.h @@ -25,6 +25,7 @@ #include #include +#include "runtime/rpc/rpc_address.h" #include "runtime/rpc/rpc_host_port.h" #include "runtime/serverlet.h" #include "runtime/task/task_code.h" @@ -79,8 +80,9 @@ class proxy_session : public std::enable_shared_from_this // when get message from raw parser, request & response of "dsn::message_ex*" are not in couple. // we need to backup one request to create a response struct. dsn::message_ex *_backup_one_request; - // the client for which this session served - dsn::host_port _session_remote; + // The client for which this session served for. + // The source IP address is possible to be reverse un-resolved, so use rpc_address directly. + dsn::rpc_address _session_remote; std::string _session_remote_str; }; @@ -107,14 +109,15 @@ class proxy_stub : public ::dsn::serverlet this->unregister_rpc_handler(RPC_CALL_RAW_MESSAGE); this->unregister_rpc_handler(RPC_CALL_RAW_SESSION_DISCONNECT); } - void remove_session(dsn::host_port remote_address); + void remove_session(dsn::rpc_address remote_address); private: void on_rpc_request(dsn::message_ex *request); void on_recv_remove_session_request(dsn::message_ex *); ::dsn::zrwlock_nr _lock; - std::unordered_map<::dsn::host_port, std::shared_ptr> _sessions; + // The source IP address is possible to be un-reverse resolved, so use rpc_address. 
+ std::unordered_map<::dsn::rpc_address, std::shared_ptr> _sessions; proxy_session::factory _factory; ::dsn::host_port _uri_address; std::string _cluster; diff --git a/src/replica/storage/simple_kv/test/case.cpp b/src/replica/storage/simple_kv/test/case.cpp index 730b04e93c..bd4b9464dc 100644 --- a/src/replica/storage/simple_kv/test/case.cpp +++ b/src/replica/storage/simple_kv/test/case.cpp @@ -534,7 +534,9 @@ void event_on_rpc::init(message_ex *msg, task *tsk) if (msg != nullptr) { _trace_id = fmt::sprintf("%016llx", msg->header->trace_id); _rpc_name = msg->header->rpc_name; - _from = address_to_node(host_port::from_address(msg->header->from_address)); + const auto hp = host_port::from_address(msg->header->from_address); + CHECK(hp, "'{}' can not be reverse resolved", msg->header->from_address); + _from = address_to_node(hp); _to = address_to_node(msg->to_host_port); } } diff --git a/src/runtime/rpc/asio_net_provider.cpp b/src/runtime/rpc/asio_net_provider.cpp index 1bc85f2d5b..8c9457cbb3 100644 --- a/src/runtime/rpc/asio_net_provider.cpp +++ b/src/runtime/rpc/asio_net_provider.cpp @@ -147,6 +147,7 @@ error_code asio_network_provider::start(rpc_channel channel, int port, bool clie _address = rpc_address(get_local_ipv4(), port); _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); if (!client_only) { auto v4_addr = boost::asio::ip::address_v4::any(); //(ntohl(_address.ip)); @@ -456,6 +457,7 @@ error_code asio_udp_provider::start(rpc_channel channel, int port, bool client_o } _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); for (int i = 0; i < FLAGS_io_service_worker_count; i++) { _workers.push_back(std::make_shared([this, i]() { diff --git a/src/runtime/rpc/asio_net_provider.h b/src/runtime/rpc/asio_net_provider.h index 20eb6f9d91..c26ce1b191 100644 --- a/src/runtime/rpc/asio_net_provider.h +++ b/src/runtime/rpc/asio_net_provider.h 
@@ -95,6 +95,7 @@ class asio_network_provider : public connection_oriented_network std::vector> _io_services; std::vector> _workers; ::dsn::rpc_address _address; + // NOTE: '_hp' is possible to be invalid if '_address' can not be reverse resolved. ::dsn::host_port _hp; }; diff --git a/src/runtime/rpc/group_host_port.h b/src/runtime/rpc/group_host_port.h index f8ceea212f..ae1181358e 100644 --- a/src/runtime/rpc/group_host_port.h +++ b/src/runtime/rpc/group_host_port.h @@ -127,10 +127,16 @@ inline rpc_group_host_port::rpc_group_host_port(const rpc_group_address *g_addr) { _name = g_addr->name(); for (const auto &addr : g_addr->members()) { - CHECK_TRUE(add(host_port::from_address(addr))); + const auto hp = host_port::from_address(addr); + CHECK(hp, "'{}' can not be reverse resolved", addr); + CHECK_TRUE(add(hp)); } _update_leader_automatically = g_addr->is_update_leader_automatically(); - set_leader(host_port::from_address(g_addr->leader())); + if (g_addr->leader()) { + const auto hp = host_port::from_address(g_addr->leader()); + CHECK(hp, "'{}' can not be reverse resolved", g_addr->leader()); + set_leader(hp); + } } inline rpc_group_host_port &rpc_group_host_port::operator=(const rpc_group_host_port &other) diff --git a/src/runtime/rpc/network.cpp b/src/runtime/rpc/network.cpp index 72a9c55e9b..2c506fd720 100644 --- a/src/runtime/rpc/network.cpp +++ b/src/runtime/rpc/network.cpp @@ -388,7 +388,9 @@ rpc_session::rpc_session(connection_oriented_network &net, _message_sent(0), _net(net), _remote_addr(remote_addr), - _remote_host_port(host_port::from_address(remote_addr)), + // TODO(yingchun): '_remote_host_port' is possible to be invalid after this! + // TODO(yingchun): It's too costly to reverse resolve the host in the constructor. 
+ _remote_host_port(host_port::from_address(_remote_addr)), _max_buffer_block_count_per_send(net.max_buffer_block_count_per_send()), _reader(net.message_buffer_block_size()), _parser(parser), @@ -396,6 +398,7 @@ rpc_session::rpc_session(connection_oriented_network &net, _matcher(_net.engine()->matcher()), _delay_server_receive_ms(0) { + LOG_WARNING_IF(!_remote_host_port, "'{}' can not be reverse resolved", _remote_addr); if (!is_client) { on_rpc_session_connected.execute(this); } diff --git a/src/runtime/rpc/network.sim.cpp b/src/runtime/rpc/network.sim.cpp index 4aad6b933a..ca1986ae8b 100644 --- a/src/runtime/rpc/network.sim.cpp +++ b/src/runtime/rpc/network.sim.cpp @@ -162,6 +162,7 @@ sim_network_provider::sim_network_provider(rpc_engine *rpc, network *inner_provi { _address = rpc_address::from_host_port("localhost", 1); _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); } error_code sim_network_provider::start(rpc_channel channel, int port, bool client_only) @@ -172,6 +173,7 @@ error_code sim_network_provider::start(rpc_channel channel, int port, bool clien _address = dsn::rpc_address::from_host_port("localhost", port); _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); auto hostname = boost::asio::ip::host_name(); if (!client_only) { for (int i = NET_HDR_INVALID + 1; i <= network_header_format::max_value(); i++) { diff --git a/src/runtime/rpc/rpc_engine.cpp b/src/runtime/rpc/rpc_engine.cpp index 0e80e7fd1e..24eb71be54 100644 --- a/src/runtime/rpc/rpc_engine.cpp +++ b/src/runtime/rpc/rpc_engine.cpp @@ -150,8 +150,9 @@ bool rpc_client_matcher::on_recv_reply(network *net, uint64_t key, message_ex *r case GRPC_TO_LEADER: if (req->server_address.group_address()->is_update_leader_automatically()) { req->server_address.group_address()->set_leader(addr); - req->server_host_port.group_host_port()->set_leader( - 
host_port::from_address(addr)); + const auto hp = host_port::from_address(addr); + CHECK(hp, "'{}' can not be reverse resolved", addr); + req->server_host_port.group_host_port()->set_leader(hp); } break; default: @@ -180,8 +181,9 @@ bool rpc_client_matcher::on_recv_reply(network *net, uint64_t key, message_ex *r req->server_address.group_address()->is_update_leader_automatically()) { req->server_address.group_address()->set_leader( reply->header->from_address); - req->server_host_port.group_host_port()->set_leader( - host_port::from_address(reply->header->from_address)); + const auto hp = host_port::from_address(reply->header->from_address); + CHECK(hp, "'{}' can not be reverse resolved", reply->header->from_address); + req->server_host_port.group_host_port()->set_leader(hp); } break; default: @@ -523,6 +525,7 @@ error_code rpc_engine::start(const service_app_spec &aspec) _local_primary_address = _client_nets[NET_HDR_DSN][0]->address(); _local_primary_address.set_port(aspec.ports.size() > 0 ? 
*aspec.ports.begin() : aspec.id); _local_primary_host_port = host_port::from_address(_local_primary_address); + CHECK(_local_primary_host_port, "'{}' can not be reverse resolved", _local_primary_address); LOG_INFO("=== service_node=[{}], primary_address=[{}({})] ===", _node->full_name(), diff --git a/src/runtime/rpc/rpc_host_port.cpp b/src/runtime/rpc/rpc_host_port.cpp index 1e8583da68..42ed764010 100644 --- a/src/runtime/rpc/rpc_host_port.cpp +++ b/src/runtime/rpc/rpc_host_port.cpp @@ -58,9 +58,11 @@ host_port host_port::from_address(rpc_address addr) WARNING, 100, "construct host_port '{}' from rpc_address '{}'", hp, addr); switch (addr.type()) { case HOST_TYPE_IPV4: { - CHECK_OK(lookup_hostname(htonl(addr.ip()), &hp._host), - "lookup_hostname failed for {}", - addr.ipv4_str()); + const auto s = lookup_hostname(htonl(addr.ip()), &hp._host); + if (dsn_unlikely(!s)) { + LOG_WARNING("lookup_hostname failed for {}: {}", addr.ipv4_str(), s.description()); + return hp; + } hp._port = addr.port(); } break; case HOST_TYPE_GROUP: { @@ -70,7 +72,7 @@ host_port host_port::from_address(rpc_address addr) break; } - // Now is valid. + // 'hp' becomes valid now. hp._type = addr.type(); return hp; } diff --git a/src/runtime/rpc/rpc_message.h b/src/runtime/rpc/rpc_message.h index 63e4ecb5be..0645b48cb7 100644 --- a/src/runtime/rpc/rpc_message.h +++ b/src/runtime/rpc/rpc_message.h @@ -138,8 +138,12 @@ class message_ex : public ref_counter, public extensible_object rpc_session_ptr io_session; // send/recv session rpc_address to_address; // always ipv4/v6 address, it is the to_node's net address rpc_address server_address; // used by requests, and may be of uri/group address - host_port to_host_port; // fqdn from 'to_address' - host_port server_host_port; // fqdn from 'server_address' + // hostname from 'to_address'. It's possible to be invalid if 'to_address' can not be reverse + // resolved. + host_port to_host_port; + // hostname from 'server_address'. 
It's possible to be invalid if 'server_address' can not be + // reverse resolved. + host_port server_host_port; dsn::task_code local_rpc_code; network_header_format hdr_format; int send_retry_count; diff --git a/src/runtime/service_api_c.cpp b/src/runtime/service_api_c.cpp index 276fec4f3a..f4ed022344 100644 --- a/src/runtime/service_api_c.cpp +++ b/src/runtime/service_api_c.cpp @@ -165,7 +165,7 @@ void dsn_rpc_call(dsn::rpc_address server, dsn::rpc_response_task *rpc_call) auto msg = rpc_call->get_request(); msg->server_address = server; - msg->server_host_port = dsn::host_port::from_address(server); + msg->server_host_port = dsn::host_port::from_address(msg->server_address); ::dsn::task::get_current_rpc()->call(msg, dsn::rpc_response_task_ptr(rpc_call)); } @@ -173,7 +173,7 @@ dsn::message_ex *dsn_rpc_call_wait(dsn::rpc_address server, dsn::message_ex *req { auto msg = ((::dsn::message_ex *)request); msg->server_address = server; - msg->server_host_port = dsn::host_port::from_address(server); + msg->server_host_port = dsn::host_port::from_address(msg->server_address); ::dsn::rpc_response_task *rtask = new ::dsn::rpc_response_task(msg, nullptr, 0); rtask->add_ref(); diff --git a/src/runtime/test/host_port_test.cpp b/src/runtime/test/host_port_test.cpp index b64622189f..70d3699e1e 100644 --- a/src/runtime/test/host_port_test.cpp +++ b/src/runtime/test/host_port_test.cpp @@ -71,8 +71,7 @@ TEST(host_port_test, host_port_build) { const auto addr = rpc_address::from_host_port("localhost", 8080); - host_port hp1 = host_port::from_address(addr); - ASSERT_EQ(hp, hp1); + ASSERT_EQ(hp, host_port::from_address(addr)); } } @@ -203,8 +202,7 @@ TEST(host_port_test, rpc_group_host_port) ASSERT_EQ(addr2, g_addr->leader()); ASSERT_EQ(2, g_addr->count()); - host_port hp_grp2; - hp_grp2 = host_port::from_address(addr_grp); + host_port hp_grp2 = host_port::from_address(addr_grp); ASSERT_EQ(HOST_TYPE_GROUP, hp_grp2.type()); auto g_hp = hp_grp2.group_host_port(); From 
e1c28f0a9e3d912836525c24f6d63246a9d2a852 Mon Sep 17 00:00:00 2001 From: Dan Wang Date: Fri, 21 Jun 2024 15:25:50 +0800 Subject: [PATCH 09/29] chore: fix dead link http://open-falcon.org/ (#2046) https://github.com/apache/incubator-pegasus/issues/2047 Use "github.com/open-falcon" instead of dead link "open-falcon.org". --- java-client/README.md | 2 +- rfcs/2020-08-27-metric-api.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/java-client/README.md b/java-client/README.md index 24229cbb83..1faf9a9a7e 100644 --- a/java-client/README.md +++ b/java-client/README.md @@ -108,7 +108,7 @@ For each type of request(get, set, multiset, etc.), we collect 8 metrics: We use io.dropwizard.metrics library to calculate the request count. -Currently, metrics are integrated with open-falcon(https://open-falcon.org/), +Currently, metrics are integrated with open-falcon(https://github.com/open-falcon), which push counters to local http agent http://127.0.0.1:1988/push/v1. diff --git a/rfcs/2020-08-27-metric-api.md b/rfcs/2020-08-27-metric-api.md index f09577fc51..2eb8543f96 100644 --- a/rfcs/2020-08-27-metric-api.md +++ b/rfcs/2020-08-27-metric-api.md @@ -25,7 +25,7 @@ This RFC proposes a new metric API in replace of the old perf-counter API. ## Motivation -The perf-counter API has bad naming convention to be parsed and queried over the external monitoring system like [Prometheus](https://prometheus.io/), or [open-falcon](http://open-falcon.org/). +The perf-counter API has bad naming convention to be parsed and queried over the external monitoring system like [Prometheus](https://prometheus.io/), or [open-falcon](https://github.com/open-falcon). 
Here are some examples of the perf-counter it exposes: From 5dd98c3e2ef1ed2e639c8900a824ae65a02cc548 Mon Sep 17 00:00:00 2001 From: Dan Wang Date: Fri, 21 Jun 2024 15:42:43 +0800 Subject: [PATCH 10/29] feat(remote_command): provide the query for the progress of decrees including both local writes and duplications (#2045) There are many kinds of decrees while writing locally and duplicating to remote clusters, for example, the max decree in prepare list, the last decree that has ever been committed, the last decree that has been applied into rocksdb memtable, the last decree that has been flushed into rocksdb sst files, the max decree that has been confirmed by remote cluster for duplication, etc.. These decrees are very useful while we want to watch the progress of all the local writes and duplications. These decrees might also help us diagnose the problems. Therefore, we provide a tool in the way of `remote_command` to show the decrees for each replica. --- src/common/json_helper.h | 6 ++ src/replica/duplication/replica_duplicator.h | 20 +++++ .../replica_duplicator_manager.cpp | 75 +++++++++++++---- .../duplication/replica_duplicator_manager.h | 48 +++++------ src/replica/replica.cpp | 6 +- src/replica/replica.h | 39 +++++++-- src/replica/replica_stub.cpp | 20 ++++- src/replica/replication_app_base.h | 6 ++ .../storage/simple_kv/simple_kv.server.impl.h | 4 +- .../simple_kv/test/simple_kv.server.impl.h | 4 +- src/replica/test/mock_utils.h | 2 + src/server/pegasus_server_impl.cpp | 83 +++++++++++-------- src/server/pegasus_server_impl.h | 2 + 13 files changed, 225 insertions(+), 90 deletions(-) diff --git a/src/common/json_helper.h b/src/common/json_helper.h index 4329bd2a8a..cbd94af945 100644 --- a/src/common/json_helper.h +++ b/src/common/json_helper.h @@ -237,6 +237,12 @@ JSON_DECODE_ENTRIES(input, t, __VA_ARGS__); \ } +#define JSON_ENCODE_OBJ(writer, name, ...) 
\ + do { \ + writer.Key(#name); \ + dsn::json::json_encode(writer, __VA_ARGS__); \ + } while (0) + namespace dsn { namespace json { diff --git a/src/replica/duplication/replica_duplicator.h b/src/replica/duplication/replica_duplicator.h index ebf4473b99..e9df7d7cb6 100644 --- a/src/replica/duplication/replica_duplicator.h +++ b/src/replica/duplication/replica_duplicator.h @@ -23,6 +23,7 @@ #include #include "common//duplication_common.h" +#include "common/json_helper.h" #include "common/replication_other_types.h" #include "duplication_types.h" #include "replica/replica_base.h" @@ -143,6 +144,25 @@ class replica_duplicator : public replica_base, public pipeline::base void set_duplication_plog_checking(bool checking); + // Encode current progress of this duplication into json. + template + void encode_progress(TWriter &writer) const + { + writer.StartObject(); + + JSON_ENCODE_OBJ(writer, dupid, _id); + JSON_ENCODE_OBJ(writer, remote_cluster_name, _remote_cluster_name); + JSON_ENCODE_OBJ(writer, remote_app_name, _remote_app_name); + + { + zauto_read_lock l(_lock); + JSON_ENCODE_OBJ(writer, confirmed_decree, _progress.last_decree); + JSON_ENCODE_OBJ(writer, persisted_decree, _progress.confirmed_decree); + } + + writer.EndObject(); + } + private: friend class duplication_test_base; friend class replica_duplicator_test; diff --git a/src/replica/duplication/replica_duplicator_manager.cpp b/src/replica/duplication/replica_duplicator_manager.cpp index d60bf57b20..9d2153559a 100644 --- a/src/replica/duplication/replica_duplicator_manager.cpp +++ b/src/replica/duplication/replica_duplicator_manager.cpp @@ -22,7 +22,11 @@ #include "common//duplication_common.h" #include "common/gpid.h" +#include "common/replication_enums.h" +#include "metadata_types.h" #include "replica/duplication/replica_duplicator.h" +#include "replica/duplication/replica_duplicator_manager.h" +#include "replica/replica.h" #include "replica_duplicator_manager.h" #include "utils/autoref_ptr.h" #include 
"utils/errors.h" @@ -41,29 +45,56 @@ replica_duplicator_manager::replica_duplicator_manager(replica *r) { } +void replica_duplicator_manager::update_duplication_map( + const std::map &new_dup_map) +{ + if (new_dup_map.empty() || _replica->status() != partition_status::PS_PRIMARY) { + remove_all_duplications(); + return; + } + + remove_non_existed_duplications(new_dup_map); + + for (const auto &kv2 : new_dup_map) { + sync_duplication(kv2.second); + } +} + std::vector replica_duplicator_manager::get_duplication_confirms_to_update() const { zauto_lock l(_lock); std::vector updates; - for (const auto &kv : _duplications) { - replica_duplicator *duplicator = kv.second.get(); - duplication_progress p = duplicator->progress(); - if (p.last_decree != p.confirmed_decree || - (kv.second->status() == duplication_status::DS_PREPARE && p.checkpoint_has_prepared)) { - if (p.last_decree < p.confirmed_decree) { - LOG_ERROR_PREFIX("invalid decree state: p.last_decree({}) < p.confirmed_decree({})", - p.last_decree, - p.confirmed_decree); - continue; - } - duplication_confirm_entry entry; - entry.dupid = duplicator->id(); - entry.confirmed_decree = p.last_decree; - entry.__set_checkpoint_prepared(p.checkpoint_has_prepared); - updates.emplace_back(entry); + for (const auto & [ _, dup ] : _duplications) { + // There are two conditions when we should send confirmed decrees to meta server to update + // the progress: + // + // 1. the acknowledged decree from remote cluster has changed, making it different from + // the one that is persisted in zk by meta server; otherwise, + // + // 2. the duplication has been in the stage of synchronizing checkpoint to the remote + // cluster, and the synchronized checkpoint has been ready. 
+ const auto &progress = dup->progress(); + if (progress.last_decree == progress.confirmed_decree && + (dup->status() != duplication_status::DS_PREPARE || + !progress.checkpoint_has_prepared)) { + continue; } + + if (progress.last_decree < progress.confirmed_decree) { + LOG_ERROR_PREFIX( + "invalid decree state: progress.last_decree({}) < progress.confirmed_decree({})", + progress.last_decree, + progress.confirmed_decree); + continue; + } + + duplication_confirm_entry entry; + entry.dupid = dup->id(); + entry.confirmed_decree = progress.last_decree; + entry.__set_checkpoint_prepared(progress.checkpoint_has_prepared); + updates.emplace_back(entry); } return updates; } @@ -191,5 +222,17 @@ replica_duplicator_manager::get_dup_states() const return ret; } +void replica_duplicator_manager::remove_all_duplications() +{ + // fast path + if (_duplications.empty()) { + return; + } + + LOG_WARNING_PREFIX("remove all duplication, replica status = {}", + enum_to_string(_replica->status())); + _duplications.clear(); +} + } // namespace replication } // namespace dsn diff --git a/src/replica/duplication/replica_duplicator_manager.h b/src/replica/duplication/replica_duplicator_manager.h index 51bcbd1e1d..413176a16f 100644 --- a/src/replica/duplication/replica_duplicator_manager.h +++ b/src/replica/duplication/replica_duplicator_manager.h @@ -24,19 +24,16 @@ #include #include "common//duplication_common.h" -#include "common/replication_enums.h" #include "common/replication_other_types.h" #include "duplication_types.h" -#include "metadata_types.h" -#include "replica/replica.h" #include "replica/replica_base.h" #include "replica_duplicator.h" -#include "utils/fmt_logging.h" #include "utils/metrics.h" #include "utils/zlocks.h" namespace dsn { namespace replication { +class replica; /// replica_duplicator_manager manages the set of duplications on this replica. 
/// \see duplication_sync_timer @@ -51,19 +48,7 @@ class replica_duplicator_manager : public replica_base // - replica is not primary on replica-server perspective (status != PRIMARY) // - replica is not primary on meta-server perspective (progress.find(partition_id) == end()) // - the app is not assigned with duplication (dup_map.empty()) - void update_duplication_map(const std::map &new_dup_map) - { - if (new_dup_map.empty() || _replica->status() != partition_status::PS_PRIMARY) { - remove_all_duplications(); - return; - } - - remove_non_existed_duplications(new_dup_map); - - for (const auto &kv2 : new_dup_map) { - sync_duplication(kv2.second); - } - } + void update_duplication_map(const std::map &new_dup_map); /// collect updated duplication confirm points from this replica. std::vector get_duplication_confirms_to_update() const; @@ -93,21 +78,30 @@ class replica_duplicator_manager : public replica_base }; std::vector get_dup_states() const; + // Encode current progress of all duplication into json. 
+ template + void encode_progress(TWriter &writer) const + { + zauto_lock l(_lock); + + if (_duplications.empty()) { + return; + } + + writer.Key("duplications"); + writer.StartArray(); + for (const auto & [ _, dup ] : _duplications) { + dup->encode_progress(writer); + } + writer.EndArray(); + } + private: void sync_duplication(const duplication_entry &ent); void remove_non_existed_duplications(const std::map &); - void remove_all_duplications() - { - // fast path - if (_duplications.empty()) - return; - - LOG_WARNING_PREFIX("remove all duplication, replica status = {}", - enum_to_string(_replica->status())); - _duplications.clear(); - } + void remove_all_duplications(); private: friend class duplication_sync_timer_test; diff --git a/src/replica/replica.cpp b/src/replica/replica.cpp index 5bb1f17b83..be31df14fd 100644 --- a/src/replica/replica.cpp +++ b/src/replica/replica.cpp @@ -41,10 +41,10 @@ #include "common/replication_common.h" #include "common/replication_enums.h" #include "consensus_types.h" -#include "duplication/replica_duplicator_manager.h" #include "duplication/replica_follower.h" #include "mutation.h" #include "mutation_log.h" +#include "replica/duplication/replica_duplicator_manager.h" #include "replica/prepare_list.h" #include "replica/replica_context.h" #include "replica/replication_app_base.h" @@ -578,6 +578,10 @@ mutation_ptr replica::new_mutation(decree decree) return mu; } +decree replica::last_applied_decree() const { return _app->last_committed_decree(); } + +decree replica::last_flushed_decree() const { return _app->last_flushed_decree(); } + decree replica::last_durable_decree() const { return _app->last_durable_decree(); } decree replica::last_prepared_decree() const diff --git a/src/replica/replica.h b/src/replica/replica.h index 3b90641cdd..ae0118dc05 100644 --- a/src/replica/replica.h +++ b/src/replica/replica.h @@ -35,8 +35,10 @@ #include #include +#include "common/json_helper.h" #include "common/replication_other_types.h" #include 
"dsn.layer2_types.h" +#include "duplication/replica_duplicator_manager.h" // IWYU pragma: keep #include "meta_admin_types.h" #include "metadata_types.h" #include "mutation.h" @@ -96,7 +98,6 @@ class replica; class replica_backup_manager; class replica_bulk_loader; class replica_disk_migrator; -class replica_duplicator_manager; class replica_follower; class replica_split_manager; class replica_stub; @@ -223,8 +224,37 @@ class replica : public serverlet, public ref_counter, public replica_ba const app_info *get_app_info() const { return &_app_info; } decree max_prepared_decree() const { return _prepare_list->max_decree(); } decree last_committed_decree() const { return _prepare_list->last_committed_decree(); } + + // The last decree that has been applied into rocksdb memtable. + decree last_applied_decree() const; + + // The last decree that has been flushed into rocksdb sst. + decree last_flushed_decree() const; + decree last_prepared_decree() const; decree last_durable_decree() const; + + // Encode current progress of decrees into json, including both local writes and duplications + // of this replica. 
+ template + void encode_progress(TWriter &writer) const + { + writer.StartObject(); + + JSON_ENCODE_OBJ(writer, max_prepared_decree, max_prepared_decree()); + JSON_ENCODE_OBJ(writer, max_plog_decree, _private_log->max_decree(get_gpid())); + JSON_ENCODE_OBJ(writer, max_plog_commit_on_disk, _private_log->max_commit_on_disk()); + JSON_ENCODE_OBJ(writer, last_committed_decree, last_committed_decree()); + JSON_ENCODE_OBJ(writer, last_applied_decree, last_applied_decree()); + JSON_ENCODE_OBJ(writer, last_flushed_decree, last_flushed_decree()); + JSON_ENCODE_OBJ(writer, last_durable_decree, last_durable_decree()); + JSON_ENCODE_OBJ(writer, max_gc_decree, _private_log->max_gced_decree(get_gpid())); + + _duplication_mgr->encode_progress(writer); + + writer.EndObject(); + } + const std::string &dir() const { return _dir; } uint64_t create_time_milliseconds() const { return _create_time_ms; } const char *name() const { return replica_name(); } @@ -429,13 +459,6 @@ class replica : public serverlet, public ref_counter, public replica_ba error_code background_sync_checkpoint(); void catch_up_with_private_logs(partition_status::type s); void on_checkpoint_completed(error_code err); - void on_copy_checkpoint_ack(error_code err, - const std::shared_ptr &req, - const std::shared_ptr &resp); - void on_copy_checkpoint_file_completed(error_code err, - size_t sz, - std::shared_ptr resp, - const std::string &chk_dir); // Enable/Disable plog garbage collection to be executed. For example, to duplicate data // to target cluster, we could firstly disable plog garbage collection, then do copy_data. 
diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp index 3558fb7a45..a12c22efb6 100644 --- a/src/replica/replica_stub.cpp +++ b/src/replica/replica_stub.cpp @@ -29,6 +29,7 @@ // IWYU pragma: no_include #include #include +#include #include #include #include @@ -37,8 +38,8 @@ #include #include #include -#include #include +#include #include #include @@ -47,6 +48,7 @@ #include "bulk_load/replica_bulk_loader.h" #include "common/backup_common.h" #include "common/duplication_common.h" +#include "common/json_helper.h" #include "common/replication.codes.h" #include "common/replication_enums.h" #include "disk_cleaner.h" @@ -2335,6 +2337,22 @@ void replica_stub::register_ctrl_command() }); })); + _cmds.emplace_back(::dsn::command_manager::instance().register_single_command( + "replica.query-progress", + "Query the progress of decrees, including both local writes and duplications for " + "replicas specified by comma-separated list of 'app_id' or 'app_id.partition_id', " + "or all replicas for empty", + "[id1,id2,...]", + [this](const std::vector &args) { + return exec_command_on_replica(args, true, [](const replica_ptr &rep) { + std::ostringstream out; + rapidjson::OStreamWrapper wrapper(out); + dsn::json::PrettyJsonWriter writer(wrapper); + rep->encode_progress(writer); + return out.str(); + }); + })); + #ifdef DSN_ENABLE_GPERF _cmds.emplace_back(::dsn::command_manager::instance().register_bool_command( _release_tcmalloc_memory, diff --git a/src/replica/replication_app_base.h b/src/replica/replication_app_base.h index c3559c095d..2a88618f64 100644 --- a/src/replica/replication_app_base.h +++ b/src/replica/replication_app_base.h @@ -238,7 +238,13 @@ class replication_app_base : public replica_base // // Query methods. // + + // Get the decree of the last flushed mutation. -1 means failed to get. + virtual replication::decree last_flushed_decree() const = 0; + + // Get the decree of the last created checkpoint. 
virtual replication::decree last_durable_decree() const = 0; + // The return type is generated by storage engine, e.g. rocksdb::Status::Code, 0 always mean OK. virtual int on_request(message_ex *request) WARN_UNUSED_RESULT = 0; diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.h b/src/replica/storage/simple_kv/simple_kv.server.impl.h index 240ed87899..b296c1f83d 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.impl.h +++ b/src/replica/storage/simple_kv/simple_kv.server.impl.h @@ -70,7 +70,9 @@ class simple_kv_service_impl : public simple_kv_service virtual ::dsn::error_code stop(bool cleanup = false) override; - virtual int64_t last_durable_decree() const override { return _last_durable_decree; } + int64_t last_flushed_decree() const override { return _last_durable_decree; } + + int64_t last_durable_decree() const override { return _last_durable_decree; } virtual ::dsn::error_code sync_checkpoint() override; diff --git a/src/replica/storage/simple_kv/test/simple_kv.server.impl.h b/src/replica/storage/simple_kv/test/simple_kv.server.impl.h index 1235cdbc68..8b80396a02 100644 --- a/src/replica/storage/simple_kv/test/simple_kv.server.impl.h +++ b/src/replica/storage/simple_kv/test/simple_kv.server.impl.h @@ -82,7 +82,9 @@ class simple_kv_service_impl : public application::simple_kv_service virtual ::dsn::error_code stop(bool cleanup = false) override; - virtual int64_t last_durable_decree() const override { return _last_durable_decree; } + int64_t last_flushed_decree() const override { return _last_durable_decree; } + + int64_t last_durable_decree() const override { return _last_durable_decree; } virtual ::dsn::error_code sync_checkpoint() override; diff --git a/src/replica/test/mock_utils.h b/src/replica/test/mock_utils.h index 6d7725b787..cc631143b0 100644 --- a/src/replica/test/mock_utils.h +++ b/src/replica/test/mock_utils.h @@ -83,6 +83,8 @@ class mock_replication_app_base : public replication_app_base // we mock the followings void 
update_app_envs(const std::map &envs) override { _envs = envs; } void query_app_envs(std::map &out) override { out = _envs; } + + decree last_flushed_decree() const override { return _last_durable_decree; } decree last_durable_decree() const override { return _last_durable_decree; } // TODO(heyuchen): implement this function in further pull request diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index 35419d8efb..75c95a8128 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -2168,47 +2168,49 @@ ::dsn::error_code pegasus_server_impl::copy_checkpoint_to_dir_unsafe(const char } LOG_INFO_PREFIX("copy checkpoint to dir({}) succeed", checkpoint_dir); - if (checkpoint_decree != nullptr) { - rocksdb::DB *snapshot_db = nullptr; - std::vector handles_opened; - auto cleanup = [&](bool remove_checkpoint) { - if (remove_checkpoint && !::dsn::utils::filesystem::remove_path(checkpoint_dir)) { - LOG_ERROR_PREFIX("remove checkpoint directory {} failed", checkpoint_dir); - } - if (snapshot_db) { - for (auto handle : handles_opened) { - if (handle) { - snapshot_db->DestroyColumnFamilyHandle(handle); - handle = nullptr; - } + if (checkpoint_decree == nullptr) { + return ::dsn::ERR_OK; + } + + rocksdb::DB *snapshot_db = nullptr; + std::vector handles_opened; + auto cleanup = [&](bool remove_checkpoint) { + if (remove_checkpoint && !::dsn::utils::filesystem::remove_path(checkpoint_dir)) { + LOG_ERROR_PREFIX("remove checkpoint directory {} failed", checkpoint_dir); + } + if (snapshot_db) { + for (auto handle : handles_opened) { + if (handle) { + snapshot_db->DestroyColumnFamilyHandle(handle); + handle = nullptr; } - delete snapshot_db; - snapshot_db = nullptr; } - }; - - // Because of RocksDB's restriction, we have to to open default column family even though - // not use it - std::vector column_families( - {{meta_store::DATA_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}, - 
{meta_store::META_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}}); - status = rocksdb::DB::OpenForReadOnly( - _db_opts, checkpoint_dir, column_families, &handles_opened, &snapshot_db); - if (!status.ok()) { - LOG_ERROR_PREFIX( - "OpenForReadOnly from {} failed, error = {}", checkpoint_dir, status.ToString()); + delete snapshot_db; snapshot_db = nullptr; - cleanup(true); - return ::dsn::ERR_LOCAL_APP_FAILURE; } - CHECK_EQ_PREFIX(handles_opened.size(), 2); - CHECK_EQ_PREFIX(handles_opened[1]->GetName(), meta_store::META_COLUMN_FAMILY_NAME); - uint64_t last_flushed_decree = - _meta_store->get_decree_from_readonly_db(snapshot_db, handles_opened[1]); - *checkpoint_decree = last_flushed_decree; + }; - cleanup(false); + // Because of RocksDB's restriction, we have to to open default column family even though + // not use it + std::vector column_families( + {{meta_store::DATA_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}, + {meta_store::META_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}}); + status = rocksdb::DB::OpenForReadOnly( + _db_opts, checkpoint_dir, column_families, &handles_opened, &snapshot_db); + if (!status.ok()) { + LOG_ERROR_PREFIX( + "OpenForReadOnly from {} failed, error = {}", checkpoint_dir, status.ToString()); + snapshot_db = nullptr; + cleanup(true); + return ::dsn::ERR_LOCAL_APP_FAILURE; } + CHECK_EQ_PREFIX(handles_opened.size(), 2); + CHECK_EQ_PREFIX(handles_opened[1]->GetName(), meta_store::META_COLUMN_FAMILY_NAME); + uint64_t last_flushed_decree = + _meta_store->get_decree_from_readonly_db(snapshot_db, handles_opened[1]); + *checkpoint_decree = last_flushed_decree; + + cleanup(false); return ::dsn::ERR_OK; } @@ -2318,6 +2320,17 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, return ::dsn::ERR_OK; } +int64_t pegasus_server_impl::last_flushed_decree() const +{ + uint64_t decree = 0; + const auto &err = _meta_store->get_last_flushed_decree(&decree); + if (dsn_unlikely(err != dsn::ERR_OK)) { + return -1; + } + + 
return static_cast(decree); +} + bool pegasus_server_impl::validate_filter(::dsn::apps::filter_type::type filter_type, const ::dsn::blob &filter_pattern, const ::dsn::blob &value) diff --git a/src/server/pegasus_server_impl.h b/src/server/pegasus_server_impl.h index 361d9cbbae..d902e647dc 100644 --- a/src/server/pegasus_server_impl.h +++ b/src/server/pegasus_server_impl.h @@ -223,6 +223,8 @@ class pegasus_server_impl : public pegasus_read_service ::dsn::error_code storage_apply_checkpoint(chkpt_apply_mode mode, const dsn::replication::learn_state &state) override; + int64_t last_flushed_decree() const override; + int64_t last_durable_decree() const override { return _last_durable_decree.load(); } void update_app_envs(const std::map &envs) override; From 79add77cdf379bdb2125de47cef48f0865f25293 Mon Sep 17 00:00:00 2001 From: Dan Wang Date: Thu, 27 Jun 2024 16:57:49 +0800 Subject: [PATCH 11/29] fix(duplication): reduce the delay before last mutation is duplicated to the remote cluster (#2048) https://github.com/apache/incubator-pegasus/issues/2050 As is described by the issue, the problem is that we have to waits 2 ~ 3 minutes (until some empty write gets in) before the last mutation is duplicated to the remote cluster. The reason is that the last committed decree of the last mutation (i.e. `mutation.data.header.last_committed_decree`), rather than the decree of the last mutation (i.e. `mutation.data.header.decree`), is chosen as the max decree that is duplicated to the remote cluster. Instead, the max committed decree should be chosen as the max decree that is duplicated to the remote cluster. After the optimization, the delay has been reduced from 2 ~ 3 minutes to about 0.1 seconds. 
--- src/common/consensus.thrift | 13 ++ .../duplication/duplication_pipeline.cpp | 7 +- src/replica/duplication/mutation_batch.cpp | 38 ++-- src/replica/duplication/mutation_batch.h | 10 +- src/replica/duplication/replica_duplicator.h | 1 + .../duplication/test/duplication_test_base.h | 11 +- .../duplication/test/mutation_batch_test.cpp | 114 ++++++++---- src/replica/mutation_log.cpp | 152 +++++++++------- src/replica/mutation_log.h | 169 +++++++++++------- src/replica/replica.h | 1 + src/replica/test/mutation_log_test.cpp | 13 +- src/replica/test/replica_test_base.h | 12 +- 12 files changed, 357 insertions(+), 184 deletions(-) diff --git a/src/common/consensus.thrift b/src/common/consensus.thrift index 26312b8e36..8952c090c7 100644 --- a/src/common/consensus.thrift +++ b/src/common/consensus.thrift @@ -32,11 +32,24 @@ namespace cpp dsn.replication struct mutation_header { + // The partition that this mutation belongs to. 1:dsn.gpid pid; + + // The ID of the membership configuration that this mutation belongs to, + // increasing monotonically. 2:i64 ballot; + + // The decree of this mutation. 3:i64 decree; + + // The start offset of this mutation in the whole mutation log. 4:i64 log_offset; + + // The max of the decrees that have been committed before this mutation + // is prepared. 5:i64 last_committed_decree; + + // The unique timestamp that increases monotonically in microsecond. 
6:i64 timestamp; } diff --git a/src/replica/duplication/duplication_pipeline.cpp b/src/replica/duplication/duplication_pipeline.cpp index 54abd83cfb..53e54ece33 100644 --- a/src/replica/duplication/duplication_pipeline.cpp +++ b/src/replica/duplication/duplication_pipeline.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -57,7 +58,11 @@ void load_mutation::run() { decree last_decree = _duplicator->progress().last_decree; _start_decree = last_decree + 1; - if (_replica->private_log()->max_commit_on_disk() < _start_decree) { + + // Load the mutations from plog that have been committed recently, if any. + const auto max_plog_committed_decree = + std::min(_replica->private_log()->max_decree_on_disk(), _replica->last_applied_decree()); + if (_start_decree > max_plog_committed_decree) { // wait 100ms for next try if no mutation was added. repeat(100_ms); return; diff --git a/src/replica/duplication/mutation_batch.cpp b/src/replica/duplication/mutation_batch.cpp index e6d91d5f29..bd2c8bf460 100644 --- a/src/replica/duplication/mutation_batch.cpp +++ b/src/replica/duplication/mutation_batch.cpp @@ -27,6 +27,7 @@ #include "metadata_types.h" #include "mutation_batch.h" #include "replica_duplicator.h" +#include "replica/replica.h" #include "runtime/task/task_code.h" #include "runtime/task/task_spec.h" #include "utils/autoref_ptr.h" @@ -55,8 +56,10 @@ mutation_buffer::mutation_buffer(replica_base *r, void mutation_buffer::commit(decree d, commit_type ct) { - if (d <= last_committed_decree()) + if (d <= last_committed_decree()) { + // Ignore the decrees that have been committed. 
return; + } CHECK_EQ_PREFIX(ct, COMMIT_TO_DECREE_HARD); @@ -85,8 +88,8 @@ void mutation_buffer::commit(decree d, commit_type ct) min_decree(), max_decree()); METRIC_VAR_SET(dup_recent_lost_mutations, min_decree() - last_committed_decree()); - // if next_commit_mutation loss, let last_commit_decree catch up with min_decree, and - // the next loop will commit from min_decree + // If next_commit_mutation loss, let last_commit_decree catch up with min_decree, and + // the next loop will commit from min_decree. _last_committed_decree = min_decree() - 1; return; } @@ -101,13 +104,13 @@ void mutation_buffer::commit(decree d, commit_type ct) error_s mutation_batch::add(mutation_ptr mu) { if (mu->get_decree() <= _mutation_buffer->last_committed_decree()) { - // ignore + // Ignore the mutations that have been committed. return error_s::ok(); } auto old = _mutation_buffer->get_mutation_by_decree(mu->get_decree()); if (old != nullptr && old->data.header.ballot >= mu->data.header.ballot) { - // ignore + // The mutation with duplicate decree would be ignored. return error_s::ok(); } @@ -123,6 +126,16 @@ error_s mutation_batch::add(mutation_ptr mu) _start_decree); } + if (mu->get_decree() <= _replica->last_applied_decree()) { + // Once this mutation has been applied into rocksdb memtable, commit it for duplication; + // otherwise, this mutation would be delayed at least several minutes to be duplicated to + // the remote cluster. It would not be duplicated until some new mutations (such as empty + // writes) enter, since the last decree that is committed for this replica is NOT + // mu->data.header.decree but rather mu->data.header.last_committed_decree. See also + // `mutation_header` in src/common/consensus.thrift. 
+ _mutation_buffer->commit(mu->get_decree(), COMMIT_TO_DECREE_HARD); + } + return error_s::ok(); } @@ -140,7 +153,7 @@ mutation_tuple_set mutation_batch::move_all_mutations() return std::move(_loaded_mutations); } -mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r) +mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r), _replica(r->_replica) { // Prepend a special tag identifying this is a mutation_batch, // so `dxxx_replica` logging in prepare_list will print along with its real caller. @@ -149,25 +162,29 @@ mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r) r->get_gpid(), std::string("mutation_batch@") + r->replica_name(), r->app_name()); _mutation_buffer = std::make_unique( &base, 0, PREPARE_LIST_NUM_ENTRIES, [this](mutation_ptr &mu) { - // committer + // The committer for the prepare list, used for duplicating to add the committed + // mutations to the loading list, which would be shipped to the remote cluster + // later. add_mutation_if_valid(mu, _start_decree); }); - // start duplication from confirmed_decree + // Start duplication from the confirmed decree that has been persisted in the meta server. _mutation_buffer->reset(r->progress().confirmed_decree); } void mutation_batch::add_mutation_if_valid(mutation_ptr &mu, decree start_decree) { if (mu->get_decree() < start_decree) { - // ignore + // Ignore the mutations before start_decree. return; } + for (mutation_update &update : mu->data.updates) { - // ignore WRITE_EMPTY if (update.code == RPC_REPLICATION_WRITE_EMPTY) { + // Ignore empty writes. continue; } + // Ignore non-idempotent writes. 
// Normally a duplicating replica will reply non-idempotent writes with // ERR_OPERATION_DISABLED, but there could still be a mutation written @@ -176,6 +193,7 @@ void mutation_batch::add_mutation_if_valid(mutation_ptr &mu, decree start_decree if (!task_spec::get(update.code)->rpc_request_is_write_idempotent) { continue; } + blob bb; if (update.data.buffer() != nullptr) { bb = std::move(update.data); diff --git a/src/replica/duplication/mutation_batch.h b/src/replica/duplication/mutation_batch.h index 97795cea26..0cca5169e9 100644 --- a/src/replica/duplication/mutation_batch.h +++ b/src/replica/duplication/mutation_batch.h @@ -31,7 +31,7 @@ namespace dsn { namespace replication { - +class replica; class replica_duplicator; class mutation_buffer : public prepare_list @@ -57,15 +57,19 @@ class mutation_batch : replica_base explicit mutation_batch(replica_duplicator *r); + // Add mutations to prepare list. Only those who have been committed would be + // duplicated to the remote cluster. error_s add(mutation_ptr mu); + // Add the committed mutation to the loading list, which would be shipped to + // the remote cluster later. void add_mutation_if_valid(mutation_ptr &, decree start_decree); mutation_tuple_set move_all_mutations(); decree last_decree() const; - // mutations with decree < d will be ignored. + // Mutations with decree < d will be ignored. 
void set_start_decree(decree d); void reset_mutation_buffer(decree d); @@ -78,6 +82,8 @@ class mutation_batch : replica_base friend class replica_duplicator_test; friend class mutation_batch_test; + replica *_replica; + std::unique_ptr _mutation_buffer; mutation_tuple_set _loaded_mutations; decree _start_decree{invalid_decree}; diff --git a/src/replica/duplication/replica_duplicator.h b/src/replica/duplication/replica_duplicator.h index e9df7d7cb6..66f7ac7cec 100644 --- a/src/replica/duplication/replica_duplicator.h +++ b/src/replica/duplication/replica_duplicator.h @@ -170,6 +170,7 @@ class replica_duplicator : public replica_base, public pipeline::base friend class load_from_private_log_test; friend class ship_mutation_test; + friend class mutation_batch; friend class load_mutation; friend class ship_mutation; diff --git a/src/replica/duplication/test/duplication_test_base.h b/src/replica/duplication/test/duplication_test_base.h index eb914f38e0..69d935cc1d 100644 --- a/src/replica/duplication/test/duplication_test_base.h +++ b/src/replica/duplication/test/duplication_test_base.h @@ -76,13 +76,20 @@ class duplication_test_base : public replica_test_base return log_file_map; } - mutation_ptr create_test_mutation(int64_t decree, const std::string &data) override + mutation_ptr create_test_mutation(int64_t decree, + int64_t last_committed_decree, + const std::string &data) override { - auto mut = replica_test_base::create_test_mutation(decree, data); + auto mut = replica_test_base::create_test_mutation(decree, last_committed_decree, data); mut->data.updates[0].code = RPC_DUPLICATION_IDEMPOTENT_WRITE; // must be idempotent write return mut; } + mutation_ptr create_test_mutation(int64_t decree, const std::string &data) override + { + return duplication_test_base::create_test_mutation(decree, decree - 1, data); + } + void wait_all(const std::unique_ptr &dup) { dup->tracker()->wait_outstanding_tasks(); diff --git a/src/replica/duplication/test/mutation_batch_test.cpp 
b/src/replica/duplication/test/mutation_batch_test.cpp index 541531c5e1..dd5c277387 100644 --- a/src/replica/duplication/test/mutation_batch_test.cpp +++ b/src/replica/duplication/test/mutation_batch_test.cpp @@ -15,9 +15,11 @@ // specific language governing permissions and limitations // under the License. +#include #include -#include +#include #include +#include #include #include #include @@ -40,50 +42,102 @@ namespace replication { class mutation_batch_test : public duplication_test_base { public: - void - reset_buffer(const mutation_batch &batcher, const decree last_commit, decree start, decree end) + void reset_buffer(const decree last_commit, + const decree start, + const decree end, + mutation_batch &batcher) { batcher._mutation_buffer->reset(last_commit); batcher._mutation_buffer->_start_decree = start; batcher._mutation_buffer->_end_decree = end; } - void commit_buffer(const mutation_batch &batcher, const decree current_decree) + void commit_buffer(const decree current_decree, mutation_batch &batcher) { batcher._mutation_buffer->commit(current_decree, COMMIT_TO_DECREE_HARD); } + + void check_mutation_contents(const std::set &expected_mutations, + mutation_batch &batcher) + { + const auto all_mutations = batcher.move_all_mutations(); + + std::set actual_mutations; + std::transform(all_mutations.begin(), + all_mutations.end(), + std::inserter(actual_mutations, actual_mutations.end()), + [](const mutation_tuple &tuple) { return std::get<2>(tuple).to_string(); }); + + ASSERT_EQ(expected_mutations, actual_mutations); + } }; INSTANTIATE_TEST_SUITE_P(, mutation_batch_test, ::testing::Values(false, true)); -TEST_P(mutation_batch_test, add_mutation_if_valid) +TEST_P(mutation_batch_test, prepare_mutation) { auto duplicator = create_test_duplicator(0); mutation_batch batcher(duplicator.get()); - mutation_tuple_set result; + auto mu1 = create_test_mutation(1, 0, "first mutation"); + set_last_applied_decree(1); + ASSERT_TRUE(batcher.add(mu1)); + ASSERT_EQ(1, 
batcher.last_decree()); + + auto mu2 = create_test_mutation(2, 1, "abcde"); + set_last_applied_decree(2); + ASSERT_TRUE(batcher.add(mu2)); + ASSERT_EQ(2, batcher.last_decree()); + + auto mu3 = create_test_mutation(3, 2, "hello world"); + ASSERT_TRUE(batcher.add(mu3)); + + // The last decree has not been updated. + ASSERT_EQ(2, batcher.last_decree()); + + auto mu4 = create_test_mutation(4, 2, "foo bar"); + ASSERT_TRUE(batcher.add(mu4)); + ASSERT_EQ(2, batcher.last_decree()); + + // The committed mutation would be ignored. + auto mu2_another = create_test_mutation(2, 1, "another second mutation"); + ASSERT_TRUE(batcher.add(mu2_another)); + ASSERT_EQ(2, batcher.last_decree()); + + // The mutation with duplicate decree would be ignored. + auto mu3_another = create_test_mutation(3, 2, "123 xyz"); + ASSERT_TRUE(batcher.add(mu3_another)); + ASSERT_EQ(2, batcher.last_decree()); - std::string s = "hello"; - mutation_ptr mu1 = create_test_mutation(1, s); + auto mu5 = create_test_mutation(5, 2, "5th mutation"); + set_last_applied_decree(5); + ASSERT_TRUE(batcher.add(mu5)); + ASSERT_EQ(5, batcher.last_decree()); + + check_mutation_contents({"first mutation", "abcde", "hello world", "foo bar", "5th mutation"}, + batcher); +} + +TEST_P(mutation_batch_test, add_mutation_if_valid) +{ + auto duplicator = create_test_duplicator(0); + mutation_batch batcher(duplicator.get()); + + auto mu1 = create_test_mutation(1, "hello"); batcher.add_mutation_if_valid(mu1, 0); - result = batcher.move_all_mutations(); - mutation_tuple mt1 = *result.begin(); + check_mutation_contents({"hello"}, batcher); - s = "world"; - mutation_ptr mu2 = create_test_mutation(2, s); + auto mu2 = create_test_mutation(2, "world"); batcher.add_mutation_if_valid(mu2, 0); - result = batcher.move_all_mutations(); - mutation_tuple mt2 = *result.begin(); - - ASSERT_EQ(std::get<2>(mt1).to_string(), "hello"); - ASSERT_EQ(std::get<2>(mt2).to_string(), "world"); + check_mutation_contents({"world"}, batcher); - // decree 1 
should be ignored - mutation_ptr mu3 = create_test_mutation(1, s); + // mu1 would be ignored, since its decree is less than the start decree. + batcher.add_mutation_if_valid(mu1, 2); batcher.add_mutation_if_valid(mu2, 2); + + auto mu3 = create_test_mutation(1, "hi"); batcher.add_mutation_if_valid(mu3, 1); - result = batcher.move_all_mutations(); - ASSERT_EQ(result.size(), 2); + check_mutation_contents({"hi", "world"}, batcher); } TEST_P(mutation_batch_test, ignore_non_idempotent_write) @@ -91,23 +145,23 @@ TEST_P(mutation_batch_test, ignore_non_idempotent_write) auto duplicator = create_test_duplicator(0); mutation_batch batcher(duplicator.get()); - std::string s = "hello"; - mutation_ptr mu = create_test_mutation(1, s); + auto mu = create_test_mutation(1, "hello"); mu->data.updates[0].code = RPC_DUPLICATION_NON_IDEMPOTENT_WRITE; batcher.add_mutation_if_valid(mu, 0); - mutation_tuple_set result = batcher.move_all_mutations(); - ASSERT_EQ(result.size(), 0); + check_mutation_contents({}, batcher); } TEST_P(mutation_batch_test, mutation_buffer_commit) { auto duplicator = create_test_duplicator(0); mutation_batch batcher(duplicator.get()); - // mock mutation_buffer[last=10, start=15, end=20], last + 1(next commit decree) is out of - // [start~end] - reset_buffer(batcher, 10, 15, 20); - commit_buffer(batcher, 15); - ASSERT_EQ(batcher.last_decree(), 14); + + // Mock mutation_buffer[last=10, start=15, end=20], last + 1(next commit decree) is out of + // [start~end], then last would become min_decree() - 1, see mutation_buffer::commit() for + // details. 
+ reset_buffer(10, 15, 20, batcher); + commit_buffer(15, batcher); + ASSERT_EQ(14, batcher.last_decree()); } } // namespace replication diff --git a/src/replica/mutation_log.cpp b/src/replica/mutation_log.cpp index 84a1e5be37..20592fc477 100644 --- a/src/replica/mutation_log.cpp +++ b/src/replica/mutation_log.cpp @@ -243,6 +243,7 @@ void mutation_log_private::write_pending_mutations(bool release_lock_required) // move or reset pending variables std::shared_ptr pending = std::move(_pending_write); _issued_write = pending; + decree max_decree = _pending_write_max_decree; decree max_commit = _pending_write_max_commit; _pending_write_max_commit = 0; _pending_write_max_decree = 0; @@ -250,11 +251,12 @@ void mutation_log_private::write_pending_mutations(bool release_lock_required) // Free plog from lock during committing log block, in the meantime // new mutations can still be appended. _plock.unlock(); - commit_pending_mutations(pr.first, pending, max_commit); + commit_pending_mutations(pr.first, pending, max_decree, max_commit); } void mutation_log_private::commit_pending_mutations(log_file_ptr &lf, std::shared_ptr &pending, + decree max_decree, decree max_commit) { if (dsn_unlikely(FLAGS_enable_latency_tracer)) { @@ -263,64 +265,66 @@ void mutation_log_private::commit_pending_mutations(log_file_ptr &lf, } } - lf->commit_log_blocks(*pending, - LPC_WRITE_REPLICATION_LOG_PRIVATE, - &_tracker, - [this, lf, pending, max_commit](error_code err, size_t sz) mutable { - CHECK(_is_writing.load(std::memory_order_relaxed), ""); - - for (auto &block : pending->all_blocks()) { - auto hdr = (log_block_header *)block.front().data(); - CHECK_EQ(hdr->magic, 0xdeadbeef); - } - - if (dsn_unlikely(FLAGS_enable_latency_tracer)) { - for (const auto &mu : pending->mutations()) { - ADD_CUSTOM_POINT(mu->_tracer, "commit_pending_completed"); - } - } - - // notify the callbacks - // ATTENTION: callback may be called before this code block executed - // done. 
- for (auto &c : pending->callbacks()) { - c->enqueue(err, sz); - } - - if (err != ERR_OK) { - LOG_ERROR("write private log failed, err = {}", err); - _is_writing.store(false, std::memory_order_relaxed); - if (_io_error_callback) { - _io_error_callback(err); - } - return; - } - CHECK_EQ(sz, pending->size()); - - // flush to ensure that there is no gap between private log and - // in-memory buffer - // so that we can get all mutations in learning process. - // - // FIXME : the file could have been closed - if (FLAGS_plog_force_flush) { - lf->flush(); - } - - // update _private_max_commit_on_disk after written into log file done - update_max_commit_on_disk(max_commit); - - _is_writing.store(false, std::memory_order_relaxed); - - // start to write if possible - _plock.lock(); - - if (!_is_writing.load(std::memory_order_acquire) && _pending_write) { - write_pending_mutations(true); - } else { - _plock.unlock(); - } - }, - get_gpid().thread_hash()); + lf->commit_log_blocks( + *pending, + LPC_WRITE_REPLICATION_LOG_PRIVATE, + &_tracker, + [this, lf, pending, max_decree, max_commit](error_code err, size_t sz) mutable { + CHECK(_is_writing.load(std::memory_order_relaxed), ""); + + for (auto &block : pending->all_blocks()) { + auto hdr = (log_block_header *)block.front().data(); + CHECK_EQ(hdr->magic, 0xdeadbeef); + } + + if (dsn_unlikely(FLAGS_enable_latency_tracer)) { + for (const auto &mu : pending->mutations()) { + ADD_CUSTOM_POINT(mu->_tracer, "commit_pending_completed"); + } + } + + // notify the callbacks + // ATTENTION: callback may be called before this code block executed + // done. 
+ for (auto &c : pending->callbacks()) { + c->enqueue(err, sz); + } + + if (err != ERR_OK) { + LOG_ERROR("write private log failed, err = {}", err); + _is_writing.store(false, std::memory_order_relaxed); + if (_io_error_callback) { + _io_error_callback(err); + } + return; + } + CHECK_EQ(sz, pending->size()); + + // flush to ensure that there is no gap between private log and + // in-memory buffer + // so that we can get all mutations in learning process. + // + // FIXME : the file could have been closed + if (FLAGS_plog_force_flush) { + lf->flush(); + } + + // Update both _plog_max_decree_on_disk and _plog_max_commit_on_disk + // after written into log file done. + update_max_decree_on_disk(max_decree, max_commit); + + _is_writing.store(false, std::memory_order_relaxed); + + // start to write if possible + _plock.lock(); + + if (!_is_writing.load(std::memory_order_acquire) && _pending_write) { + write_pending_mutations(true); + } else { + _plock.unlock(); + } + }, + get_gpid().thread_hash()); } /////////////////////////////////////////////////////////////// @@ -355,7 +359,8 @@ void mutation_log::init_states() // replica states _private_log_info = {0, 0}; - _private_max_commit_on_disk = 0; + _plog_max_decree_on_disk = 0; + _plog_max_commit_on_disk = 0; } error_code mutation_log::open(replay_callback read_callback, @@ -522,6 +527,7 @@ error_code mutation_log::open(replay_callback read_callback, if (ret) { this->update_max_decree_no_lock(mu->data.header.pid, mu->data.header.decree); if (this->_is_private) { + this->update_max_decree_on_disk_no_lock(mu->data.header.decree); this->update_max_commit_on_disk_no_lock(mu->data.header.last_committed_decree); } } @@ -702,11 +708,18 @@ decree mutation_log::max_decree(gpid gpid) const return _private_log_info.max_decree; } +decree mutation_log::max_decree_on_disk() const +{ + zauto_lock l(_lock); + CHECK(_is_private, "this method is only valid for private logs"); + return _plog_max_decree_on_disk; +} + decree 
mutation_log::max_commit_on_disk() const { zauto_lock l(_lock); CHECK(_is_private, "this method is only valid for private logs"); - return _private_max_commit_on_disk; + return _plog_max_commit_on_disk; } decree mutation_log::max_gced_decree(gpid gpid) const @@ -862,17 +875,26 @@ void mutation_log::update_max_decree_no_lock(gpid gpid, decree d) } } -void mutation_log::update_max_commit_on_disk(decree d) +void mutation_log::update_max_decree_on_disk(decree max_decree, decree max_commit) { zauto_lock l(_lock); - update_max_commit_on_disk_no_lock(d); + update_max_decree_on_disk_no_lock(max_decree); + update_max_commit_on_disk_no_lock(max_commit); +} + +void mutation_log::update_max_decree_on_disk_no_lock(decree d) +{ + CHECK(_is_private, "this method is only valid for private logs"); + if (d > _plog_max_decree_on_disk) { + _plog_max_decree_on_disk = d; + } } void mutation_log::update_max_commit_on_disk_no_lock(decree d) { CHECK(_is_private, "this method is only valid for private logs"); - if (d > _private_max_commit_on_disk) { - _private_max_commit_on_disk = d; + if (d > _plog_max_commit_on_disk) { + _plog_max_commit_on_disk = d; } } diff --git a/src/replica/mutation_log.h b/src/replica/mutation_log.h index c4ce671ecb..8de9e23bf6 100644 --- a/src/replica/mutation_log.h +++ b/src/replica/mutation_log.h @@ -76,9 +76,10 @@ class mutation_log : public ref_counter typedef std::function io_failure_callback; public: - // append a log mutation - // return value: nullptr for error - // thread safe + // Append a log mutation. + // Return value: nullptr for error. + // + // Thread safe. 
virtual ::dsn::task_ptr append(mutation_ptr &mu, dsn::task_code callback_code, dsn::task_tracker *tracker, @@ -86,34 +87,37 @@ class mutation_log : public ref_counter int hash = 0, int64_t *pending_size = nullptr) = 0; - // get learn state in memory, including pending and writing mutations - // return true if some data is filled into writer - // return false if no data is filled into writer - // thread safe + // Get learn state in memory, including pending and writing mutations: + // - return true if some data is filled into writer + // - return false if no data is filled into writer + // + // Thread safe virtual bool get_learn_state_in_memory(decree start_decree, binary_writer &writer) const { return false; } - // only for private log - // get in-memory mutations, including pending and writing mutations + // Only for private log. + // get in-memory mutations, including pending and writing mutations. virtual void get_in_memory_mutations(decree start_decree, ballot current_ballot, /*out*/ std::vector &mutations_list) const { } - // flush the pending buffer until all data is on disk - // thread safe + // Flush the pending buffer until all data is on disk. + // + // Thread safe. virtual void flush() = 0; - // flush the pending buffer at most once - // thread safe + // Flush the pending buffer at most once. + // + // Thread safe. virtual void flush_once() = 0; public: // - // ctors + // Ctors // when is_private = true, should specify "private_gpid" // mutation_log(const std::string &dir, int32_t max_log_file_mb, gpid gpid, replica *r = nullptr); @@ -121,22 +125,24 @@ class mutation_log : public ref_counter virtual ~mutation_log() = default; // - // initialization + // Initialization // - // open and replay - // returns ERR_OK if succeed - // not thread safe, but only be called when init + // Open and replay. + // return ERR_OK if succeed. + // Not thread safe, but only be called when init. 
error_code open(replay_callback read_callback, io_failure_callback write_error_callback); error_code open(replay_callback read_callback, io_failure_callback write_error_callback, const std::map &replay_condition); - // close the log - // thread safe + + // Close the log. + // + // Thread safe. void close(); // - // replay + // Replay. // static error_code replay(std::vector &log_files, replay_callback callback, @@ -173,55 +179,61 @@ class mutation_log : public ref_counter error_code reset_from(const std::string &dir, replay_callback, io_failure_callback); // - // maintain max_decree & valid_start_offset + // Maintain max_decree & valid_start_offset // - // when open a exist replica, need to set valid_start_offset on open - // thread safe + // valid_start_offset is needed to be set while opening an existing replica. + // + // Thread safe. void set_valid_start_offset_on_open(gpid gpid, int64_t valid_start_offset); - // when create a new replica, need to reset current max decree - // returns current global end offset, needs to be remebered by caller for gc usage - // thread safe + // Current max decree is needed to be reset, while creating a new replica. + // Return current global end offset, should be remebered by caller for gc usage. + // + // Thread safe. int64_t on_partition_reset(gpid gpid, decree max_decree); - // update current max decree - // thread safe + // Update current max decree. + // + // Thread safe. void update_max_decree(gpid gpid, decree d); - // update current max commit of private log - // thread safe - void update_max_commit_on_disk(decree d); + // Update current max decree and committed decree that have ever been written onto disk + // for plog. + // + // Thread safe. 
+ void update_max_decree_on_disk(decree max_decree, decree max_commit); // - // garbage collection logs that are already covered by + // Garbage collection logs that are already covered by // durable state on disk, return deleted log segment count // - // garbage collection for private log, returns removed file count. - // can remove log files if satisfy all the conditions: + // Garbage collection for private log, returns removed file count. + // + // Log files could be removed once all the following conditions are satisfied: // - the file is not the current log file // - the file is not covered by reserve_max_size or reserve_max_time // - file.max_decree <= "durable_decree" || file.end_offset <= "valid_start_offset" - // that means, should reserve files if satisfy one of the conditions: + // which means, files should be reserved if one of the conditions is satisfied: // - the file is the current log file // - the file is covered by both reserve_max_size and reserve_max_time // - file.max_decree > "durable_decree" && file.end_offset > "valid_start_offset" - // thread safe + // + // Thread safe. int garbage_collection(gpid gpid, decree durable_decree, int64_t valid_start_offset, int64_t reserve_max_size, int64_t reserve_max_time); - // - // when this is a private log, log files are learned by remote replicas - // return true if private log surely covers the learning range - // + // When this is a private log, log files are learned by remote replicas + // return true if private log surely covers the learning range. bool get_learn_state(gpid gpid, decree start, /*out*/ learn_state &state) const; - // only valid for private log. - // get parent mutations in memory and private log files during partition split. + // Only valid for private log. + // + // Get parent mutations in memory and private log files during partition split. // `total_file_size` is used for the metrics of partition split. 
void get_parent_mutations_and_logs(gpid pid, decree start_decree, @@ -231,23 +243,28 @@ class mutation_log : public ref_counter /*out*/ uint64_t &total_file_size) const; // - // other inquiry routines + // Other inquiry routines // - // log dir - // thread safe (because nerver changed) + // Get log dir. + // + // Thread safe (because nerver changed). const std::string &dir() const { return _dir; } - // replica - replica *owner_replica() const { return _owner_replica; } - - // get current max decree for gpid - // returns 0 if not found - // thread safe + // Get current max decree for gpid. + // Return 0 if not found. + // + // Thread safe. decree max_decree(gpid gpid) const; - // get current max commit on disk of private log. - // thread safe + // Get current max decree on disk for plog. + // + // Thread safe. + decree max_decree_on_disk() const; + + // Get current max committed decree on disk for plog. + // + // Thread safe. decree max_commit_on_disk() const; // Decree of the maximum garbage-collected mutation. @@ -260,7 +277,7 @@ class mutation_log : public ref_counter // than the others, the max_gced_decree = 9. // Returns `invalid_decree` when plog directory is empty. // - // thread-safe & private log only + // Thread safe & private log only. decree max_gced_decree(gpid gpid) const; decree max_gced_decree_no_lock(gpid gpid) const; @@ -269,11 +286,14 @@ class mutation_log : public ref_counter // thread-safe log_file_map_by_index get_log_file_map() const; - // check the consistence of valid_start_offset - // thread safe + // Check the consistence of valid_start_offset + // + // Thread safe. void check_valid_start_offset(gpid gpid, int64_t valid_start_offset) const; - // get total size. + // Get the total size. + // + // Thread safe. 
int64_t total_size() const; void hint_switch_file() { _switch_file_hint = true; } @@ -282,20 +302,22 @@ class mutation_log : public ref_counter task_tracker *tracker() { return &_tracker; } protected: - // thread-safe // 'size' is data size to write; the '_global_end_offset' will be updated by 'size'. // can switch file only when create_new_log_if_needed = true; // return pair: the first is target file to write; the second is the global offset to start - // write + // write. + // + // Thread safe. std::pair mark_new_offset(size_t size, bool create_new_log_if_needed); - // thread-safe + + // Thread safe. int64_t get_global_offset() const { zauto_lock l(_lock); return _global_end_offset; } - // init memory states + // Init memory states. virtual void init_states(); private: @@ -310,10 +332,13 @@ class mutation_log : public ref_counter replay_callback callback, /*out*/ int64_t &end_offset); - // update max decree without lock + // Update max decree without lock. void update_max_decree_no_lock(gpid gpid, decree d); - // update max commit on disk without lock + // Update max decree on disk without lock. + void update_max_decree_on_disk_no_lock(decree d); + + // Update max committed decree on disk without lock. void update_max_commit_on_disk_no_lock(decree d); // create new log file and set it as the current log file @@ -323,7 +348,7 @@ class mutation_log : public ref_counter // - _lock.locked() error_code create_new_log_file(); - // get total size ithout lock. + // Get total size without lock. 
int64_t total_size_no_lock() const; protected: @@ -367,11 +392,16 @@ class mutation_log : public ref_counter // replica log info for private log replica_log_info _private_log_info; - decree - _private_max_commit_on_disk; // the max last_committed_decree of written mutations up to now - // used for limiting garbage collection of shared log, because - // the ending of private log should be covered by shared log + + // The max decree of the mutations that have ever been written onto the disk for plog. + decree _plog_max_decree_on_disk; + + // The max decree of the committed mutations that have ever been written onto the disk + // for plog. Since it is set with mutation.data.header.last_committed_decree, it must + // be less than _plog_max_decree_on_disk. + decree _plog_max_commit_on_disk; }; + typedef dsn::ref_ptr mutation_log_ptr; class mutation_log_private : public mutation_log, private replica_base @@ -418,6 +448,7 @@ class mutation_log_private : public mutation_log, private replica_base void commit_pending_mutations(log_file_ptr &lf, std::shared_ptr &pending, + decree max_decree, decree max_commit); void init_states() override; diff --git a/src/replica/replica.h b/src/replica/replica.h index ae0118dc05..12e505dcde 100644 --- a/src/replica/replica.h +++ b/src/replica/replica.h @@ -243,6 +243,7 @@ class replica : public serverlet, public ref_counter, public replica_ba JSON_ENCODE_OBJ(writer, max_prepared_decree, max_prepared_decree()); JSON_ENCODE_OBJ(writer, max_plog_decree, _private_log->max_decree(get_gpid())); + JSON_ENCODE_OBJ(writer, max_plog_decree_on_disk, _private_log->max_decree_on_disk()); JSON_ENCODE_OBJ(writer, max_plog_commit_on_disk, _private_log->max_commit_on_disk()); JSON_ENCODE_OBJ(writer, last_committed_decree, last_committed_decree()); JSON_ENCODE_OBJ(writer, last_applied_decree, last_applied_decree()); diff --git a/src/replica/test/mutation_log_test.cpp b/src/replica/test/mutation_log_test.cpp index 5d2f339b7a..ef79601580 100644 --- 
a/src/replica/test/mutation_log_test.cpp +++ b/src/replica/test/mutation_log_test.cpp @@ -291,13 +291,15 @@ class mutation_log_test : public replica_test_base void TearDown() override { utils::filesystem::remove_path(_log_dir); } - mutation_ptr create_test_mutation(decree d, const std::string &data) override + mutation_ptr create_test_mutation(int64_t decree, + int64_t last_committed_decree, + const std::string &data) override { mutation_ptr mu(new mutation()); mu->data.header.ballot = 1; - mu->data.header.decree = d; + mu->data.header.decree = decree; mu->data.header.pid = get_gpid(); - mu->data.header.last_committed_decree = d - 1; + mu->data.header.last_committed_decree = last_committed_decree; mu->data.header.log_offset = 0; binary_writer writer; @@ -313,6 +315,11 @@ class mutation_log_test : public replica_test_base return mu; } + mutation_ptr create_test_mutation(int64_t decree, const std::string &data) override + { + return mutation_log_test::create_test_mutation(decree, decree - 1, data); + } + static void ASSERT_BLOB_EQ(const blob &lhs, const blob &rhs) { ASSERT_EQ(std::string(lhs.data(), lhs.length()), std::string(rhs.data(), rhs.length())); diff --git a/src/replica/test/replica_test_base.h b/src/replica/test/replica_test_base.h index 9296e2a4e0..0fe479d8a7 100644 --- a/src/replica/test/replica_test_base.h +++ b/src/replica/test/replica_test_base.h @@ -61,13 +61,14 @@ class replica_test_base : public replica_stub_test_base _log_dir = _replica->dir(); } - virtual mutation_ptr create_test_mutation(int64_t decree, const std::string &data) + virtual mutation_ptr + create_test_mutation(int64_t decree, int64_t last_committed_decree, const std::string &data) { mutation_ptr mu(new mutation()); mu->data.header.ballot = 1; mu->data.header.decree = decree; mu->data.header.pid = _replica->get_gpid(); - mu->data.header.last_committed_decree = decree - 1; + mu->data.header.last_committed_decree = last_committed_decree; mu->data.header.log_offset = 0; 
mu->data.header.timestamp = decree; @@ -84,7 +85,14 @@ class replica_test_base : public replica_stub_test_base return mu; } + virtual mutation_ptr create_test_mutation(int64_t decree, const std::string &data) + { + return replica_test_base::create_test_mutation(decree, decree - 1, data); + } + gpid get_gpid() const { return _replica->get_gpid(); } + + void set_last_applied_decree(decree d) { _replica->set_app_last_committed_decree(d); } }; } // namespace replication From 7c1a40d87939a3c8f3609f9c9984b82edc897add Mon Sep 17 00:00:00 2001 From: "shalk(xiao kun)" Date: Thu, 27 Jun 2024 18:16:56 +0800 Subject: [PATCH 12/29] feat(java-client): use thrift-maven-plugin to generate thrift sources instead of script (#1997) https://github.com/apache/incubator-pegasus/issues/1664 Add thrift-maven-plugin by referencing following links: - https://issues.apache.org/jira/browse/THRIFT-1536 - https://github.com/apache/parquet-java/pull/600 --- .../workflows/lint_and_test_java-client.yml | 4 +-- java-client/README.md | 2 +- java-client/pom.xml | 31 +++++++++++++++++++ ...recompile_thrift.sh => download_thrift.sh} | 17 ---------- 4 files changed, 34 insertions(+), 20 deletions(-) rename java-client/scripts/{recompile_thrift.sh => download_thrift.sh} (73%) diff --git a/.github/workflows/lint_and_test_java-client.yml b/.github/workflows/lint_and_test_java-client.yml index a11ebb5ee4..b71d1e021e 100644 --- a/.github/workflows/lint_and_test_java-client.yml +++ b/.github/workflows/lint_and_test_java-client.yml @@ -98,9 +98,9 @@ jobs: export LD_LIBRARY_PATH=$(pwd)/thirdparty/output/lib:${JAVA_HOME}/jre/lib/amd64/server ulimit -s unlimited ./run.sh start_onebox - - name: Recompile thrift + - name: Download thrift working-directory: ./java-client/scripts - run: ./recompile_thrift.sh + run: ./download_thrift.sh - name: Run Java client tests working-directory: ./java-client run: mvn test --no-transfer-progress diff --git a/java-client/README.md b/java-client/README.md index 
1faf9a9a7e..9cfdb9a703 100644 --- a/java-client/README.md +++ b/java-client/README.md @@ -24,7 +24,7 @@ under the License. ### 1. Prepare ``` -cd scripts && bash recompile_thrift.sh +cd scripts && bash download_thrift.sh ``` ### 2. Format the code diff --git a/java-client/pom.xml b/java-client/pom.xml index 2a23c3e9a8..14afe22508 100644 --- a/java-client/pom.xml +++ b/java-client/pom.xml @@ -69,6 +69,7 @@ 4.1.85.Final 1.3.7-1 0.11.0 + 0.10.0 1.3.2 3.7.2 none @@ -470,6 +471,36 @@ + + org.apache.thrift + thrift-maven-plugin + ${thrift-maven-plugin.version} + + ${project.basedir}/../idl + + backup.thrift + bulk_load.thrift + dsn.layer2.thrift + duplication.thrift + metadata.thrift + meta_admin.thrift + partition_split.thrift + rrdb.thrift + security.thrift + + thrift + ${project.basedir}/src/main/java + + + + thrift-sources + + compile + + generate-sources + + + ${project.basedir}/src/main/java ${project.basedir}/src/test/java diff --git a/java-client/scripts/recompile_thrift.sh b/java-client/scripts/download_thrift.sh similarity index 73% rename from java-client/scripts/recompile_thrift.sh rename to java-client/scripts/download_thrift.sh index 6efcba8fae..ea5e4f6ae7 100755 --- a/java-client/scripts/recompile_thrift.sh +++ b/java-client/scripts/download_thrift.sh @@ -48,21 +48,4 @@ if ! 
$thrift -version | grep "0.11.0" ; then exit 1 fi -TMP_DIR=./gen-java -rm -rf $TMP_DIR - -mkdir -p $TMP_DIR -$thrift --gen java ../../idl/backup.thrift -$thrift --gen java ../../idl/bulk_load.thrift -$thrift --gen java ../../idl/dsn.layer2.thrift -$thrift --gen java ../../idl/duplication.thrift -$thrift --gen java ../../idl/metadata.thrift -$thrift --gen java ../../idl/meta_admin.thrift -$thrift --gen java ../../idl/partition_split.thrift -$thrift --gen java ../../idl/rrdb.thrift -$thrift --gen java ../../idl/security.thrift - -cp -v -r $TMP_DIR/* ../src/main/java/ -rm -rf $TMP_DIR - echo "done" From 25e3d75249e67da4a24c5a7ac5d030d552fc1345 Mon Sep 17 00:00:00 2001 From: Samunroyu <36890229+Samunroyu@users.noreply.github.com> Date: Mon, 1 Jul 2024 11:04:25 +0800 Subject: [PATCH 13/29] chore(run.sh): link pegasus_shell only if it doesn't exist (#2053) Using run.sh to start pegasus shell will create a symbolic link every time. But in a docker production environment, it can't create a symbolic link on the read-only filesystem of the container. So when the symbolic link already exists, we should not create it again. --- run.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/run.sh b/run.sh index 7e1b413213..dfe4f7ea98 100755 --- a/run.sh +++ b/run.sh @@ -1766,7 +1766,9 @@ function run_shell() cd ${ROOT} if [ -f ${ROOT}/bin/pegasus_shell/pegasus_shell ]; then # The pegasus_shell was packaged by pack_tools, to be used on production environment. - ln -s -f ${ROOT}/bin/pegasus_shell/pegasus_shell + if test ! -f ./pegasus_shell; then + ln -s -f ${ROOT}/bin/pegasus_shell/pegasus_shell + fi elif [ -f ${BUILD_LATEST_DIR}/output/bin/pegasus_shell/pegasus_shell ]; then # The pegasus_shell was built locally, to be used for test on development environment.
ln -s -f ${BUILD_LATEST_DIR}/output/bin/pegasus_shell/pegasus_shell From 2f3457c7764e5080a90a50872c27b654a55860ef Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Wed, 3 Jul 2024 20:24:37 +0800 Subject: [PATCH 14/29] fix: Fix a typo in log when [network].enable_udp is disabled (#2055) --- src/runtime/task/task_spec.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/runtime/task/task_spec.cpp b/src/runtime/task/task_spec.cpp index 044fbe5c1b..f31b3598da 100644 --- a/src/runtime/task/task_spec.cpp +++ b/src/runtime/task/task_spec.cpp @@ -231,8 +231,8 @@ bool task_spec::init() } if (spec->rpc_call_channel == RPC_CHANNEL_UDP && !FLAGS_enable_udp) { - LOG_ERROR("task rpc_call_channel RPC_CHANNEL_UCP need udp service, make sure " - "[network].enable_udp"); + LOG_ERROR("task rpc_call_channel RPC_CHANNEL_UDP need udp service, make sure " - "[network].enable_udp is enabled"); return false; } } From e267c86dbfbb34d0968bafe913aa98b67d1446b4 Mon Sep 17 00:00:00 2001 From: Samunroyu <36890229+Samunroyu@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:33:36 +0800 Subject: [PATCH 15/29] fix(scripts): downgrade_node does not work with set -e (#2056) The downgrade_node script is usually used when scaling down replica servers. Its implementation relies on wildcard matching against shell output. - The character matching does not succeed for every single line of the shell output, so adding "set -e" makes the script exit with 1 and report failure. - Also fix a shell grammar problem. --- scripts/downgrade_node.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/downgrade_node.sh b/scripts/downgrade_node.sh index 3a405876cd..5d8c59c541 100755 --- a/scripts/downgrade_node.sh +++ b/scripts/downgrade_node.sh @@ -16,8 +16,6 @@ # specific language governing permissions and limitations # under the License.
-set -e - PID=$$ function usage() @@ -63,7 +61,7 @@ echo "UID=$UID" echo "PID=$PID" echo -if [ [ "$cluster" != "" ]; then +if [ "$cluster" != "" ]; then echo "set_meta_level steady" | ./run.sh shell --cluster $cluster &>/tmp/$UID.$PID.pegasus.set_meta_level echo ls | ./run.sh shell --cluster $cluster &>/tmp/$UID.$PID.pegasus.ls else From 6cf8879e8fa0e9ce894992ad7977ea35de3f269c Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Thu, 4 Jul 2024 15:42:04 +0800 Subject: [PATCH 16/29] feat(backup): Add --force option for 'disable_backup_policy' shell command (#2057) Before this patch, once a backup policy is added and enabled, it's impossible to disable it when a new job of the policy is starting, even if there are some reasons block the job to complete. This patch add a new flag '-f|--force' to disable the policy by force, then it's possible to stop the job after restarting the servers. --- idl/backup.thrift | 7 +++- src/client/replication_ddl_client.cpp | 4 ++- src/client/replication_ddl_client.h | 2 +- src/meta/meta_backup_service.cpp | 13 +++++-- src/shell/commands.h | 1 + src/shell/commands/cold_backup.cpp | 51 +++++++++++++-------------- src/shell/main.cpp | 2 +- 7 files changed, 46 insertions(+), 34 deletions(-) diff --git a/idl/backup.thrift b/idl/backup.thrift index 2fdeded174..a73fa12e12 100644 --- a/idl/backup.thrift +++ b/idl/backup.thrift @@ -77,7 +77,12 @@ struct configuration_modify_backup_policy_request 4:optional i64 new_backup_interval_sec; 5:optional i32 backup_history_count_to_keep; 6:optional bool is_disable; - 7:optional string start_time; // restrict the start time of each backup, hour:minute + + // Restrict the start time of each backup, in the form of 'hh:mm', for example '02:05'. + 7:optional string start_time; + + // Force disable the policy, even if the policy is in during backup. 
+ 8:optional bool force_disable; } struct configuration_modify_backup_policy_response diff --git a/src/client/replication_ddl_client.cpp b/src/client/replication_ddl_client.cpp index d2086e8fbd..c0d82ad67e 100644 --- a/src/client/replication_ddl_client.cpp +++ b/src/client/replication_ddl_client.cpp @@ -1079,11 +1079,13 @@ error_with replication_ddl_client::query_backup(in return call_rpc_sync(query_backup_status_rpc(std::move(req), RPC_CM_QUERY_BACKUP_STATUS)); } -dsn::error_code replication_ddl_client::disable_backup_policy(const std::string &policy_name) +dsn::error_code replication_ddl_client::disable_backup_policy(const std::string &policy_name, + bool force) { auto req = std::make_shared(); req->policy_name = policy_name; req->__set_is_disable(true); + req->__set_force_disable(force); auto resp_task = request_meta(RPC_CM_MODIFY_BACKUP_POLICY, req); diff --git a/src/client/replication_ddl_client.h b/src/client/replication_ddl_client.h index b36b2cf325..4e91636cc1 100644 --- a/src/client/replication_ddl_client.h +++ b/src/client/replication_ddl_client.h @@ -181,7 +181,7 @@ class replication_ddl_client dsn::error_code ls_backup_policy(bool json); - dsn::error_code disable_backup_policy(const std::string &policy_name); + dsn::error_code disable_backup_policy(const std::string &policy_name, bool force); dsn::error_code enable_backup_policy(const std::string &policy_name); diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index 6423e40b1a..95a53d70e0 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -1634,9 +1634,16 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc if (request.__isset.is_disable) { if (request.is_disable) { if (is_under_backup) { - LOG_INFO("{}: policy is under backuping, not allow to disable", - cur_policy.policy_name); - response.err = ERR_BUSY; + if (request.__isset.force_disable && request.force_disable) { + LOG_INFO("{}: policy is under 
backuping, force to disable", + cur_policy.policy_name); + cur_policy.is_disable = true; + have_modify_policy = true; + } else { + LOG_INFO("{}: policy is under backuping, not allow to disable", + cur_policy.policy_name); + response.err = ERR_BUSY; + } } else if (!cur_policy.is_disable) { LOG_INFO("{}: policy is marked to disable", cur_policy.policy_name); cur_policy.is_disable = true; diff --git a/src/shell/commands.h b/src/shell/commands.h index 24754aa84d..3ea3c132c0 100644 --- a/src/shell/commands.h +++ b/src/shell/commands.h @@ -229,6 +229,7 @@ bool ls_backup_policy(command_executor *e, shell_context *sc, arguments args); bool modify_backup_policy(command_executor *e, shell_context *sc, arguments args); +extern const std::string disable_backup_policy_help; bool disable_backup_policy(command_executor *e, shell_context *sc, arguments args); bool enable_backup_policy(command_executor *e, shell_context *sc, arguments args); diff --git a/src/shell/commands/cold_backup.cpp b/src/shell/commands/cold_backup.cpp index 35cc2ff2e7..0a154d4616 100644 --- a/src/shell/commands/cold_backup.cpp +++ b/src/shell/commands/cold_backup.cpp @@ -20,6 +20,7 @@ // IWYU pragma: no_include #include #include +// IWYU pragma: no_include #include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include #include "client/replication_ddl_client.h" @@ -162,7 +164,7 @@ bool query_backup_policy(command_executor *e, shell_context *sc, arguments args) const std::string query_backup_policy_help = "<-p|--policy_name> [-b|--backup_info_cnt] [-j|--json]"; argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); - RETURN_FALSE_IF_NOT(cmd.params().size() >= 1, + RETURN_FALSE_IF_NOT(!cmd.params().empty(), "invalid command, should be in the form of '{}'", query_backup_policy_help); @@ -303,37 +305,32 @@ bool modify_backup_policy(command_executor *e, shell_context *sc, arguments args return true; } +const std::string disable_backup_policy_help = 
"<-p|--policy_name str> [-f|--force]"; bool disable_backup_policy(command_executor *e, shell_context *sc, arguments args) { - static struct option long_options[] = {{"policy_name", required_argument, 0, 'p'}, - {0, 0, 0, 0}}; + const argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); + // TODO(yingchun): make the following code as a function. + RETURN_FALSE_IF_NOT(cmd.pos_args().size() == 1 && cmd.pos_args()[0] == "disable_backup_policy", + "invalid command, should be in the form of '{}'", + disable_backup_policy_help); + RETURN_FALSE_IF_NOT(cmd.flags().empty() || + (cmd.flags().size() == 1 && + (cmd.flags().count("force") == 1 || cmd.flags().count("f") == 1)), + "invalid command, should be in the form of '{}'", + disable_backup_policy_help); + RETURN_FALSE_IF_NOT(cmd.params().size() == 1 && (cmd.params().begin()->first == "policy_name" || + cmd.params().begin()->first == "p"), + "invalid command, should be in the form of '{}'", + disable_backup_policy_help); - std::string policy_name; - optind = 0; - while (true) { - int option_index = 0; - int c; - c = getopt_long(args.argc, args.argv, "p:", long_options, &option_index); - if (c == -1) - break; - switch (c) { - case 'p': - policy_name = optarg; - break; - default: - return false; - } - } + const std::string policy_name = cmd({"-p", "--policy_name"}).str(); + RETURN_FALSE_IF_NOT(!policy_name.empty(), "invalid command, policy_name should not be empty"); - if (policy_name.empty()) { - fprintf(stderr, "empty policy name\n"); - return false; - } + const bool force = cmd[{"-f", "--force"}]; - ::dsn::error_code ret = sc->ddl_client->disable_backup_policy(policy_name); - if (ret != dsn::ERR_OK) { - fprintf(stderr, "disable backup policy failed, with err = %s\n", ret.to_string()); - } + const auto ret = sc->ddl_client->disable_backup_policy(policy_name, force); + RETURN_FALSE_IF_NOT( + ret == dsn::ERR_OK, "disable backup policy failed, with err = {}", ret.to_string()); return true; } diff 
--git a/src/shell/main.cpp b/src/shell/main.cpp index 34bddc4c65..cb333a1984 100644 --- a/src/shell/main.cpp +++ b/src/shell/main.cpp @@ -436,7 +436,7 @@ static command_executor commands[] = { { "disable_backup_policy", "stop policy continue backup", - "<-p|--policy_name str>", + disable_backup_policy_help.c_str(), disable_backup_policy, }, { From b9853e4f48f3780c43559cedfe935ed472277c28 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Thu, 4 Jul 2024 15:44:47 +0800 Subject: [PATCH 17/29] fix(backup): Fix the stack overflow when read large sst file (#2059) After refactoring to use RocksDB APIs to read files from local filesystem, it may cause stack overflow when the file to read is larger than the stack size (say 8MB). This patch changes to use heap instead of stack to store the file content. --- src/block_service/hdfs/hdfs_service.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/block_service/hdfs/hdfs_service.cpp b/src/block_service/hdfs/hdfs_service.cpp index e303710496..3defe88356 100644 --- a/src/block_service/hdfs/hdfs_service.cpp +++ b/src/block_service/hdfs/hdfs_service.cpp @@ -41,6 +41,7 @@ #include "utils/fmt_logging.h" #include "utils/safe_strerror_posix.h" #include "utils/strings.h" +#include "utils/utils.h" DSN_DEFINE_uint64(replication, hdfs_read_batch_size_bytes, @@ -435,8 +436,8 @@ dsn::task_ptr hdfs_file_object::upload(const upload_request &req, } rocksdb::Slice result; - char scratch[file_size]; - s = rfile->Read(file_size, &result, scratch); + auto scratch = dsn::utils::make_shared_array(file_size); + s = rfile->Read(file_size, &result, scratch.get()); if (!s.ok()) { LOG_ERROR( "read local file '{}' failed, err = {}", req.input_local_name, s.ToString()); From 0bc7ea0e8264ac1c44727c784bfc02e8e0cc493c Mon Sep 17 00:00:00 2001 From: Samunroyu <36890229+Samunroyu@users.noreply.github.com> Date: Thu, 4 Jul 2024 17:42:22 +0800 Subject: [PATCH 18/29] feat(remote_command): change some remote_command shell output to JSON 
format (#2058) Some remote commands shell output are format by json. And some remote command are not. Change the output of register_int_command, register_bool_command to JSON format to improve readability by programs (e.g., Python scripts). --- src/nfs/nfs_client_impl.cpp | 2 ++ src/nfs/nfs_server_impl.cpp | 1 + src/utils/command_manager.cpp | 13 +++++++++---- src/utils/command_manager.h | 18 +++++++++++++----- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp index e736b2749f..04f96a16d5 100644 --- a/src/nfs/nfs_client_impl.cpp +++ b/src/nfs/nfs_client_impl.cpp @@ -26,6 +26,7 @@ #include "nfs_client_impl.h" +#include // IWYU pragma: no_include #include @@ -33,6 +34,7 @@ #include "fmt/core.h" #include "nfs/nfs_code_definition.h" #include "nfs/nfs_node.h" +#include "nlohmann/json.hpp" #include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep #include "runtime/rpc/rpc_host_port.h" #include "utils/blob.h" diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp index df1418822d..21a7f3a8af 100644 --- a/src/nfs/nfs_server_impl.cpp +++ b/src/nfs/nfs_server_impl.cpp @@ -35,6 +35,7 @@ #include "absl/strings/string_view.h" #include "nfs/nfs_code_definition.h" +#include "nlohmann/json.hpp" #include "runtime/api_layer1.h" #include "runtime/task/async_calls.h" #include "utils/TokenBucket.h" diff --git a/src/utils/command_manager.cpp b/src/utils/command_manager.cpp index bcf37bb873..35b678b81b 100644 --- a/src/utils/command_manager.cpp +++ b/src/utils/command_manager.cpp @@ -129,27 +129,32 @@ std::string command_manager::set_bool(bool &value, const std::string &name, const std::vector &args) { + nlohmann::json msg; + msg["error"] = "ok"; // Query. if (args.empty()) { - return value ? "true" : "false"; + msg[name] = value ? "true" : "false"; + return msg.dump(2); } // Invalid arguments size. 
if (args.size() > 1) { - return fmt::format("ERR: invalid arguments, only one boolean argument is acceptable"); + msg["error"] = "ERR: invalid arguments, only one boolean argument is acceptable"; + return msg.dump(2); } // Invalid argument. bool new_value; if (!dsn::buf2bool(args[0], new_value, /* ignore_case */ true)) { - return fmt::format("ERR: invalid arguments, '{}' is not a boolean", args[0]); + msg["error"] = fmt::format("ERR: invalid arguments, '{}' is not a boolean", args[0]); + return msg.dump(2); } // Set to a new value. value = new_value; LOG_INFO("set {} to {} by remote command", name, new_value); - return "OK"; + return msg.dump(2); } command_manager::command_manager() diff --git a/src/utils/command_manager.h b/src/utils/command_manager.h index a73966845c..903ccd2900 100644 --- a/src/utils/command_manager.h +++ b/src/utils/command_manager.h @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include #include @@ -134,34 +136,40 @@ class command_manager : public ::dsn::utils::singleton const std::vector &args, const std::function &validator) { + nlohmann::json msg; + msg["error"] = "ok"; // Query. if (args.empty()) { - return std::to_string(value); + msg[name] = fmt::format("{}", std::to_string(value)); + return msg.dump(2); } // Invalid arguments size. if (args.size() > 1) { - return fmt::format("ERR: invalid arguments, only one integer argument is acceptable"); + msg["error"] = "ERR: invalid arguments, only one integer argument is acceptable"; + return msg.dump(2); } // Reset to the default value. if (dsn::utils::iequals(args[0], "DEFAULT")) { value = default_value; - return "OK"; + msg[name] = default_value; + return msg.dump(2); } // Invalid argument. T new_value = 0; if (!internal::buf2signed(args[0], new_value) || !validator(static_cast(new_value))) { - return {"ERR: invalid arguments"}; + msg["error"] = "ERR: invalid arguments"; + return msg.dump(2); } // Set to a new value. 
value = new_value; LOG_INFO("set {} to {} by remote command", name, new_value); - return "OK"; + return msg.dump(2); } typedef ref_ptr command_instance_ptr; From e05d5d78d93d0e4911441e0af62afa1f6e9c8b10 Mon Sep 17 00:00:00 2001 From: Dan Wang Date: Fri, 5 Jul 2024 12:45:26 +0800 Subject: [PATCH 19/29] fix(rocksdb): fix the problem that the usage of block cache is not set for the metric (#2060) #2061 The monitor of the block cache usage is server-level and created by std::call_once in a replica-level object, running periodically to update the block cache usage. However, once the replica-level object is stopped, the server-level monitor would be cancelled; as a result, the block cache usage would never be updated. --- src/server/pegasus_server_impl.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index 75c95a8128..6a76679eba 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -1817,6 +1817,12 @@ dsn::error_code pegasus_server_impl::start(int argc, char **argv) std::call_once(flag, [&]() { // The timer task will always running even though there is no replicas CHECK_NE(kServerStatUpdateTimeSec.count(), 0); + + // TODO(wangdan): _update_server_rdb_stat is server-level, thus it could not be simply + // cancelled in the destructor of pegasus_server_impl which is replica-level. + // + // We should refactor to make _update_server_rdb_stat exit gracefully by + // `_update_server_rdb_stat->cancel(true)`. 
_update_server_rdb_stat = dsn::tasking::enqueue_timer( LPC_REPLICATION_LONG_COMMON, nullptr, // TODO: the tracker is nullptr, we will fix it later @@ -1869,10 +1875,7 @@ ::dsn::error_code pegasus_server_impl::stop(bool clear_state) _update_replica_rdb_stat->cancel(true); _update_replica_rdb_stat = nullptr; } - if (_update_server_rdb_stat != nullptr) { - _update_server_rdb_stat->cancel(true); - _update_server_rdb_stat = nullptr; - } + _tracker.cancel_outstanding_tasks(); _context_cache.clear(); From 18a612ada1d9a7d910423adea0505487c0a0ea46 Mon Sep 17 00:00:00 2001 From: Dan Wang Date: Thu, 11 Jul 2024 14:52:58 +0800 Subject: [PATCH 20/29] fix(duplication): create checkpoint for the replica with 0 or 1 record (#2054) https://github.com/apache/incubator-pegasus/issues/2069 To create the checkpoint of the replica with 0 or 1 record immediately: - set the min decree for checkpoint to at least 1, which means the checkpoint would inevitably be created even if the replica is empty. - for the empty replica, an empty write would be committed to increase the decree to at least 1 to ensure that the checkpoint would be created. - the max decree in rocksdb memtable (the last applied decree) is considered as the min decree that should be covered by the checkpoint, which means currently all of the data in current rocksdb should be included into the created checkpoint. 
The following configuration is added to control the retry interval for triggering checkpoint: ```diff [replication] + trigger_checkpoint_retry_interval_ms = 100 ``` --- .../duplication/replica_duplicator.cpp | 70 ++++++++++----- src/replica/duplication/replica_duplicator.h | 12 ++- .../duplication/test/duplication_test_base.h | 7 +- .../test/replica_duplicator_test.cpp | 48 ++++++---- src/replica/replica.h | 24 ++++- src/replica/replica_chkpt.cpp | 89 +++++++++++++++---- src/replica/test/mock_utils.h | 7 ++ src/replica/test/replica_test.cpp | 74 ++++++++++++--- src/utils/errors.h | 2 +- 9 files changed, 252 insertions(+), 81 deletions(-) diff --git a/src/replica/duplication/replica_duplicator.cpp b/src/replica/duplication/replica_duplicator.cpp index 8102096518..31d7e9d94b 100644 --- a/src/replica/duplication/replica_duplicator.cpp +++ b/src/replica/duplication/replica_duplicator.cpp @@ -35,7 +35,6 @@ #include "load_from_private_log.h" #include "replica/mutation_log.h" #include "replica/replica.h" -#include "runtime/task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -64,10 +63,31 @@ replica_duplicator::replica_duplicator(const duplication_entry &ent, replica *r) auto it = ent.progress.find(get_gpid().get_partition_index()); if (it->second == invalid_decree) { - // keep current max committed_decree as start point. - // todo(jiashuo1) _start_point_decree hasn't be ready to persist zk, so if master restart, - // the value will be reset 0 - _start_point_decree = _progress.last_decree = _replica->private_log()->max_commit_on_disk(); + // Ensure that the checkpoint decree is at least 1. Otherwise, the checkpoint could not be + // created in time for empty replica; in consequence, the remote cluster would inevitably + // fail to pull the checkpoint files. 
+ // + // The max decree in rocksdb memtable (the last applied decree) is considered as the min + // decree that should be covered by the checkpoint, which means currently all of the data + // in current rocksdb should be included into the created checkpoint. + // + // TODO(jiashuo1): _min_checkpoint_decree hasn't been persisted to zk yet, so if the master + // restarts, the value will be reset to 0. + const auto last_applied_decree = _replica->last_applied_decree(); + _min_checkpoint_decree = std::max(last_applied_decree, static_cast<decree>(1)); + _progress.last_decree = last_applied_decree; + LOG_INFO_PREFIX("initialize checkpoint decree: min_checkpoint_decree={}, " + "last_committed_decree={}, last_applied_decree={}, " + "last_flushed_decree={}, last_durable_decree={}, " + "plog_max_decree_on_disk={}, plog_max_commit_on_disk={}", + _min_checkpoint_decree, + _replica->last_committed_decree(), + last_applied_decree, + _replica->last_flushed_decree(), + _replica->last_durable_decree(), + _replica->private_log()->max_decree_on_disk(), + _replica->private_log()->max_commit_on_disk()); + } else { _progress.last_decree = _progress.confirmed_decree = it->second; } @@ -86,17 +106,19 @@ replica_duplicator::replica_duplicator(const duplication_entry &ent, replica *r) void replica_duplicator::prepare_dup() { - LOG_INFO_PREFIX("start prepare checkpoint to catch up with latest durable decree: " - "start_point_decree({}) < last_durable_decree({}) = {}", - _start_point_decree, + LOG_INFO_PREFIX("start to trigger checkpoint: min_checkpoint_decree={}, " + "last_committed_decree={}, last_applied_decree={}, " + "last_flushed_decree={}, last_durable_decree={}, " + "plog_max_decree_on_disk={}, plog_max_commit_on_disk={}", + _min_checkpoint_decree, + _replica->last_committed_decree(), + _replica->last_applied_decree(), + _replica->last_flushed_decree(), + _replica->last_durable_decree(), + _replica->private_log()->max_decree_on_disk(), + 
_replica->private_log()->max_commit_on_disk()); - tasking::enqueue( - LPC_REPLICATION_COMMON, - &_tracker, - [this]() { _replica->trigger_manual_emergency_checkpoint(_start_point_decree); }, - get_gpid().thread_hash()); + _replica->async_trigger_manual_emergency_checkpoint(_min_checkpoint_decree, 0); } void replica_duplicator::start_dup_log() @@ -162,19 +184,19 @@ void replica_duplicator::update_status_if_needed(duplication_status::type next_s return; } - // DS_PREPARE means replica is checkpointing, it may need trigger multi time to catch - // _start_point_decree of the plog + // DS_PREPARE means this replica is making checkpoint, which might need to be triggered + // multiple times to catch up with _min_checkpoint_decree. if (_status == next_status && next_status != duplication_status::DS_PREPARE) { return; } - LOG_INFO_PREFIX( - "update duplication status: {}=>{}[start_point={}, last_commit={}, last_durable={}]", - duplication_status_to_string(_status), - duplication_status_to_string(next_status), - _start_point_decree, - _replica->last_committed_decree(), - _replica->last_durable_decree()); + LOG_INFO_PREFIX("update duplication status: {}=>{} [min_checkpoint_decree={}, " + "last_committed_decree={}, last_durable_decree={}]", + duplication_status_to_string(_status), + duplication_status_to_string(next_status), + _min_checkpoint_decree, + _replica->last_committed_decree(), + _replica->last_durable_decree()); _status = next_status; if (_status == duplication_status::DS_PREPARE) { @@ -220,7 +242,7 @@ error_s replica_duplicator::update_progress(const duplication_progress &p) decree last_confirmed_decree = _progress.confirmed_decree; _progress.confirmed_decree = std::max(_progress.confirmed_decree, p.confirmed_decree); _progress.last_decree = std::max(_progress.last_decree, p.last_decree); - _progress.checkpoint_has_prepared = _start_point_decree <= _replica->last_durable_decree(); + _progress.checkpoint_has_prepared = _min_checkpoint_decree <= 
_replica->last_durable_decree(); if (_progress.confirmed_decree > _progress.last_decree) { return FMT_ERR(ERR_INVALID_STATE, diff --git a/src/replica/duplication/replica_duplicator.h b/src/replica/duplication/replica_duplicator.h index 66f7ac7cec..9a8deed0d9 100644 --- a/src/replica/duplication/replica_duplicator.h +++ b/src/replica/duplication/replica_duplicator.h @@ -39,12 +39,13 @@ namespace replication { class duplication_progress { public: - // check if checkpoint has catch up with `_start_point_decree` + // Check if checkpoint has covered `_min_checkpoint_decree`. bool checkpoint_has_prepared{false}; - // the maximum decree that's been persisted in meta server + + // The max decree that has been persisted in the meta server. decree confirmed_decree{invalid_decree}; - // the maximum decree that's been duplicated to remote. + // The max decree that has been duplicated to the remote cluster. decree last_decree{invalid_decree}; duplication_progress &set_last_decree(decree d) @@ -184,7 +185,10 @@ class replica_duplicator : public replica_base, public pipeline::base replica_stub *_stub; dsn::task_tracker _tracker; - decree _start_point_decree = invalid_decree; + // The min decree that should be covered by the checkpoint which is triggered by the + // newly added duplication. 
+ decree _min_checkpoint_decree{invalid_decree}; + duplication_status::type _status{duplication_status::DS_INIT}; std::atomic _fail_mode{duplication_fail_mode::FAIL_SLOW}; diff --git a/src/replica/duplication/test/duplication_test_base.h b/src/replica/duplication/test/duplication_test_base.h index 69d935cc1d..cd54fe9d8c 100644 --- a/src/replica/duplication/test/duplication_test_base.h +++ b/src/replica/duplication/test/duplication_test_base.h @@ -54,17 +54,16 @@ class duplication_test_base : public replica_test_base return dup_entities[dupid].get(); } - std::unique_ptr create_test_duplicator(decree confirmed = invalid_decree, - decree start = invalid_decree) + std::unique_ptr + create_test_duplicator(decree confirmed_decree = invalid_decree) { duplication_entry dup_ent; dup_ent.dupid = 1; dup_ent.remote = "remote_address"; dup_ent.status = duplication_status::DS_PAUSE; - dup_ent.progress[_replica->get_gpid().get_partition_index()] = confirmed; + dup_ent.progress[_replica->get_gpid().get_partition_index()] = confirmed_decree; auto duplicator = std::make_unique(dup_ent, _replica.get()); - duplicator->_start_point_decree = start; return duplicator; } diff --git a/src/replica/duplication/test/replica_duplicator_test.cpp b/src/replica/duplication/test/replica_duplicator_test.cpp index 817e3090f8..78e1aabfb2 100644 --- a/src/replica/duplication/test/replica_duplicator_test.cpp +++ b/src/replica/duplication/test/replica_duplicator_test.cpp @@ -64,9 +64,9 @@ class replica_duplicator_test : public duplication_test_base decree last_durable_decree() const { return _replica->last_durable_decree(); } - decree log_dup_start_decree(const std::unique_ptr &dup) const + decree min_checkpoint_decree(const std::unique_ptr &dup) const { - return dup->_start_point_decree; + return dup->_min_checkpoint_decree; } void test_new_duplicator(const std::string &remote_app_name, bool specify_remote_app_name) @@ -157,39 +157,51 @@ TEST_P(replica_duplicator_test, pause_start_duplication) { 
test_pause_start_dupl TEST_P(replica_duplicator_test, duplication_progress) { auto duplicator = create_test_duplicator(); - ASSERT_EQ(duplicator->progress().last_decree, 0); // start duplication from empty plog - ASSERT_EQ(duplicator->progress().confirmed_decree, invalid_decree); + // Start duplication from empty replica. + ASSERT_EQ(1, min_checkpoint_decree(duplicator)); + ASSERT_EQ(0, duplicator->progress().last_decree); + ASSERT_EQ(invalid_decree, duplicator->progress().confirmed_decree); + + // Update the max decree that has been duplicated to the remote cluster. duplicator->update_progress(duplicator->progress().set_last_decree(10)); - ASSERT_EQ(duplicator->progress().last_decree, 10); - ASSERT_EQ(duplicator->progress().confirmed_decree, invalid_decree); + ASSERT_EQ(10, duplicator->progress().last_decree); + ASSERT_EQ(invalid_decree, duplicator->progress().confirmed_decree); + // Update the max decree that has been persisted in the meta server. duplicator->update_progress(duplicator->progress().set_confirmed_decree(10)); - ASSERT_EQ(duplicator->progress().confirmed_decree, 10); - ASSERT_EQ(duplicator->progress().last_decree, 10); + ASSERT_EQ(10, duplicator->progress().last_decree); + ASSERT_EQ(10, duplicator->progress().confirmed_decree); - ASSERT_EQ(duplicator->update_progress(duplicator->progress().set_confirmed_decree(1)), - error_s::make(ERR_INVALID_STATE, "never decrease confirmed_decree: new(1) old(10)")); + ASSERT_EQ(error_s::make(ERR_INVALID_STATE, "never decrease confirmed_decree: new(1) old(10)"), + duplicator->update_progress(duplicator->progress().set_confirmed_decree(1))); - ASSERT_EQ(duplicator->update_progress(duplicator->progress().set_confirmed_decree(12)), - error_s::make(ERR_INVALID_STATE, - "last_decree(10) should always larger than confirmed_decree(12)")); + ASSERT_EQ(error_s::make(ERR_INVALID_STATE, + "last_decree(10) should always larger than confirmed_decree(12)"), + 
duplicator->update_progress(duplicator->progress().set_confirmed_decree(12))); - auto duplicator_for_checkpoint = create_test_duplicator(invalid_decree, 100); + // Test that the checkpoint has not been created. + replica()->update_last_applied_decree(100); + auto duplicator_for_checkpoint = create_test_duplicator(); ASSERT_FALSE(duplicator_for_checkpoint->progress().checkpoint_has_prepared); - replica()->update_last_durable_decree(101); + // Test that the checkpoint has been created. + replica()->update_last_durable_decree(100); duplicator_for_checkpoint->update_progress(duplicator->progress()); ASSERT_TRUE(duplicator_for_checkpoint->progress().checkpoint_has_prepared); } -TEST_P(replica_duplicator_test, prapre_dup) +TEST_P(replica_duplicator_test, prepare_dup) { - auto duplicator = create_test_duplicator(invalid_decree, 100); + replica()->update_last_applied_decree(100); replica()->update_expect_last_durable_decree(100); + + auto duplicator = create_test_duplicator(); duplicator->prepare_dup(); wait_all(duplicator); - ASSERT_EQ(last_durable_decree(), log_dup_start_decree(duplicator)); + + ASSERT_EQ(100, min_checkpoint_decree(duplicator)); + ASSERT_EQ(100, last_durable_decree()); } } // namespace replication diff --git a/src/replica/replica.h b/src/replica/replica.h index 12e505dcde..802d07ef36 100644 --- a/src/replica/replica.h +++ b/src/replica/replica.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -268,7 +269,24 @@ class replica : public serverlet, public ref_counter, public replica_ba // // Duplication // - error_code trigger_manual_emergency_checkpoint(decree old_decree); + + using trigger_checkpoint_callback = std::function; + + // Choose a fixed thread from pool to trigger an emergency checkpoint asynchronously. + // A new checkpoint would still be created even if the replica is empty (hasn't received + // any write operation). 
+ // + // Parameters: + // - `min_checkpoint_decree`: the min decree that should be covered by the triggered + // checkpoint. Should be a number greater than 0 which means a new checkpoint must be + // created. + // - `delay_ms`: the delayed time in milliseconds that the triggering task is put into + // the thread pool. + // - `callback`: the callback processor handling the error code of triggering checkpoint. + void async_trigger_manual_emergency_checkpoint(decree min_checkpoint_decree, + uint32_t delay_ms, + trigger_checkpoint_callback callback = {}); + void on_query_last_checkpoint(learn_response &response); std::shared_ptr get_duplication_manager() const { @@ -471,6 +489,10 @@ class replica : public serverlet, public ref_counter, public replica_ba bool is_plog_gc_enabled() const; std::string get_plog_gc_enabled_message() const; + // Trigger an emergency checkpoint for duplication. Once the replica is empty (hasn't + // received any write operation), there would be no checkpoint created. + error_code trigger_manual_emergency_checkpoint(decree min_checkpoint_decree); + ///////////////////////////////////////////////////////////////// // cold backup virtual void generate_backup_checkpoint(cold_backup_context_ptr backup_context); diff --git a/src/replica/replica_chkpt.cpp b/src/replica/replica_chkpt.cpp index 0145dcab0f..ea8ff41c37 100644 --- a/src/replica/replica_chkpt.cpp +++ b/src/replica/replica_chkpt.cpp @@ -42,6 +42,7 @@ #include "metadata_types.h" #include "mutation_log.h" #include "replica.h" +#include "replica/mutation.h" #include "replica/prepare_list.h" #include "replica/replica_context.h" #include "replica/replication_app_base.h" @@ -69,12 +70,14 @@ DSN_DEFINE_int32(replication, checkpoint_max_interval_hours, 2, "The maximum time interval in hours of replica checkpoints must be generated"); + DSN_DEFINE_int32(replication, log_private_reserve_max_size_mb, 1000, "The maximum size of useless private log to be reserved. 
NOTE: only when " "'log_private_reserve_max_size_mb' and 'log_private_reserve_max_time_seconds' are " "both satisfied, the useless logs can be reserved"); + DSN_DEFINE_int32( replication, log_private_reserve_max_time_seconds, @@ -83,6 +86,11 @@ DSN_DEFINE_int32( "when 'log_private_reserve_max_size_mb' and 'log_private_reserve_max_time_seconds' " "are both satisfied, the useless logs can be reserved"); +DSN_DEFINE_uint32(replication, + trigger_checkpoint_retry_interval_ms, + 100, + "The wait interval before next attempt for empty write."); + namespace dsn { namespace replication { @@ -186,8 +194,59 @@ void replica::on_checkpoint_timer() }); } +void replica::async_trigger_manual_emergency_checkpoint(decree min_checkpoint_decree, + uint32_t delay_ms, + trigger_checkpoint_callback callback) +{ + CHECK_GT_PREFIX_MSG(min_checkpoint_decree, + 0, + "min_checkpoint_decree should be a number greater than 0 " + "which means a new checkpoint must be created"); + + tasking::enqueue( + LPC_REPLICATION_COMMON, + &_tracker, + [min_checkpoint_decree, callback, this]() { + _checker.only_one_thread_access(); + + if (_app == nullptr) { + LOG_ERROR_PREFIX("app hasn't been initialized or has been released"); + return; + } + + const auto last_applied_decree = this->last_applied_decree(); + if (last_applied_decree == 0) { + LOG_INFO_PREFIX("ready to commit an empty write to trigger checkpoint: " + "min_checkpoint_decree={}, last_applied_decree={}, " + "last_durable_decree={}", + min_checkpoint_decree, + last_applied_decree, + last_durable_decree()); + + // For the empty replica, here we commit an empty write to increase + // the decree to at least 1, to ensure that the checkpoint would inevitably + // be created even if the replica is empty. 
+ mutation_ptr mu = new_mutation(invalid_decree); + mu->add_client_request(RPC_REPLICATION_WRITE_EMPTY, nullptr); + init_prepare(mu, false); + + async_trigger_manual_emergency_checkpoint( + min_checkpoint_decree, FLAGS_trigger_checkpoint_retry_interval_ms, callback); + + return; + } + + const auto err = trigger_manual_emergency_checkpoint(min_checkpoint_decree); + if (callback) { + callback(err); + } + }, + get_gpid().thread_hash(), + std::chrono::milliseconds(delay_ms)); +} + // ThreadPool: THREAD_POOL_REPLICATION -error_code replica::trigger_manual_emergency_checkpoint(decree old_decree) +error_code replica::trigger_manual_emergency_checkpoint(decree min_checkpoint_decree) { _checker.only_one_thread_access(); @@ -196,20 +255,18 @@ error_code replica::trigger_manual_emergency_checkpoint(decree old_decree) return ERR_LOCAL_APP_FAILURE; } - if (old_decree <= _app->last_durable_decree()) { - LOG_INFO_PREFIX("checkpoint has been completed: old = {} vs latest = {}", - old_decree, - _app->last_durable_decree()); + const auto last_durable_decree = this->last_durable_decree(); + if (min_checkpoint_decree <= last_durable_decree) { + LOG_INFO_PREFIX( + "checkpoint has been completed: min_checkpoint_decree={}, last_durable_decree={}", + min_checkpoint_decree, + last_durable_decree); _is_manual_emergency_checkpointing = false; - _stub->_manual_emergency_checkpointing_count == 0 - ? 0 - : (--_stub->_manual_emergency_checkpointing_count); return ERR_OK; } if (_is_manual_emergency_checkpointing) { - LOG_WARNING_PREFIX("replica is checkpointing, last_durable_decree = {}", - _app->last_durable_decree()); + LOG_WARNING_PREFIX("replica is checkpointing, last_durable_decree={}", last_durable_decree); return ERR_BUSY; } @@ -307,9 +364,9 @@ error_code replica::background_async_checkpoint(bool is_emergency) if (_is_manual_emergency_checkpointing) { _is_manual_emergency_checkpointing = false; - _stub->_manual_emergency_checkpointing_count == 0 - ? 
0 - : (--_stub->_manual_emergency_checkpointing_count); + if (_stub->_manual_emergency_checkpointing_count > 0) { + --_stub->_manual_emergency_checkpointing_count; + } } return err; @@ -330,9 +387,9 @@ error_code replica::background_async_checkpoint(bool is_emergency) if (_is_manual_emergency_checkpointing) { _is_manual_emergency_checkpointing = false; - _stub->_manual_emergency_checkpointing_count == 0 - ? 0 - : (--_stub->_manual_emergency_checkpointing_count); + if (_stub->_manual_emergency_checkpointing_count > 0) { + --_stub->_manual_emergency_checkpointing_count; + } } if (err == ERR_WRONG_TIMING) { // do nothing diff --git a/src/replica/test/mock_utils.h b/src/replica/test/mock_utils.h index cc631143b0..9debd41eda 100644 --- a/src/replica/test/mock_utils.h +++ b/src/replica/test/mock_utils.h @@ -100,6 +100,8 @@ class mock_replication_app_base : public replication_app_base return manual_compaction_status::IDLE; } + void set_last_applied_decree(decree d) { _last_committed_decree.store(d); } + void set_last_durable_decree(decree d) { _last_durable_decree = d; } void set_expect_last_durable_decree(decree d) { _expect_last_durable_decree = d; } @@ -218,6 +220,11 @@ class mock_replica : public replica backup_context->complete_checkpoint(); } + void update_last_applied_decree(decree decree) + { + dynamic_cast(_app.get())->set_last_applied_decree(decree); + } + void update_last_durable_decree(decree decree) { dynamic_cast(_app.get())->set_last_durable_decree(decree); diff --git a/src/replica/test/replica_test.cpp b/src/replica/test/replica_test.cpp index 7123dd85b7..a6b97b7f11 100644 --- a/src/replica/test/replica_test.cpp +++ b/src/replica/test/replica_test.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -56,6 +57,7 @@ #include "runtime/rpc/rpc_message.h" #include "runtime/task/task_code.h" #include "runtime/task/task_tracker.h" +#include "test_util/test_util.h" #include "utils/autoref_ptr.h" #include "utils/defer.h" #include 
"utils/env.h" @@ -65,10 +67,14 @@ #include "utils/fmt_logging.h" #include "utils/metrics.h" #include "utils/string_conv.h" +#include "utils/synchronize.h" #include "utils/test_macros.h" DSN_DECLARE_bool(fd_disabled); DSN_DECLARE_string(cold_backup_root); +DSN_DECLARE_uint32(mutation_2pc_min_replica_count); + +using pegasus::AssertEventually; namespace dsn { namespace replication { @@ -90,6 +96,7 @@ class replica_test : public replica_test_base mock_app_info(); _mock_replica = stub->generate_replica_ptr(_app_info, _pid, partition_status::PS_PRIMARY, 1); + _mock_replica->init_private_log(_log_dir); // set FLAGS_cold_backup_root manually. // FLAGS_cold_backup_root is set by configuration "replication.cold_backup_root", @@ -204,6 +211,25 @@ class replica_test : public replica_test_base bool is_checkpointing() { return _mock_replica->_is_manual_emergency_checkpointing; } + void test_trigger_manual_emergency_checkpoint(const decree min_checkpoint_decree, + const error_code expected_err, + std::function callback = {}) + { + dsn::utils::notify_event op_completed; + _mock_replica->async_trigger_manual_emergency_checkpoint( + min_checkpoint_decree, 0, [&](error_code actual_err) { + ASSERT_EQ(expected_err, actual_err); + + if (callback) { + callback(); + } + + op_completed.notify(); + }); + + op_completed.wait(); + } + bool has_gpid(gpid &pid) const { for (const auto &node : stub->_fs_manager.get_dir_nodes()) { @@ -426,28 +452,50 @@ TEST_P(replica_test, test_replica_backup_and_restore_with_specific_path) TEST_P(replica_test, test_trigger_manual_emergency_checkpoint) { - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(100), ERR_OK); - ASSERT_TRUE(is_checkpointing()); + // There is only one replica for the unit test. + PRESERVE_FLAG(mutation_2pc_min_replica_count); + FLAGS_mutation_2pc_min_replica_count = 1; + + // Initially the mutation log is empty. 
+ ASSERT_EQ(0, _mock_replica->last_applied_decree()); + ASSERT_EQ(0, _mock_replica->last_durable_decree()); + + // Commit at least an empty write to make the replica become non-empty. + _mock_replica->update_expect_last_durable_decree(1); + test_trigger_manual_emergency_checkpoint(1, ERR_OK); + _mock_replica->tracker()->wait_outstanding_tasks(); + + // Committing multiple empty writes (retry multiple times) might make the last + // applied decree greater than 1. + ASSERT_LE(1, _mock_replica->last_applied_decree()); + ASSERT_EQ(1, _mock_replica->last_durable_decree()); + + test_trigger_manual_emergency_checkpoint( + 100, ERR_OK, [this]() { ASSERT_TRUE(is_checkpointing()); }); _mock_replica->update_last_durable_decree(100); - // test no need start checkpoint because `old_decree` < `last_durable` - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(100), ERR_OK); - ASSERT_FALSE(is_checkpointing()); + // There's no need to trigger checkpoint since min_checkpoint_decree <= last_durable_decree. + test_trigger_manual_emergency_checkpoint( + 100, ERR_OK, [this]() { ASSERT_FALSE(is_checkpointing()); }); - // test has existed running task + // There's already an existing running manual emergency checkpoint task. force_update_checkpointing(true); - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(101), ERR_BUSY); - ASSERT_TRUE(is_checkpointing()); - // test running task completed + test_trigger_manual_emergency_checkpoint( + 101, ERR_BUSY, [this]() { ASSERT_TRUE(is_checkpointing()); }); + + // Wait until the running task is completed. _mock_replica->tracker()->wait_outstanding_tasks(); ASSERT_FALSE(is_checkpointing()); - // test exceed max concurrent count - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(101), ERR_OK); + // The number of concurrent tasks exceeds the limit. 
+ test_trigger_manual_emergency_checkpoint(101, ERR_OK); force_update_checkpointing(false); + + PRESERVE_FLAG(max_concurrent_manual_emergency_checkpointing_count); FLAGS_max_concurrent_manual_emergency_checkpointing_count = 1; - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(101), ERR_TRY_AGAIN); - ASSERT_FALSE(is_checkpointing()); + + test_trigger_manual_emergency_checkpoint( + 101, ERR_TRY_AGAIN, [this]() { ASSERT_FALSE(is_checkpointing()); }); _mock_replica->tracker()->wait_outstanding_tasks(); } diff --git a/src/utils/errors.h b/src/utils/errors.h index c611e1beff..8d5806efa5 100644 --- a/src/utils/errors.h +++ b/src/utils/errors.h @@ -136,7 +136,7 @@ class error_s return os << s.description(); } - friend bool operator==(const error_s lhs, const error_s &rhs) + friend bool operator==(const error_s &lhs, const error_s &rhs) { if (lhs._info && rhs._info) { return lhs._info->code == rhs._info->code && lhs._info->msg == rhs._info->msg; From 86ca8dd295fa37abda222d7058c7f384f0a07309 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Thu, 11 Jul 2024 15:12:33 +0800 Subject: [PATCH 21/29] refactor(format): bump clang-format to 14 (#2063) After GitHub actions forced to run on node20 [1], and node20 depends on GLIBC_2.28, we have to run actions on newer operation systems which has higher builtin glibc. Before this patch, we are using clang-format-3.9 to format C++ code, but if we using newer OS (say Ubuntu 22.04), the clang-format-3.9 is too old and it's difficult to install such an old version. This patch bumps the clang-format to 14 as the code format tool, and update relative chores, such as updating `cpp_clang_format_linter` action job in .github/workflows/lint_and_test_cpp.yaml, removing clang-format-3.9 docker images, adding more options in `.clang-format` (almost all of the options are kept as old version and default values). The main part of this patch is the C++ code updating according to the newer clang-format, they are formated automatically. 1. 
https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/ --- .clang-format | 110 +++++++-- .github/workflows/lint_and_test_cpp.yaml | 6 +- .github/workflows/regular-build.yml | 6 +- docker/clang-format-3.9/Dockerfile | 28 --- scripts/compile_thrift.py | 2 +- scripts/format_files.sh | 4 +- scripts/run-clang-format.py | 2 +- src/aio/test/aio.cpp | 46 ++-- src/base/test/value_schema_test.cpp | 4 +- src/block_service/block_service.h | 6 +- src/block_service/block_service_manager.cpp | 18 +- src/block_service/local/local_service.h | 6 +- src/block_service/test/hdfs_service_test.cpp | 126 +++++----- src/client/partition_resolver.cpp | 54 ++--- src/client/partition_resolver_simple.cpp | 23 +- src/client/replication_ddl_client.cpp | 5 +- src/client/replication_ddl_client.h | 10 +- src/client_lib/client_factory.cpp | 2 +- src/client_lib/mutation.cpp | 2 +- src/client_lib/pegasus_client_factory_impl.h | 4 +- src/client_lib/pegasus_client_impl.cpp | 38 +-- src/client_lib/pegasus_scanner_impl.cpp | 27 ++- src/common/fs_manager.h | 4 +- src/common/replication_other_types.h | 4 +- src/failure_detector/failure_detector.cpp | 82 ++++--- src/failure_detector/failure_detector.h | 4 +- .../failure_detector_multimaster.cpp | 10 +- .../failure_detector_multimaster.h | 4 +- src/failure_detector/fd.client.h | 4 +- src/failure_detector/fd.server.h | 4 +- .../test/failure_detector.cpp | 9 +- src/geo/bench/bench.cpp | 5 +- src/geo/lib/geo_client.cpp | 51 ++-- src/http/test/uri_decoder_test.cpp | 2 +- src/include/pegasus/error.h | 2 +- src/include/rrdb/rrdb.code.definition.h | 4 +- src/meta/app_env_validator.cpp | 2 +- src/meta/backup_engine.cpp | 33 +-- src/meta/distributed_lock_service_simple.cpp | 17 +- src/meta/distributed_lock_service_simple.h | 4 +- src/meta/duplication/duplication_info.h | 19 +- .../duplication/meta_duplication_service.cpp | 10 +- src/meta/meta_backup_service.cpp | 225 +++++++++-------- 
src/meta/meta_bulk_load_service.cpp | 35 +-- src/meta/meta_server_failure_detector.cpp | 4 +- src/meta/meta_server_failure_detector.h | 4 +- src/meta/meta_service.cpp | 93 +++---- src/meta/meta_split_service.cpp | 22 +- src/meta/meta_state_service.h | 4 +- src/meta/meta_state_service_simple.cpp | 25 +- src/meta/meta_state_service_simple.h | 4 +- src/meta/meta_state_service_utils_impl.h | 48 ++-- src/meta/meta_state_service_zookeeper.cpp | 87 +++---- src/meta/meta_state_service_zookeeper.h | 4 +- src/meta/partition_guardian.cpp | 2 +- src/meta/server_state.cpp | 136 +++++------ .../balancer_simulator/balancer_simulator.cpp | 1 + src/meta/test/meta_app_operation_test.cpp | 61 ++--- src/meta/test/meta_bulk_load_service_test.cpp | 3 +- src/meta/test/state_sync_test.cpp | 22 +- src/meta/test/update_configuration_test.cpp | 10 +- src/nfs/nfs_client_impl.cpp | 44 ++-- src/nfs/nfs_code_definition.h | 4 +- src/nfs/nfs_node.cpp | 2 +- src/nfs/nfs_node.h | 2 +- src/nfs/test/main.cpp | 97 ++++---- src/perf_counter/perf_counter.h | 6 +- src/perf_counter/perf_counter_atomic.h | 2 +- src/perf_counter/perf_counter_wrapper.h | 2 +- src/ranger/ranger_resource_policy_manager.cpp | 35 +-- src/redis_protocol/proxy_lib/redis_parser.cpp | 39 +-- src/redis_protocol/proxy_lib/redis_parser.h | 6 +- src/remote_cmd/remote_command.cpp | 2 +- src/replica/backup/cold_backup_context.cpp | 228 +++++++++--------- src/replica/backup/replica_backup_manager.cpp | 34 +-- src/replica/bulk_load/replica_bulk_loader.cpp | 4 +- .../duplication/duplication_sync_timer.cpp | 13 +- src/replica/duplication/mutation_batch.cpp | 7 +- .../replica_duplicator_manager.cpp | 2 +- .../duplication/replica_duplicator_manager.h | 2 +- src/replica/duplication/replica_follower.cpp | 34 +-- src/replica/duplication/replica_follower.h | 34 +-- src/replica/mutation.cpp | 4 +- src/replica/mutation.h | 4 +- src/replica/mutation_cache.cpp | 4 +- src/replica/mutation_cache.h | 4 +- src/replica/mutation_log.cpp | 44 ++-- 
src/replica/prepare_list.cpp | 32 +-- src/replica/replica_2pc.cpp | 16 +- src/replica/replica_backup.cpp | 47 ++-- src/replica/replica_check.cpp | 43 ++-- src/replica/replica_chkpt.cpp | 42 ++-- src/replica/replica_config.cpp | 23 +- src/replica/replica_disk_migrator.cpp | 1 - src/replica/replica_failover.cpp | 4 +- src/replica/replica_init.cpp | 30 +-- src/replica/replica_learn.cpp | 156 ++++++------ src/replica/replica_restore.cpp | 13 +- src/replica/replica_stub.cpp | 173 ++++++------- src/replica/replica_throttle.cpp | 28 ++- src/replica/split/replica_split_manager.cpp | 30 +-- .../storage/simple_kv/simple_kv.app.example.h | 9 +- .../simple_kv/simple_kv.code.definition.h | 6 +- .../storage/simple_kv/simple_kv.server.h | 6 +- .../simple_kv/simple_kv.server.impl.cpp | 6 +- .../storage/simple_kv/simple_kv.server.impl.h | 6 +- src/replica/storage/simple_kv/test/case.cpp | 6 +- src/replica/storage/simple_kv/test/case.h | 6 +- src/replica/storage/simple_kv/test/checker.h | 6 +- src/replica/storage/simple_kv/test/client.cpp | 22 +- src/replica/storage/simple_kv/test/client.h | 6 +- src/replica/storage/simple_kv/test/common.cpp | 6 +- src/replica/storage/simple_kv/test/common.h | 6 +- .../storage/simple_kv/test/injector.cpp | 6 +- src/replica/storage/simple_kv/test/injector.h | 6 +- .../simple_kv/test/simple_kv.server.impl.cpp | 54 ++--- .../simple_kv/test/simple_kv.server.impl.h | 54 ++--- src/replica/test/log_file_test.cpp | 34 +-- src/replica/test/mock_utils.h | 4 +- src/replica/test/mutation_log_test.cpp | 26 +- src/replica/test/replica_learn_test.cpp | 5 +- src/replica/test/replica_test.cpp | 8 +- .../test/replication_service_test_app.h | 2 +- .../test/throttling_controller_test.cpp | 6 +- src/runtime/api_task.h | 2 +- src/runtime/env.sim.h | 4 +- src/runtime/fault_injector.cpp | 4 +- src/runtime/fault_injector.h | 4 +- src/runtime/nativerun.h | 4 +- src/runtime/node_scoper.h | 4 +- src/runtime/pipeline.h | 33 ++- src/runtime/profiler.cpp | 2 +- 
src/runtime/providers.common.h | 2 +- src/runtime/rpc/dsn_message_parser.cpp | 2 +- src/runtime/rpc/dsn_message_parser.h | 50 ++-- src/runtime/rpc/message_parser.cpp | 2 +- src/runtime/rpc/message_parser_manager.h | 2 +- src/runtime/rpc/network.sim.cpp | 4 +- src/runtime/rpc/raw_message_parser.cpp | 2 +- src/runtime/rpc/raw_message_parser.h | 50 ++-- src/runtime/rpc/rpc_address.h | 3 +- src/runtime/rpc/rpc_holder.h | 8 +- src/runtime/scheduler.cpp | 4 +- src/runtime/serverlet.h | 2 +- src/runtime/simulator.h | 5 +- src/runtime/task/async_calls.h | 2 +- src/runtime/task/future_types.h | 2 +- src/runtime/task/hpc_task_queue.cpp | 4 +- src/runtime/task/hpc_task_queue.h | 4 +- src/runtime/task/task.h | 6 +- src/runtime/task/task_code.cpp | 4 +- src/runtime/task/task_code.h | 6 +- src/runtime/task/task_engine.cpp | 15 +- src/runtime/task/task_engine.sim.cpp | 4 +- src/runtime/task/task_engine.sim.h | 4 +- src/runtime/task/task_spec.cpp | 2 +- src/runtime/task/task_spec.h | 11 +- src/runtime/task/task_tracker.cpp | 2 +- src/runtime/task/task_tracker.h | 2 +- src/runtime/task/task_worker.cpp | 2 +- src/runtime/task/task_worker.h | 2 +- src/runtime/task/timer_service.h | 2 +- src/runtime/test/async_call.cpp | 35 +-- src/runtime/test/sim_lock.cpp | 2 +- src/runtime/threadpool_code.cpp | 2 +- src/server/available_detector.cpp | 13 +- src/server/info_collector.cpp | 36 +-- src/server/info_collector_app.cpp | 4 +- src/server/info_collector_app.h | 4 +- src/server/pegasus_mutation_duplicator.cpp | 11 +- src/server/pegasus_scan_context.h | 4 +- src/server/pegasus_server_impl.cpp | 67 ++--- src/server/pegasus_server_impl.h | 4 +- src/server/pegasus_write_service_impl.h | 8 +- .../test/pegasus_mutation_duplicator_test.cpp | 19 +- src/server/test/pegasus_server_impl_test.cpp | 3 +- src/shell/command_helper.h | 52 ++-- src/shell/commands/bulk_load.cpp | 4 +- src/shell/commands/debugger.cpp | 10 +- src/shell/commands/local_partition_split.cpp | 12 +- src/shell/main.cpp | 115 
+++++++-- .../function_test/base_api/test_basic.cpp | 3 +- .../function_test/base_api/test_batch_get.cpp | 34 +-- src/test/function_test/base_api/test_scan.cpp | 20 +- .../bulk_load/test_bulk_load.cpp | 5 +- .../function_test/restore/test_restore.cpp | 9 +- src/test/function_test/utils/test_util.cpp | 34 +-- src/test/function_test/utils/test_util.h | 34 +-- src/test/function_test/utils/utils.h | 10 +- src/test/kill_test/job.cpp | 4 +- src/test/kill_test/job.h | 4 +- src/test/kill_test/killer_handler.h | 4 +- src/test/kill_test/killer_handler_shell.cpp | 4 +- src/test/kill_test/killer_handler_shell.h | 4 +- src/utils/TokenBucket.h | 4 +- src/utils/binary_writer.cpp | 6 +- src/utils/binary_writer.h | 2 +- src/utils/chrono_literals.h | 4 +- src/utils/command_manager.h | 15 +- src/utils/configuration.cpp | 2 +- src/utils/crc.cpp | 8 +- src/utils/crc.h | 4 +- src/utils/customizable_id.h | 4 +- src/utils/distributed_lock_service.h | 26 +- src/utils/error_code.cpp | 2 +- src/utils/exp_delay.h | 2 +- src/utils/factory_store.h | 4 +- src/utils/function_traits.h | 2 +- src/utils/gpid.cpp | 2 +- src/utils/je_ctl.cpp | 10 +- src/utils/lockp.std.h | 4 +- src/utils/metrics.h | 7 +- src/utils/optional.h | 6 +- src/utils/preloadable.h | 4 +- src/utils/priority_queue.h | 4 +- src/utils/process_utils.cpp | 6 +- src/utils/process_utils.h | 4 +- src/utils/safe_strerror_posix.cpp | 4 +- src/utils/safe_strerror_posix.h | 4 +- src/utils/simple_logger.cpp | 2 +- src/utils/singleton_store.h | 4 +- src/utils/strings.cpp | 4 +- src/utils/synchronize.h | 6 +- src/utils/test/TokenBucketTest.cpp | 5 +- src/utils/test/file_system_test.cpp | 7 +- src/utils/test/long_adder_test.cpp | 2 +- src/utils/test/output_utils_test.cpp | 4 +- src/utils/thread_access_checker.cpp | 2 +- src/utils/thread_access_checker.h | 2 +- src/utils/threadpool_code.h | 2 +- src/utils/threadpool_spec.h | 2 +- src/utils/uniq_timestamp_us.h | 2 +- src/utils/work_queue.h | 2 +- src/utils/zlock_provider.h | 2 +- 
src/utils/zlocks.h | 8 +- .../distributed_lock_service_zookeeper.cpp | 4 +- .../distributed_lock_service_zookeeper.h | 4 +- src/zookeeper/lock_struct.cpp | 15 +- src/zookeeper/lock_struct.h | 4 +- src/zookeeper/lock_types.h | 4 +- .../test/distributed_lock_zookeeper.cpp | 37 +-- src/zookeeper/zookeeper_error.cpp | 4 +- src/zookeeper/zookeeper_error.h | 2 +- src/zookeeper/zookeeper_session.cpp | 4 +- src/zookeeper/zookeeper_session.h | 4 +- src/zookeeper/zookeeper_session_mgr.cpp | 4 +- 246 files changed, 2283 insertions(+), 2026 deletions(-) delete mode 100644 docker/clang-format-3.9/Dockerfile diff --git a/.clang-format b/.clang-format index 50a16dbaf4..26699cd819 100644 --- a/.clang-format +++ b/.clang-format @@ -14,56 +14,82 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. + +# Reference: https://releases.llvm.org/14.0.0/tools/clang/docs/ClangFormatStyleOptions.html --- Language: Cpp # BasedOnStyle: LLVM AccessModifierOffset: -4 AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: false -AlignConsecutiveDeclarations: false -AlignEscapedNewlinesLeft: false -AlignOperands: true +AlignArrayOfStructures: None +AlignConsecutiveAssignments: None +AlignConsecutiveBitFields: None +AlignConsecutiveDeclarations: None +AlignConsecutiveMacros: None +AlignEscapedNewlines: Right +AlignOperands: Align AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false +AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false +AllowShortEnumsOnASingleLine: true AllowShortFunctionsOnASingleLine: All -AllowShortIfStatementsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: All AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false 
-AlwaysBreakTemplateDeclarations: true +AlwaysBreakTemplateDeclarations: Yes BinPackArguments: false BinPackParameters: false -BraceWrapping: +BitFieldColonSpacing: Both +BraceWrapping: + AfterCaseLabel: false AfterClass: true - AfterControlStatement: false + AfterControlStatement: Never AfterEnum: true + AfterExternBlock: false AfterFunction: true AfterNamespace: false AfterObjCDeclaration: false - AfterStruct: true - AfterUnion: true - BeforeCatch: false - BeforeElse: false - IndentBraces: false + AfterStruct: true + AfterUnion: true + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyNamespace: true + SplitEmptyRecord: true +BreakAfterJavaFieldAnnotations: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom +BreakBeforeConceptDeclarations: true BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeColon BreakConstructorInitializersBeforeComma: false -BreakAfterJavaFieldAnnotations: false +BreakInheritanceList: BeforeColon BreakStringLiterals: true ColumnLimit: 100 CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true +DeriveLineEnding: true DerivePointerAlignment: false DisableFormat: false +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: LogicalBlock ExperimentalAutoDetectBinPacking: true +FixNamespaceComments: true ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +IfMacros: [ KJ_IF_MAYBE ] +IncludeBlocks: Preserve IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 @@ -72,40 +98,86 @@ IncludeCategories: - Regex: '.*' Priority: 1 IncludeIsMainRegex: '$' +IndentAccessModifiers: false +IndentCaseBlocks: false IndentCaseLabels: false +IndentExternBlock: AfterExternBlock +IndentGotoLabels: true +IndentPPDirectives: None +IndentRequires: false 
IndentWidth: 4 IndentWrappedFunctionNames: false +InsertTrailingCommas: None JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: true +LambdaBodyIndentation: Signature +Language: Cpp MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None +ObjCBinPackProtocolList: Auto ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true +PPIndentWidth: -1 +PackConstructorInitializers: BinPack +PenaltyBreakAssignment: 4 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 +PenaltyBreakOpenParenthesis: 0 PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 4 PenaltyExcessCharacter: 1000000 +PenaltyIndentedWhitespace: 0 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Right +QualifierAlignment: Leave +ReferenceAlignment: Pointer ReflowComments: true -SortIncludes: false +RemoveBracesLLVM: false +SeparateDefinitionBlocks: Leave +ShortNamespaceLines: 1 +SortIncludes: Never +SortJavaStaticImport: Before +SortUsingDeclarations: true SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceAroundPointerQualifiers: Default SpaceBeforeAssignmentOperators: true +SpaceBeforeCaseColon: false +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true SpaceBeforeParens: ControlStatements +SpaceBeforeParensOptions: + AfterControlStatements: true + AfterForeachMacros: true + AfterIfMacros: true +SpaceBeforeRangeBasedForLoopColon: true +SpaceBeforeSquareBrackets: false +SpaceInEmptyBlock: false SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 -SpacesInAngles: false -SpacesInContainerLiterals: true +SpacesInAngles: Never SpacesInCStyleCastParentheses: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInLineCommentPrefix: + Maximum: -1 + Minimum: 1 SpacesInParentheses: false 
SpacesInSquareBrackets: false -Standard: Cpp11 +Standard: c++17 +StatementAttributeLikeMacros: [ Q_EMIT] +StatementMacros: [ Q_UNUSED, QT_REQUIRE_VERSION] TabWidth: 4 +UseCRLF: false UseTab: Never +WhitespaceSensitiveMacros: [ STRINGIZE, PP_STRINGIZE, BOOST_PP_STRINGIZE, NS_SWIFT_NAME, CF_SWIFT_NAME] ... diff --git a/.github/workflows/lint_and_test_cpp.yaml b/.github/workflows/lint_and_test_cpp.yaml index 8f6e0e300a..ab41a10cbb 100644 --- a/.github/workflows/lint_and_test_cpp.yaml +++ b/.github/workflows/lint_and_test_cpp.yaml @@ -51,13 +51,11 @@ env: jobs: cpp_clang_format_linter: name: Lint - runs-on: ubuntu-latest - container: - image: apache/pegasus:clang-format-3.9 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 - name: clang-format - run: ./scripts/run-clang-format.py --clang-format-executable clang-format-3.9 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . + run: ./scripts/run-clang-format.py --clang-format-executable clang-format-14 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . iwyu: name: IWYU diff --git a/.github/workflows/regular-build.yml b/.github/workflows/regular-build.yml index a8f4386217..d4c3fb1820 100644 --- a/.github/workflows/regular-build.yml +++ b/.github/workflows/regular-build.yml @@ -37,13 +37,11 @@ on: jobs: lint_cpp: name: Lint Cpp - runs-on: ubuntu-latest - container: - image: apache/pegasus:clang-format-3.9 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v3 - name: clang-format - run: ./scripts/run-clang-format.py --clang-format-executable clang-format-3.9 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . + run: ./scripts/run-clang-format.py --clang-format-executable clang-format-14 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . 
build_cpp: name: Build Cpp diff --git a/docker/clang-format-3.9/Dockerfile b/docker/clang-format-3.9/Dockerfile deleted file mode 100644 index 2ebea2de62..0000000000 --- a/docker/clang-format-3.9/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -FROM ubuntu:18.04 - -LABEL maintainer=wutao - -RUN sed -i 's/archive.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \ - && apt-get update -y \ - && apt-get install --no-install-recommends -y software-properties-common \ - && add-apt-repository ppa:git-core/ppa \ - && apt-get update -y \ - && apt-get install --no-install-recommends -y clang-format-3.9 git \ - && rm -rf /var/lib/apt/lists/* diff --git a/scripts/compile_thrift.py b/scripts/compile_thrift.py index f6d46a4d20..9fb949a3a6 100755 --- a/scripts/compile_thrift.py +++ b/scripts/compile_thrift.py @@ -132,7 +132,7 @@ def compile_thrift_file(thrift_info): print(cmd) # TODO(wutao1): code format files - # os.system("clang-format-3.9 -i output/*") + # os.system("clang-format-14 -i output/*") if "include_fix" in thrift_info: fix_include(thrift_name, thrift_info["include_fix"]) diff --git a/scripts/format_files.sh b/scripts/format_files.sh index 5697d720b2..62e34ebdcd 100755 --- a/scripts/format_files.sh +++ b/scripts/format_files.sh @@ -29,13 +29,13 @@ thirdparty=./thirdparty if [ $# -eq 0 ]; then echo "formating all .h/.cpp files in $root_dir ..." find . -type f -not \( -wholename "$linenoise/*" -o -wholename "$sds/*" -o -wholename "$thirdparty/*" \) \ - -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-3.9 -i -style=file + -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-14 -i -style=file elif [ $1 = "-h" ]; then echo "USAGE: ./format-files.sh [] -- format .h/.cpp files in $root_dir/relative_path" echo " ./format-files.sh means format all .h/.cpp files in $root_dir" else echo "formating all .h/.cpp files in $root_dir/$1 ..." 
find ./$1 -type f -not \( -wholename "$linenoise/*" -o -wholename "$sds/*" -o -wholename "$thirdparty/*" \) \ - -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-3.9 -i -style=file + -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-14 -i -style=file fi diff --git a/scripts/run-clang-format.py b/scripts/run-clang-format.py index 4c069a944c..ce2263ccdd 100755 --- a/scripts/run-clang-format.py +++ b/scripts/run-clang-format.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # MIT License # diff --git a/src/aio/test/aio.cpp b/src/aio/test/aio.cpp index 3e3c53d3e3..9193b33922 100644 --- a/src/aio/test/aio.cpp +++ b/src/aio/test/aio.cpp @@ -388,17 +388,18 @@ TEST_P(aio_test, dsn_file) uint64_t offset = 0; while (true) { aio_result rin; - aio_task_ptr tin = file::read(fin, - kUnitBuffer, - 1024, - offset, - LPC_AIO_TEST_READ, - nullptr, - [&rin](dsn::error_code err, size_t sz) { - rin.err = err; - rin.sz = sz; - }, - 0); + aio_task_ptr tin = file::read( + fin, + kUnitBuffer, + 1024, + offset, + LPC_AIO_TEST_READ, + nullptr, + [&rin](dsn::error_code err, size_t sz) { + rin.err = err; + rin.sz = sz; + }, + 0); ASSERT_NE(nullptr, tin); if (dsn::tools::get_current_tool()->name() != "simulator") { @@ -420,17 +421,18 @@ TEST_P(aio_test, dsn_file) } aio_result rout; - aio_task_ptr tout = file::write(fout, - kUnitBuffer, - rin.sz, - offset, - LPC_AIO_TEST_WRITE, - nullptr, - [&rout](dsn::error_code err, size_t sz) { - rout.err = err; - rout.sz = sz; - }, - 0); + aio_task_ptr tout = file::write( + fout, + kUnitBuffer, + rin.sz, + offset, + LPC_AIO_TEST_WRITE, + nullptr, + [&rout](dsn::error_code err, size_t sz) { + rout.err = err; + rout.sz = sz; + }, + 0); ASSERT_NE(nullptr, tout); tout->wait(); ASSERT_EQ(ERR_OK, rout.err); diff --git a/src/base/test/value_schema_test.cpp b/src/base/test/value_schema_test.cpp index 07ecf5f547..246eab5ef7 100644 --- a/src/base/test/value_schema_test.cpp +++ b/src/base/test/value_schema_test.cpp @@ 
-117,7 +117,9 @@ TEST(value_schema, update_expire_ts) uint32_t expire_ts; uint32_t update_expire_ts; } tests[] = { - {0, 1000, 10086}, {1, 1000, 10086}, {2, 1000, 10086}, + {0, 1000, 10086}, + {1, 1000, 10086}, + {2, 1000, 10086}, }; for (const auto &t : tests) { diff --git a/src/block_service/block_service.h b/src/block_service/block_service.h index d351dcf44a..6c235ae50e 100644 --- a/src/block_service/block_service.h +++ b/src/block_service/block_service.h @@ -422,6 +422,6 @@ class block_file : public dsn::ref_counter protected: std::string _name; }; -} -} -} +} // namespace block_service +} // namespace dist +} // namespace dsn diff --git a/src/block_service/block_service_manager.cpp b/src/block_service/block_service_manager.cpp index f77361cac7..1d0cd45f95 100644 --- a/src/block_service/block_service_manager.cpp +++ b/src/block_service/block_service_manager.cpp @@ -108,10 +108,11 @@ static create_file_response create_block_file_sync(const std::string &remote_fil task_tracker *tracker) { create_file_response ret; - fs->create_file(create_file_request{remote_file_path, ignore_meta}, - TASK_CODE_EXEC_INLINED, - [&ret](const create_file_response &resp) { ret = resp; }, - tracker); + fs->create_file( + create_file_request{remote_file_path, ignore_meta}, + TASK_CODE_EXEC_INLINED, + [&ret](const create_file_response &resp) { ret = resp; }, + tracker); tracker->wait_outstanding_tasks(); return ret; } @@ -120,10 +121,11 @@ static download_response download_block_file_sync(const std::string &local_file_path, block_file *bf, task_tracker *tracker) { download_response ret; - bf->download(download_request{local_file_path, 0, -1}, - TASK_CODE_EXEC_INLINED, - [&ret](const download_response &resp) { ret = resp; }, - tracker); + bf->download( + download_request{local_file_path, 0, -1}, + TASK_CODE_EXEC_INLINED, + [&ret](const download_response &resp) { ret = resp; }, + tracker); tracker->wait_outstanding_tasks(); return ret; } diff --git 
a/src/block_service/local/local_service.h b/src/block_service/local/local_service.h index c67b9913e3..8bc3ffd56c 100644 --- a/src/block_service/local/local_service.h +++ b/src/block_service/local/local_service.h @@ -113,6 +113,6 @@ class local_file_object : public block_file std::string _md5_value; bool _has_meta_synced; }; -} -} -} +} // namespace block_service +} // namespace dist +} // namespace dsn diff --git a/src/block_service/test/hdfs_service_test.cpp b/src/block_service/test/hdfs_service_test.cpp index f96549ec98..b0880de2a3 100644 --- a/src/block_service/test/hdfs_service_test.cpp +++ b/src/block_service/test/hdfs_service_test.cpp @@ -144,20 +144,22 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) // 1. clean up all old file in remote test directory. printf("clean up all old files.\n"); remove_path_response rem_resp; - s->remove_path(remove_path_request{kRemoteTestPath, true}, - LPC_TEST_HDFS, - [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, - nullptr) + s->remove_path( + remove_path_request{kRemoteTestPath, true}, + LPC_TEST_HDFS, + [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND == rem_resp.err); // 2. create file. 
printf("test write operation.\n"); create_file_response cf_resp; - s->create_file(create_file_request{kRemoteTestRWFile, false}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &r) { cf_resp = r; }, - nullptr) + s->create_file( + create_file_request{kRemoteTestRWFile, false}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &r) { cf_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); @@ -165,10 +167,11 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) dsn::blob bb(kTestBuffer.c_str(), 0, kTestBufferLength); write_response w_resp; cf_resp.file_handle - ->write(write_request{bb}, - LPC_TEST_HDFS, - [&w_resp](const write_response &w) { w_resp = w; }, - nullptr) + ->write( + write_request{bb}, + LPC_TEST_HDFS, + [&w_resp](const write_response &w) { w_resp = w; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, w_resp.err); ASSERT_EQ(kTestBufferLength, w_resp.written_size); @@ -178,10 +181,11 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) printf("test read just written contents.\n"); read_response r_resp; cf_resp.file_handle - ->read(read_request{0, -1}, - LPC_TEST_HDFS, - [&r_resp](const read_response &r) { r_resp = r; }, - nullptr) + ->read( + read_request{0, -1}, + LPC_TEST_HDFS, + [&r_resp](const read_response &r) { r_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, r_resp.err); ASSERT_EQ(kTestBufferLength, r_resp.buffer.length()); @@ -191,10 +195,11 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) const uint64_t kOffset = 5; const int64_t kSize = 10; cf_resp.file_handle - ->read(read_request{kOffset, kSize}, - LPC_TEST_HDFS, - [&r_resp](const read_response &r) { r_resp = r; }, - nullptr) + ->read( + read_request{kOffset, kSize}, + LPC_TEST_HDFS, + [&r_resp](const read_response &r) { r_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, r_resp.err); ASSERT_EQ(kSize, r_resp.buffer.length()); @@ -225,40 +230,44 @@ TEST_P(HDFSClientTest, test_upload_and_download) // 1. clean up all old file in remote test directory. 
printf("clean up all old files.\n"); remove_path_response rem_resp; - s->remove_path(remove_path_request{kRemoteTestPath, true}, - LPC_TEST_HDFS, - [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, - nullptr) + s->remove_path( + remove_path_request{kRemoteTestPath, true}, + LPC_TEST_HDFS, + [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND == rem_resp.err); // 2. create file. fmt::printf("create and upload: {}.\n", kRemoteTestFile); create_file_response cf_resp; - s->create_file(create_file_request{kRemoteTestFile, true}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &r) { cf_resp = r; }, - nullptr) + s->create_file( + create_file_request{kRemoteTestFile, true}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &r) { cf_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); // 3. upload file. upload_response u_resp; cf_resp.file_handle - ->upload(upload_request{kLocalFile}, - LPC_TEST_HDFS, - [&u_resp](const upload_response &r) { u_resp = r; }, - nullptr) + ->upload( + upload_request{kLocalFile}, + LPC_TEST_HDFS, + [&u_resp](const upload_response &r) { u_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, u_resp.err); ASSERT_EQ(local_file_size, cf_resp.file_handle->get_size()); // 4. list directory. ls_response l_resp; - s->list_dir(ls_request{kRemoteTestPath}, - LPC_TEST_HDFS, - [&l_resp](const ls_response &resp) { l_resp = resp; }, - nullptr) + s->list_dir( + ls_request{kRemoteTestPath}, + LPC_TEST_HDFS, + [&l_resp](const ls_response &resp) { l_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, l_resp.err); ASSERT_EQ(1, l_resp.entries->size()); @@ -268,19 +277,21 @@ TEST_P(HDFSClientTest, test_upload_and_download) // 5. download file. 
download_response d_resp; fmt::printf("test download {}.\n", kRemoteTestFile); - s->create_file(create_file_request{kRemoteTestFile, false}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, - nullptr) + s->create_file( + create_file_request{kRemoteTestFile, false}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); ASSERT_EQ(local_file_size, cf_resp.file_handle->get_size()); std::string kLocalDownloadFile = "test_file_d"; cf_resp.file_handle - ->download(download_request{kLocalDownloadFile, 0, -1}, - LPC_TEST_HDFS, - [&d_resp](const download_response &resp) { d_resp = resp; }, - nullptr) + ->download( + download_request{kLocalDownloadFile, 0, -1}, + LPC_TEST_HDFS, + [&d_resp](const download_response &resp) { d_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, d_resp.err); ASSERT_EQ(local_file_size, d_resp.downloaded_size); @@ -342,10 +353,11 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download) printf("clean up all old files.\n"); remove_path_response rem_resp; - s->remove_path(remove_path_request{"hdfs_concurrent_test", true}, - LPC_TEST_HDFS, - [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, - nullptr) + s->remove_path( + remove_path_request{"hdfs_concurrent_test", true}, + LPC_TEST_HDFS, + [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND == rem_resp.err); @@ -354,10 +366,11 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download) std::vector block_files; for (int i = 0; i < total_files; ++i) { create_file_response cf_resp; - s->create_file(create_file_request{remote_file_names[i], true}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, - nullptr) + s->create_file( + create_file_request{remote_file_names[i], true}, + LPC_TEST_HDFS, + [&cf_resp](const 
create_file_response &resp) { cf_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); ASSERT_NE(nullptr, cf_resp.file_handle.get()); @@ -389,10 +402,11 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download) std::vector block_files; for (int i = 0; i < total_files; ++i) { create_file_response cf_resp; - s->create_file(create_file_request{remote_file_names[i], true}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &r) { cf_resp = r; }, - nullptr) + s->create_file( + create_file_request{remote_file_names[i], true}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &r) { cf_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); ASSERT_NE(nullptr, cf_resp.file_handle.get()); diff --git a/src/client/partition_resolver.cpp b/src/client/partition_resolver.cpp index 96d505e924..7cc5b00e9c 100644 --- a/src/client/partition_resolver.cpp +++ b/src/client/partition_resolver.cpp @@ -67,9 +67,8 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) rpc_response_handler old_callback; t->fetch_current_handler(old_callback); - auto new_callback = [ this, deadline_ms, oc = std::move(old_callback) ]( - dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + auto new_callback = [this, deadline_ms, oc = std::move(old_callback)]( + dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (req->header->gpid.value() != 0 && err != ERR_OK && error_retry(err)) { on_access_failure(req->header->gpid.get_partition_index(), err); // still got time, retry @@ -92,11 +91,12 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) enum_to_string(ctask->state())); // sleep gap milliseconds before retry - tasking::enqueue(LPC_RPC_DELAY_CALL, - nullptr, - [r, ctask]() { r->call_task(ctask); }, - 0, - std::chrono::milliseconds(gap)); + tasking::enqueue( + LPC_RPC_DELAY_CALL, + nullptr, + [r, ctask]() { r->call_task(ctask); }, + 0, + std::chrono::milliseconds(gap)); return; } else { 
LOG_ERROR("service access failed ({}), no more time for further tries, set error " @@ -112,27 +112,27 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) }; t->replace_callback(std::move(new_callback)); - resolve(hdr.client.partition_hash, - [t](resolve_result &&result) mutable { - if (result.err != ERR_OK) { - t->enqueue(result.err, nullptr); - return; - } + resolve( + hdr.client.partition_hash, + [t](resolve_result &&result) mutable { + if (result.err != ERR_OK) { + t->enqueue(result.err, nullptr); + return; + } - // update gpid when necessary - auto &hdr = *(t->get_request()->header); - if (hdr.gpid.value() != result.pid.value()) { - if (hdr.client.thread_hash == 0 // thread_hash is not assigned by applications - || - hdr.gpid.value() != 0 // requests set to child redirect to parent - ) { - hdr.client.thread_hash = result.pid.thread_hash(); - } - hdr.gpid = result.pid; + // update gpid when necessary + auto &hdr = *(t->get_request()->header); + if (hdr.gpid.value() != result.pid.value()) { + if (hdr.client.thread_hash == 0 // thread_hash is not assigned by applications + || hdr.gpid.value() != 0 // requests set to child redirect to parent + ) { + hdr.client.thread_hash = result.pid.thread_hash(); } - dsn_rpc_call(dns_resolver::instance().resolve_address(result.hp), t.get()); - }, - hdr.client.timeout_ms); + hdr.gpid = result.pid; + } + dsn_rpc_call(dns_resolver::instance().resolve_address(result.hp), t.get()); + }, + hdr.client.timeout_ms); } } // namespace replication } // namespace dsn diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp index d577425633..6282da64e4 100644 --- a/src/client/partition_resolver_simple.cpp +++ b/src/client/partition_resolver_simple.cpp @@ -187,11 +187,12 @@ void partition_resolver_simple::call(request_context_ptr &&request, bool from_me // delay 1 second for further config query if (from_meta_ack) { - tasking::enqueue(LPC_REPLICATION_DELAY_QUERY_CONFIG, - &_tracker, - [ 
=, req2 = request ]() mutable { call(std::move(req2), false); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_REPLICATION_DELAY_QUERY_CONFIG, + &_tracker, + [=, req2 = request]() mutable { call(std::move(req2), false); }, + 0, + std::chrono::seconds(1)); return; } @@ -206,12 +207,12 @@ void partition_resolver_simple::call(request_context_ptr &&request, bool from_me { zauto_lock l(request->lock); if (request->timeout_timer == nullptr) { - request->timeout_timer = - tasking::enqueue(LPC_REPLICATION_CLIENT_REQUEST_TIMEOUT, - &_tracker, - [ =, req2 = request ]() mutable { on_timeout(std::move(req2)); }, - 0, - std::chrono::milliseconds(timeout_ms)); + request->timeout_timer = tasking::enqueue( + LPC_REPLICATION_CLIENT_REQUEST_TIMEOUT, + &_tracker, + [=, req2 = request]() mutable { on_timeout(std::move(req2)); }, + 0, + std::chrono::milliseconds(timeout_ms)); } } diff --git a/src/client/replication_ddl_client.cpp b/src/client/replication_ddl_client.cpp index c0d82ad67e..4aadfd20e0 100644 --- a/src/client/replication_ddl_client.cpp +++ b/src/client/replication_ddl_client.cpp @@ -547,7 +547,7 @@ dsn::error_code replication_ddl_client::list_nodes(const dsn::replication::node_ std::map tmp_map; int alive_node_count = 0; - for (const auto & [ hp, type ] : nodes) { + for (const auto &[hp, type] : nodes) { if (type == dsn::replication::node_status::NS_ALIVE) { alive_node_count++; } @@ -809,7 +809,7 @@ dsn::error_code replication_ddl_client::list_app(const std::string &app_name, tp_nodes.add_column("primary"); tp_nodes.add_column("secondary"); tp_nodes.add_column("total"); - for (const auto & [ hp, pri_and_sec_rep_cnts ] : node_stat) { + for (const auto &[hp, pri_and_sec_rep_cnts] : node_stat) { tp_nodes.add_row(node_name(hp, resolve_ip)); tp_nodes.append_data(pri_and_sec_rep_cnts.first); tp_nodes.append_data(pri_and_sec_rep_cnts.second); @@ -1464,7 +1464,6 @@ void replication_ddl_client::end_meta_request(const rpc_response_task_ptr &callb &_tracker, [this, 
attempt_count, callback]( error_code err, dsn::message_ex *request, dsn::message_ex *response) mutable { - FAIL_POINT_INJECT_NOT_RETURN_F( "ddl_client_request_meta", [&err, this](absl::string_view str) { err = pop_mock_error(); }); diff --git a/src/client/replication_ddl_client.h b/src/client/replication_ddl_client.h index 4e91636cc1..14006b7ab2 100644 --- a/src/client/replication_ddl_client.h +++ b/src/client/replication_ddl_client.h @@ -299,7 +299,6 @@ class replication_ddl_client &_tracker, [this, task]( error_code err, dsn::message_ex *request, dsn::message_ex *response) mutable { - FAIL_POINT_INJECT_NOT_RETURN_F( "ddl_client_request_meta", [&err, this](absl::string_view str) { err = pop_mock_error(); }); @@ -379,10 +378,11 @@ class replication_ddl_client static constexpr int MAX_RETRY = 2; error_code err = ERR_UNKNOWN; for (int retry = 0; retry < MAX_RETRY; retry++) { - task_ptr task = rpc.call(dsn::dns_resolver::instance().resolve_address(_meta_server), - &_tracker, - [&err](error_code code) { err = code; }, - reply_thread_hash); + task_ptr task = rpc.call( + dsn::dns_resolver::instance().resolve_address(_meta_server), + &_tracker, + [&err](error_code code) { err = code; }, + reply_thread_hash); task->wait(); if (err == ERR_OK) { break; diff --git a/src/client_lib/client_factory.cpp b/src/client_lib/client_factory.cpp index 29f2a44dd5..a1401fa79a 100644 --- a/src/client_lib/client_factory.cpp +++ b/src/client_lib/client_factory.cpp @@ -32,4 +32,4 @@ pegasus_client *pegasus_client_factory::get_client(const char *cluster_name, con return client::pegasus_client_factory_impl::get_client(cluster_name, app_name); } -} // namespace +} // namespace pegasus diff --git a/src/client_lib/mutation.cpp b/src/client_lib/mutation.cpp index ba0431c3af..260dfca84d 100644 --- a/src/client_lib/mutation.cpp +++ b/src/client_lib/mutation.cpp @@ -90,4 +90,4 @@ void pegasus_client::mutations::get_mutations(std::vector &mutations) co mutations[pair.first].set_expire_ts_seconds = 
pair.second + current_time; } } -} +} // namespace pegasus diff --git a/src/client_lib/pegasus_client_factory_impl.h b/src/client_lib/pegasus_client_factory_impl.h index da1e054533..87d5326af8 100644 --- a/src/client_lib/pegasus_client_factory_impl.h +++ b/src/client_lib/pegasus_client_factory_impl.h @@ -45,5 +45,5 @@ class pegasus_client_factory_impl static cluster_to_app_map _cluster_to_clients; static ::dsn::zlock *_map_lock; }; -} -} // namespace +} // namespace client +} // namespace pegasus diff --git a/src/client_lib/pegasus_client_impl.cpp b/src/client_lib/pegasus_client_impl.cpp index c6dfeb44f8..2ad491c4f8 100644 --- a/src/client_lib/pegasus_client_impl.cpp +++ b/src/client_lib/pegasus_client_impl.cpp @@ -126,8 +126,7 @@ void pegasus_client_impl::async_set(const std::string &hash_key, // wrap the user defined callback function, generate a new callback function. auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -214,8 +213,7 @@ void pegasus_client_impl::async_multi_set(const std::string &hash_key, auto partition_hash = pegasus_key_hash(tmp_key); // wrap the user-defined-callback-function, generate a new callback function. 
auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -275,8 +273,7 @@ void pegasus_client_impl::async_get(const std::string &hash_key, pegasus_generate_key(req, hash_key, sort_key); auto partition_hash = pegasus_key_hash(req); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -365,8 +362,7 @@ void pegasus_client_impl::async_multi_get(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -465,8 +461,7 @@ void pegasus_client_impl::async_multi_get(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -544,8 +539,7 @@ void pegasus_client_impl::async_multi_get_sortkeys(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if 
(user_callback == nullptr) { return; } @@ -657,8 +651,7 @@ void pegasus_client_impl::async_del(const std::string &hash_key, auto partition_hash = pegasus_key_hash(req); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -738,8 +731,7 @@ void pegasus_client_impl::async_multi_del(const std::string &hash_key, auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -819,8 +811,7 @@ void pegasus_client_impl::async_incr(const std::string &hash_key, auto partition_hash = pegasus_key_hash(req.key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -923,8 +914,7 @@ void pegasus_client_impl::async_check_and_set(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -1053,8 +1043,7 @@ void pegasus_client_impl::async_check_and_mutate(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + 
::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -1223,9 +1212,8 @@ void pegasus_client_impl::async_get_unordered_scanners( return; } - auto new_callback = [ user_callback = std::move(callback), max_split_count, options, this ]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + auto new_callback = [user_callback = std::move(callback), max_split_count, options, this]( + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { std::vector scanners; query_cfg_response response; if (err == ERR_OK) { diff --git a/src/client_lib/pegasus_scanner_impl.cpp b/src/client_lib/pegasus_scanner_impl.cpp index 51282e14d3..a6eb731ccc 100644 --- a/src/client_lib/pegasus_scanner_impl.cpp +++ b/src/client_lib/pegasus_scanner_impl.cpp @@ -266,12 +266,13 @@ void pegasus_client_impl::pegasus_scanner_impl::_next_batch() CHECK(!_rpc_started, ""); _rpc_started = true; - _client->scan(req, - [this](::dsn::error_code err, - dsn::message_ex *req, - dsn::message_ex *resp) mutable { _on_scan_response(err, req, resp); }, - std::chrono::milliseconds(_options.timeout_ms), - _hash); + _client->scan( + req, + [this](::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) mutable { + _on_scan_response(err, req, resp); + }, + std::chrono::milliseconds(_options.timeout_ms), + _hash); } void pegasus_client_impl::pegasus_scanner_impl::_start_scan() @@ -393,13 +394,13 @@ void pegasus_client_impl::pegasus_scanner_impl_wrapper::async_next( async_scan_next_callback_t &&callback) { // wrap shared_ptr _p with callback - _p->async_next([ __p = _p, user_callback = std::move(callback) ](int error_code, - std::string &&hash_key, - std::string &&sort_key, - std::string &&value, - internal_info &&info, - uint32_t expire_ts_seconds, - int32_t kv_count) { + _p->async_next([__p = _p, user_callback = std::move(callback)](int error_code, + std::string &&hash_key, + std::string &&sort_key, + std::string 
&&value, + internal_info &&info, + uint32_t expire_ts_seconds, + int32_t kv_count) { user_callback(error_code, std::move(hash_key), std::move(sort_key), diff --git a/src/common/fs_manager.h b/src/common/fs_manager.h index 4a9d1f69fc..5820d26887 100644 --- a/src/common/fs_manager.h +++ b/src/common/fs_manager.h @@ -191,5 +191,5 @@ class fs_manager FRIEND_TEST(open_replica_test, open_replica_add_decree_and_ballot_check); FRIEND_TEST(replica_test, test_auto_trash_of_corruption); }; -} // replication -} // dsn +} // namespace replication +} // namespace dsn diff --git a/src/common/replication_other_types.h b/src/common/replication_other_types.h index 7f8c51dcd7..a80457b583 100644 --- a/src/common/replication_other_types.h +++ b/src/common/replication_other_types.h @@ -122,7 +122,7 @@ class replica_helper const std::string &key, /*out*/ std::vector &servers); }; -} -} // namespace +} // namespace replication +} // namespace dsn #endif diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp index 0cda09d5f0..62a0c7fc8a 100644 --- a/src/failure_detector/failure_detector.cpp +++ b/src/failure_detector/failure_detector.cpp @@ -101,12 +101,13 @@ error_code failure_detector::start(uint32_t check_interval_seconds, open_service(); // start periodically check job - _check_task = tasking::enqueue_timer(LPC_BEACON_CHECK, - &_tracker, - [this] { check_all_records(); }, - std::chrono::milliseconds(_check_interval_milliseconds), - -1, - std::chrono::milliseconds(_check_interval_milliseconds)); + _check_task = tasking::enqueue_timer( + LPC_BEACON_CHECK, + &_tracker, + [this] { check_all_records(); }, + std::chrono::milliseconds(_check_interval_milliseconds), + -1, + std::chrono::milliseconds(_check_interval_milliseconds)); _is_started = true; return ERR_OK; @@ -149,13 +150,13 @@ void failure_detector::register_master(const ::dsn::host_port &target) if (setup_timer) { // delay the beacon slightly to make first beacon greater than the // 
last_beacon_send_time_with_ack - ret.first->second.send_beacon_timer = - tasking::enqueue_timer(LPC_BEACON_SEND, - &_tracker, - [this, target]() { this->send_beacon(target, dsn_now_ms()); }, - std::chrono::milliseconds(_beacon_interval_milliseconds), - 0, - std::chrono::milliseconds(1)); + ret.first->second.send_beacon_timer = tasking::enqueue_timer( + LPC_BEACON_SEND, + &_tracker, + [this, target]() { this->send_beacon(target, dsn_now_ms()); }, + std::chrono::milliseconds(_beacon_interval_milliseconds), + 0, + std::chrono::milliseconds(1)); } } @@ -176,13 +177,13 @@ bool failure_detector::switch_master(const ::dsn::host_port &from, it->second.node = to; it->second.rejected = false; it->second.send_beacon_timer->cancel(true); - it->second.send_beacon_timer = - tasking::enqueue_timer(LPC_BEACON_SEND, - &_tracker, - [this, to]() { this->send_beacon(to, dsn_now_ms()); }, - std::chrono::milliseconds(_beacon_interval_milliseconds), - 0, - std::chrono::milliseconds(delay_milliseconds)); + it->second.send_beacon_timer = tasking::enqueue_timer( + LPC_BEACON_SEND, + &_tracker, + [this, to]() { this->send_beacon(to, dsn_now_ms()); }, + std::chrono::milliseconds(_beacon_interval_milliseconds), + 0, + std::chrono::milliseconds(delay_milliseconds)); _masters.insert(std::make_pair(to, it->second)); _masters.erase(from); @@ -592,24 +593,25 @@ void failure_detector::send_beacon(const host_port &target, uint64_t time) FMT_HOST_PORT_AND_IP(beacon, to_node), time); - ::dsn::rpc::call(addr_target, - RPC_FD_FAILURE_DETECTOR_PING, - beacon, - &_tracker, - [=](error_code err, beacon_ack &&resp) { - if (err != ::dsn::ERR_OK) { - beacon_ack ack; - ack.time = beacon.time; - SET_OBJ_IP_AND_HOST_PORT(ack, this_node, beacon, to_node); - RESET_IP_AND_HOST_PORT(ack, primary_node); - ack.is_master = false; - ack.allowed = true; - end_ping(err, ack, nullptr); - } else { - end_ping(err, std::move(resp), nullptr); - } - }, - std::chrono::milliseconds(_beacon_timeout_milliseconds)); -} + 
::dsn::rpc::call( + addr_target, + RPC_FD_FAILURE_DETECTOR_PING, + beacon, + &_tracker, + [=](error_code err, beacon_ack &&resp) { + if (err != ::dsn::ERR_OK) { + beacon_ack ack; + ack.time = beacon.time; + SET_OBJ_IP_AND_HOST_PORT(ack, this_node, beacon, to_node); + RESET_IP_AND_HOST_PORT(ack, primary_node); + ack.is_master = false; + ack.allowed = true; + end_ping(err, ack, nullptr); + } else { + end_ping(err, std::move(resp), nullptr); + } + }, + std::chrono::milliseconds(_beacon_timeout_milliseconds)); } -} // end namespace +} // namespace fd +} // namespace dsn diff --git a/src/failure_detector/failure_detector.h b/src/failure_detector/failure_detector.h index 4ff0fc53eb..51fa5bf544 100644 --- a/src/failure_detector/failure_detector.h +++ b/src/failure_detector/failure_detector.h @@ -237,5 +237,5 @@ class failure_detector : public failure_detector_service, // subClass can rewrite these method. virtual void send_beacon(const host_port &node, uint64_t time); }; -} -} // end namespace +} // namespace fd +} // namespace dsn diff --git a/src/failure_detector/failure_detector_multimaster.cpp b/src/failure_detector/failure_detector_multimaster.cpp index e12fc69d14..03af27402f 100644 --- a/src/failure_detector/failure_detector_multimaster.cpp +++ b/src/failure_detector/failure_detector_multimaster.cpp @@ -132,13 +132,13 @@ void slave_failure_detector_with_multimaster::on_master_disconnected( void slave_failure_detector_with_multimaster::on_master_connected(const host_port &node) { /* - * well, this is called in on_ping_internal, which is called by rep::end_ping. - * So this function is called in the lock context of fd::_lock - */ + * well, this is called in on_ping_internal, which is called by rep::end_ping. 
+ * So this function is called in the lock context of fd::_lock + */ bool is_primary = (_meta_servers.group_host_port()->leader() == node); if (is_primary) { _master_connected_callback(); } } -} -} // end namespace +} // namespace dist +} // namespace dsn diff --git a/src/failure_detector/failure_detector_multimaster.h b/src/failure_detector/failure_detector_multimaster.h index 3a803f9c6c..3f352c2b03 100644 --- a/src/failure_detector/failure_detector_multimaster.h +++ b/src/failure_detector/failure_detector_multimaster.h @@ -86,5 +86,5 @@ inline ::dsn::host_port slave_failure_detector_with_multimaster::current_server_ zauto_lock l(failure_detector::_lock); return _meta_servers.group_host_port()->leader(); } -} -} // end namespace +} // namespace dist +} // namespace dsn diff --git a/src/failure_detector/fd.client.h b/src/failure_detector/fd.client.h index 96fde50fe0..13fe0e5325 100644 --- a/src/failure_detector/fd.client.h +++ b/src/failure_detector/fd.client.h @@ -83,5 +83,5 @@ class failure_detector_client private: ::dsn::rpc_address _server; }; -} -} +} // namespace fd +} // namespace dsn diff --git a/src/failure_detector/fd.server.h b/src/failure_detector/fd.server.h index e518b8c47a..6bb686eb33 100644 --- a/src/failure_detector/fd.server.h +++ b/src/failure_detector/fd.server.h @@ -55,5 +55,5 @@ class failure_detector_service : public ::dsn::serverletunregister_rpc_handler(RPC_FD_FAILURE_DETECTOR_PING); } }; -} -} \ No newline at end of file +} // namespace fd +} // namespace dsn \ No newline at end of file diff --git a/src/failure_detector/test/failure_detector.cpp b/src/failure_detector/test/failure_detector.cpp index cfa8e81826..81e1fdf3cd 100644 --- a/src/failure_detector/test/failure_detector.cpp +++ b/src/failure_detector/test/failure_detector.cpp @@ -110,9 +110,10 @@ class worker_fd_test : public ::dsn::dist::slave_failure_detector_with_multimast public: worker_fd_test(replication::replica_stub *stub, std::vector &meta_servers) - : 
slave_failure_detector_with_multimaster(meta_servers, - [=]() { stub->on_meta_server_disconnected(); }, - [=]() { stub->on_meta_server_connected(); }) + : slave_failure_detector_with_multimaster( + meta_servers, + [=]() { stub->on_meta_server_disconnected(); }, + [=]() { stub->on_meta_server_connected(); }) { _send_ping_switch = false; } @@ -219,6 +220,7 @@ class test_worker : public service_app, public serverlet } worker_fd_test *fd() { return _worker_fd; } + private: worker_fd_test *_worker_fd; }; @@ -257,6 +259,7 @@ class test_master : public service_app error_code stop(bool) override { return ERR_OK; } master_fd_test *fd() { return _master_fd; } + private: master_fd_test *_master_fd; replication::fd_suboptions _opts; diff --git a/src/geo/bench/bench.cpp b/src/geo/bench/bench.cpp index 47687b0b89..42814687d3 100644 --- a/src/geo/bench/bench.cpp +++ b/src/geo/bench/bench.cpp @@ -44,8 +44,9 @@ static const int data_count = 10000; int main(int argc, char **argv) { if (argc < 7) { - std::cerr << "USAGE: " << argv[0] << " " - " [gen_data]" + std::cerr << "USAGE: " << argv[0] + << " " + " [gen_data]" << std::endl; return -1; } diff --git a/src/geo/lib/geo_client.cpp b/src/geo/lib/geo_client.cpp index bc415eeb34..1cbb7b49d6 100644 --- a/src/geo/lib/geo_client.cpp +++ b/src/geo/lib/geo_client.cpp @@ -166,7 +166,7 @@ void geo_client::async_set(const std::string &hash_key, hash_key, sort_key, true, - [ this, hash_key, sort_key, value, timeout_ms, ttl_seconds, cb = std::move(callback) ]( + [this, hash_key, sort_key, value, timeout_ms, ttl_seconds, cb = std::move(callback)]( int ec_, pegasus_client::internal_info &&info_) { if (ec_ != PERR_OK) { cb(ec_, std::move(info_)); @@ -261,7 +261,7 @@ void geo_client::async_get(const std::string &hash_key, _common_data_client->async_get( hash_key, sort_key, - [ this, &hash_key, &sort_key, id, cb = std::move(callback) ]( + [this, &hash_key, &sort_key, id, cb = std::move(callback)]( int ec_, std::string &&value_, 
pegasus_client::internal_info &&info_) { if (ec_ != PERR_OK) { cb(ec_, id, 0, 0); @@ -316,7 +316,7 @@ void geo_client::async_del(const std::string &hash_key, _common_data_client->async_get( hash_key, sort_key, - [ this, hash_key, sort_key, keep_common_data, timeout_ms, cb = std::move(callback) ]( + [this, hash_key, sort_key, keep_common_data, timeout_ms, cb = std::move(callback)]( int ec_, std::string &&value_, pegasus_client::internal_info &&info_) { if (ec_ == PERR_NOT_FOUND) { if (cb != nullptr) { @@ -510,16 +510,15 @@ void geo_client::async_search_radial(const std::string &hash_key, _common_data_client->async_get( hash_key, sort_key, - [ - this, - hash_key, - sort_key, - radius_m, - count, - sort_type, - timeout_ms, - cb = std::move(callback) - ](int ec_, std::string &&value_, pegasus_client::internal_info &&) mutable { + [this, + hash_key, + sort_key, + radius_m, + count, + sort_type, + timeout_ms, + cb = std::move(callback)]( + int ec_, std::string &&value_, pegasus_client::internal_info &&) mutable { if (ec_ != PERR_OK) { LOG_ERROR("get failed. 
hash_key={}, sort_key={}, error={}", utils::redact_sensitive_string(hash_key), @@ -566,8 +565,8 @@ void geo_client::async_search_radial(const S2LatLng &latlng, count, sort_type, timeout_ms, - [ this, count, sort_type, cb = std::move(callback) ]( - std::list> && results_) { + [this, count, sort_type, cb = std::move(callback)]( + std::list> &&results_) { std::list result; normalize_result(std::move(results_), count, sort_type, result); cb(PERR_OK, std::move(result)); @@ -605,13 +604,12 @@ void geo_client::async_get_result_from_cells(const S2CellUnion &cids, std::shared_ptr> send_finish = std::make_shared>(false); std::shared_ptr> scan_count = std::make_shared>(0); auto single_scan_finish_callback = - [ send_finish, scan_count, results, cb = std::move(callback) ]() - { - // NOTE: make sure fetch_sub is at first of the if expression to make it always execute - if (scan_count->fetch_sub(1) == 1 && send_finish->load()) { - cb(std::move(*results.get())); - } - }; + [send_finish, scan_count, results, cb = std::move(callback)]() { + // NOTE: make sure fetch_sub is at first of the if expression to make it always execute + if (scan_count->fetch_sub(1) == 1 && send_finish->load()) { + cb(std::move(*results.get())); + } + }; for (const auto &cid : cids) { if (cap_ptr->Contains(S2Cell(cid))) { @@ -873,7 +871,7 @@ void geo_client::start_scan(const std::string &hash_key, start_sort_key, stop_sort_key, options, - [ this, cap_ptr, count, cb = std::move(callback), &result ]( + [this, cap_ptr, count, cb = std::move(callback), &result]( int error_code, pegasus_client::pegasus_scanner *hash_scanner) mutable { if (error_code == PERR_OK) { do_scan(hash_scanner->get_smart_wrapper(), cap_ptr, count, std::move(cb), result); @@ -890,7 +888,7 @@ void geo_client::do_scan(pegasus_client::pegasus_scanner_wrapper scanner_wrapper std::list &result) { scanner_wrapper->async_next( - [ this, cap_ptr, count, scanner_wrapper, cb = std::move(callback), &result ]( + [this, cap_ptr, count, scanner_wrapper, 
cb = std::move(callback), &result]( int ret, std::string &&geo_hash_key, std::string &&geo_sort_key, @@ -984,9 +982,8 @@ void geo_client::async_distance(const std::string &hash_key1, std::shared_ptr ret = std::make_shared(PERR_OK); std::shared_ptr mutex = std::make_shared(); std::shared_ptr> get_result = std::make_shared>(); - auto async_get_callback = [ =, cb = std::move(callback) ]( - int ec_, std::string &&value_, pegasus_client::internal_info &&) - { + auto async_get_callback = [=, cb = std::move(callback)]( + int ec_, std::string &&value_, pegasus_client::internal_info &&) { if (ec_ != PERR_OK) { LOG_ERROR("get data failed. hash_key1={}, sort_key1={}, hash_key2={}, sort_key2={}, " "error={}", diff --git a/src/http/test/uri_decoder_test.cpp b/src/http/test/uri_decoder_test.cpp index b62e5b7bdf..43b964e601 100644 --- a/src/http/test/uri_decoder_test.cpp +++ b/src/http/test/uri_decoder_test.cpp @@ -90,5 +90,5 @@ TEST_F(uri_decoder_test, decode) } } -} // namespace dsn } // namespace uri +} // namespace dsn diff --git a/src/include/pegasus/error.h b/src/include/pegasus/error.h index 1638e6abe0..73cabfcafd 100644 --- a/src/include/pegasus/error.h +++ b/src/include/pegasus/error.h @@ -25,4 +25,4 @@ namespace pegasus { #include #undef PEGASUS_ERR_CODE -} // namespace +} // namespace pegasus diff --git a/src/include/rrdb/rrdb.code.definition.h b/src/include/rrdb/rrdb.code.definition.h index 9607883d1f..ac23da7d92 100644 --- a/src/include/rrdb/rrdb.code.definition.h +++ b/src/include/rrdb/rrdb.code.definition.h @@ -39,5 +39,5 @@ DEFINE_STORAGE_SCAN_RPC_CODE(RPC_RRDB_RRDB_SCAN) DEFINE_STORAGE_SCAN_RPC_CODE(RPC_RRDB_RRDB_CLEAR_SCANNER) DEFINE_STORAGE_SCAN_RPC_CODE(RPC_RRDB_RRDB_MULTI_GET) DEFINE_STORAGE_READ_RPC_CODE(RPC_RRDB_RRDB_BATCH_GET) -} -} +} // namespace apps +} // namespace dsn diff --git a/src/meta/app_env_validator.cpp b/src/meta/app_env_validator.cpp index 229389b3b2..c27bdb2cd5 100644 --- a/src/meta/app_env_validator.cpp +++ b/src/meta/app_env_validator.cpp 
@@ -51,7 +51,7 @@ app_env_validator::~app_env_validator() { deregister_handler("list"); } bool app_env_validator::validate_app_envs(const std::map &envs) { // only check rocksdb app envs currently - for (const auto & [ key, value ] : envs) { + for (const auto &[key, value] : envs) { if (replica_envs::ROCKSDB_STATIC_OPTIONS.find(key) == replica_envs::ROCKSDB_STATIC_OPTIONS.end() && replica_envs::ROCKSDB_DYNAMIC_OPTIONS.find(key) == diff --git a/src/meta/backup_engine.cpp b/src/meta/backup_engine.cpp index 1707fe65e2..600ea4e39a 100644 --- a/src/meta/backup_engine.cpp +++ b/src/meta/backup_engine.cpp @@ -190,11 +190,12 @@ void backup_engine::backup_app_partition(const gpid &pid) "backup_id({}): partition {} doesn't have a primary now, retry to backup it later.", _cur_backup.backup_id, pid); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { backup_app_partition(pid); }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { backup_app_partition(pid); }, + 0, + std::chrono::seconds(10)); return; } @@ -243,11 +244,12 @@ inline void backup_engine::handle_replica_backup_failed(const backup_response &r inline void backup_engine::retry_backup(const dsn::gpid pid) { - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { backup_app_partition(pid); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { backup_app_partition(pid); }, + 0, + std::chrono::seconds(1)); } void backup_engine::on_backup_reply(const error_code err, @@ -328,11 +330,12 @@ void backup_engine::write_backup_info() if (err != ERR_OK) { LOG_WARNING("backup_id({}): write backup info failed, retry it later.", _cur_backup.backup_id); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { write_backup_info(); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { write_backup_info(); }, + 0, + 
std::chrono::seconds(1)); return; } LOG_INFO("backup_id({}): successfully wrote backup info, backup for app {} completed.", diff --git a/src/meta/distributed_lock_service_simple.cpp b/src/meta/distributed_lock_service_simple.cpp index cc95287850..3eed9082cc 100644 --- a/src/meta/distributed_lock_service_simple.cpp +++ b/src/meta/distributed_lock_service_simple.cpp @@ -162,12 +162,13 @@ distributed_lock_service_simple::lock(const std::string &lock_id, } if (is_new) { - tasking::enqueue_timer(LPC_DIST_LOCK_SVC_RANDOM_EXPIRE, - &_tracker, - [=]() { random_lock_lease_expire(lock_id); }, - std::chrono::minutes(5), - 0, - std::chrono::seconds(1)); + tasking::enqueue_timer( + LPC_DIST_LOCK_SVC_RANDOM_EXPIRE, + &_tracker, + [=]() { random_lock_lease_expire(lock_id); }, + std::chrono::minutes(5), + 0, + std::chrono::seconds(1)); } if (err != ERR_IO_PENDING) { @@ -300,5 +301,5 @@ error_code distributed_lock_service_simple::query_cache(const std::string &lock_ } return err; } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/distributed_lock_service_simple.h b/src/meta/distributed_lock_service_simple.h index fe43a950d4..0d5d5e46fb 100644 --- a/src/meta/distributed_lock_service_simple.h +++ b/src/meta/distributed_lock_service_simple.h @@ -107,5 +107,5 @@ class distributed_lock_service_simple : public distributed_lock_service dsn::task_tracker _tracker; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/duplication/duplication_info.h b/src/meta/duplication/duplication_info.h index feb7503082..848c602eb1 100644 --- a/src/meta/duplication/duplication_info.h +++ b/src/meta/duplication/duplication_info.h @@ -108,8 +108,9 @@ class duplication_info bool is_valid_alteration(duplication_status::type to_status) const { - return to_status == _status || (to_status == duplication_status::DS_PREPARE && - _status == duplication_status::DS_INIT) || + return to_status == _status || + (to_status == duplication_status::DS_PREPARE && + _status == 
duplication_status::DS_INIT) || (to_status == duplication_status::DS_APP && _status == duplication_status::DS_PREPARE) || (to_status == duplication_status::DS_LOG && @@ -173,13 +174,13 @@ class duplication_info bool all_checkpoint_has_prepared() { int prepared = 0; - bool completed = - std::all_of(_progress.begin(), - _progress.end(), - [&](std::pair item) -> bool { - prepared = item.second.checkpoint_prepared ? prepared + 1 : prepared; - return item.second.checkpoint_prepared; - }); + bool completed = std::all_of(_progress.begin(), + _progress.end(), + [&](std::pair item) -> bool { + prepared = item.second.checkpoint_prepared ? prepared + 1 + : prepared; + return item.second.checkpoint_prepared; + }); if (!completed) { LOG_WARNING("replica checkpoint still running: {}/{}", prepared, _progress.size()); } diff --git a/src/meta/duplication/meta_duplication_service.cpp b/src/meta/duplication/meta_duplication_service.cpp index 77642c1f52..45c4bf7db9 100644 --- a/src/meta/duplication/meta_duplication_service.cpp +++ b/src/meta/duplication/meta_duplication_service.cpp @@ -78,7 +78,7 @@ void meta_duplication_service::query_duplication_info(const duplication_query_re } response.appid = app->app_id; - for (const auto & [ _, dup ] : app->duplications) { + for (const auto &[_, dup] : app->duplications) { dup->append_if_valid_for_query(*app, response.entry_list); } } @@ -259,7 +259,7 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) request.app_name, enum_to_string(app->status)); - for (const auto & [ _, dup_info ] : app->duplications) { + for (const auto &[_, dup_info] : app->duplications) { if (dup_info->remote_cluster_name == request.remote_cluster_name) { dup = dup_info; break; @@ -283,13 +283,13 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) dup->remote_app_name); } else { // Check if other apps of this cluster are duplicated to the same remote app. 
- for (const auto & [ app_name, cur_app_state ] : _state->_exist_apps) { + for (const auto &[app_name, cur_app_state] : _state->_exist_apps) { if (app_name == request.app_name) { // Skip this app since we want to check other apps. continue; } - for (const auto & [ _, dup_info ] : cur_app_state->duplications) { + for (const auto &[_, dup_info] : cur_app_state->duplications) { LOG_WARNING_DUP_HINT_AND_RETURN_IF_NOT( dup_info->remote_cluster_name != request.remote_cluster_name || dup_info->remote_app_name != remote_app_name, @@ -768,7 +768,7 @@ void meta_duplication_service::do_restore_duplication(dupid_t dup_id, // restore duplication info from json _meta_svc->get_meta_storage()->get_data( std::string(store_path), - [ dup_id, this, app = std::move(app), store_path ](const blob &json) { + [dup_id, this, app = std::move(app), store_path](const blob &json) { zauto_write_lock l(app_lock()); auto dup = duplication_info::decode_from_blob(dup_id, diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index 95a53d70e0..63e1fac30a 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -184,14 +184,15 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id) LOG_ERROR("{}: create file {} failed, restart this backup later", _backup_sig, create_file_req.file_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id]() { - zauto_lock l(_lock); - start_backup_app_meta_unlocked(app_id); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id]() { + zauto_lock l(_lock); + start_backup_app_meta_unlocked(app_id); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); return; } CHECK_NOTNULL(remote_file, @@ -223,14 +224,15 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id) LOG_WARNING("write {} failed, reason({}), try it later", remote_file->file_name(), resp.err); - 
tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id]() { - zauto_lock l(_lock); - start_backup_app_meta_unlocked(app_id); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id]() { + zauto_lock l(_lock); + start_backup_app_meta_unlocked(app_id); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); } }, &_tracker); @@ -290,14 +292,15 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id, LOG_ERROR("{}: create file {} failed, restart this backup later", _backup_sig, create_file_req.file_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id, write_callback]() { - zauto_lock l(_lock); - write_backup_app_finish_flag_unlocked(app_id, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id, write_callback]() { + zauto_lock l(_lock); + write_backup_app_finish_flag_unlocked(app_id, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); return; } @@ -339,14 +342,15 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id, LOG_WARNING("write {} failed, reason({}), try it later", remote_file->file_name(), resp.err); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id, write_callback]() { - zauto_lock l(_lock); - write_backup_app_finish_flag_unlocked(app_id, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id, write_callback]() { + zauto_lock l(_lock); + write_backup_app_finish_flag_unlocked(app_id, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); } }); } @@ -406,14 +410,15 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info, LOG_ERROR("{}: create file {} failed, restart this backup 
later", _backup_sig, create_file_req.file_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, b_info, write_callback]() { - zauto_lock l(_lock); - write_backup_info_unlocked(b_info, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, b_info, write_callback]() { + zauto_lock l(_lock); + write_backup_info_unlocked(b_info, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); return; } @@ -446,14 +451,15 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info, LOG_WARNING("write {} failed, reason({}), try it later", remote_file->file_name(), resp.err); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, b_info, write_callback]() { - zauto_lock l(_lock); - write_backup_info_unlocked(b_info, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, b_info, write_callback]() { + zauto_lock l(_lock); + write_backup_info_unlocked(b_info, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); } }); } @@ -532,14 +538,15 @@ void policy_context::start_backup_partition_unlocked(gpid pid) LOG_WARNING("{}: partition {} doesn't have a primary now, retry to backup it later", _backup_sig, pid); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { - zauto_lock l(_lock); - start_backup_partition_unlocked(pid); - }, - 0, - _backup_service->backup_option().reconfiguration_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { + zauto_lock l(_lock); + start_backup_partition_unlocked(pid); + }, + 0, + _backup_service->backup_option().reconfiguration_retry_delay_ms); return; } @@ -627,14 +634,15 @@ void policy_context::on_backup_reply(error_code err, } // retry to backup the partition. 
- tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { - zauto_lock l(_lock); - start_backup_partition_unlocked(pid); - }, - 0, - _backup_service->backup_option().request_backup_period_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { + zauto_lock l(_lock); + start_backup_partition_unlocked(pid); + }, + 0, + _backup_service->backup_option().request_backup_period_ms); } void policy_context::initialize_backup_progress_unlocked() @@ -709,15 +717,16 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b LOG_ERROR("{}: sync backup info({}) to remote storage got timeout, retry it later", _policy.policy_name, b_info.backup_id); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, b_info, sync_callback, create_new_node]() { - zauto_lock l(_lock); - sync_backup_to_remote_storage_unlocked( - std::move(b_info), std::move(sync_callback), create_new_node); - }, - 0, - _backup_service->backup_option().meta_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, b_info, sync_callback, create_new_node]() { + zauto_lock l(_lock); + sync_backup_to_remote_storage_unlocked( + std::move(b_info), std::move(sync_callback), create_new_node); + }, + 0, + _backup_service->backup_option().meta_retry_delay_ms); } else { CHECK(false, "{}: we can't handle this right now, error({})", _backup_sig, err); } @@ -737,14 +746,15 @@ void policy_context::continue_current_backup_unlocked() if (_policy.is_disable) { LOG_INFO("{}: policy is disabled, ignore this backup and try it later", _policy.policy_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); 
return; } @@ -822,26 +832,28 @@ void policy_context::issue_new_backup_unlocked() // before issue new backup, we check whether the policy is dropped if (_policy.is_disable) { LOG_INFO("{}: policy is disabled, just ignore backup, try it later", _policy.policy_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); return; } if (!should_start_backup_unlocked()) { - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); LOG_INFO("{}: start issue new backup {}ms later", _policy.policy_name, _backup_service->backup_option().issue_backup_interval_ms.count()); @@ -854,14 +866,15 @@ void policy_context::issue_new_backup_unlocked() // TODO: just ignore this backup and wait next backup LOG_WARNING("{}: all apps have been dropped, ignore this backup and retry it later", _backup_sig); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); } else { task_ptr continue_to_backup = tasking::create_task(LPC_DEFAULT_CALLBACK, &_tracker, [this]() { @@ -1397,7 +1410,7 @@ void 
backup_service::do_add_policy(dsn::message_ex *req, _meta_svc->get_remote_storage()->create_node( policy_path, LPC_DEFAULT_CALLBACK, // TASK_CODE_EXEC_INLINED, - [ this, req, p, hint_msg, policy_name = cur_policy.policy_name ](error_code err) { + [this, req, p, hint_msg, policy_name = cur_policy.policy_name](error_code err) { if (err == ERR_OK || err == ERR_NODE_ALREADY_EXIST) { configuration_add_backup_policy_response resp; resp.hint_message = hint_msg; diff --git a/src/meta/meta_bulk_load_service.cpp b/src/meta/meta_bulk_load_service.cpp index 7317d57ea1..0845f4f208 100644 --- a/src/meta/meta_bulk_load_service.cpp +++ b/src/meta/meta_bulk_load_service.cpp @@ -155,10 +155,11 @@ void bulk_load_service::on_start_bulk_load(start_bulk_load_rpc rpc) // avoid possible load balancing _meta_svc->set_function_level(meta_function_level::fl_steady); - tasking::enqueue(LPC_META_STATE_NORMAL, - _meta_svc->tracker(), - [this, rpc, app]() { do_start_app_bulk_load(std::move(app), std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + _meta_svc->tracker(), + [this, rpc, app]() { do_start_app_bulk_load(std::move(app), std::move(rpc)); }, + server_state::sStateHash); } // ThreadPool: THREAD_POOL_META_SERVER @@ -372,11 +373,12 @@ bool bulk_load_service::check_partition_status( pconfig = app->partitions[pid.get_partition_index()]; if (!pconfig.hp_primary) { LOG_WARNING("app({}) partition({}) primary is invalid, try it later", app_name, pid); - tasking::enqueue(LPC_META_STATE_NORMAL, - _meta_svc->tracker(), - [retry_function, app_name, pid]() { retry_function(app_name, pid); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_META_STATE_NORMAL, + _meta_svc->tracker(), + [retry_function, app_name, pid]() { retry_function(app_name, pid); }, + 0, + std::chrono::seconds(1)); return false; } @@ -398,11 +400,12 @@ bool bulk_load_service::check_partition_status( app_name, pid, dsn::enum_to_string(p_status)); - 
tasking::enqueue(LPC_META_STATE_NORMAL, - _meta_svc->tracker(), - [retry_function, app_name, pid]() { retry_function(app_name, pid); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_META_STATE_NORMAL, + _meta_svc->tracker(), + [retry_function, app_name, pid]() { retry_function(app_name, pid); }, + 0, + std::chrono::seconds(1)); return false; } return true; @@ -1609,13 +1612,13 @@ void bulk_load_service::on_query_bulk_load_status(query_bulk_load_rpc rpc) response.bulk_load_states.resize(partition_count); response.__set_hp_bulk_load_states( std::vector>(partition_count)); - for (const auto & [ pid, pbls_by_hps ] : _partitions_bulk_load_state) { + for (const auto &[pid, pbls_by_hps] : _partitions_bulk_load_state) { if (pid.get_app_id() == app_id) { auto pidx = pid.get_partition_index(); response.hp_bulk_load_states[pidx] = pbls_by_hps; std::map pbls_by_addrs; - for (const auto & [ hp, pbls ] : pbls_by_hps) { + for (const auto &[hp, pbls] : pbls_by_hps) { pbls_by_addrs[dsn::dns_resolver::instance().resolve_address(hp)] = pbls; } response.bulk_load_states[pidx] = pbls_by_addrs; diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp index 56cc04e46b..1d900fbcbe 100644 --- a/src/meta/meta_server_failure_detector.cpp +++ b/src/meta/meta_server_failure_detector.cpp @@ -341,5 +341,5 @@ meta_server_failure_detector::get_stability_map_for_test() { return &_stablity; } -} -} +} // namespace replication +} // namespace dsn diff --git a/src/meta/meta_server_failure_detector.h b/src/meta/meta_server_failure_detector.h index a4f7b6d122..f656479a1a 100644 --- a/src/meta/meta_server_failure_detector.h +++ b/src/meta/meta_server_failure_detector.h @@ -154,5 +154,5 @@ class meta_server_failure_detector : public fd::failure_detector void set_leader_for_test(const host_port &leader_host_port, bool is_myself_leader); stability_map *get_stability_map_for_test(); }; -} -} +} // namespace replication +} // namespace dsn diff --git 
a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp index b619bd55b7..a5c62ea418 100644 --- a/src/meta/meta_service.cpp +++ b/src/meta/meta_service.cpp @@ -982,10 +982,11 @@ void meta_service::on_add_duplication(duplication_add_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _dup_svc->add_duplication(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _dup_svc->add_duplication(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_modify_duplication(duplication_modify_rpc rpc) @@ -998,10 +999,11 @@ void meta_service::on_modify_duplication(duplication_modify_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _dup_svc->modify_duplication(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _dup_svc->modify_duplication(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_duplication_info(duplication_query_rpc rpc) @@ -1023,16 +1025,17 @@ void meta_service::on_duplication_sync(duplication_sync_rpc rpc) return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { - if (_dup_svc) { - _dup_svc->duplication_sync(std::move(rpc)); - } else { - rpc.response().err = ERR_SERVICE_NOT_ACTIVE; - } - }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { + if (_dup_svc) { + _dup_svc->duplication_sync(std::move(rpc)); + } else { + rpc.response().err = ERR_SERVICE_NOT_ACTIVE; + } + }, + server_state::sStateHash); } void meta_service::recover_duplication_from_meta_state() @@ -1113,10 +1116,11 @@ void meta_service::on_start_partition_split(start_split_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - 
tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->start_partition_split(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->start_partition_split(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_control_partition_split(control_split_rpc rpc) @@ -1130,10 +1134,11 @@ void meta_service::on_control_partition_split(control_split_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->control_partition_split(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->control_partition_split(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_partition_split(query_split_rpc rpc) @@ -1156,10 +1161,11 @@ void meta_service::on_register_child_on_meta(register_child_rpc rpc) return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->register_child_on_meta(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->register_child_on_meta(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_notify_stop_split(notify_stop_split_rpc rpc) @@ -1172,10 +1178,11 @@ void meta_service::on_notify_stop_split(notify_stop_split_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->notify_stop_split(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->notify_stop_split(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_child_state(query_child_state_rpc rpc) @@ -1216,10 +1223,11 @@ void 
meta_service::on_control_bulk_load(control_bulk_load_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _bulk_load_svc->on_control_bulk_load(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _bulk_load_svc->on_control_bulk_load(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_bulk_load_status(query_bulk_load_rpc rpc) @@ -1247,10 +1255,11 @@ void meta_service::on_clear_bulk_load(clear_bulk_load_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _bulk_load_svc->on_clear_bulk_load(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _bulk_load_svc->on_clear_bulk_load(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_start_backup_app(start_backup_app_rpc rpc) diff --git a/src/meta/meta_split_service.cpp b/src/meta/meta_split_service.cpp index 00f4449a54..ca5072c6be 100644 --- a/src/meta/meta_split_service.cpp +++ b/src/meta/meta_split_service.cpp @@ -284,15 +284,15 @@ void meta_split_service::on_add_child_on_remote_storage_reply(error_code ec, (ec == ERR_NODE_ALREADY_EXIST && create_new)) { // retry register child on remote storage bool retry_create_new = (ec == ERR_TIMEOUT) ? create_new : false; int delay = (ec == ERR_TIMEOUT) ? 
1 : 0; - parent_context.pending_sync_task = - tasking::enqueue(LPC_META_STATE_HIGH, - nullptr, - [this, parent_context, rpc, retry_create_new]() mutable { - parent_context.pending_sync_task = - add_child_on_remote_storage(rpc, retry_create_new); - }, - 0, - std::chrono::seconds(delay)); + parent_context.pending_sync_task = tasking::enqueue( + LPC_META_STATE_HIGH, + nullptr, + [this, parent_context, rpc, retry_create_new]() mutable { + parent_context.pending_sync_task = + add_child_on_remote_storage(rpc, retry_create_new); + }, + 0, + std::chrono::seconds(delay)); return; } CHECK_EQ_MSG(ec, ERR_OK, "we can't handle this right now"); @@ -405,8 +405,8 @@ void meta_split_service::do_control_single(std::shared_ptr app, contr auto iter = app->helpers->split_states.status.find(parent_pidx); if (iter == app->helpers->split_states.status.end()) { - response.err = - control_type == split_control_type::PAUSE ? ERR_CHILD_REGISTERED : ERR_INVALID_STATE; + response.err = control_type == split_control_type::PAUSE ? 
ERR_CHILD_REGISTERED + : ERR_INVALID_STATE; response.__set_hint_msg(fmt::format("partition[{}] is not splitting", parent_pidx)); LOG_ERROR("{} split for app({}) failed, {}", control_type_str(control_type), diff --git a/src/meta/meta_state_service.h b/src/meta/meta_state_service.h index d79f1b3652..7f2126c19a 100644 --- a/src/meta/meta_state_service.h +++ b/src/meta/meta_state_service.h @@ -177,5 +177,5 @@ class meta_state_service const err_stringv_callback &cb_get_children, dsn::task_tracker *tracker = nullptr) = 0; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/meta_state_service_simple.cpp b/src/meta/meta_state_service_simple.cpp index aa54612032..5eaab761d5 100644 --- a/src/meta/meta_state_service_simple.cpp +++ b/src/meta/meta_state_service_simple.cpp @@ -237,8 +237,8 @@ error_code meta_state_service_simple::apply_transaction( error_code meta_state_service_simple::initialize(const std::vector &args) { - const char *work_dir = - args.empty() ? service_app::current_service_app_info().data_dir.c_str() : args[0].c_str(); + const char *work_dir = args.empty() ? 
service_app::current_service_app_info().data_dir.c_str() + : args[0].c_str(); _offset = 0; std::string log_path = dsn::utils::filesystem::path_combine(work_dir, "meta_state_service.log"); @@ -426,9 +426,10 @@ task_ptr meta_state_service_simple::submit_transaction( CHECK_EQ_MSG(dest - batch.get(), total_size, "memcpy error"); task_ptr task(new error_code_future(cb_code, cb_transaction, 0)); task->set_tracker(tracker); - write_log(blob(batch, total_size), - [this, t_entries] { return apply_transaction(t_entries); }, - task); + write_log( + blob(batch, total_size), + [this, t_entries] { return apply_transaction(t_entries); }, + task); return task; } } @@ -441,9 +442,10 @@ task_ptr meta_state_service_simple::create_node(const std::string &node, { task_ptr task(new error_code_future(cb_code, cb_create, 0)); task->set_tracker(tracker); - write_log(create_node_log::get_log(node, value), - [=] { return create_node_internal(node, value); }, - task); + write_log( + create_node_log::get_log(node, value), + [=] { return create_node_internal(node, value); }, + task); return task; } @@ -455,9 +457,10 @@ task_ptr meta_state_service_simple::delete_node(const std::string &node, { task_ptr task(new error_code_future(cb_code, cb_delete, 0)); task->set_tracker(tracker); - write_log(delete_node_log::get_log(node, recursively_delete), - [=] { return delete_node_internal(node, recursively_delete); }, - task); + write_log( + delete_node_log::get_log(node, recursively_delete), + [=] { return delete_node_internal(node, recursively_delete); }, + task); return task; } diff --git a/src/meta/meta_state_service_simple.h b/src/meta/meta_state_service_simple.h index 6a415531f6..f563f6c1bf 100644 --- a/src/meta/meta_state_service_simple.h +++ b/src/meta/meta_state_service_simple.h @@ -225,12 +225,12 @@ class meta_state_service_simple : public meta_state_service shared_blob.length() - sizeof(log_header); return shared_blob; } - static void write(binary_writer &writer, const Head &head, const Tail 
&... tail) + static void write(binary_writer &writer, const Head &head, const Tail &...tail) { marshall(writer, head, DSF_THRIFT_BINARY); log_struct::write(writer, tail...); } - static void parse(binary_reader &reader, Head &head, Tail &... tail) + static void parse(binary_reader &reader, Head &head, Tail &...tail) { unmarshall(reader, head, DSF_THRIFT_BINARY); log_struct::parse(reader, tail...); diff --git a/src/meta/meta_state_service_utils_impl.h b/src/meta/meta_state_service_utils_impl.h index f6a1b75918..c13063bdb0 100644 --- a/src/meta/meta_state_service_utils_impl.h +++ b/src/meta/meta_state_service_utils_impl.h @@ -127,11 +127,12 @@ struct on_create_recursively : operation args->nodes.pop(); } - remote_storage()->create_node(_cur_path, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - args->nodes.empty() ? args->val : blob(), - tracker()); + remote_storage()->create_node( + _cur_path, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + args->nodes.empty() ? 
args->val : blob(), + tracker()); } void on_error(error_code ec) @@ -167,11 +168,12 @@ struct on_create : operation void run() { - remote_storage()->create_node(args->node, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - args->val, - tracker()); + remote_storage()->create_node( + args->node, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + args->val, + tracker()); } void on_error(error_code ec) @@ -197,11 +199,12 @@ struct on_delete : operation void run() { - remote_storage()->delete_node(args->node, - args->is_recursively_delete, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - tracker()); + remote_storage()->delete_node( + args->node, + args->is_recursively_delete, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + tracker()); } void on_error(error_code ec) @@ -211,8 +214,8 @@ struct on_delete : operation return; } - auto type = - args->is_recursively_delete ? op_type::OP_DELETE_RECURSIVELY : op_type::OP_DELETE; + auto type = args->is_recursively_delete ? 
op_type::OP_DELETE_RECURSIVELY + : op_type::OP_DELETE; operation::on_error(this, type, ec, args->node); } }; @@ -257,11 +260,12 @@ struct on_set_data : operation void run() { - remote_storage()->set_data(args->node, - args->val, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - tracker()); + remote_storage()->set_data( + args->node, + args->val, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + tracker()); } void on_error(error_code ec) diff --git a/src/meta/meta_state_service_zookeeper.cpp b/src/meta/meta_state_service_zookeeper.cpp index c300d8f50a..e9c659b6a5 100644 --- a/src/meta/meta_state_service_zookeeper.cpp +++ b/src/meta/meta_state_service_zookeeper.cpp @@ -58,6 +58,7 @@ class zoo_transaction : public meta_state_service::transaction_entries virtual error_code get_result(unsigned int entry_index) override; std::shared_ptr packet() { return _pkt; } + private: std::shared_ptr _pkt; }; @@ -262,47 +263,49 @@ task_ptr meta_state_service_zookeeper::delete_node(const std::string &node, { error_code_future_ptr tsk(new error_code_future(cb_code, cb_delete, 0)); tsk->set_tracker(tracker); - err_stringv_callback after_get_children = [node, recursively_delete, cb_code, tsk, this]( - error_code err, const std::vector &children) { - if (ERR_OK != err) - tsk->enqueue_with(err); - else if (children.empty()) - delete_empty_node( - node, cb_code, [tsk](error_code err) { tsk->enqueue_with(err); }, &_tracker); - else if (!recursively_delete) - tsk->enqueue_with(ERR_INVALID_PARAMETERS); - else { - std::atomic_int *child_count = new std::atomic_int(); - std::atomic_int *error_count = new std::atomic_int(); - - child_count->store((int)children.size()); - error_count->store(0); - - for (auto &child : children) { - delete_node(node + "/" + child, - true, - cb_code, - [=](error_code err) { - if (ERR_OK != err) - ++(*error_count); - int result = --(*child_count); - if (0 == result) { - if (0 == *error_count) - 
delete_empty_node( - node, - cb_code, - [tsk](error_code err) { tsk->enqueue_with(err); }, - &_tracker); - else - tsk->enqueue_with(ERR_FILE_OPERATION_FAILED); - delete child_count; - delete error_count; - } - }, - &_tracker); + err_stringv_callback after_get_children = + [node, recursively_delete, cb_code, tsk, this](error_code err, + const std::vector &children) { + if (ERR_OK != err) + tsk->enqueue_with(err); + else if (children.empty()) + delete_empty_node( + node, cb_code, [tsk](error_code err) { tsk->enqueue_with(err); }, &_tracker); + else if (!recursively_delete) + tsk->enqueue_with(ERR_INVALID_PARAMETERS); + else { + std::atomic_int *child_count = new std::atomic_int(); + std::atomic_int *error_count = new std::atomic_int(); + + child_count->store((int)children.size()); + error_count->store(0); + + for (auto &child : children) { + delete_node( + node + "/" + child, + true, + cb_code, + [=](error_code err) { + if (ERR_OK != err) + ++(*error_count); + int result = --(*child_count); + if (0 == result) { + if (0 == *error_count) + delete_empty_node( + node, + cb_code, + [tsk](error_code err) { tsk->enqueue_with(err); }, + &_tracker); + else + tsk->enqueue_with(ERR_FILE_OPERATION_FAILED); + delete child_count; + delete error_count; + } + }, + &_tracker); + } } - } - }; + }; get_children(node, cb_code, after_get_children, &_tracker); return tsk; @@ -429,5 +432,5 @@ void meta_state_service_zookeeper::visit_zookeeper_internal(ref_this, break; } } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/meta_state_service_zookeeper.h b/src/meta/meta_state_service_zookeeper.h index 9c450f543e..2fda95bf02 100644 --- a/src/meta/meta_state_service_zookeeper.h +++ b/src/meta/meta_state_service_zookeeper.h @@ -119,5 +119,5 @@ class meta_state_service_zookeeper : public meta_state_service, public ref_count task_ptr callback, void *result /*zookeeper_session::zoo_opcontext**/); }; -} -} +} // namespace dist +} // namespace dsn diff --git 
a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp index 37f627f9ea..70b13bae95 100644 --- a/src/meta/partition_guardian.cpp +++ b/src/meta/partition_guardian.cpp @@ -601,7 +601,7 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g "gpid({}) refuse to use selected node({}) as it is in black list", gpid, node); } newly_partitions *min_server_np = nullptr; - for (auto & [ _, ns ] : *view.nodes) { + for (auto &[_, ns] : *view.nodes) { if (!ns.alive() || is_member(pc, ns.host_port()) || in_black_list(ns.host_port())) { continue; } diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp index e1cf2b26ac..51621b4336 100644 --- a/src/meta/server_state.cpp +++ b/src/meta/server_state.cpp @@ -487,10 +487,11 @@ error_code server_state::sync_apps_to_remote_storage() error_code err; dist::meta_state_service *storage = _meta_svc->get_remote_storage(); - auto t = storage->create_node(apps_path, - LPC_META_CALLBACK, - [&err](error_code ec) { err = ec; }, - blob(lock_state, 0, strlen(lock_state))); + auto t = storage->create_node( + apps_path, + LPC_META_CALLBACK, + [&err](error_code ec) { err = ec; }, + blob(lock_state, 0, strlen(lock_state))); t->wait(); if (err != ERR_NODE_ALREADY_EXIST && err != ERR_OK) { @@ -510,19 +511,19 @@ error_code server_state::sync_apps_to_remote_storage() "invalid app status"); blob value = app->to_json(app_status::AS_CREATING == app->status ? 
app_status::AS_AVAILABLE : app_status::AS_DROPPED); - storage->create_node(path, - LPC_META_CALLBACK, - [&err, path](error_code ec) { - if (ec != ERR_OK && ec != ERR_NODE_ALREADY_EXIST) { - LOG_WARNING( - "create app node failed, path({}) reason({})", path, ec); - err = ec; - } else { - LOG_INFO("create app node {} ok", path); - } - }, - value, - &tracker); + storage->create_node( + path, + LPC_META_CALLBACK, + [&err, path](error_code ec) { + if (ec != ERR_OK && ec != ERR_NODE_ALREADY_EXIST) { + LOG_WARNING("create app node failed, path({}) reason({})", path, ec); + err = ec; + } else { + LOG_INFO("create app node {} ok", path); + } + }, + value, + &tracker); } tracker.wait_outstanding_tasks(); @@ -534,8 +535,8 @@ error_code server_state::sync_apps_to_remote_storage() for (auto &kv : _all_apps) { std::shared_ptr &app = kv.second; for (unsigned int i = 0; i != app->partition_count; ++i) { - task_ptr init_callback = - tasking::create_task(LPC_META_STATE_HIGH, &tracker, [] {}, sStateHash); + task_ptr init_callback = tasking::create_task( + LPC_META_STATE_HIGH, &tracker, [] {}, sStateHash); init_app_partition_node(app, i, init_callback); } } @@ -561,8 +562,9 @@ dsn::error_code server_state::sync_apps_from_remote_storage() dsn::task_tracker tracker; dist::meta_state_service *storage = _meta_svc->get_remote_storage(); - auto sync_partition = [this, storage, &err, &tracker]( - std::shared_ptr &app, int partition_id, const std::string &partition_path) { + auto sync_partition = [this, storage, &err, &tracker](std::shared_ptr &app, + int partition_id, + const std::string &partition_path) { storage->get_data( partition_path, LPC_META_CALLBACK, @@ -1381,8 +1383,8 @@ void server_state::recall_app(dsn::message_ex *msg) if (has_seconds_expired(target_app->expire_second)) { response.err = ERR_APP_NOT_EXIST; } else { - std::string &new_app_name = - (request.new_app_name == "") ? 
target_app->app_name : request.new_app_name; + std::string &new_app_name = (request.new_app_name == "") ? target_app->app_name + : request.new_app_name; if (_exist_apps.find(new_app_name) != _exist_apps.end()) { response.err = ERR_INVALID_PARAMETERS; } else { @@ -1722,15 +1724,14 @@ void server_state::on_update_configuration_on_remote_reply( CHECK(app->status == app_status::AS_AVAILABLE || app->status == app_status::AS_DROPPING, "if app removed, this task should be cancelled"); if (ec == ERR_TIMEOUT) { - cc.pending_sync_task = - tasking::enqueue(LPC_META_STATE_HIGH, - tracker(), - [this, config_request, &cc]() mutable { - cc.pending_sync_task = - update_configuration_on_remote(config_request); - }, - 0, - std::chrono::seconds(1)); + cc.pending_sync_task = tasking::enqueue( + LPC_META_STATE_HIGH, + tracker(), + [this, config_request, &cc]() mutable { + cc.pending_sync_task = update_configuration_on_remote(config_request); + }, + 0, + std::chrono::seconds(1)); } else if (ec == ERR_OK) { update_configuration_locally(*app, config_request); cc.pending_sync_task = nullptr; @@ -2268,8 +2269,9 @@ error_code server_state::construct_partitions( std::ostringstream oss; if (skip_lost_partitions) { oss << "WARNING: partition(" << app->app_id << "." - << pc.pid.get_partition_index() << ") has no replica collected, force " - "recover the lost partition to empty" + << pc.pid.get_partition_index() + << ") has no replica collected, force " + "recover the lost partition to empty" << std::endl; } else { oss << "ERROR: partition(" << app->app_id << "." 
@@ -2586,9 +2588,8 @@ bool server_state::check_all_partitions() if (!add_secondary_proposed[i] && pc.hp_secondaries.empty()) { const auto &action = add_secondary_actions[i]; CHECK(action.hp_node, ""); - if (_add_secondary_enable_flow_control && - add_secondary_running_nodes[action.hp_node] >= - _add_secondary_max_count_for_one_node) { + if (_add_secondary_enable_flow_control && add_secondary_running_nodes[action.hp_node] >= + _add_secondary_max_count_for_one_node) { // ignore continue; } @@ -2607,9 +2608,8 @@ bool server_state::check_all_partitions() CHECK(action.hp_node, ""); gpid pid = add_secondary_gpids[i]; partition_configuration &pc = *get_config(_all_apps, pid); - if (_add_secondary_enable_flow_control && - add_secondary_running_nodes[action.hp_node] >= - _add_secondary_max_count_for_one_node) { + if (_add_secondary_enable_flow_control && add_secondary_running_nodes[action.hp_node] >= + _add_secondary_max_count_for_one_node) { LOG_INFO("do not send {} proposal for gpid({}) for flow control reason, target = " "{}, node = {}", ::dsn::enum_to_string(action.type), @@ -2754,8 +2754,7 @@ void server_state::do_update_app_info(const std::string &app_path, { // persistent envs to zookeeper blob value = dsn::json::json_forwarder::encode(info); - auto new_cb = [ this, app_path, info, user_cb = std::move(cb) ](error_code ec) - { + auto new_cb = [this, app_path, info, user_cb = std::move(cb)](error_code ec) { if (ec == ERR_OK) { user_cb(ec); } else if (ec == ERR_TIMEOUT) { @@ -3708,21 +3707,21 @@ task_ptr server_state::update_partition_max_replica_count_on_remote( new_ballot); // NOTICE: pending_sync_task should be reassigned - return tasking::enqueue(LPC_META_STATE_HIGH, - tracker(), - [this, app, new_partition_config, on_partition_updated]() mutable { - const auto &gpid = new_partition_config.pid; - const auto partition_index = gpid.get_partition_index(); + return tasking::enqueue( + LPC_META_STATE_HIGH, + tracker(), + [this, app, new_partition_config, 
on_partition_updated]() mutable { + const auto &gpid = new_partition_config.pid; + const auto partition_index = gpid.get_partition_index(); - zauto_write_lock l(_lock); + zauto_write_lock l(_lock); - auto &context = app->helpers->contexts[partition_index]; - context.pending_sync_task = - update_partition_max_replica_count_on_remote( - app, new_partition_config, on_partition_updated); - }, - server_state::sStateHash, - std::chrono::seconds(1)); + auto &context = app->helpers->contexts[partition_index]; + context.pending_sync_task = update_partition_max_replica_count_on_remote( + app, new_partition_config, on_partition_updated); + }, + server_state::sStateHash, + std::chrono::seconds(1)); } LOG_INFO("request for updating partition-level max_replica_count on remote storage: " @@ -3776,22 +3775,21 @@ void server_state::on_update_partition_max_replica_count_on_remote_reply( auto &context = app->helpers->contexts[partition_index]; if (ec == ERR_TIMEOUT) { // NOTICE: pending_sync_task need to be reassigned - context.pending_sync_task = - tasking::enqueue(LPC_META_STATE_HIGH, - tracker(), - [this, app, new_partition_config, on_partition_updated]() mutable { - const auto &gpid = new_partition_config.pid; - const auto partition_index = gpid.get_partition_index(); + context.pending_sync_task = tasking::enqueue( + LPC_META_STATE_HIGH, + tracker(), + [this, app, new_partition_config, on_partition_updated]() mutable { + const auto &gpid = new_partition_config.pid; + const auto partition_index = gpid.get_partition_index(); - zauto_write_lock l(_lock); + zauto_write_lock l(_lock); - auto &context = app->helpers->contexts[partition_index]; - context.pending_sync_task = - update_partition_max_replica_count_on_remote( - app, new_partition_config, on_partition_updated); - }, - server_state::sStateHash, - std::chrono::seconds(1)); + auto &context = app->helpers->contexts[partition_index]; + context.pending_sync_task = update_partition_max_replica_count_on_remote( + app, 
new_partition_config, on_partition_updated); + }, + server_state::sStateHash, + std::chrono::seconds(1)); return; } diff --git a/src/meta/test/balancer_simulator/balancer_simulator.cpp b/src/meta/test/balancer_simulator/balancer_simulator.cpp index 16b50407e4..f5ab9d8aeb 100644 --- a/src/meta/test/balancer_simulator/balancer_simulator.cpp +++ b/src/meta/test/balancer_simulator/balancer_simulator.cpp @@ -72,6 +72,7 @@ class simple_priority_queue } const dsn::host_port &top() const { return container.front(); } bool empty() const { return container.empty(); } + private: std::vector container; server_load_balancer::node_comparator cmp; diff --git a/src/meta/test/meta_app_operation_test.cpp b/src/meta/test/meta_app_operation_test.cpp index 8cd472aae6..2fbce6477d 100644 --- a/src/meta/test/meta_app_operation_test.cpp +++ b/src/meta/test/meta_app_operation_test.cpp @@ -174,11 +174,12 @@ class meta_app_operation_test : public meta_test_base auto ainfo = *(reinterpret_cast(app.get())); auto json_config = dsn::json::json_forwarder::encode(ainfo); dsn::task_tracker tracker; - _ms->get_remote_storage()->set_data(app_path, - json_config, - LPC_META_STATE_HIGH, - [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, - &tracker); + _ms->get_remote_storage()->set_data( + app_path, + json_config, + LPC_META_STATE_HIGH, + [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, + &tracker); tracker.wait_outstanding_tasks(); } @@ -227,11 +228,12 @@ class meta_app_operation_test : public meta_test_base auto json_config = dsn::json::json_forwarder::encode(partition_config); dsn::task_tracker tracker; - _ms->get_remote_storage()->set_data(partition_path, - json_config, - LPC_META_STATE_HIGH, - [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, - &tracker); + _ms->get_remote_storage()->set_data( + partition_path, + json_config, + LPC_META_STATE_HIGH, + [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, + &tracker); tracker.wait_outstanding_tasks(); } @@ -243,11 +245,12 @@ class 
meta_app_operation_test : public meta_test_base auto ainfo = *(reinterpret_cast(app.get())); auto json_config = dsn::json::json_forwarder::encode(ainfo); dsn::task_tracker tracker; - _ms->get_remote_storage()->set_data(app_path, - json_config, - LPC_META_STATE_HIGH, - [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, - &tracker); + _ms->get_remote_storage()->set_data( + app_path, + json_config, + LPC_META_STATE_HIGH, + [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, + &tracker); tracker.wait_outstanding_tasks(); } @@ -269,8 +272,8 @@ class meta_app_operation_test : public meta_test_base _ms->get_remote_storage()->get_data( partition_path, LPC_META_CALLBACK, - [ expected_pid = partition_config.pid, - expected_max_replica_count ](error_code ec, const blob &value) { + [expected_pid = partition_config.pid, + expected_max_replica_count](error_code ec, const blob &value) { ASSERT_EQ(ec, ERR_OK); partition_configuration partition_config; @@ -725,10 +728,9 @@ TEST_F(meta_app_operation_test, get_max_replica_count) auto partition_index = static_cast(random32(0, partition_count - 1)); set_partition_max_replica_count(test.app_name, partition_index, 2); recover_partition_max_replica_count = - [ this, app_name = test.app_name, partition_index ]() - { - set_partition_max_replica_count(app_name, partition_index, 3); - }; + [this, app_name = test.app_name, partition_index]() { + set_partition_max_replica_count(app_name, partition_index, 3); + }; } const auto resp = get_max_replica_count(test.app_name); @@ -878,15 +880,14 @@ TEST_F(meta_app_operation_test, set_max_replica_count) // recover automatically the original FLAGS_min_live_node_count_for_unfreeze, // FLAGS_min_allowed_replica_count and FLAGS_max_allowed_replica_count - auto recover = defer([ - reserved_min_live_node_count_for_unfreeze = FLAGS_min_live_node_count_for_unfreeze, - reserved_min_allowed_replica_count = FLAGS_min_allowed_replica_count, - reserved_max_allowed_replica_count = FLAGS_max_allowed_replica_count - 
]() { - FLAGS_max_allowed_replica_count = reserved_max_allowed_replica_count; - FLAGS_min_allowed_replica_count = reserved_min_allowed_replica_count; - FLAGS_min_live_node_count_for_unfreeze = reserved_min_live_node_count_for_unfreeze; - }); + auto recover = defer( + [reserved_min_live_node_count_for_unfreeze = FLAGS_min_live_node_count_for_unfreeze, + reserved_min_allowed_replica_count = FLAGS_min_allowed_replica_count, + reserved_max_allowed_replica_count = FLAGS_max_allowed_replica_count]() { + FLAGS_max_allowed_replica_count = reserved_max_allowed_replica_count; + FLAGS_min_allowed_replica_count = reserved_min_allowed_replica_count; + FLAGS_min_live_node_count_for_unfreeze = reserved_min_live_node_count_for_unfreeze; + }); FLAGS_min_live_node_count_for_unfreeze = test.min_live_node_count_for_unfreeze; FLAGS_min_allowed_replica_count = test.min_allowed_replica_count; FLAGS_max_allowed_replica_count = test.max_allowed_replica_count; diff --git a/src/meta/test/meta_bulk_load_service_test.cpp b/src/meta/test/meta_bulk_load_service_test.cpp index 2cd4e3c644..e9868312ef 100644 --- a/src/meta/test/meta_bulk_load_service_test.cpp +++ b/src/meta/test/meta_bulk_load_service_test.cpp @@ -498,6 +498,7 @@ class bulk_load_service_test : public meta_test_base meta_op_status get_op_status() { return _ms->get_op_status(); } void unlock_meta_op_status() { return _ms->unlock_meta_op_status(); } + public: int32_t APP_ID = 1; std::string APP_NAME = "bulk_load_test"; @@ -798,7 +799,7 @@ class bulk_load_process_test : public bulk_load_service_test if (!_resp.__isset.hp_group_bulk_load_state) { _resp.__set_hp_group_bulk_load_state({}); } - for (const auto & [ addr_and_hp, state ] : state_by_hosts) { + for (const auto &[addr_and_hp, state] : state_by_hosts) { _resp.group_bulk_load_state[addr_and_hp.first] = state; _resp.hp_group_bulk_load_state[addr_and_hp.second] = state; } diff --git a/src/meta/test/state_sync_test.cpp b/src/meta/test/state_sync_test.cpp index 7c08181a80..f0429317d9 
100644 --- a/src/meta/test/state_sync_test.cpp +++ b/src/meta/test/state_sync_test.cpp @@ -218,11 +218,12 @@ void meta_service_test_app::state_sync_test() dsn::error_code ec; dsn::dist::meta_state_service *storage = svc->get_remote_storage(); storage - ->delete_node(apps_root, - true, - LPC_META_CALLBACK, - [&ec](dsn::error_code error) { ec = error; }, - nullptr) + ->delete_node( + apps_root, + true, + LPC_META_CALLBACK, + [&ec](dsn::error_code error) { ec = error; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == ec || dsn::ERR_OBJECT_NOT_FOUND == ec); } @@ -341,11 +342,12 @@ void meta_service_test_app::state_sync_test() dsn::dist::meta_state_service *storage = svc->get_remote_storage(); storage - ->delete_node(ss2->get_partition_path(dsn::gpid{apps_count, 0}), - false, - LPC_META_CALLBACK, - [&ec](dsn::error_code error) { ec = error; }, - nullptr) + ->delete_node( + ss2->get_partition_path(dsn::gpid{apps_count, 0}), + false, + LPC_META_CALLBACK, + [&ec](dsn::error_code error) { ec = error; }, + nullptr) ->wait(); ASSERT_EQ(ec, dsn::ERR_OK); diff --git a/src/meta/test/update_configuration_test.cpp b/src/meta/test/update_configuration_test.cpp index d8a947f28e..8f6223dec2 100644 --- a/src/meta/test/update_configuration_test.cpp +++ b/src/meta/test/update_configuration_test.cpp @@ -453,11 +453,11 @@ void meta_service_test_app::apply_balancer_test() ss->set_replica_migration_subscriber_for_test(migration_actions); while (true) { - dsn::task_ptr tsk = - dsn::tasking::enqueue(LPC_META_STATE_NORMAL, - nullptr, - [&result, ss]() { result = ss->check_all_partitions(); }, - server_state::sStateHash); + dsn::task_ptr tsk = dsn::tasking::enqueue( + LPC_META_STATE_NORMAL, + nullptr, + [&result, ss]() { result = ss->check_all_partitions(); }, + server_state::sStateHash); tsk->wait(); if (result) break; diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp index 04f96a16d5..867d1891a9 100644 --- a/src/nfs/nfs_client_impl.cpp +++ b/src/nfs/nfs_client_impl.cpp 
@@ -151,12 +151,13 @@ void nfs_client_impl::begin_remote_copy(std::shared_ptr &rc req->nfs_task = nfs_task; req->is_finished = false; - async_nfs_get_file_size(req->file_size_req, - [=](error_code err, get_file_size_response &&resp) { - end_get_file_size(err, std::move(resp), req); - }, - std::chrono::milliseconds(FLAGS_rpc_timeout_ms), - req->file_size_req.source); + async_nfs_get_file_size( + req->file_size_req, + [=](error_code err, get_file_size_response &&resp) { + end_get_file_size(err, std::move(resp), req); + }, + std::chrono::milliseconds(FLAGS_rpc_timeout_ms), + req->file_size_req.source); } void nfs_client_impl::end_get_file_size(::dsn::error_code err, @@ -230,7 +231,8 @@ void nfs_client_impl::end_get_file_size(::dsn::error_code err, _copy_requests_low.push(std::move(copy_requests)); } - tasking::enqueue(LPC_NFS_COPY_FILE, nullptr, [this]() { continue_copy(); }, 0); + tasking::enqueue( + LPC_NFS_COPY_FILE, nullptr, [this]() { continue_copy(); }, 0); } void nfs_client_impl::continue_copy() @@ -305,20 +307,20 @@ void nfs_client_impl::continue_copy() copy_req.is_last = req->is_last; copy_req.__set_source_disk_tag(ureq->file_size_req.source_disk_tag); copy_req.__set_pid(ureq->file_size_req.pid); - req->remote_copy_task = - async_nfs_copy(copy_req, - [=](error_code err, copy_response &&resp) { - end_copy(err, std::move(resp), req); - // reset task to release memory quickly. - // should do this after end_copy() done. - if (req->is_ready_for_write) { - ::dsn::task_ptr tsk; - zauto_lock l(req->lock); - tsk = std::move(req->remote_copy_task); - } - }, - std::chrono::milliseconds(FLAGS_rpc_timeout_ms), - req->file_ctx->user_req->file_size_req.source); + req->remote_copy_task = async_nfs_copy( + copy_req, + [=](error_code err, copy_response &&resp) { + end_copy(err, std::move(resp), req); + // reset task to release memory quickly. + // should do this after end_copy() done. 
+ if (req->is_ready_for_write) { + ::dsn::task_ptr tsk; + zauto_lock l(req->lock); + tsk = std::move(req->remote_copy_task); + } + }, + std::chrono::milliseconds(FLAGS_rpc_timeout_ms), + req->file_ctx->user_req->file_size_req.source); } else { --ureq->concurrent_copy_count; --_concurrent_copy_request_count; diff --git a/src/nfs/nfs_code_definition.h b/src/nfs/nfs_code_definition.h index a517d6c980..a848cf9028 100644 --- a/src/nfs/nfs_code_definition.h +++ b/src/nfs/nfs_code_definition.h @@ -40,5 +40,5 @@ DEFINE_TASK_CODE(LPC_NFS_FILE_CLOSE_TIMER, TASK_PRIORITY_COMMON, THREAD_POOL_DEF DEFINE_TASK_CODE_AIO(LPC_NFS_WRITE, TASK_PRIORITY_COMMON, THREAD_POOL_DEFAULT) DEFINE_TASK_CODE_AIO(LPC_NFS_COPY_FILE, TASK_PRIORITY_COMMON, THREAD_POOL_DEFAULT) -} -} +} // namespace service +} // namespace dsn diff --git a/src/nfs/nfs_node.cpp b/src/nfs/nfs_node.cpp index e4282dcbe1..ea0b8564cf 100644 --- a/src/nfs/nfs_node.cpp +++ b/src/nfs/nfs_node.cpp @@ -109,4 +109,4 @@ aio_task_ptr nfs_node::copy_remote_files(std::shared_ptr &r call(request, cb); return cb; } -} +} // namespace dsn diff --git a/src/nfs/nfs_node.h b/src/nfs/nfs_node.h index f22810cd84..9edada83cd 100644 --- a/src/nfs/nfs_node.h +++ b/src/nfs/nfs_node.h @@ -113,4 +113,4 @@ class nfs_node protected: virtual void call(std::shared_ptr rci, aio_task *callback) = 0; }; -} +} // namespace dsn diff --git a/src/nfs/test/main.cpp b/src/nfs/test/main.cpp index c8ebd6d4f8..acabe19a4a 100644 --- a/src/nfs/test/main.cpp +++ b/src/nfs/test/main.cpp @@ -109,22 +109,23 @@ TEST_P(nfs_test, basic) ASSERT_TRUE(dst_filenames.empty()); aio_result r; - auto t = nfs->copy_remote_files(dsn::host_port("localhost", 20101), - "default", - ".", - kSrcFilenames, - "default", - kDstDir, - fake_pid, - false, - false, - LPC_AIO_TEST_NFS, - nullptr, - [&r](dsn::error_code err, size_t sz) { - r.err = err; - r.sz = sz; - }, - 0); + auto t = nfs->copy_remote_files( + dsn::host_port("localhost", 20101), + "default", + ".", + kSrcFilenames, + 
"default", + kDstDir, + fake_pid, + false, + false, + LPC_AIO_TEST_NFS, + nullptr, + [&r](dsn::error_code err, size_t sz) { + r.err = err; + r.sz = sz; + }, + 0); ASSERT_NE(nullptr, t); ASSERT_TRUE(t->wait(20000)); ASSERT_EQ(r.err, t->error()); @@ -151,22 +152,23 @@ TEST_P(nfs_test, basic) // copy files to the destination directory, files will be overwritten. { aio_result r; - auto t = nfs->copy_remote_files(dsn::host_port("localhost", 20101), - "default", - ".", - kSrcFilenames, - "default", - kDstDir, - fake_pid, - true, - false, - LPC_AIO_TEST_NFS, - nullptr, - [&r](dsn::error_code err, size_t sz) { - r.err = err; - r.sz = sz; - }, - 0); + auto t = nfs->copy_remote_files( + dsn::host_port("localhost", 20101), + "default", + ".", + kSrcFilenames, + "default", + kDstDir, + fake_pid, + true, + false, + LPC_AIO_TEST_NFS, + nullptr, + [&r](dsn::error_code err, size_t sz) { + r.err = err; + r.sz = sz; + }, + 0); ASSERT_NE(nullptr, t); ASSERT_TRUE(t->wait(20000)); ASSERT_EQ(r.err, t->error()); @@ -203,21 +205,22 @@ TEST_P(nfs_test, basic) ASSERT_FALSE(utils::filesystem::directory_exists(kNewDstDir)); aio_result r; - auto t = nfs->copy_remote_directory(dsn::host_port("localhost", 20101), - "default", - kDstDir, - "default", - kNewDstDir, - fake_pid, - false, - false, - LPC_AIO_TEST_NFS, - nullptr, - [&r](dsn::error_code err, size_t sz) { - r.err = err; - r.sz = sz; - }, - 0); + auto t = nfs->copy_remote_directory( + dsn::host_port("localhost", 20101), + "default", + kDstDir, + "default", + kNewDstDir, + fake_pid, + false, + false, + LPC_AIO_TEST_NFS, + nullptr, + [&r](dsn::error_code err, size_t sz) { + r.err = err; + r.sz = sz; + }, + 0); ASSERT_NE(nullptr, t); ASSERT_TRUE(t->wait(20000)); ASSERT_EQ(r.err, t->error()); diff --git a/src/perf_counter/perf_counter.h b/src/perf_counter/perf_counter.h index 1425bc228c..3118b8aacf 100644 --- a/src/perf_counter/perf_counter.h +++ b/src/perf_counter/perf_counter.h @@ -35,7 +35,8 @@ #include "utils/autoref_ptr.h" #include 
"utils/fmt_utils.h" -typedef enum dsn_perf_counter_type_t { +typedef enum dsn_perf_counter_type_t +{ COUNTER_TYPE_NUMBER, COUNTER_TYPE_VOLATILE_NUMBER, // special kind of NUMBER which will be reset on get COUNTER_TYPE_RATE, @@ -45,7 +46,8 @@ typedef enum dsn_perf_counter_type_t { } dsn_perf_counter_type_t; USER_DEFINED_ENUM_FORMATTER(dsn_perf_counter_type_t) -typedef enum dsn_perf_counter_percentile_type_t { +typedef enum dsn_perf_counter_percentile_type_t +{ COUNTER_PERCENTILE_50, COUNTER_PERCENTILE_90, COUNTER_PERCENTILE_95, diff --git a/src/perf_counter/perf_counter_atomic.h b/src/perf_counter/perf_counter_atomic.h index bd7fbd152c..40ca5a4444 100644 --- a/src/perf_counter/perf_counter_atomic.h +++ b/src/perf_counter/perf_counter_atomic.h @@ -432,4 +432,4 @@ class perf_counter_number_percentile_atomic : public perf_counter }; #pragma pack(pop) -} // namespace +} // namespace dsn diff --git a/src/perf_counter/perf_counter_wrapper.h b/src/perf_counter/perf_counter_wrapper.h index 26d25e551d..6bd51a5bd5 100644 --- a/src/perf_counter/perf_counter_wrapper.h +++ b/src/perf_counter/perf_counter_wrapper.h @@ -108,4 +108,4 @@ class perf_counter_wrapper // use raw pointer to make the class object small, so it can be accessed quickly dsn::perf_counter *_counter; }; -} +} // namespace dsn diff --git a/src/ranger/ranger_resource_policy_manager.cpp b/src/ranger/ranger_resource_policy_manager.cpp index 4de545da83..ed373d86fa 100644 --- a/src/ranger/ranger_resource_policy_manager.cpp +++ b/src/ranger/ranger_resource_policy_manager.cpp @@ -207,12 +207,13 @@ void ranger_resource_policy_manager::start() CHECK_NOTNULL(_meta_svc, ""); _ranger_policy_meta_root = dsn::utils::filesystem::concat_path_unix_style( _meta_svc->cluster_root(), "ranger_policy_meta_root"); - tasking::enqueue_timer(LPC_USE_RANGER_ACCESS_CONTROL, - &_tracker, - [this]() { this->update_policies_from_ranger_service(); }, - std::chrono::seconds(FLAGS_update_ranger_policy_interval_sec), - 0, - 
std::chrono::milliseconds(1)); + tasking::enqueue_timer( + LPC_USE_RANGER_ACCESS_CONTROL, + &_tracker, + [this]() { this->update_policies_from_ranger_service(); }, + std::chrono::seconds(FLAGS_update_ranger_policy_interval_sec), + 0, + std::chrono::milliseconds(1)); } access_control_result ranger_resource_policy_manager::allowed( @@ -499,11 +500,12 @@ void ranger_resource_policy_manager::start_to_dump_and_sync_policies() } CHECK_EQ(err, dsn::ERR_TIMEOUT); LOG_ERROR("Create Ranger policy meta root timeout, retry later."); - dsn::tasking::enqueue(LPC_USE_RANGER_ACCESS_CONTROL, - &_tracker, - [this]() { start_to_dump_and_sync_policies(); }, - 0, - kLoadRangerPolicyRetryDelayMs); + dsn::tasking::enqueue( + LPC_USE_RANGER_ACCESS_CONTROL, + &_tracker, + [this]() { start_to_dump_and_sync_policies(); }, + 0, + kLoadRangerPolicyRetryDelayMs); }); } @@ -534,11 +536,12 @@ void ranger_resource_policy_manager::dump_policies_to_remote_storage() // The return error code is not 'ERR_TIMEOUT', use assert here. 
CHECK_EQ(e, dsn::ERR_TIMEOUT); LOG_ERROR("Dump Ranger policies to remote storage timeout, retry later."); - dsn::tasking::enqueue(LPC_USE_RANGER_ACCESS_CONTROL, - &_tracker, - [this]() { dump_policies_to_remote_storage(); }, - 0, - kLoadRangerPolicyRetryDelayMs); + dsn::tasking::enqueue( + LPC_USE_RANGER_ACCESS_CONTROL, + &_tracker, + [this]() { dump_policies_to_remote_storage(); }, + 0, + kLoadRangerPolicyRetryDelayMs); }); } diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp b/src/redis_protocol/proxy_lib/redis_parser.cpp index 794d482e99..9aea607eac 100644 --- a/src/redis_protocol/proxy_lib/redis_parser.cpp +++ b/src/redis_protocol/proxy_lib/redis_parser.cpp @@ -446,8 +446,9 @@ void redis_parser::set_internal(redis_parser::message_entry &entry) // with a reference to prevent the object from being destroyed std::shared_ptr ref_this = shared_from_this(); LOG_DEBUG_PREFIX("send SET command({})", entry.sequence_id); - auto on_set_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_set_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { // when the "is_session_reset" flag is set, the socket may be broken. 
// so continue to reply the message is not necessary if (_is_session_reset.load(std::memory_order_acquire)) { @@ -551,8 +552,9 @@ void redis_parser::setex(message_entry &entry) } std::shared_ptr ref_this = shared_from_this(); - auto on_setex_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_setex_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("SETEX command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -599,8 +601,9 @@ void redis_parser::get(message_entry &entry) } else { LOG_DEBUG_PREFIX("send GET command seqid({})", entry.sequence_id); std::shared_ptr ref_this = shared_from_this(); - auto on_get_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_get_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("GET command({}) got reply, but session has reset", entry.sequence_id); @@ -653,8 +656,9 @@ void redis_parser::del_internal(message_entry &entry) } else { LOG_DEBUG_PREFIX("send DEL command seqid({})", entry.sequence_id); std::shared_ptr ref_this = shared_from_this(); - auto on_del_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_del_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("DEL command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -738,8 +742,9 @@ void redis_parser::ttl(message_entry &entry) } else { LOG_DEBUG_PREFIX("send PTTL/TTL command seqid({})", entry.sequence_id); std::shared_ptr ref_this = shared_from_this(); - auto on_ttl_reply 
= [ref_this, this, &entry, is_ttl]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_ttl_reply = [ref_this, this, &entry, is_ttl](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("TTL/PTTL command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -832,7 +837,7 @@ void redis_parser::geo_radius(message_entry &entry) std::shared_ptr ref_this = shared_from_this(); auto search_callback = [ref_this, this, &entry, unit, WITHCOORD, WITHDIST, WITHHASH]( - int ec, std::list &&results) { + int ec, std::list &&results) { process_geo_radius_result( entry, unit, WITHCOORD, WITHDIST, WITHHASH, ec, std::move(results)); }; @@ -886,7 +891,7 @@ void redis_parser::geo_radius_by_member(message_entry &entry) std::shared_ptr ref_this = shared_from_this(); auto search_callback = [ref_this, this, &entry, unit, WITHCOORD, WITHDIST, WITHHASH]( - int ec, std::list &&results) { + int ec, std::list &&results) { process_geo_radius_result( entry, unit, WITHCOORD, WITHDIST, WITHHASH, ec, std::move(results)); }; @@ -945,7 +950,7 @@ void redis_parser::counter_internal(message_entry &entry) std::shared_ptr ref_this = shared_from_this(); auto on_incr_reply = [ref_this, this, command, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_WARNING_PREFIX("command {} seqid({}) got reply, but session has reset", command, @@ -1143,7 +1148,7 @@ void redis_parser::geo_add(message_entry &entry) std::make_shared>(member_count); std::shared_ptr result(new redis_integer()); auto set_latlng_callback = [ref_this, this, &entry, result, set_count]( - int error_code, pegasus_client::internal_info &&info) { + int error_code, pegasus_client::internal_info &&info) { if 
(_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("GEOADD command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -1243,8 +1248,10 @@ void redis_parser::geo_pos(message_entry &entry) std::make_shared>(member_count); std::shared_ptr result(new redis_array()); result->resize(member_count); - auto get_latlng_callback = [ref_this, this, &entry, result, get_count]( - int error_code, int index, double lat_degrees, double lng_degrees) { + auto get_latlng_callback = [ref_this, this, &entry, result, get_count](int error_code, + int index, + double lat_degrees, + double lng_degrees) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("GEOPOS command seqid({}) got reply, but session has reset", entry.sequence_id); diff --git a/src/redis_protocol/proxy_lib/redis_parser.h b/src/redis_protocol/proxy_lib/redis_parser.h index f66bce0002..1e7b40e8c3 100644 --- a/src/redis_protocol/proxy_lib/redis_parser.h +++ b/src/redis_protocol/proxy_lib/redis_parser.h @@ -44,7 +44,7 @@ class binary_writer; namespace apps { class rrdb_client; } -} +} // namespace dsn class proxy_test; @@ -265,5 +265,5 @@ class redis_parser : public proxy_session redis_parser(proxy_stub *op, dsn::message_ex *first_msg); ~redis_parser() override; }; -} -} // namespace +} // namespace proxy +} // namespace pegasus diff --git a/src/remote_cmd/remote_command.cpp b/src/remote_cmd/remote_command.cpp index a40bb7b1e3..8fc79bdfb4 100644 --- a/src/remote_cmd/remote_command.cpp +++ b/src/remote_cmd/remote_command.cpp @@ -52,7 +52,7 @@ task_ptr async_call_remote(rpc_address remote, request->cmd = cmd; request->arguments = arguments; remote_command_rpc rpc(std::move(request), RPC_CLI_CLI_CALL, timeout); - return rpc.call(remote, nullptr, [ cb = std::move(callback), rpc ](error_code ec) { + return rpc.call(remote, nullptr, [cb = std::move(callback), rpc](error_code ec) { cb(ec, rpc.response()); }); } diff --git a/src/replica/backup/cold_backup_context.cpp 
b/src/replica/backup/cold_backup_context.cpp index a96dd94e8e..28f3cd791a 100644 --- a/src/replica/backup/cold_backup_context.cpp +++ b/src/replica/backup/cold_backup_context.cpp @@ -226,24 +226,25 @@ void cold_backup_context::check_backup_on_remote() // before retry, should add_ref(), and must release_ref() after retry add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this]() { - // before retry, should check whether the status is ready for - // check - if (!is_ready_for_check()) { - LOG_INFO("{}: backup status has changed to {}, ignore " - "checking backup on remote", - name, - cold_backup_status_to_string(status())); - ignore_check(); - } else { - check_backup_on_remote(); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this]() { + // before retry, should check whether the status is ready for + // check + if (!is_ready_for_check()) { + LOG_INFO("{}: backup status has changed to {}, ignore " + "checking backup on remote", + name, + cold_backup_status_to_string(status())); + ignore_check(); + } else { + check_backup_on_remote(); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service create file failed, file = {}, err = {}", name, @@ -290,22 +291,23 @@ void cold_backup_context::read_current_chkpt_file( file_handle->file_name()); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, file_handle]() { - if (!is_ready_for_check()) { - LOG_INFO("{}: backup status has changed to {}, ignore " - "checking backup on remote", - name, - cold_backup_status_to_string(status())); - ignore_check(); - } else { - read_current_chkpt_file(file_handle); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, file_handle]() { + if (!is_ready_for_check()) { + LOG_INFO("{}: backup status has changed to {}, ignore " + "checking backup on remote", + name, 
+ cold_backup_status_to_string(status())); + ignore_check(); + } else { + read_current_chkpt_file(file_handle); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: read remote file failed, file = {}, err = {}", name, @@ -369,22 +371,23 @@ void cold_backup_context::remote_chkpt_dir_exist(const std::string &chkpt_dirnam chkpt_dirname); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, chkpt_dirname]() { - if (!is_ready_for_check()) { - LOG_INFO("{}: backup status has changed to {}, ignore " - "checking backup on remote", - name, - cold_backup_status_to_string(status())); - ignore_check(); - } else { - remote_chkpt_dir_exist(chkpt_dirname); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, chkpt_dirname]() { + if (!is_ready_for_check()) { + LOG_INFO("{}: backup status has changed to {}, ignore " + "checking backup on remote", + name, + cold_backup_status_to_string(status())); + ignore_check(); + } else { + remote_chkpt_dir_exist(chkpt_dirname); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service list remote dir failed, dirname = {}, err = {}", name, @@ -681,35 +684,36 @@ void cold_backup_context::upload_file(const std::string &local_filename) local_filename); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, local_filename]() { - // TODO: status change from ColdBackupUploading to - // ColdBackupPaused, and upload file timeout, but when callback - // is executed it catches the status(ColdBackupPaused) - // now, if status back to ColdBackupUploading very soon, and - // call upload_checkpoint_to_remote() here, - // upload_checkpoint_to_remote() maybe acquire the _lock first, - // then stop give back file(upload timeout), the file is still - // in uploading this file will not be uploaded until you call - // upload_checkpoint_to_remote() after it's given back - 
if (!is_ready_for_upload()) { - std::string full_path_local_file = - ::dsn::utils::filesystem::path_combine(checkpoint_dir, - local_filename); - LOG_INFO("{}: backup status has changed to {}, stop " - "upload checkpoint file to remote, file = {}", - name, - cold_backup_status_to_string(status()), - full_path_local_file); - file_upload_uncomplete(local_filename); - } else { - upload_file(local_filename); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, local_filename]() { + // TODO: status change from ColdBackupUploading to + // ColdBackupPaused, and upload file timeout, but when callback + // is executed it catches the status(ColdBackupPaused) + // now, if status back to ColdBackupUploading very soon, and + // call upload_checkpoint_to_remote() here, + // upload_checkpoint_to_remote() maybe acquire the _lock first, + // then stop give back file(upload timeout), the file is still + // in uploading this file will not be uploaded until you call + // upload_checkpoint_to_remote() after it's given back + if (!is_ready_for_upload()) { + std::string full_path_local_file = + ::dsn::utils::filesystem::path_combine(checkpoint_dir, + local_filename); + LOG_INFO("{}: backup status has changed to {}, stop " + "upload checkpoint file to remote, file = {}", + name, + cold_backup_status_to_string(status()), + full_path_local_file); + file_upload_uncomplete(local_filename); + } else { + upload_file(local_filename); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service create file failed, file = {}, err = {}", name, @@ -911,22 +915,23 @@ void cold_backup_context::write_current_chkpt_file(const std::string &value) current_chkpt_file); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, value]() { - if (!is_ready_for_upload()) { - LOG_INFO("{}: backup status has changed to {}, stop write " - "current checkpoint file", - name, - 
cold_backup_status_to_string(status())); - } else { - write_current_chkpt_file(value); - } - - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, value]() { + if (!is_ready_for_upload()) { + LOG_INFO("{}: backup status has changed to {}, stop write " + "current checkpoint file", + name, + cold_backup_status_to_string(status())); + } else { + write_current_chkpt_file(value); + } + + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service create file failed, file = {}, err = {}", name, @@ -963,22 +968,23 @@ void cold_backup_context::on_write(const dist::block_service::block_file_ptr &fi file_handle->file_name()); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, file_handle, value, callback]() { - if (!is_ready_for_upload()) { - LOG_INFO("{}: backup status has changed to {}, stop write " - "remote file, file = {}", - name, - cold_backup_status_to_string(status()), - file_handle->file_name()); - } else { - on_write(file_handle, value, callback); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, file_handle, value, callback]() { + if (!is_ready_for_upload()) { + LOG_INFO("{}: backup status has changed to {}, stop write " + "remote file, file = {}", + name, + cold_backup_status_to_string(status()), + file_handle->file_name()); + } else { + on_write(file_handle, value, callback); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { // here, must call the callback to release_ref callback(false); diff --git a/src/replica/backup/replica_backup_manager.cpp b/src/replica/backup/replica_backup_manager.cpp index 2fb70b7664..c9349441ac 100644 --- a/src/replica/backup/replica_backup_manager.cpp +++ b/src/replica/backup/replica_backup_manager.cpp @@ -126,11 +126,12 @@ void replica_backup_manager::on_clear_cold_backup(const backup_clear_request &re "{}: 
delay clearing obsoleted cold backup context, cause backup_status == " "ColdBackupCheckpointing", backup_context->name); - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_replica->_tracker, - [this, request]() { on_clear_cold_backup(request); }, - get_gpid().thread_hash(), - std::chrono::seconds(100)); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_replica->_tracker, + [this, request]() { on_clear_cold_backup(request); }, + get_gpid().thread_hash(), + std::chrono::seconds(100)); return; } @@ -143,12 +144,12 @@ void replica_backup_manager::on_clear_cold_backup(const backup_clear_request &re void replica_backup_manager::start_collect_backup_info() { if (_collect_info_timer == nullptr) { - _collect_info_timer = - tasking::enqueue_timer(LPC_PER_REPLICA_COLLECT_INFO_TIMER, - &_replica->_tracker, - [this]() { collect_backup_info(); }, - std::chrono::milliseconds(FLAGS_gc_interval_ms), - get_gpid().thread_hash()); + _collect_info_timer = tasking::enqueue_timer( + LPC_PER_REPLICA_COLLECT_INFO_TIMER, + &_replica->_tracker, + [this]() { collect_backup_info(); }, + std::chrono::milliseconds(FLAGS_gc_interval_ms), + get_gpid().thread_hash()); } } @@ -192,11 +193,12 @@ void replica_backup_manager::background_clear_backup_checkpoint(const std::strin LOG_INFO_PREFIX("schedule to clear all checkpoint dirs of policy({}) after {} minutes", policy_name, FLAGS_cold_backup_checkpoint_reserve_minutes); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - &_replica->_tracker, - [this, policy_name]() { clear_backup_checkpoint(policy_name); }, - get_gpid().thread_hash(), - std::chrono::minutes(FLAGS_cold_backup_checkpoint_reserve_minutes)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + &_replica->_tracker, + [this, policy_name]() { clear_backup_checkpoint(policy_name); }, + get_gpid().thread_hash(), + std::chrono::minutes(FLAGS_cold_backup_checkpoint_reserve_minutes)); } // clear all checkpoint dirs of the policy diff --git a/src/replica/bulk_load/replica_bulk_loader.cpp 
b/src/replica/bulk_load/replica_bulk_loader.cpp index a96b8337fb..a3c27e99e4 100644 --- a/src/replica/bulk_load/replica_bulk_loader.cpp +++ b/src/replica/bulk_load/replica_bulk_loader.cpp @@ -1031,8 +1031,8 @@ void replica_bulk_loader::report_group_cleaned_up(bulk_load_response &response) for (const auto &target_hp : _replica->_primary_states.membership.hp_secondaries) { const auto &secondary_state = _replica->_primary_states.secondary_bulk_load_states[target_hp]; - bool is_cleaned_up = - secondary_state.__isset.is_cleaned_up ? secondary_state.is_cleaned_up : false; + bool is_cleaned_up = secondary_state.__isset.is_cleaned_up ? secondary_state.is_cleaned_up + : false; LOG_INFO_PREFIX( "secondary = {}, bulk load states cleaned_up = {}", target_hp, is_cleaned_up); SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, target_hp, secondary_state); diff --git a/src/replica/duplication/duplication_sync_timer.cpp b/src/replica/duplication/duplication_sync_timer.cpp index 3d0df01aa4..7337d91890 100644 --- a/src/replica/duplication/duplication_sync_timer.cpp +++ b/src/replica/duplication/duplication_sync_timer.cpp @@ -176,12 +176,13 @@ void duplication_sync_timer::start() { LOG_INFO("run duplication sync periodically in {}s", FLAGS_duplication_sync_period_second); - _timer_task = tasking::enqueue_timer(LPC_DUPLICATION_SYNC_TIMER, - &_stub->_tracker, - [this]() { run(); }, - FLAGS_duplication_sync_period_second * 1_s, - 0, - FLAGS_duplication_sync_period_second * 1_s); + _timer_task = tasking::enqueue_timer( + LPC_DUPLICATION_SYNC_TIMER, + &_stub->_tracker, + [this]() { run(); }, + FLAGS_duplication_sync_period_second * 1_s, + 0, + FLAGS_duplication_sync_period_second * 1_s); } std::multimap diff --git a/src/replica/duplication/mutation_batch.cpp b/src/replica/duplication/mutation_batch.cpp index bd2c8bf460..f50e7c113c 100644 --- a/src/replica/duplication/mutation_batch.cpp +++ b/src/replica/duplication/mutation_batch.cpp @@ -162,9 +162,10 @@ 
mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r), _replic r->get_gpid(), std::string("mutation_batch@") + r->replica_name(), r->app_name()); _mutation_buffer = std::make_unique( &base, 0, PREPARE_LIST_NUM_ENTRIES, [this](mutation_ptr &mu) { - // The committer for the prepare list, used for duplicating to add the committed - // mutations to the loading list, which would be shipped to the remote cluster - // later. + // The committer for the prepare list, used for + // duplicating to add the committed mutations to the + // loading list, which would be shipped to the remote + // cluster later. add_mutation_if_valid(mu, _start_decree); }); diff --git a/src/replica/duplication/replica_duplicator_manager.cpp b/src/replica/duplication/replica_duplicator_manager.cpp index 9d2153559a..d7a6437c2e 100644 --- a/src/replica/duplication/replica_duplicator_manager.cpp +++ b/src/replica/duplication/replica_duplicator_manager.cpp @@ -66,7 +66,7 @@ replica_duplicator_manager::get_duplication_confirms_to_update() const zauto_lock l(_lock); std::vector updates; - for (const auto & [ _, dup ] : _duplications) { + for (const auto &[_, dup] : _duplications) { // There are two conditions when we should send confirmed decrees to meta server to update // the progress: // diff --git a/src/replica/duplication/replica_duplicator_manager.h b/src/replica/duplication/replica_duplicator_manager.h index 413176a16f..f8f95d3822 100644 --- a/src/replica/duplication/replica_duplicator_manager.h +++ b/src/replica/duplication/replica_duplicator_manager.h @@ -90,7 +90,7 @@ class replica_duplicator_manager : public replica_base writer.Key("duplications"); writer.StartArray(); - for (const auto & [ _, dup ] : _duplications) { + for (const auto &[_, dup] : _duplications) { dup->encode_progress(writer); } writer.EndArray(); diff --git a/src/replica/duplication/replica_follower.cpp b/src/replica/duplication/replica_follower.cpp index 6678017c7c..5d0c29e91f 100644 --- 
a/src/replica/duplication/replica_follower.cpp +++ b/src/replica/duplication/replica_follower.cpp @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ #include "replica_follower.h" diff --git a/src/replica/duplication/replica_follower.h b/src/replica/duplication/replica_follower.h index d6711c4b0c..70c5a7bfcc 100644 --- a/src/replica/duplication/replica_follower.h +++ b/src/replica/duplication/replica_follower.h @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ #pragma once diff --git a/src/replica/mutation.cpp b/src/replica/mutation.cpp index f6b3c7e896..de754cb9cf 100644 --- a/src/replica/mutation.cpp +++ b/src/replica/mutation.cpp @@ -463,5 +463,5 @@ void mutation_queue::clear(std::vector &queued_mutations) // is handled by prepare_list // _current_op_count = 0; } -} -} // namespace end +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation.h b/src/replica/mutation.h index d5b7f238ad..cd99d957c4 100644 --- a/src/replica/mutation.h +++ b/src/replica/mutation.h @@ -250,5 +250,5 @@ class mutation_queue mutation_ptr _pending_mutation; slist _hdr; }; -} -} // namespace +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation_cache.cpp b/src/replica/mutation_cache.cpp index f1e8f25d67..603ab28fcb 100644 --- a/src/replica/mutation_cache.cpp +++ b/src/replica/mutation_cache.cpp @@ -147,5 +147,5 @@ mutation_ptr mutation_cache::get_mutation_by_decree(decree decree) else return _array[(_start_idx + (decree - _start_decree) + _max_count) % _max_count]; } -} -} // namespace end +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation_cache.h b/src/replica/mutation_cache.h index ac01127360..8e2c0d6b1f 100644 --- a/src/replica/mutation_cache.h +++ b/src/replica/mutation_cache.h @@ -71,5 +71,5 @@ class mutation_cache decree _start_decree; std::atomic _end_decree; }; -} -} // namespace +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation_log.cpp b/src/replica/mutation_log.cpp index 20592fc477..df3ca2e6b2 100644 --- a/src/replica/mutation_log.cpp +++ b/src/replica/mutation_log.cpp @@ -167,8 +167,8 @@ void mutation_log_private::get_in_memory_mutations(decree start_decree, for (auto &mu : issued_write->mutations()) { // if start_ballot is invalid or equal to mu.ballot, check decree // otherwise check ballot - ballot current_ballot = - (start_ballot == invalid_ballot) ? 
invalid_ballot : mu->get_ballot(); + ballot current_ballot = (start_ballot == invalid_ballot) ? invalid_ballot + : mu->get_ballot(); if ((mu->get_decree() >= start_decree && start_ballot == current_ballot) || current_ballot > start_ballot) { mutation_list.push_back(mutation::copy_no_reply(mu)); @@ -179,8 +179,8 @@ void mutation_log_private::get_in_memory_mutations(decree start_decree, for (auto &mu : pending_mutations) { // if start_ballot is invalid or equal to mu.ballot, check decree // otherwise check ballot - ballot current_ballot = - (start_ballot == invalid_ballot) ? invalid_ballot : mu->get_ballot(); + ballot current_ballot = (start_ballot == invalid_ballot) ? invalid_ballot + : mu->get_ballot(); if ((mu->get_decree() >= start_decree && start_ballot == current_ballot) || current_ballot > start_ballot) { mutation_list.push_back(mutation::copy_no_reply(mu)); @@ -537,8 +537,8 @@ error_code mutation_log::open(replay_callback read_callback, end_offset); if (ERR_OK == err) { - _global_start_offset = - _log_files.size() > 0 ? _log_files.begin()->second->start_offset() : 0; + _global_start_offset = _log_files.size() > 0 ? _log_files.begin()->second->start_offset() + : 0; _global_end_offset = end_offset; _last_file_index = _log_files.size() > 0 ? 
_log_files.rbegin()->first : 0; _is_opened = true; @@ -623,22 +623,22 @@ error_code mutation_log::create_new_log_file() blk->add(temp_writer.get_buffer()); _global_end_offset += blk->size(); - logf->commit_log_block(*blk, - _current_log_file->start_offset(), - LPC_WRITE_REPLICATION_LOG_COMMON, - &_tracker, - [this, blk, logf](::dsn::error_code err, size_t sz) { - delete blk; - if (ERR_OK != err) { - LOG_ERROR( - "write mutation log file header failed, file = {}, err = {}", - logf->path(), - err); - CHECK(_io_error_callback, ""); - _io_error_callback(err); - } - }, - 0); + logf->commit_log_block( + *blk, + _current_log_file->start_offset(), + LPC_WRITE_REPLICATION_LOG_COMMON, + &_tracker, + [this, blk, logf](::dsn::error_code err, size_t sz) { + delete blk; + if (ERR_OK != err) { + LOG_ERROR("write mutation log file header failed, file = {}, err = {}", + logf->path(), + err); + CHECK(_io_error_callback, ""); + _io_error_callback(err); + } + }, + 0); CHECK_EQ_MSG(_global_end_offset, _current_log_file->start_offset() + sizeof(log_block_header) + header_len, diff --git a/src/replica/prepare_list.cpp b/src/replica/prepare_list.cpp index e6ba29260d..d2fdff2334 100644 --- a/src/replica/prepare_list.cpp +++ b/src/replica/prepare_list.cpp @@ -107,22 +107,22 @@ error_code prepare_list::prepare(mutation_ptr &mu, CHECK_EQ_PREFIX_MSG(mutation_cache::put(mu), ERR_OK, "mutation_cache::put failed"); return ERR_OK; - //// delayed commit - only when capacity is an issue - // case partition_status::PS_POTENTIAL_SECONDARY: - // while (true) - // { - // error_code err = mutation_cache::put(mu); - // if (err == ERR_CAPACITY_EXCEEDED) - // { - // CHECK_GE(mu->data.header.last_committed_decree, min_decree()); - // commit (min_decree(), true); - // pop_min(); - // } - // else - // break; - // } - // CHECK_EQ(err, ERR_OK); - // return ERR_OK; + //// delayed commit - only when capacity is an issue + // case partition_status::PS_POTENTIAL_SECONDARY: + // while (true) + // { + // error_code 
err = mutation_cache::put(mu); + // if (err == ERR_CAPACITY_EXCEEDED) + // { + // CHECK_GE(mu->data.header.last_committed_decree, min_decree()); + // commit (min_decree(), true); + // pop_min(); + // } + // else + // break; + // } + // CHECK_EQ(err, ERR_OK); + // return ERR_OK; case partition_status::PS_INACTIVE: // only possible during init if (mu->data.header.last_committed_decree > max_decree()) { diff --git a/src/replica/replica_2pc.cpp b/src/replica/replica_2pc.cpp index ccbf3c87cf..5e7943a6b1 100644 --- a/src/replica/replica_2pc.cpp +++ b/src/replica/replica_2pc.cpp @@ -383,14 +383,14 @@ void replica::send_prepare_message(const ::dsn::host_port &hp, mu->write_to(writer, msg); } - mu->remote_tasks()[hp] = - rpc::call(dsn::dns_resolver::instance().resolve_address(hp), - msg, - &_tracker, - [=](error_code err, dsn::message_ex *request, dsn::message_ex *reply) { - on_prepare_reply(std::make_pair(mu, rconfig.status), err, request, reply); - }, - get_gpid().thread_hash()); + mu->remote_tasks()[hp] = rpc::call( + dsn::dns_resolver::instance().resolve_address(hp), + msg, + &_tracker, + [=](error_code err, dsn::message_ex *request, dsn::message_ex *reply) { + on_prepare_reply(std::make_pair(mu, rconfig.status), err, request, reply); + }, + get_gpid().thread_hash()); LOG_DEBUG_PREFIX("mutation {} send_prepare_message to {} as {}", mu->name(), diff --git a/src/replica/replica_backup.cpp b/src/replica/replica_backup.cpp index 427eabc822..2afae09a6c 100644 --- a/src/replica/replica_backup.cpp +++ b/src/replica/replica_backup.cpp @@ -125,14 +125,15 @@ void replica::on_cold_backup(const backup_request &request, /*out*/ backup_respo LOG_INFO("{}: delay clearing obsoleted cold backup context, cause backup_status == " "ColdBackupCheckpointing", new_context->name); - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_tracker, - [this, request]() { - backup_response response; - on_cold_backup(request, response); - }, - get_gpid().thread_hash(), - std::chrono::seconds(100)); + 
tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_tracker, + [this, request]() { + backup_response response; + on_cold_backup(request, response); + }, + get_gpid().thread_hash(), + std::chrono::seconds(100)); } else { // TODO(wutao1): deleting cold backup context should be // extracted as a function like try_delete_cold_backup_context; @@ -488,13 +489,14 @@ void replica::generate_backup_checkpoint(cold_backup_context_ptr backup_context) file_infos.size(), total_size); // TODO: in primary, this will make the request send to secondary again - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_tracker, - [this, backup_context]() { - backup_response response; - on_cold_backup(backup_context->request, response); - }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_tracker, + [this, backup_context]() { + backup_response response; + on_cold_backup(backup_context->request, response); + }, + get_gpid().thread_hash()); } else { backup_context->fail_checkpoint("statistic file info under checkpoint failed"); return; @@ -728,13 +730,14 @@ void replica::local_create_backup_checkpoint(cold_backup_context_ptr backup_cont } backup_context->checkpoint_file_total_size = total_size; backup_context->complete_checkpoint(); - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_tracker, - [this, backup_context]() { - backup_response response; - on_cold_backup(backup_context->request, response); - }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_tracker, + [this, backup_context]() { + backup_response response; + on_cold_backup(backup_context->request, response); + }, + get_gpid().thread_hash()); } } diff --git a/src/replica/replica_check.cpp b/src/replica/replica_check.cpp index c4a86dec59..adb4771b52 100644 --- a/src/replica/replica_check.cpp +++ b/src/replica/replica_check.cpp @@ -92,12 +92,12 @@ void replica::init_group_check() return; CHECK(nullptr == _primary_states.group_check_task, ""); - 
_primary_states.group_check_task = - tasking::enqueue_timer(LPC_GROUP_CHECK, - &_tracker, - [this] { broadcast_group_check(); }, - std::chrono::milliseconds(FLAGS_group_check_interval_ms), - get_gpid().thread_hash()); + _primary_states.group_check_task = tasking::enqueue_timer( + LPC_GROUP_CHECK, + &_tracker, + [this] { broadcast_group_check(); }, + std::chrono::milliseconds(FLAGS_group_check_interval_ms), + get_gpid().thread_hash()); } void replica::broadcast_group_check() @@ -152,17 +152,17 @@ void replica::broadcast_group_check() LOG_INFO_PREFIX("send group check to {} with state {}", hp, enum_to_string(it->second)); - dsn::task_ptr callback_task = - rpc::call(addr, - RPC_GROUP_CHECK, - *request, - &_tracker, - [=](error_code err, group_check_response &&resp) { - auto alloc = std::make_shared(std::move(resp)); - on_group_check_reply(err, request, alloc); - }, - std::chrono::milliseconds(0), - get_gpid().thread_hash()); + dsn::task_ptr callback_task = rpc::call( + addr, + RPC_GROUP_CHECK, + *request, + &_tracker, + [=](error_code err, group_check_response &&resp) { + auto alloc = std::make_shared(std::move(resp)); + on_group_check_reply(err, request, alloc); + }, + std::chrono::milliseconds(0), + get_gpid().thread_hash()); _primary_states.group_check_pending_replies[hp] = callback_task; } @@ -274,10 +274,11 @@ void replica::on_group_check_reply(error_code err, void replica::inject_error(error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); } } // namespace replication } // namespace dsn diff --git a/src/replica/replica_chkpt.cpp b/src/replica/replica_chkpt.cpp index ea8ff41c37..368d1a4ab0 100644 --- a/src/replica/replica_chkpt.cpp +++ b/src/replica/replica_chkpt.cpp @@ -300,11 +300,12 @@ void replica::init_checkpoint(bool is_emergency) 
// // we may issue a new task to do backgroup_async_checkpoint // even if the old one hasn't finished yet - tasking::enqueue(LPC_CHECKPOINT_REPLICA, - &_tracker, - [this, is_emergency] { background_async_checkpoint(is_emergency); }, - 0, - 10_ms); + tasking::enqueue( + LPC_CHECKPOINT_REPLICA, + &_tracker, + [this, is_emergency] { background_async_checkpoint(is_emergency); }, + 0, + 10_ms); if (is_emergency) { METRIC_VAR_INCREMENT(emergency_checkpoints); @@ -377,11 +378,12 @@ error_code replica::background_async_checkpoint(bool is_emergency) LOG_INFO_PREFIX("call app.async_checkpoint() returns ERR_TRY_AGAIN, time_used_ns = {}" ", schedule later checkpoint after 10 seconds", used_time); - tasking::enqueue(LPC_PER_REPLICA_CHECKPOINT_TIMER, - &_tracker, - [this] { init_checkpoint(false); }, - get_gpid().thread_hash(), - std::chrono::seconds(10)); + tasking::enqueue( + LPC_PER_REPLICA_CHECKPOINT_TIMER, + &_tracker, + [this] { init_checkpoint(false); }, + get_gpid().thread_hash(), + std::chrono::seconds(10)); return err; } @@ -445,11 +447,11 @@ void replica::catch_up_with_private_logs(partition_status::type s) auto err = apply_learned_state_from_private_log(state); if (s == partition_status::PS_POTENTIAL_SECONDARY) { - _potential_secondary_states.learn_remote_files_completed_task = - tasking::create_task(LPC_CHECKPOINT_REPLICA_COMPLETED, - &_tracker, - [this, err]() { this->on_learn_remote_state_completed(err); }, - get_gpid().thread_hash()); + _potential_secondary_states.learn_remote_files_completed_task = tasking::create_task( + LPC_CHECKPOINT_REPLICA_COMPLETED, + &_tracker, + [this, err]() { this->on_learn_remote_state_completed(err); }, + get_gpid().thread_hash()); _potential_secondary_states.learn_remote_files_completed_task->enqueue(); } else if (s == partition_status::PS_PARTITION_SPLIT) { _split_states.async_learn_task = tasking::enqueue( @@ -458,11 +460,11 @@ void replica::catch_up_with_private_logs(partition_status::type s) 
std::bind(&replica_split_manager::child_catch_up_states, get_split_manager()), get_gpid().thread_hash()); } else { - _secondary_states.checkpoint_completed_task = - tasking::create_task(LPC_CHECKPOINT_REPLICA_COMPLETED, - &_tracker, - [this, err]() { this->on_checkpoint_completed(err); }, - get_gpid().thread_hash()); + _secondary_states.checkpoint_completed_task = tasking::create_task( + LPC_CHECKPOINT_REPLICA_COMPLETED, + &_tracker, + [this, err]() { this->on_checkpoint_completed(err); }, + get_gpid().thread_hash()); _secondary_states.checkpoint_completed_task->enqueue(); } } diff --git a/src/replica/replica_config.cpp b/src/replica/replica_config.cpp index d6e97ed99a..ab03c4c4ee 100644 --- a/src/replica/replica_config.cpp +++ b/src/replica/replica_config.cpp @@ -425,14 +425,14 @@ void replica::update_configuration_on_meta_server(config_type::type type, rpc_address target( dsn::dns_resolver::instance().resolve_address(_stub->_failure_detector->get_servers())); - _primary_states.reconfiguration_task = - rpc::call(target, - msg, - &_tracker, - [=](error_code err, dsn::message_ex *reqmsg, dsn::message_ex *response) { - on_update_configuration_on_meta_server_reply(err, reqmsg, response, request); - }, - get_gpid().thread_hash()); + _primary_states.reconfiguration_task = rpc::call( + target, + msg, + &_tracker, + [=](error_code err, dsn::message_ex *reqmsg, dsn::message_ex *response) { + on_update_configuration_on_meta_server_reply(err, reqmsg, response, request); + }, + get_gpid().thread_hash()); } void replica::on_update_configuration_on_meta_server_reply( @@ -464,7 +464,7 @@ void replica::on_update_configuration_on_meta_server_reply( _primary_states.reconfiguration_task = tasking::enqueue( LPC_DELAY_UPDATE_CONFIG, &_tracker, - [ this, request, req2 = std::move(req) ]() { + [this, request, req2 = std::move(req)]() { rpc_address target(dsn::dns_resolver::instance().resolve_address( _stub->_failure_detector->get_servers())); rpc_response_task_ptr t = 
rpc::create_rpc_response_task( @@ -1105,9 +1105,8 @@ void replica::on_config_sync(const app_info &info, if (status() == partition_status::PS_INACTIVE && !_inactive_is_transient) { if (config.hp_primary == _stub->primary_host_port() // dead primary - || - !config.hp_primary // primary is dead (otherwise let primary remove this) - ) { + || !config.hp_primary // primary is dead (otherwise let primary remove this) + ) { LOG_INFO_PREFIX("downgrade myself as inactive is not transient, remote_config({})", boost::lexical_cast(config)); _stub->remove_replica_on_meta_server(_app_info, config); diff --git a/src/replica/replica_disk_migrator.cpp b/src/replica/replica_disk_migrator.cpp index 47d48d0669..189c81c42b 100644 --- a/src/replica/replica_disk_migrator.cpp +++ b/src/replica/replica_disk_migrator.cpp @@ -56,7 +56,6 @@ void replica_disk_migrator::on_migrate_replica(replica_disk_migrate_rpc rpc) LPC_REPLICATION_COMMON, _replica->tracker(), [=]() { - if (!check_migration_args(rpc)) { return; } diff --git a/src/replica/replica_failover.cpp b/src/replica/replica_failover.cpp index 6cd7895f7a..994b38fb93 100644 --- a/src/replica/replica_failover.cpp +++ b/src/replica/replica_failover.cpp @@ -121,5 +121,5 @@ void replica::on_meta_server_disconnected() set_inactive_state_transient(true); } } -} -} // namespace +} // namespace replication +} // namespace dsn diff --git a/src/replica/replica_init.cpp b/src/replica/replica_init.cpp index 1cee376e64..664587f636 100644 --- a/src/replica/replica_init.cpp +++ b/src/replica/replica_init.cpp @@ -157,10 +157,11 @@ error_code replica::init_app_and_prepare_list(bool create_new) err = _private_log->open( [this](int log_length, mutation_ptr &mu) { return replay_mutation(mu, true); }, [this](error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, 
+ get_gpid().thread_hash()); }, replay_condition); @@ -227,21 +228,22 @@ error_code replica::init_app_and_prepare_list(bool create_new) LOG_INFO_PREFIX("plog_dir = {}", log_dir); err = _private_log->open(nullptr, [this](error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); }); } if (err == ERR_OK) { if (_checkpoint_timer == nullptr && !FLAGS_checkpoint_disabled) { - _checkpoint_timer = - tasking::enqueue_timer(LPC_PER_REPLICA_CHECKPOINT_TIMER, - &_tracker, - [this] { on_checkpoint_timer(); }, - std::chrono::seconds(FLAGS_checkpoint_interval_seconds), - get_gpid().thread_hash()); + _checkpoint_timer = tasking::enqueue_timer( + LPC_PER_REPLICA_CHECKPOINT_TIMER, + &_tracker, + [this] { on_checkpoint_timer(); }, + std::chrono::seconds(FLAGS_checkpoint_interval_seconds), + get_gpid().thread_hash()); } _backup_mgr->start_collect_backup_info(); diff --git a/src/replica/replica_learn.cpp b/src/replica/replica_learn.cpp index bb8414ce4e..b1c30692e5 100644 --- a/src/replica/replica_learn.cpp +++ b/src/replica/replica_learn.cpp @@ -174,13 +174,14 @@ void replica::init_learn(uint64_t signature) METRIC_VAR_INCREMENT(learn_rounds); _potential_secondary_states.learning_round_is_running = true; _potential_secondary_states.catchup_with_private_log_task = - tasking::create_task(LPC_CATCHUP_WITH_PRIVATE_LOGS, - &_tracker, - [this]() { - this->catch_up_with_private_logs( - partition_status::PS_POTENTIAL_SECONDARY); - }, - get_gpid().thread_hash()); + tasking::create_task( + LPC_CATCHUP_WITH_PRIVATE_LOGS, + &_tracker, + [this]() { + this->catch_up_with_private_logs( + partition_status::PS_POTENTIAL_SECONDARY); + }, + get_gpid().thread_hash()); _potential_secondary_states.catchup_with_private_log_task->enqueue(); return; // incomplete @@ -261,7 +262,7 @@ void 
replica::init_learn(uint64_t signature) dsn::dns_resolver::instance().resolve_address(primary), msg, &_tracker, - [ this, req_cap = std::move(request) ](error_code err, learn_response && resp) mutable { + [this, req_cap = std::move(request)](error_code err, learn_response &&resp) mutable { on_learn_reply(err, std::move(req_cap), std::move(resp)); }); } @@ -699,14 +700,14 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response } if (err != ERR_OK) { - _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - err, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { + _potential_secondary_states.learn_remote_files_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + err, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { on_copy_remote_state_completed( err, 0, copy_start, std::move(req_cap), std::move(resp_cap)); }); @@ -849,14 +850,14 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response // go to next stage _potential_secondary_states.learning_status = learner_status::LearningWithPrepare; - _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - err, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { + _potential_secondary_states.learn_remote_files_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + err, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { on_copy_remote_state_completed( err, 0, copy_start, std::move(req_cap), std::move(resp_cap)); }); @@ -876,18 +877,18 @@ void 
replica::on_learn_reply(error_code err, learn_request &&req, learn_response learn_dir); _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { - on_copy_remote_state_completed(ERR_FILE_OPERATION_FAILED, - 0, - copy_start, - std::move(req_cap), - std::move(resp_cap)); - }); + tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { + on_copy_remote_state_completed(ERR_FILE_OPERATION_FAILED, + 0, + copy_start, + std::move(req_cap), + std::move(resp_cap)); + }); _potential_secondary_states.learn_remote_files_task->enqueue(); return; } @@ -915,23 +916,21 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response high_priority, LPC_REPLICATION_COPY_REMOTE_FILES, &_tracker, - [ - this, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_copy = resp - ](error_code err, size_t sz) mutable { + [this, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_copy = resp](error_code err, size_t sz) mutable { on_copy_remote_state_completed( err, sz, copy_start, std::move(req_cap), std::move(resp_copy)); }); } else { - _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { + _potential_secondary_states.learn_remote_files_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { 
on_copy_remote_state_completed( ERR_OK, 0, copy_start, std::move(req_cap), std::move(resp_cap)); }); @@ -1177,11 +1176,11 @@ void replica::on_copy_remote_state_completed(error_code err, // cleanup _potential_secondary_states.learn_remote_files_task = nullptr; - _potential_secondary_states.learn_remote_files_completed_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES_COMPLETED, - &_tracker, - [this, err]() { on_learn_remote_state_completed(err); }, - get_gpid().thread_hash()); + _potential_secondary_states.learn_remote_files_completed_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES_COMPLETED, + &_tracker, + [this, err]() { on_learn_remote_state_completed(err); }, + get_gpid().thread_hash()); _potential_secondary_states.learn_remote_files_completed_task->enqueue(); } @@ -1307,11 +1306,11 @@ void replica::notify_learn_completion() host_port primary; GET_HOST_PORT(_config, primary, primary); - _potential_secondary_states.completion_notify_task = - rpc::call(dsn::dns_resolver::instance().resolve_address(primary), msg, &_tracker, [ - this, - report = std::move(report) - ](error_code err, learn_notify_response && resp) mutable { + _potential_secondary_states.completion_notify_task = rpc::call( + dsn::dns_resolver::instance().resolve_address(primary), + msg, + &_tracker, + [this, report = std::move(report)](error_code err, learn_notify_response &&resp) mutable { on_learn_completion_notification_reply(err, std::move(report), std::move(resp)); }); } @@ -1477,10 +1476,11 @@ error_code replica::apply_learned_state_from_private_log(learn_state &state) _app->learn_dir(), [](int log_length, mutation_ptr &mu) { return true; }, [this](error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); }); if (err != ERR_OK) { 
LOG_ERROR_PREFIX("failed to reset this private log with logs in learn/ dir: {}", err); @@ -1522,21 +1522,21 @@ error_code replica::apply_learned_state_from_private_log(learn_state &state) } }); - err = mutation_log::replay(state.files, - [&plist](int log_length, mutation_ptr &mu) { - auto d = mu->data.header.decree; - if (d <= plist.last_committed_decree()) - return false; - - auto old = plist.get_mutation_by_decree(d); - if (old != nullptr && - old->data.header.ballot >= mu->data.header.ballot) - return false; - - plist.prepare(mu, partition_status::PS_SECONDARY); - return true; - }, - offset); + err = mutation_log::replay( + state.files, + [&plist](int log_length, mutation_ptr &mu) { + auto d = mu->data.header.decree; + if (d <= plist.last_committed_decree()) + return false; + + auto old = plist.get_mutation_by_decree(d); + if (old != nullptr && old->data.header.ballot >= mu->data.header.ballot) + return false; + + plist.prepare(mu, partition_status::PS_SECONDARY); + return true; + }, + offset); // update first_learn_start_decree, the position where the first round of LT_LOG starts from. 
// we use this value to determine whether to learn back from min_confirmed_decree diff --git a/src/replica/replica_restore.cpp b/src/replica/replica_restore.cpp index 063a294c46..a5c5d39ad4 100644 --- a/src/replica/replica_restore.cpp +++ b/src/replica/replica_restore.cpp @@ -263,10 +263,11 @@ dsn::error_code replica::find_valid_checkpoint(const configuration_restore_reque // TODO: check the md5sum read_response r; create_response.file_handle - ->read(read_request{0, -1}, - TASK_CODE_EXEC_INLINED, - [&r](const read_response &resp) { r = resp; }, - nullptr) + ->read( + read_request{0, -1}, + TASK_CODE_EXEC_INLINED, + [&r](const read_response &resp) { r = resp; }, + nullptr) ->wait(); if (r.err != dsn::ERR_OK) { @@ -470,5 +471,5 @@ void replica::update_restore_progress(uint64_t f_size) cur_download_size, cur_porgress); } -} -} +} // namespace replication +} // namespace dsn diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp index a12c22efb6..2abd52b443 100644 --- a/src/replica/replica_stub.cpp +++ b/src/replica/replica_stub.cpp @@ -520,34 +520,34 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f continue; } - load_tasks.push_back( - tasking::create_task(LPC_REPLICATION_INIT_LOAD, - &_tracker, - [this, dn, dir, &rps, &rps_lock] { - LOG_INFO("process dir {}", dir); - - auto r = load_replica(dn, dir.c_str()); - if (r == nullptr) { - return; - } - LOG_INFO("{}@{}: load replica '{}' success, = <{}, {}>, last_prepared_decree = {}", - r->get_gpid(), - dsn_primary_host_port(), - dir, - r->last_durable_decree(), - r->last_committed_decree(), - r->last_prepared_decree()); - - utils::auto_lock l(rps_lock); - CHECK(rps.find(r->get_gpid()) == rps.end(), - "conflict replica dir: {} <--> {}", - r->dir(), - rps[r->get_gpid()]->dir()); - - rps[r->get_gpid()] = r; - }, - load_tasks.size())); + load_tasks.push_back(tasking::create_task( + LPC_REPLICATION_INIT_LOAD, + &_tracker, + [this, dn, dir, &rps, &rps_lock] { + 
LOG_INFO("process dir {}", dir); + + auto r = load_replica(dn, dir.c_str()); + if (r == nullptr) { + return; + } + LOG_INFO("{}@{}: load replica '{}' success, = <{}, {}>, last_prepared_decree = {}", + r->get_gpid(), + dsn_primary_host_port(), + dir, + r->last_durable_decree(), + r->last_committed_decree(), + r->last_prepared_decree()); + + utils::auto_lock l(rps_lock); + CHECK(rps.find(r->get_gpid()) == rps.end(), + "conflict replica dir: {} <--> {}", + r->dir(), + rps[r->get_gpid()]->dir()); + + rps[r->get_gpid()] = r; + }, + load_tasks.size())); load_tasks.back()->enqueue(); } } @@ -611,13 +611,13 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f // disk stat if (!FLAGS_disk_stat_disabled) { - _disk_stat_timer_task = - ::dsn::tasking::enqueue_timer(LPC_DISK_STAT, - &_tracker, - [this]() { on_disk_stat(); }, - std::chrono::seconds(FLAGS_disk_stat_interval_seconds), - 0, - std::chrono::seconds(FLAGS_disk_stat_interval_seconds)); + _disk_stat_timer_task = ::dsn::tasking::enqueue_timer( + LPC_DISK_STAT, + &_tracker, + [this]() { on_disk_stat(); }, + std::chrono::seconds(FLAGS_disk_stat_interval_seconds), + 0, + std::chrono::seconds(FLAGS_disk_stat_interval_seconds)); } // attach rps @@ -639,11 +639,12 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f if (now_time_ms < dsn::utils::process_start_millis() + delay_time_ms) { uint64_t delay = dsn::utils::process_start_millis() + delay_time_ms - now_time_ms; LOG_INFO("delay for {} ms to make failure detector timeout", delay); - tasking::enqueue(LPC_REPLICA_SERVER_DELAY_START, - &_tracker, - [this]() { this->initialize_start(); }, - 0, - std::chrono::milliseconds(delay)); + tasking::enqueue( + LPC_REPLICA_SERVER_DELAY_START, + &_tracker, + [this]() { this->initialize_start(); }, + 0, + std::chrono::milliseconds(delay)); } else { initialize_start(); } @@ -660,16 +661,16 @@ void replica_stub::initialize_start() // start timer for configuration sync if 
(!FLAGS_config_sync_disabled) { - _config_sync_timer_task = - tasking::enqueue_timer(LPC_QUERY_CONFIGURATION_ALL, - &_tracker, - [this]() { - zauto_lock l(_state_lock); - this->query_configuration_by_node(); - }, - std::chrono::milliseconds(FLAGS_config_sync_interval_ms), - 0, - std::chrono::milliseconds(FLAGS_config_sync_interval_ms)); + _config_sync_timer_task = tasking::enqueue_timer( + LPC_QUERY_CONFIGURATION_ALL, + &_tracker, + [this]() { + zauto_lock l(_state_lock); + this->query_configuration_by_node(); + }, + std::chrono::milliseconds(FLAGS_config_sync_interval_ms), + 0, + std::chrono::milliseconds(FLAGS_config_sync_interval_ms)); } #ifdef DSN_ENABLE_GPERF @@ -759,7 +760,7 @@ std::vector replica_stub::get_all_primaries() const std::vector result; { zauto_read_lock l(_replicas_lock); - for (const auto & [ _, r ] : _replicas) { + for (const auto &[_, r] : _replicas) { if (r->status() != partition_status::PS_PRIMARY) { continue; } @@ -1322,15 +1323,16 @@ void replica_stub::on_node_query_reply(error_code err, int delay_ms = 500; LOG_INFO("resend query node partitions request after {} ms for resp.err = ERR_BUSY", delay_ms); - _config_query_task = tasking::enqueue(LPC_QUERY_CONFIGURATION_ALL, - &_tracker, - [this]() { - zauto_lock l(_state_lock); - _config_query_task = nullptr; - this->query_configuration_by_node(); - }, - 0, - std::chrono::milliseconds(delay_ms)); + _config_query_task = tasking::enqueue( + LPC_QUERY_CONFIGURATION_ALL, + &_tracker, + [this]() { + zauto_lock l(_state_lock); + _config_query_task = nullptr; + this->query_configuration_by_node(); + }, + 0, + std::chrono::milliseconds(delay_ms)); return; } if (resp.err != ERR_OK) { @@ -1632,7 +1634,7 @@ void replica_stub::on_replicas_stat() uint64_t splitting_max_duration_time_ms = 0; uint64_t splitting_max_async_learn_time_ms = 0; uint64_t splitting_max_copy_file_size = 0; - for (const auto & [ _, rep_stat_info ] : rep_stat_info_by_gpid) { + for (const auto &[_, rep_stat_info] : 
rep_stat_info_by_gpid) { const auto &rep = rep_stat_info.rep; if (rep->status() == partition_status::PS_POTENTIAL_SECONDARY) { learning_count++; @@ -2076,11 +2078,12 @@ task_ptr replica_stub::begin_close_replica(replica_ptr r) app_info a_info = *(r->get_app_info()); replica_info r_info; get_replica_info(r_info, r); - task_ptr task = tasking::enqueue(LPC_CLOSE_REPLICA, - &_tracker, - [=]() { close_replica(r); }, - 0, - std::chrono::milliseconds(delay_ms)); + task_ptr task = tasking::enqueue( + LPC_CLOSE_REPLICA, + &_tracker, + [=]() { close_replica(r); }, + 0, + std::chrono::milliseconds(delay_ms)); _closing_replicas[id] = std::make_tuple(task, r, std::move(a_info), std::move(r_info)); METRIC_VAR_INCREMENT(closing_replicas); return task; @@ -2455,20 +2458,21 @@ replica_stub::exec_command_on_replica(const std::vector &args, std::map> results; // id => status,result for (auto &kv : choosed_rs) { replica_ptr rep = kv.second; - task_ptr tsk = tasking::enqueue(LPC_EXEC_COMMAND_ON_REPLICA, - rep->tracker(), - [rep, &func, &results_lock, &results]() { - partition_status::type status = rep->status(); - if (status != partition_status::PS_PRIMARY && - status != partition_status::PS_SECONDARY) - return; - std::string result = func(rep); - ::dsn::zauto_lock l(results_lock); - auto &value = results[rep->get_gpid()]; - value.first = status; - value.second = result; - }, - rep->get_gpid().thread_hash()); + task_ptr tsk = tasking::enqueue( + LPC_EXEC_COMMAND_ON_REPLICA, + rep->tracker(), + [rep, &func, &results_lock, &results]() { + partition_status::type status = rep->status(); + if (status != partition_status::PS_PRIMARY && + status != partition_status::PS_SECONDARY) + return; + std::string result = func(rep); + ::dsn::zauto_lock l(results_lock); + auto &value = results[rep->get_gpid()]; + value.first = status; + value.second = result; + }, + rep->get_gpid().thread_hash()); tasks.emplace_back(std::move(tsk)); } @@ -2716,10 +2720,11 @@ 
replica_stub::split_replica_exec(dsn::task_code code, gpid pid, local_execution [](absl::string_view) { return ERR_OK; }); replica_ptr replica = pid.get_app_id() == 0 ? nullptr : get_replica(pid); if (replica && handler) { - tasking::enqueue(code, - replica.get()->tracker(), - [handler, replica]() { handler(replica->get_split_manager()); }, - pid.thread_hash()); + tasking::enqueue( + code, + replica.get()->tracker(), + [handler, replica]() { handler(replica->get_split_manager()); }, + pid.thread_hash()); return ERR_OK; } LOG_WARNING("replica({}) is invalid", pid); diff --git a/src/replica/replica_throttle.cpp b/src/replica/replica_throttle.cpp index 1d5ee36efa..8092206f4b 100644 --- a/src/replica/replica_throttle.cpp +++ b/src/replica/replica_throttle.cpp @@ -47,19 +47,20 @@ namespace replication { tasking::enqueue( \ LPC_##op_type##_THROTTLING_DELAY, \ &_tracker, \ - [ this, req = message_ptr(request) ]() { on_client_##op_type(req, true); }, \ + [this, req = message_ptr(request)]() { on_client_##op_type(req, true); }, \ get_gpid().thread_hash(), \ std::chrono::milliseconds(delay_ms)); \ METRIC_VAR_INCREMENT(throttling_delayed_##op_type##_requests); \ } else { /** type == utils::throttling_controller::REJECT **/ \ if (delay_ms > 0) { \ - tasking::enqueue(LPC_##op_type##_THROTTLING_DELAY, \ - &_tracker, \ - [ this, req = message_ptr(request) ]() { \ - response_client_##op_type(req, ERR_BUSY); \ - }, \ - get_gpid().thread_hash(), \ - std::chrono::milliseconds(delay_ms)); \ + tasking::enqueue( \ + LPC_##op_type##_THROTTLING_DELAY, \ + &_tracker, \ + [this, req = message_ptr(request)]() { \ + response_client_##op_type(req, ERR_BUSY); \ + }, \ + get_gpid().thread_hash(), \ + std::chrono::milliseconds(delay_ms)); \ } else { \ response_client_##op_type(request, ERR_BUSY); \ } \ @@ -89,11 +90,12 @@ bool replica::throttle_backup_request(message_ex *request) request->header->client.timeout_ms, 1, delay_ms); if (type != utils::throttling_controller::PASS) { if (type == 
utils::throttling_controller::DELAY) { - tasking::enqueue(LPC_read_THROTTLING_DELAY, - &_tracker, - [ this, req = message_ptr(request) ]() { on_client_read(req, true); }, - get_gpid().thread_hash(), - std::chrono::milliseconds(delay_ms)); + tasking::enqueue( + LPC_read_THROTTLING_DELAY, + &_tracker, + [this, req = message_ptr(request)]() { on_client_read(req, true); }, + get_gpid().thread_hash(), + std::chrono::milliseconds(delay_ms)); METRIC_VAR_INCREMENT(throttling_delayed_backup_requests); } else { /** type == utils::throttling_controller::REJECT **/ METRIC_VAR_INCREMENT(throttling_rejected_backup_requests); diff --git a/src/replica/split/replica_split_manager.cpp b/src/replica/split/replica_split_manager.cpp index 9844c8ee68..e0b193692f 100644 --- a/src/replica/split/replica_split_manager.cpp +++ b/src/replica/split/replica_split_manager.cpp @@ -484,21 +484,21 @@ replica_split_manager::child_apply_private_logs(std::vector plog_fi }); // replay private log - ec = mutation_log::replay(plog_files, - [&plist](int log_length, mutation_ptr &mu) { - decree d = mu->data.header.decree; - if (d <= plist.last_committed_decree()) { - return false; - } - mutation_ptr origin_mu = plist.get_mutation_by_decree(d); - if (origin_mu != nullptr && - origin_mu->data.header.ballot >= mu->data.header.ballot) { - return false; - } - plist.prepare(mu, partition_status::PS_SECONDARY); - return true; - }, - offset); + ec = mutation_log::replay( + plog_files, + [&plist](int log_length, mutation_ptr &mu) { + decree d = mu->data.header.decree; + if (d <= plist.last_committed_decree()) { + return false; + } + mutation_ptr origin_mu = plist.get_mutation_by_decree(d); + if (origin_mu != nullptr && origin_mu->data.header.ballot >= mu->data.header.ballot) { + return false; + } + plist.prepare(mu, partition_status::PS_SECONDARY); + return true; + }, + offset); if (ec != ERR_OK) { LOG_ERROR_PREFIX( "replay private_log files failed, file count={}, app last_committed_decree={}", diff --git 
a/src/replica/storage/simple_kv/simple_kv.app.example.h b/src/replica/storage/simple_kv/simple_kv.app.example.h index d1345b79ec..97478d27b5 100644 --- a/src/replica/storage/simple_kv/simple_kv.app.example.h +++ b/src/replica/storage/simple_kv/simple_kv.app.example.h @@ -48,10 +48,11 @@ class simple_kv_client_app : public ::dsn::service_app const auto hp = host_port::from_string(args[2]); _simple_kv_client.reset(new simple_kv_client(args[1].c_str(), {hp}, args[3].c_str())); - _timer = ::dsn::tasking::enqueue_timer(LPC_SIMPLE_KV_TEST_TIMER, - &_tracker, - [this] { on_test_timer(); }, - std::chrono::seconds(1)); + _timer = ::dsn::tasking::enqueue_timer( + LPC_SIMPLE_KV_TEST_TIMER, + &_tracker, + [this] { on_test_timer(); }, + std::chrono::seconds(1)); return ::dsn::ERR_OK; } diff --git a/src/replica/storage/simple_kv/simple_kv.code.definition.h b/src/replica/storage/simple_kv/simple_kv.code.definition.h index b668f3a950..370a2eaba6 100644 --- a/src/replica/storage/simple_kv/simple_kv.code.definition.h +++ b/src/replica/storage/simple_kv/simple_kv.code.definition.h @@ -37,6 +37,6 @@ DEFINE_STORAGE_WRITE_RPC_CODE(RPC_SIMPLE_KV_SIMPLE_KV_APPEND, ALLOW_BATCH, NOT_I // test timer task code DEFINE_TASK_CODE(LPC_SIMPLE_KV_TEST_TIMER, TASK_PRIORITY_COMMON, ::dsn::THREAD_POOL_DEFAULT) -} -} -} +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/simple_kv.server.h b/src/replica/storage/simple_kv/simple_kv.server.h index 84d6ab2c82..6acfc85a05 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.h +++ b/src/replica/storage/simple_kv/simple_kv.server.h @@ -95,6 +95,6 @@ class simple_kv_service : public replication_app_base, public storage_serverlet< svc->on_append(pr, reply); } }; -} -} -} +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp index 
a0002bc54a..473d322818 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp +++ b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp @@ -329,6 +329,6 @@ ::dsn::error_code simple_kv_service_impl::storage_apply_checkpoint(chkpt_apply_m } } } -} -} -} // namespace +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.h b/src/replica/storage/simple_kv/simple_kv.server.impl.h index b296c1f83d..d27608df19 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.impl.h +++ b/src/replica/storage/simple_kv/simple_kv.server.impl.h @@ -120,6 +120,6 @@ class simple_kv_service_impl : public simple_kv_service simple_kv _store; int64_t _last_durable_decree; }; -} -} -} // namespace +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/case.cpp b/src/replica/storage/simple_kv/test/case.cpp index bd4b9464dc..d94eafaa56 100644 --- a/src/replica/storage/simple_kv/test/case.cpp +++ b/src/replica/storage/simple_kv/test/case.cpp @@ -1395,6 +1395,6 @@ void test_case::internal_register_creator(const std::string &name, case_line_cre CHECK(_creators.find(name) == _creators.end(), ""); _creators[name] = creator; } -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/case.h b/src/replica/storage/simple_kv/test/case.h index 4e22a3a9b5..1cd5fe0547 100644 --- a/src/replica/storage/simple_kv/test/case.h +++ b/src/replica/storage/simple_kv/test/case.h @@ -511,9 +511,9 @@ class test_case : public dsn::utils::singleton int _null_loop_count; dsn::zsemaphore _client_sema; }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::case_line); USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::event); diff --git a/src/replica/storage/simple_kv/test/checker.h 
b/src/replica/storage/simple_kv/test/checker.h index c94059edda..87e6d8e1a2 100644 --- a/src/replica/storage/simple_kv/test/checker.h +++ b/src/replica/storage/simple_kv/test/checker.h @@ -116,6 +116,6 @@ class wrap_checker : public dsn::tools::checker }; void install_checkers(); -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/client.cpp b/src/replica/storage/simple_kv/test/client.cpp index dbafc10d20..ff1587bda5 100644 --- a/src/replica/storage/simple_kv/test/client.cpp +++ b/src/replica/storage/simple_kv/test/client.cpp @@ -141,11 +141,12 @@ void simple_kv_client_app::begin_write(int id, ctx->req.value = value; ctx->timeout_ms = timeout_ms; auto &req = ctx->req; - _simple_kv_client->write(req, - [ctx](error_code err, int32_t resp) { - test_case::instance().on_end_write(ctx->id, err, resp); - }, - std::chrono::milliseconds(timeout_ms)); + _simple_kv_client->write( + req, + [ctx](error_code err, int32_t resp) { + test_case::instance().on_end_write(ctx->id, err, resp); + }, + std::chrono::milliseconds(timeout_ms)); } void simple_kv_client_app::send_config_to_meta(const host_port &receiver, @@ -183,11 +184,12 @@ void simple_kv_client_app::begin_read(int id, const std::string &key, int timeou ctx->id = id; ctx->key = key; ctx->timeout_ms = timeout_ms; - _simple_kv_client->read(key, - [ctx](error_code err, std::string &&resp) { - test_case::instance().on_end_read(ctx->id, err, resp); - }, - std::chrono::milliseconds(timeout_ms)); + _simple_kv_client->read( + key, + [ctx](error_code err, std::string &&resp) { + test_case::instance().on_end_read(ctx->id, err, resp); + }, + std::chrono::milliseconds(timeout_ms)); } } // namespace test } // namespace replication diff --git a/src/replica/storage/simple_kv/test/client.h b/src/replica/storage/simple_kv/test/client.h index 623863d4ca..138e751528 100644 --- a/src/replica/storage/simple_kv/test/client.h +++ b/src/replica/storage/simple_kv/test/client.h 
@@ -68,6 +68,6 @@ class simple_kv_client_app : public ::dsn::service_app host_port _service_addr; dsn::task_tracker _tracker; }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/common.cpp b/src/replica/storage/simple_kv/test/common.cpp index 5d2e8ced29..6564e7fb72 100644 --- a/src/replica/storage/simple_kv/test/common.cpp +++ b/src/replica/storage/simple_kv/test/common.cpp @@ -325,6 +325,6 @@ void parti_config::convert_from(const partition_configuration &c) } std::sort(secondaries.begin(), secondaries.end()); } -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/common.h b/src/replica/storage/simple_kv/test/common.h index f120b167da..2a0acf3b89 100644 --- a/src/replica/storage/simple_kv/test/common.h +++ b/src/replica/storage/simple_kv/test/common.h @@ -205,9 +205,9 @@ struct parti_config return os << pc.to_string(); } }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::parti_config); USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::replica_id); diff --git a/src/replica/storage/simple_kv/test/injector.cpp b/src/replica/storage/simple_kv/test/injector.cpp index 0b3f856ba6..9dd9d3a4bf 100644 --- a/src/replica/storage/simple_kv/test/injector.cpp +++ b/src/replica/storage/simple_kv/test/injector.cpp @@ -198,6 +198,6 @@ void test_injector::install(service_spec &svc_spec) } test_injector::test_injector(const char *name) : toollet(name) {} -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/injector.h b/src/replica/storage/simple_kv/test/injector.h index b1fc30ff06..7419da9f91 100644 --- a/src/replica/storage/simple_kv/test/injector.h +++ b/src/replica/storage/simple_kv/test/injector.h @@ -40,6 +40,6 @@ class test_injector : public dsn::tools::toollet 
test_injector(const char *name); virtual void install(service_spec &spec); }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp index 19db55f6a5..56fc195bf9 100644 --- a/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp +++ b/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp @@ -1,28 +1,28 @@ /* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. 
-*/ + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ #include "simple_kv.server.impl.h" @@ -362,6 +362,6 @@ ::dsn::error_code simple_kv_service_impl::storage_apply_checkpoint(chkpt_apply_m } } } -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/simple_kv.server.impl.h b/src/replica/storage/simple_kv/test/simple_kv.server.impl.h index 8b80396a02..a66c30583e 100644 --- a/src/replica/storage/simple_kv/test/simple_kv.server.impl.h +++ b/src/replica/storage/simple_kv/test/simple_kv.server.impl.h @@ -1,28 +1,28 @@ /* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. 
-*/ + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ #pragma once @@ -133,6 +133,6 @@ class simple_kv_service_impl : public application::simple_kv_service int64_t _last_durable_decree; }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/test/log_file_test.cpp b/src/replica/test/log_file_test.cpp index 32f491b567..3ac902451a 100644 --- a/src/replica/test/log_file_test.cpp +++ b/src/replica/test/log_file_test.cpp @@ -63,14 +63,15 @@ TEST_P(log_file_test, commit_log_blocks) for (int i = 0; i < 5; i++) { appender->append_mutation(create_test_mutation(1 + i, "test"), nullptr); } - auto tsk = _logf->commit_log_blocks(*appender, - LPC_WRITE_REPLICATION_LOG_PRIVATE, - nullptr, - [&](error_code err, size_t sz) { - ASSERT_EQ(err, ERR_OK); - ASSERT_EQ(sz, appender->size()); - }, - 0); + auto tsk = _logf->commit_log_blocks( + *appender, + LPC_WRITE_REPLICATION_LOG_PRIVATE, + nullptr, + [&](error_code err, size_t sz) { + ASSERT_EQ(err, ERR_OK); + ASSERT_EQ(sz, appender->size()); + }, + 0); tsk->wait(); ASSERT_EQ(tsk->get_aio_context()->buffer_size, appender->size()); ASSERT_EQ(tsk->get_aio_context()->file_offset, @@ -83,14 +84,15 @@ TEST_P(log_file_test, commit_log_blocks) appender->append_mutation(create_test_mutation(1 + i, std::string(1024, 'a')), nullptr); } ASSERT_GT(appender->all_blocks().size(), 1); - tsk = _logf->commit_log_blocks(*appender, - LPC_WRITE_REPLICATION_LOG_PRIVATE, - nullptr, - [&](error_code err, size_t sz) { - ASSERT_EQ(err, ERR_OK); - ASSERT_EQ(sz, appender->size()); - }, - 0); + tsk = _logf->commit_log_blocks( + *appender, + LPC_WRITE_REPLICATION_LOG_PRIVATE, + nullptr, + [&](error_code err, size_t sz) { + ASSERT_EQ(err, ERR_OK); + ASSERT_EQ(sz, appender->size()); + }, + 0); tsk->wait(); ASSERT_EQ(tsk->get_aio_context()->buffer_size, appender->size()); ASSERT_EQ(tsk->get_aio_context()->file_offset, appender->start_offset() - _start_offset); diff --git a/src/replica/test/mock_utils.h b/src/replica/test/mock_utils.h index 9debd41eda..01c3bc2ae8 
100644 --- a/src/replica/test/mock_utils.h +++ b/src/replica/test/mock_utils.h @@ -423,8 +423,8 @@ class mock_mutation_log_private : public mutation_log_private std::vector &mutation_list) const override { for (auto &mu : _mu_list) { - ballot current_ballot = - (start_ballot == invalid_ballot) ? invalid_ballot : mu->get_ballot(); + ballot current_ballot = (start_ballot == invalid_ballot) ? invalid_ballot + : mu->get_ballot(); if ((mu->get_decree() >= start_decree && start_ballot == current_ballot) || current_ballot > start_ballot) { mutation_list.push_back(mu); diff --git a/src/replica/test/mutation_log_test.cpp b/src/replica/test/mutation_log_test.cpp index ef79601580..0009e800e7 100644 --- a/src/replica/test/mutation_log_test.cpp +++ b/src/replica/test/mutation_log_test.cpp @@ -547,12 +547,13 @@ TEST_P(mutation_log_test, reset_from) // reset from the tmp log dir. std::vector actual; - auto err = mlog->reset_from(_log_dir + ".tmp", - [&](int, mutation_ptr &mu) -> bool { - actual.push_back(mu); - return true; - }, - [](error_code err) { ASSERT_EQ(err, ERR_OK); }); + auto err = mlog->reset_from( + _log_dir + ".tmp", + [&](int, mutation_ptr &mu) -> bool { + actual.push_back(mu); + return true; + }, + [](error_code err) { ASSERT_EQ(err, ERR_OK); }); ASSERT_EQ(err, ERR_OK); ASSERT_EQ(actual.size(), expected.size()); @@ -593,12 +594,13 @@ TEST_P(mutation_log_test, reset_from_while_writing) // reset from the tmp log dir. 
std::vector actual; - auto err = mlog->reset_from(_log_dir + ".test", - [&](int, mutation_ptr &mu) -> bool { - actual.push_back(mu); - return true; - }, - [](error_code err) { ASSERT_EQ(err, ERR_OK); }); + auto err = mlog->reset_from( + _log_dir + ".test", + [&](int, mutation_ptr &mu) -> bool { + actual.push_back(mu); + return true; + }, + [](error_code err) { ASSERT_EQ(err, ERR_OK); }); ASSERT_EQ(err, ERR_OK); mlog->flush(); diff --git a/src/replica/test/replica_learn_test.cpp b/src/replica/test/replica_learn_test.cpp index 6e3a206d0f..b7ff94bc5c 100644 --- a/src/replica/test/replica_learn_test.cpp +++ b/src/replica/test/replica_learn_test.cpp @@ -131,7 +131,6 @@ class replica_learn_test : public duplication_test_base {0, invalid_decree, 5, 2, invalid_decree, 1}, // learn_start_decree_for_dup(3) > learn_start_decree_no_dup(2) {1, invalid_decree, 5, 2, invalid_decree, 2}, - }; int id = 1; @@ -149,8 +148,8 @@ class replica_learn_test : public duplication_test_base auto dup = create_test_duplicator(tt.min_confirmed_decree); add_dup(_replica.get(), std::move(dup)); - ASSERT_EQ(_replica->get_learn_start_decree(req), tt.wlearn_start_decree) << "case #" - << id; + ASSERT_EQ(_replica->get_learn_start_decree(req), tt.wlearn_start_decree) + << "case #" << id; id++; } } diff --git a/src/replica/test/replica_test.cpp b/src/replica/test/replica_test.cpp index a6b97b7f11..a6812ff4ff 100644 --- a/src/replica/test/replica_test.cpp +++ b/src/replica/test/replica_test.cpp @@ -340,9 +340,7 @@ TEST_P(replica_test, query_data_version_test) std::string expected_response_json; } tests[] = {{"", http_status_code::kBadRequest, "app_id should not be empty"}, {"wrong", http_status_code::kBadRequest, "invalid app_id=wrong"}, - {"2", - http_status_code::kOk, - R"({"1":{"data_version":"1"}})"}, + {"2", http_status_code::kOk, R"({"1":{"data_version":"1"}})"}, {"4", http_status_code::kNotFound, "app_id=4 not found"}}; for (const auto &test : tests) { http_request req; @@ -593,8 +591,8 @@ void 
replica_test::test_auto_trash(error_code ec) } ASSERT_EQ(moved_to_err_path, found_err_path); ASSERT_FALSE(has_gpid(_pid)); - ASSERT_EQ(moved_to_err_path, dn->status == disk_status::NORMAL) << moved_to_err_path << ", " - << enum_to_string(dn->status); + ASSERT_EQ(moved_to_err_path, dn->status == disk_status::NORMAL) + << moved_to_err_path << ", " << enum_to_string(dn->status); ASSERT_EQ(!moved_to_err_path, dn->status == disk_status::IO_ERROR) << moved_to_err_path << ", " << enum_to_string(dn->status); diff --git a/src/replica/test/replication_service_test_app.h b/src/replica/test/replication_service_test_app.h index 9d3edc3c42..3417b2ce28 100644 --- a/src/replica/test/replication_service_test_app.h +++ b/src/replica/test/replication_service_test_app.h @@ -27,8 +27,8 @@ #pragma once #include "replica/replication_service_app.h" -using ::dsn::replication::replication_service_app; using ::dsn::error_code; +using ::dsn::replication::replication_service_app; class replication_service_test_app : public replication_service_app { diff --git a/src/replica/test/throttling_controller_test.cpp b/src/replica/test/throttling_controller_test.cpp index 120ad0b4f8..499eaed51d 100644 --- a/src/replica/test/throttling_controller_test.cpp +++ b/src/replica/test/throttling_controller_test.cpp @@ -110,7 +110,11 @@ class throttling_controller_test : public ::testing::Test // invalid argument std::string test_cases_2[] = { - "20m*delay*100", "20B*delay*100", "20KB*delay*100", "20Mb*delay*100", "20MB*delay*100", + "20m*delay*100", + "20B*delay*100", + "20KB*delay*100", + "20Mb*delay*100", + "20MB*delay*100", }; for (const std::string &tc : test_cases_2) { ASSERT_FALSE(cntl.parse_from_env(tc, 4, parse_err, env_changed, old_value)); diff --git a/src/runtime/api_task.h b/src/runtime/api_task.h index 477dc4bef0..9db9c3f2fa 100644 --- a/src/runtime/api_task.h +++ b/src/runtime/api_task.h @@ -70,7 +70,7 @@ class raw_task; class rpc_request_task; class rpc_response_task; class aio_task; -} +} // 
namespace dsn /*! apps updates the value at dsn_task_queue_virtual_length_ptr(..) to control the length of a vitual queue (bound to current code + hash) to diff --git a/src/runtime/env.sim.h b/src/runtime/env.sim.h index 2785bb102e..4846d75618 100644 --- a/src/runtime/env.sim.h +++ b/src/runtime/env.sim.h @@ -41,5 +41,5 @@ class sim_env_provider : public env_provider private: static void on_worker_start(task_worker *worker); }; -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/fault_injector.cpp b/src/runtime/fault_injector.cpp index 653c052a45..c7d54e192f 100644 --- a/src/runtime/fault_injector.cpp +++ b/src/runtime/fault_injector.cpp @@ -354,5 +354,5 @@ void fault_injector::install(service_spec &spec) } fault_injector::fault_injector(const char *name) : toollet(name) {} -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/fault_injector.h b/src/runtime/fault_injector.h index cae753acf5..00ebb1e051 100644 --- a/src/runtime/fault_injector.h +++ b/src/runtime/fault_injector.h @@ -92,5 +92,5 @@ class fault_injector : public toollet explicit fault_injector(const char *name); void install(service_spec &spec) override; }; -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/nativerun.h b/src/runtime/nativerun.h index 2acdae6cca..d57b541699 100644 --- a/src/runtime/nativerun.h +++ b/src/runtime/nativerun.h @@ -43,5 +43,5 @@ class nativerun : public tool_app virtual void run() override; }; -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/runtime/node_scoper.h b/src/runtime/node_scoper.h index 5b43e2ca5b..d6000911b0 100644 --- a/src/runtime/node_scoper.h +++ b/src/runtime/node_scoper.h @@ -47,5 +47,5 @@ class node_scoper }; // ---- inline implementation ------ -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/runtime/pipeline.h b/src/runtime/pipeline.h index e4dbcd8d84..c538bf2efa 100644 --- 
a/src/runtime/pipeline.h +++ b/src/runtime/pipeline.h @@ -106,7 +106,7 @@ struct result // }); // ``` // - void step_down_next_stage(Args &&... args) + void step_down_next_stage(Args &&...args) { CHECK_NOTNULL(__func, "no next stage is linked"); __func(std::make_tuple(std::forward(args)...)); @@ -181,16 +181,14 @@ struct base : environment // link to node of existing pipeline if (next.__pipeline != nullptr) { - this_stage->__func = [next_ptr = &next](ArgsTupleType && args) mutable - { + this_stage->__func = [next_ptr = &next](ArgsTupleType &&args) mutable { absl::apply(&NextStage::async, std::tuple_cat(std::make_tuple(next_ptr), std::move(args))); }; } else { next.__conf = this_stage->__conf; next.__pipeline = this_stage->__pipeline; - this_stage->__func = [next_ptr = &next](ArgsTupleType && args) mutable - { + this_stage->__func = [next_ptr = &next](ArgsTupleType &&args) mutable { if (next_ptr->paused()) { return; } @@ -240,22 +238,23 @@ template struct when : environment { /// Run this stage within current context. - virtual void run(Args &&... in) = 0; + virtual void run(Args &&...in) = 0; - void repeat(Args &&... in, std::chrono::milliseconds delay_ms = 0_ms) + void repeat(Args &&...in, std::chrono::milliseconds delay_ms = 0_ms) { auto arg_tuple = std::make_tuple(this, std::forward(in)...); - schedule([ this, args = std::move(arg_tuple) ]() mutable { - if (paused()) { - return; - } - absl::apply(&when::run, std::move(args)); - }, - delay_ms); + schedule( + [this, args = std::move(arg_tuple)]() mutable { + if (paused()) { + return; + } + absl::apply(&when::run, std::move(args)); + }, + delay_ms); } /// Run this stage asynchronously in its environment. - void async(Args &&... 
in) { repeat(std::forward(in)...); } + void async(Args &&...in) { repeat(std::forward(in)...); } bool paused() const { return __pipeline->paused(); } @@ -279,9 +278,9 @@ inline void base::run_pipeline() template struct do_when : when { - explicit do_when(std::function &&func) : _cb(std::move(func)) {} + explicit do_when(std::function &&func) : _cb(std::move(func)) {} - void run(Args &&... args) override { _cb(std::forward(args)...); } + void run(Args &&...args) override { _cb(std::forward(args)...); } virtual ~do_when() = default; diff --git a/src/runtime/profiler.cpp b/src/runtime/profiler.cpp index 7571714d89..7852f1fcb1 100644 --- a/src/runtime/profiler.cpp +++ b/src/runtime/profiler.cpp @@ -370,7 +370,7 @@ metric_entity_ptr instantiate_profiler_metric_entity(const std::string &task_nam task_spec_profiler::task_spec_profiler(int code) : collect_call_count(false), is_profile(false), - call_counts(new std::atomic[ s_task_code_max + 1 ]), + call_counts(new std::atomic[s_task_code_max + 1]), _task_name(dsn::task_code(code).to_string()), _profiler_metric_entity(instantiate_profiler_metric_entity(_task_name)) { diff --git a/src/runtime/providers.common.h b/src/runtime/providers.common.h index 08190ae037..da2a3c36ec 100644 --- a/src/runtime/providers.common.h +++ b/src/runtime/providers.common.h @@ -30,4 +30,4 @@ namespace dsn { namespace tools { extern void register_common_providers(); } -} +} // namespace dsn diff --git a/src/runtime/rpc/dsn_message_parser.cpp b/src/runtime/rpc/dsn_message_parser.cpp index b9dc05524d..00dc6901eb 100644 --- a/src/runtime/rpc/dsn_message_parser.cpp +++ b/src/runtime/rpc/dsn_message_parser.cpp @@ -211,4 +211,4 @@ int dsn_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf *b return true; } } -} +} // namespace dsn diff --git a/src/runtime/rpc/dsn_message_parser.h b/src/runtime/rpc/dsn_message_parser.h index bdc16672de..f458f7a61f 100644 --- a/src/runtime/rpc/dsn_message_parser.h +++ 
b/src/runtime/rpc/dsn_message_parser.h @@ -1,28 +1,28 @@ /* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -*/ + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ #pragma once @@ -55,4 +55,4 @@ class dsn_message_parser : public message_parser private: bool _header_checked; }; -} +} // namespace dsn diff --git a/src/runtime/rpc/message_parser.cpp b/src/runtime/rpc/message_parser.cpp index e9c0da964c..e523635e99 100644 --- a/src/runtime/rpc/message_parser.cpp +++ b/src/runtime/rpc/message_parser.cpp @@ -200,4 +200,4 @@ message_parser *message_parser_manager::create_parser(network_header_format fmt) else return nullptr; } -} +} // namespace dsn diff --git a/src/runtime/rpc/message_parser_manager.h b/src/runtime/rpc/message_parser_manager.h index b3fc7fa4c9..670deb9826 100644 --- a/src/runtime/rpc/message_parser_manager.h +++ b/src/runtime/rpc/message_parser_manager.h @@ -58,4 +58,4 @@ class message_parser_manager : public utils::singleton std::vector _factory_vec; }; -} +} // namespace dsn diff --git a/src/runtime/rpc/network.sim.cpp b/src/runtime/rpc/network.sim.cpp index ca1986ae8b..595f86cb1d 100644 --- a/src/runtime/rpc/network.sim.cpp +++ b/src/runtime/rpc/network.sim.cpp @@ -195,5 +195,5 @@ uint32_t sim_network_provider::net_delay_milliseconds() const FLAGS_max_message_delay_microseconds) / 1000; } -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/rpc/raw_message_parser.cpp b/src/runtime/rpc/raw_message_parser.cpp index 53cde8de6c..dbe9f327e8 100644 --- a/src/runtime/rpc/raw_message_parser.cpp +++ b/src/runtime/rpc/raw_message_parser.cpp @@ -130,4 +130,4 @@ int 
raw_message_parser::get_buffers_on_send(message_ex *msg, send_buf *buffers) } return i; } -} +} // namespace dsn diff --git a/src/runtime/rpc/raw_message_parser.h b/src/runtime/rpc/raw_message_parser.h index 1b39a41768..c21f316c68 100644 --- a/src/runtime/rpc/raw_message_parser.h +++ b/src/runtime/rpc/raw_message_parser.h @@ -1,28 +1,28 @@ /* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. 
-*/ + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ #ifndef RAW_MESSAGE_PARSER_H #define RAW_MESSAGE_PARSER_H @@ -51,5 +51,5 @@ class raw_message_parser : public message_parser /*out*/ int &read_next) override; virtual int get_buffers_on_send(message_ex *msg, /*out*/ send_buf *buffers) override; }; -} +} // namespace dsn #endif // RAW_MESSAGE_PARSER_H diff --git a/src/runtime/rpc/rpc_address.h b/src/runtime/rpc/rpc_address.h index be36315843..6f1a216dcd 100644 --- a/src/runtime/rpc/rpc_address.h +++ b/src/runtime/rpc/rpc_address.h @@ -48,7 +48,8 @@ class TProtocol; } // namespace thrift } // namespace apache -typedef enum dsn_host_type_t { +typedef enum dsn_host_type_t +{ HOST_TYPE_INVALID = 0, HOST_TYPE_IPV4 = 1, HOST_TYPE_GROUP = 2, diff --git a/src/runtime/rpc/rpc_holder.h b/src/runtime/rpc/rpc_holder.h index 2aba608910..b1e4af7b84 100644 --- a/src/runtime/rpc/rpc_holder.h +++ b/src/runtime/rpc/rpc_holder.h @@ -168,8 +168,8 @@ class rpc_holder rpc_response_task_ptr t = rpc::create_rpc_response_task( dsn_request(), tracker, - [ cb_fwd = std::forward(callback), - rpc = *this ](error_code err, message_ex * req, message_ex * resp) mutable { + [cb_fwd = std::forward(callback), + rpc = *this](error_code err, message_ex *req, message_ex *resp) mutable { if (err == ERR_OK) { unmarshall(resp, rpc.response()); } @@ -200,8 +200,8 @@ class rpc_holder rpc_response_task_ptr t = rpc::create_rpc_response_task( dsn_request(), tracker, - [ cb_fwd = std::forward(callback), - rpc = *this ](error_code err, message_ex * req, message_ex * resp) mutable { + [cb_fwd = std::forward(callback), + rpc = *this](error_code err, message_ex *req, message_ex *resp) mutable { if (err == ERR_OK) { unmarshall(resp, rpc.response()); } diff --git a/src/runtime/scheduler.cpp b/src/runtime/scheduler.cpp index cfb6051880..440e105907 100644 --- a/src/runtime/scheduler.cpp +++ b/src/runtime/scheduler.cpp @@ -297,5 +297,5 @@ void scheduler::schedule() _is_scheduling = false; } -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git 
a/src/runtime/serverlet.h b/src/runtime/serverlet.h index 347d5f384a..90ff8e2e1f 100644 --- a/src/runtime/serverlet.h +++ b/src/runtime/serverlet.h @@ -247,4 +247,4 @@ inline void serverlet::reply(dsn::message_ex *request, const TResponse &resp) dsn_rpc_reply(msg); } /*@}*/ -} // end namespace +} // namespace dsn diff --git a/src/runtime/simulator.h b/src/runtime/simulator.h index d48f59c5e1..0ca2e5eb68 100644 --- a/src/runtime/simulator.h +++ b/src/runtime/simulator.h @@ -54,6 +54,7 @@ class checker virtual void initialize(const std::string &name, const std::vector &apps) = 0; virtual void check() = 0; const std::string &name() const { return _name; } + protected: std::vector _apps; std::string _name; @@ -72,5 +73,5 @@ class simulator : public tool_app }; // ---- inline implementation ------ -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/async_calls.h b/src/runtime/task/async_calls.h index e991e32276..4d279a613e 100644 --- a/src/runtime/task/async_calls.h +++ b/src/runtime/task/async_calls.h @@ -131,7 +131,7 @@ create_rpc_response_task(dsn::message_ex *req, req, tracker, [cb_fwd = std::move(callback)]( - error_code err, dsn::message_ex * req, dsn::message_ex * resp) mutable { + error_code err, dsn::message_ex *req, dsn::message_ex *resp) mutable { typename is_typed_rpc_callback::response_t response = {}; if (err == ERR_OK) { unmarshall(resp, response); diff --git a/src/runtime/task/future_types.h b/src/runtime/task/future_types.h index 1a88c8d426..1f6251c711 100644 --- a/src/runtime/task/future_types.h +++ b/src/runtime/task/future_types.h @@ -34,4 +34,4 @@ namespace dsn { typedef std::function err_callback; typedef future_task error_code_future; typedef dsn::ref_ptr error_code_future_ptr; -} +} // namespace dsn diff --git a/src/runtime/task/hpc_task_queue.cpp b/src/runtime/task/hpc_task_queue.cpp index 8af5103ca6..7249caa8b2 100644 --- a/src/runtime/task/hpc_task_queue.cpp +++ 
b/src/runtime/task/hpc_task_queue.cpp @@ -80,5 +80,5 @@ task *hpc_concurrent_task_queue::dequeue(int &batch_size) } while (count != 0); return head; } -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/hpc_task_queue.h b/src/runtime/task/hpc_task_queue.h index 8a53697eb4..7b60a58be1 100644 --- a/src/runtime/task/hpc_task_queue.h +++ b/src/runtime/task/hpc_task_queue.h @@ -52,5 +52,5 @@ class hpc_concurrent_task_queue : public task_queue task *dequeue(/*inout*/ int &batch_size) override; }; -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/task.h b/src/runtime/task/task.h index 9fbe022dd8..68b41bf625 100644 --- a/src/runtime/task/task.h +++ b/src/runtime/task/task.h @@ -249,7 +249,7 @@ class task : public ref_counter, public extensible_object static void set_tls_dsn_context( service_node *node, // cannot be null task_worker *worker // null for io or timer threads if they are not worker threads - ); + ); protected: void enqueue(task_worker_pool *pool); @@ -392,13 +392,13 @@ class future_task : public task } virtual void exec() override { absl::apply(_cb, std::move(_values)); } - void enqueue_with(const First &t, const Remaining &... r, int delay_ms = 0) + void enqueue_with(const First &t, const Remaining &...r, int delay_ms = 0) { _values = std::make_tuple(t, r...); set_delay(delay_ms); enqueue(); } - void enqueue_with(First &&t, Remaining &&... 
r, int delay_ms = 0) + void enqueue_with(First &&t, Remaining &&...r, int delay_ms = 0) { _values = std::make_tuple(std::move(t), std::forward(r)...); set_delay(delay_ms); diff --git a/src/runtime/task/task_code.cpp b/src/runtime/task/task_code.cpp index 9abf2193fa..1f36264443 100644 --- a/src/runtime/task/task_code.cpp +++ b/src/runtime/task/task_code.cpp @@ -67,7 +67,7 @@ void task_code_mgr::register_commands() return ss.str(); })); } -} +} // namespace utils /*static*/ int task_code::max() { return task_code_mgr::instance().max_value(); } @@ -123,4 +123,4 @@ const char *task_code::to_string() const { return task_code_mgr::instance().get_name(_internal_code); } -} +} // namespace dsn diff --git a/src/runtime/task/task_code.h b/src/runtime/task/task_code.h index 2dd03bf701..ae7c258bf2 100644 --- a/src/runtime/task/task_code.h +++ b/src/runtime/task/task_code.h @@ -43,7 +43,8 @@ class TProtocol; } // namespace thrift } // namespace apache -typedef enum dsn_task_type_t { +typedef enum dsn_task_type_t +{ TASK_TYPE_RPC_REQUEST, ///< task handling rpc request TASK_TYPE_RPC_RESPONSE, ///< task handling rpc response or timeout TASK_TYPE_COMPUTE, ///< async calls or timers @@ -64,7 +65,8 @@ ENUM_REG(TASK_TYPE_AIO) ENUM_REG(TASK_TYPE_CONTINUATION) ENUM_END(dsn_task_type_t) -typedef enum dsn_task_priority_t { +typedef enum dsn_task_priority_t +{ TASK_PRIORITY_LOW, TASK_PRIORITY_COMMON, TASK_PRIORITY_HIGH, diff --git a/src/runtime/task/task_engine.cpp b/src/runtime/task/task_engine.cpp index 179b045626..138afedac7 100644 --- a/src/runtime/task/task_engine.cpp +++ b/src/runtime/task/task_engine.cpp @@ -139,10 +139,10 @@ void task_worker_pool::add_timer(task *t) CHECK_GT_MSG( t->delay_milliseconds(), 0, "task delayed should be dispatched to timer service first"); - unsigned int idx = (_spec.partitioned - ? static_cast(t->hash()) % - static_cast(_per_queue_timer_svcs.size()) - : 0); + unsigned int idx = + (_spec.partitioned ? 
static_cast(t->hash()) % + static_cast(_per_queue_timer_svcs.size()) + : 0); _per_queue_timer_svcs[idx]->add_timer(t); } @@ -157,10 +157,9 @@ void task_worker_pool::enqueue(task *t) "worker pool {} must be started before enqueue task {}", spec().name, t->spec().name); - unsigned int idx = - (_spec.partitioned - ? static_cast(t->hash()) % static_cast(_queues.size()) - : 0); + unsigned int idx = (_spec.partitioned ? static_cast(t->hash()) % + static_cast(_queues.size()) + : 0); return _queues[idx]->enqueue_internal(t); } diff --git a/src/runtime/task/task_engine.sim.cpp b/src/runtime/task/task_engine.sim.cpp index 1424585221..1703001817 100644 --- a/src/runtime/task/task_engine.sim.cpp +++ b/src/runtime/task/task_engine.sim.cpp @@ -247,5 +247,5 @@ void sim_lock_nr_provider::unlock() _current_holder = -1; _sema.signal(1); } -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/task_engine.sim.h b/src/runtime/task/task_engine.sim.h index 85ded58eab..e2abe08f74 100644 --- a/src/runtime/task/task_engine.sim.h +++ b/src/runtime/task/task_engine.sim.h @@ -142,5 +142,5 @@ class sim_rwlock_nr_provider : public rwlock_nr_provider private: sim_lock_nr_provider _l; }; -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/task_spec.cpp b/src/runtime/task/task_spec.cpp index f31b3598da..096b0bd1b0 100644 --- a/src/runtime/task/task_spec.cpp +++ b/src/runtime/task/task_spec.cpp @@ -288,4 +288,4 @@ bool threadpool_spec::init(/*out*/ std::vector &specs) return true; } -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/task_spec.h b/src/runtime/task/task_spec.h index 3b31537e04..058beedced 100644 --- a/src/runtime/task/task_spec.h +++ b/src/runtime/task/task_spec.h @@ -61,7 +61,8 @@ ENUM_REG(TASK_STATE_FINISHED) ENUM_REG(TASK_STATE_CANCELLED) ENUM_END(task_state) -typedef enum grpc_mode_t { +typedef enum grpc_mode_t +{ GRPC_TO_LEADER, // the rpc is sent to the leader (if exist) 
GRPC_TO_ALL, // the rpc is sent to all GRPC_TO_ANY, // the rpc is sent to one of the group member @@ -76,7 +77,8 @@ ENUM_REG(GRPC_TO_ALL) ENUM_REG(GRPC_TO_ANY) ENUM_END(grpc_mode_t) -typedef enum throttling_mode_t { +typedef enum throttling_mode_t +{ TM_NONE, // no throttling applied TM_REJECT, // reject the incoming request TM_DELAY, // delay network receive ops to reducing incoming rate @@ -91,7 +93,8 @@ ENUM_REG(TM_REJECT) ENUM_REG(TM_DELAY) ENUM_END(throttling_mode_t) -typedef enum dsn_msg_serialize_format { +typedef enum dsn_msg_serialize_format +{ DSF_INVALID = 0, DSF_THRIFT_BINARY = 1, DSF_THRIFT_COMPACT = 2, @@ -317,4 +320,4 @@ CONFIG_FLD(bool, "greater than its timeout value") CONFIG_END -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/task_tracker.cpp b/src/runtime/task/task_tracker.cpp index fbb0dde4fa..23242d6376 100644 --- a/src/runtime/task/task_tracker.cpp +++ b/src/runtime/task/task_tracker.cpp @@ -166,4 +166,4 @@ int task_tracker::cancel_but_not_wait_outstanding_tasks() } return not_finished; } -} +} // namespace dsn diff --git a/src/runtime/task/task_tracker.h b/src/runtime/task/task_tracker.h index 11a9ffc6fb..dd510f7ffc 100644 --- a/src/runtime/task/task_tracker.h +++ b/src/runtime/task/task_tracker.h @@ -226,4 +226,4 @@ inline void trackable_task::owner_delete_commit() _deleting_owner.store(OWNER_DELETE_FINISHED, std::memory_order_relaxed); } -} +} // namespace dsn diff --git a/src/runtime/task/task_worker.cpp b/src/runtime/task/task_worker.cpp index 83b8aa4b96..5b5cf2063c 100644 --- a/src/runtime/task/task_worker.cpp +++ b/src/runtime/task/task_worker.cpp @@ -259,4 +259,4 @@ void task_worker::loop() const threadpool_spec &task_worker::pool_spec() const { return pool()->spec(); } -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/task_worker.h b/src/runtime/task/task_worker.h index 426491508b..bf62852491 100644 --- a/src/runtime/task/task_worker.h +++ b/src/runtime/task/task_worker.h @@ -108,4 +108,4 @@ 
class task_worker : public extensible_object /*@}*/ }; /*@}*/ -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/timer_service.h b/src/runtime/task/timer_service.h index 86f4b775c9..20118898e3 100644 --- a/src/runtime/task/timer_service.h +++ b/src/runtime/task/timer_service.h @@ -67,4 +67,4 @@ class timer_service service_node *_node; }; /*@}*/ -} // end namespace +} // namespace dsn diff --git a/src/runtime/test/async_call.cpp b/src/runtime/test/async_call.cpp index 38398fd184..7a32736ba2 100644 --- a/src/runtime/test/async_call.cpp +++ b/src/runtime/test/async_call.cpp @@ -95,24 +95,27 @@ TEST(async_call, task_call) /* task tracking */ tc = new tracker_class(); std::vector test_tasks; - t = tasking::enqueue(LPC_TEST_CLIENTLET, - &tc->_tracker, - [=] { tc->callback_function1(); }, - 0, - std::chrono::seconds(30)); + t = tasking::enqueue( + LPC_TEST_CLIENTLET, + &tc->_tracker, + [=] { tc->callback_function1(); }, + 0, + std::chrono::seconds(30)); test_tasks.push_back(t); - t = tasking::enqueue(LPC_TEST_CLIENTLET, - &tc->_tracker, - [tc] { tc->callback_function1(); }, - 0, - std::chrono::seconds(30)); + t = tasking::enqueue( + LPC_TEST_CLIENTLET, + &tc->_tracker, + [tc] { tc->callback_function1(); }, + 0, + std::chrono::seconds(30)); test_tasks.push_back(t); - t = tasking::enqueue_timer(LPC_TEST_CLIENTLET, - &tc->_tracker, - [tc] { tc->callback_function1(); }, - std::chrono::seconds(20), - 0, - std::chrono::seconds(30)); + t = tasking::enqueue_timer( + LPC_TEST_CLIENTLET, + &tc->_tracker, + [tc] { tc->callback_function1(); }, + std::chrono::seconds(20), + 0, + std::chrono::seconds(30)); test_tasks.push_back(t); delete tc; diff --git a/src/runtime/test/sim_lock.cpp b/src/runtime/test/sim_lock.cpp index 7bde41bf28..30c18aa7eb 100644 --- a/src/runtime/test/sim_lock.cpp +++ b/src/runtime/test/sim_lock.cpp @@ -86,7 +86,7 @@ namespace dsn { namespace test { typedef std::function system_callback; } -} +} // namespace dsn TEST(tools_simulator, scheduler) 
{ if (dsn::task::get_current_worker() == nullptr) diff --git a/src/runtime/threadpool_code.cpp b/src/runtime/threadpool_code.cpp index 48a050e228..dcc044306b 100644 --- a/src/runtime/threadpool_code.cpp +++ b/src/runtime/threadpool_code.cpp @@ -50,4 +50,4 @@ const char *threadpool_code::to_string() const { return dsn::utils::customized_id_mgr::instance().get_name(_internal_code); } -} +} // namespace dsn diff --git a/src/server/available_detector.cpp b/src/server/available_detector.cpp index 67754149d7..b61b2f078d 100644 --- a/src/server/available_detector.cpp +++ b/src/server/available_detector.cpp @@ -260,7 +260,7 @@ void available_detector::report_availability_info() std::chrono::minutes(1), 0, std::chrono::minutes(2) // waiting for pegasus finishing start. - ); + ); } bool available_detector::generate_hash_keys() @@ -326,8 +326,9 @@ void available_detector::on_detect(int32_t idx) _recent_minute_detect_times.fetch_add(1); // define async_get callback function. - auto async_get_callback = [this, idx]( - int err, std::string &&_value, pegasus_client::internal_info &&info) { + auto async_get_callback = [this, idx](int err, + std::string &&_value, + pegasus_client::internal_info &&info) { std::atomic &cnt = (*_fail_count[idx]); if (err != PERR_OK) { int prev = cnt.fetch_add(1); @@ -350,10 +351,8 @@ void available_detector::on_detect(int32_t idx) }; // define async_set callback function. 
- auto async_set_callback = - [ this, idx, user_async_get_callback = std::move(async_get_callback) ]( - int err, pegasus_client::internal_info &&info) - { + auto async_set_callback = [this, idx, user_async_get_callback = std::move(async_get_callback)]( + int err, pegasus_client::internal_info &&info) { std::atomic &cnt = (*_fail_count[idx]); if (err != PERR_OK) { int prev = cnt.fetch_add(1); diff --git a/src/server/info_collector.cpp b/src/server/info_collector.cpp index aa77a5d3c5..c2b1fe0943 100644 --- a/src/server/info_collector.cpp +++ b/src/server/info_collector.cpp @@ -118,13 +118,13 @@ info_collector::~info_collector() void info_collector::start() { - _app_stat_timer_task = - ::dsn::tasking::enqueue_timer(LPC_PEGASUS_APP_STAT_TIMER, - &_tracker, - [this] { on_app_stat(); }, - std::chrono::seconds(FLAGS_app_stat_interval_seconds), - 0, - std::chrono::minutes(1)); + _app_stat_timer_task = ::dsn::tasking::enqueue_timer( + LPC_PEGASUS_APP_STAT_TIMER, + &_tracker, + [this] { on_app_stat(); }, + std::chrono::seconds(FLAGS_app_stat_interval_seconds), + 0, + std::chrono::minutes(1)); _capacity_unit_stat_timer_task = ::dsn::tasking::enqueue_timer( LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER, @@ -272,11 +272,12 @@ void info_collector::on_capacity_unit_stat(int remaining_retry_count) "wait {} seconds to retry", remaining_retry_count, _capacity_unit_retry_wait_seconds); - ::dsn::tasking::enqueue(LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER, - &_tracker, - [=] { on_capacity_unit_stat(remaining_retry_count - 1); }, - 0, - std::chrono::seconds(_capacity_unit_retry_wait_seconds)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER, + &_tracker, + [=] { on_capacity_unit_stat(remaining_retry_count - 1); }, + 0, + std::chrono::seconds(_capacity_unit_retry_wait_seconds)); } else { LOG_ERROR("get capacity unit stat failed, remaining_retry_count = 0, no retry anymore"); } @@ -319,11 +320,12 @@ void info_collector::on_storage_size_stat(int remaining_retry_count) "seconds to 
retry", remaining_retry_count, _storage_size_retry_wait_seconds); - ::dsn::tasking::enqueue(LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER, - &_tracker, - [=] { on_storage_size_stat(remaining_retry_count - 1); }, - 0, - std::chrono::seconds(_storage_size_retry_wait_seconds)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER, + &_tracker, + [=] { on_storage_size_stat(remaining_retry_count - 1); }, + 0, + std::chrono::seconds(_storage_size_retry_wait_seconds)); } else { LOG_ERROR("get storage size stat failed, remaining_retry_count = 0, no retry anymore"); } diff --git a/src/server/info_collector_app.cpp b/src/server/info_collector_app.cpp index 398f8bd5b1..bf70b39cd3 100644 --- a/src/server/info_collector_app.cpp +++ b/src/server/info_collector_app.cpp @@ -53,5 +53,5 @@ ::dsn::error_code info_collector_app::stop(bool cleanup) _detector.stop(); return ::dsn::ERR_OK; } -} -} // namespace +} // namespace server +} // namespace pegasus diff --git a/src/server/info_collector_app.h b/src/server/info_collector_app.h index b731f0e483..73dc958f31 100644 --- a/src/server/info_collector_app.h +++ b/src/server/info_collector_app.h @@ -43,5 +43,5 @@ class info_collector_app : public ::dsn::service_app info_collector _collector; available_detector _detector; }; -} -} // namespace +} // namespace server +} // namespace pegasus diff --git a/src/server/pegasus_mutation_duplicator.cpp b/src/server/pegasus_mutation_duplicator.cpp index 849fa56b3c..6b29e4e3ec 100644 --- a/src/server/pegasus_mutation_duplicator.cpp +++ b/src/server/pegasus_mutation_duplicator.cpp @@ -171,11 +171,12 @@ void pegasus_mutation_duplicator::send(uint64_t hash, callback cb) _inflights[hash].pop_front(); } - _client->async_duplicate(rpc, - [hash, cb, rpc, this](dsn::error_code err) mutable { - on_duplicate_reply(hash, std::move(cb), std::move(rpc), err); - }, - _env.__conf.tracker); + _client->async_duplicate( + rpc, + [hash, cb, rpc, this](dsn::error_code err) mutable { + on_duplicate_reply(hash, 
std::move(cb), std::move(rpc), err); + }, + _env.__conf.tracker); } void pegasus_mutation_duplicator::on_duplicate_reply(uint64_t hash, diff --git a/src/server/pegasus_scan_context.h b/src/server/pegasus_scan_context.h index ebf4399080..4e019afeca 100644 --- a/src/server/pegasus_scan_context.h +++ b/src/server/pegasus_scan_context.h @@ -138,5 +138,5 @@ class pegasus_context_cache std::unordered_map> _map; ::dsn::utils::ex_lock_nr_spin _lock; }; -} -} +} // namespace server +} // namespace pegasus diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index 6a76679eba..e3a75c2fef 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -1372,11 +1372,12 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc) // if the context is used, it will be fetched and re-put into cache, // which will change the handle, // then the delayed task will fetch null context by old handle, and do nothing. - ::dsn::tasking::enqueue(LPC_PEGASUS_SERVER_DELAY, - &_tracker, - [this, handle]() { _context_cache.fetch(handle); }, - 0, - std::chrono::minutes(5)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_SERVER_DELAY, + &_tracker, + [this, handle]() { _context_cache.fetch(handle); }, + 0, + std::chrono::minutes(5)); } else { // scan completed resp.context_id = pegasus_scan_context::SCAN_CONTEXT_ID_COMPLETED; @@ -1517,11 +1518,12 @@ void pegasus_server_impl::on_scan(scan_rpc rpc) // scan not completed int64_t handle = _context_cache.put(std::move(context)); resp.context_id = handle; - ::dsn::tasking::enqueue(LPC_PEGASUS_SERVER_DELAY, - &_tracker, - [this, handle]() { _context_cache.fetch(handle); }, - 0, - std::chrono::minutes(5)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_SERVER_DELAY, + &_tracker, + [this, handle]() { _context_cache.fetch(handle); }, + 0, + std::chrono::minutes(5)); } else { // scan completed resp.context_id = pegasus_scan_context::SCAN_CONTEXT_ID_COMPLETED; @@ -1805,11 +1807,11 @@ dsn::error_code 
pegasus_server_impl::start(int argc, char **argv) } LOG_DEBUG_PREFIX("start the update replica-level rocksdb statistics timer task"); - _update_replica_rdb_stat = - dsn::tasking::enqueue_timer(LPC_REPLICATION_LONG_COMMON, - &_tracker, - [this]() { this->update_replica_rocksdb_statistics(); }, - std::chrono::seconds(FLAGS_update_rdb_stat_interval)); + _update_replica_rdb_stat = dsn::tasking::enqueue_timer( + LPC_REPLICATION_LONG_COMMON, + &_tracker, + [this]() { this->update_replica_rocksdb_statistics(); }, + std::chrono::seconds(FLAGS_update_rdb_stat_interval)); // These counters are singletons on this server shared by all replicas, their metrics update // task should be scheduled once an interval on the server view. @@ -1835,15 +1837,17 @@ dsn::error_code pegasus_server_impl::start(int argc, char **argv) this, _read_hotkey_collector, _write_hotkey_collector, _read_size_throttling_controller); _server_write = std::make_unique(this); - dsn::tasking::enqueue_timer(LPC_ANALYZE_HOTKEY, - &_tracker, - [this]() { _read_hotkey_collector->analyse_data(); }, - std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); + dsn::tasking::enqueue_timer( + LPC_ANALYZE_HOTKEY, + &_tracker, + [this]() { _read_hotkey_collector->analyse_data(); }, + std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); - dsn::tasking::enqueue_timer(LPC_ANALYZE_HOTKEY, - &_tracker, - [this]() { _write_hotkey_collector->analyse_data(); }, - std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); + dsn::tasking::enqueue_timer( + LPC_ANALYZE_HOTKEY, + &_tracker, + [this]() { _write_hotkey_collector->analyse_data(); }, + std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); return dsn::ERR_OK; } @@ -3378,14 +3382,15 @@ uint64_t pegasus_server_impl::do_manual_compact(const rocksdb::CompactRangeOptio // we will try to generate it again, and it will probably succeed because at least some // empty data is written into rocksdb by periodic group check. 
LOG_INFO_PREFIX("release storage failed after manual compact, will retry after 5 minutes"); - ::dsn::tasking::enqueue(LPC_PEGASUS_SERVER_DELAY, - &_tracker, - [this]() { - LOG_INFO_PREFIX("retry release storage after manual compact"); - release_storage_after_manual_compact(); - }, - 0, - std::chrono::minutes(5)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_SERVER_DELAY, + &_tracker, + [this]() { + LOG_INFO_PREFIX("retry release storage after manual compact"); + release_storage_after_manual_compact(); + }, + 0, + std::chrono::minutes(5)); } // update rocksdb statistics immediately diff --git a/src/server/pegasus_server_impl.h b/src/server/pegasus_server_impl.h index d902e647dc..be664105b2 100644 --- a/src/server/pegasus_server_impl.h +++ b/src/server/pegasus_server_impl.h @@ -393,8 +393,8 @@ class pegasus_server_impl : public pegasus_read_service bool check_value_if_nearby(uint64_t base_value, uint64_t check_value) { uint64_t gap = base_value / 4; - uint64_t actual_gap = - (base_value < check_value) ? check_value - base_value : base_value - check_value; + uint64_t actual_gap = (base_value < check_value) ? check_value - base_value + : base_value - check_value; return actual_gap <= gap; } diff --git a/src/server/pegasus_write_service_impl.h b/src/server/pegasus_write_service_impl.h index 6eec4b7601..2169b273e2 100644 --- a/src/server/pegasus_write_service_impl.h +++ b/src/server/pegasus_write_service_impl.h @@ -342,8 +342,8 @@ class pegasus_write_service::impl : public dsn::replication::replica_base if (!passed) { // check not passed, return proper error code to user - resp.error = - invalid_argument ? rocksdb::Status::kInvalidArgument : rocksdb::Status::kTryAgain; + resp.error = invalid_argument ? 
rocksdb::Status::kInvalidArgument + : rocksdb::Status::kTryAgain; } return rocksdb::Status::kOk; @@ -468,8 +468,8 @@ class pegasus_write_service::impl : public dsn::replication::replica_base if (!passed) { // check not passed, return proper error code to user - resp.error = - invalid_argument ? rocksdb::Status::kInvalidArgument : rocksdb::Status::kTryAgain; + resp.error = invalid_argument ? rocksdb::Status::kInvalidArgument + : rocksdb::Status::kTryAgain; } return rocksdb::Status::kOk; } diff --git a/src/server/test/pegasus_mutation_duplicator_test.cpp b/src/server/test/pegasus_mutation_duplicator_test.cpp index aaf91a0657..e93e3bfc3e 100644 --- a/src/server/test/pegasus_mutation_duplicator_test.cpp +++ b/src/server/test/pegasus_mutation_duplicator_test.cpp @@ -116,12 +116,13 @@ class pegasus_mutation_duplicator_test : public pegasus_server_test_base total_shipped_size += rpc.dsn_request()->body_size() + rpc.dsn_request()->header->hdr_length; - duplicator_impl->on_duplicate_reply(get_hash(rpc), - [total_shipped_size](size_t final_size) { - ASSERT_EQ(total_shipped_size, final_size); - }, - rpc, - dsn::ERR_OK); + duplicator_impl->on_duplicate_reply( + get_hash(rpc), + [total_shipped_size](size_t final_size) { + ASSERT_EQ(total_shipped_size, final_size); + }, + rpc, + dsn::ERR_OK); // schedule next round _tracker.wait_outstanding_tasks(); @@ -190,7 +191,8 @@ class pegasus_mutation_duplicator_test : public pegasus_server_test_base // with other error rpc.response().error = PERR_INVALID_ARGUMENT; - duplicator_impl->on_duplicate_reply(get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); + duplicator_impl->on_duplicate_reply( + get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); _tracker.wait_outstanding_tasks(); ASSERT_EQ(duplicator_impl->_inflights.size(), 1); ASSERT_EQ(duplicate_rpc::mail_box().size(), 1); @@ -260,7 +262,8 @@ class pegasus_mutation_duplicator_test : public pegasus_server_test_base auto rpc_list = std::move(duplicate_rpc::mail_box()); for (const auto &rpc : 
rpc_list) { rpc.response().error = dsn::ERR_OK; - duplicator_impl->on_duplicate_reply(get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); + duplicator_impl->on_duplicate_reply( + get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); } _tracker.wait_outstanding_tasks(); ASSERT_EQ(duplicate_rpc::mail_box().size(), 0); diff --git a/src/server/test/pegasus_server_impl_test.cpp b/src/server/test/pegasus_server_impl_test.cpp index 1c57922c48..f2a4915750 100644 --- a/src/server/test/pegasus_server_impl_test.cpp +++ b/src/server/test/pegasus_server_impl_test.cpp @@ -104,7 +104,8 @@ class pegasus_server_impl_test : public pegasus_server_test_base std::string env_value; std::string expect_value; } tests[] = { - {"rocksdb.num_levels", "5", "5"}, {"rocksdb.write_buffer_size", "33554432", "33554432"}, + {"rocksdb.num_levels", "5", "5"}, + {"rocksdb.write_buffer_size", "33554432", "33554432"}, }; std::map all_test_envs; diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h index 250e28fec4..0411f5c170 100644 --- a/src/shell/command_helper.h +++ b/src/shell/command_helper.h @@ -375,7 +375,8 @@ inline void scan_multi_data_next(scan_data_context *context) context->sema.wait(); auto callback = [context]( - int err, pegasus::pegasus_client::internal_info &&info) { + int err, + pegasus::pegasus_client::internal_info &&info) { if (err != pegasus::PERR_OK) { if (!context->split_completed.exchange(true)) { fprintf(stderr, @@ -454,28 +455,29 @@ inline void scan_data_next(scan_data_context *context) if (ts_expired) { scan_data_next(context); } else if (context->no_overwrite) { - auto callback = [context]( - int err, - pegasus::pegasus_client::check_and_set_results &&results, - pegasus::pegasus_client::internal_info &&info) { - if (err != pegasus::PERR_OK) { - if (!context->split_completed.exchange(true)) { - fprintf(stderr, + auto callback = + [context](int err, + pegasus::pegasus_client::check_and_set_results &&results, + pegasus::pegasus_client::internal_info &&info) { + if (err 
!= pegasus::PERR_OK) { + if (!context->split_completed.exchange(true)) { + fprintf( + stderr, "ERROR: split[%d] async check and set failed: %s\n", context->split_id, context->client->get_error_string(err)); - context->error_occurred->store(true); - } - } else { - if (results.set_succeed) { - context->split_rows++; + context->error_occurred->store(true); + } + } else { + if (results.set_succeed) { + context->split_rows++; + } + scan_data_next(context); } - scan_data_next(context); - } - // should put "split_request_count--" at end of the scope, - // to prevent that split_request_count becomes 0 in the middle. - context->split_request_count--; - }; + // should put "split_request_count--" at end of the scope, + // to prevent that split_request_count becomes 0 in the middle. + context->split_request_count--; + }; pegasus::pegasus_client::check_and_set_options options; options.set_value_ttl_seconds = ttl_seconds; context->client->async_check_and_set( @@ -824,7 +826,7 @@ class aggregate_stats_calcs #define DEF_CALC_CREATOR(name) \ template \ - void create_##name(Args &&... 
args) \ + void create_##name(Args &&...args) \ { \ _##name = std::make_unique(std::forward(args)...); \ } @@ -1472,7 +1474,7 @@ inline dsn::metric_filters row_data_filters(int32_t table_id) #define BIND_ROW(metric_name, member) \ { \ - #metric_name, &row.member \ +#metric_name, &row.member \ } inline stat_var_map create_sums(row_data &row) @@ -1572,7 +1574,9 @@ inline std::unique_ptr create_table_aggregate_stats_calcs for (auto &row : rows) { const std::vector>> processors = { - {&sums, create_sums}, {&increases, create_increases}, {&rates, create_rates}, + {&sums, create_sums}, + {&increases, create_increases}, + {&rates, create_rates}, }; for (auto &processor : processors) { // Put both dimensions of table id and metric name into filters for each kind of @@ -1624,7 +1628,9 @@ create_partition_aggregate_stats_calcs(const int32_t table_id, const std::vector>> processors = { - {&sums, create_sums}, {&increases, create_increases}, {&rates, create_rates}, + {&sums, create_sums}, + {&increases, create_increases}, + {&rates, create_rates}, }; for (auto &processor : processors) { // Put all dimensions of table id, partition_id, and metric name into filters for diff --git a/src/shell/commands/bulk_load.cpp b/src/shell/commands/bulk_load.cpp index e5b0265e3d..8a57fe21a2 100644 --- a/src/shell/commands/bulk_load.cpp +++ b/src/shell/commands/bulk_load.cpp @@ -165,8 +165,8 @@ bool control_bulk_load_helper(command_executor *e, err = dsn::error_s::make(err_resp.get_value().err); hint_msg = err_resp.get_value().hint_msg; } - std::string type_str = - type == dsn::replication::bulk_load_control_type::BLC_PAUSE ? "pause" : "restart"; + std::string type_str = type == dsn::replication::bulk_load_control_type::BLC_PAUSE ? 
"pause" + : "restart"; if (!err.is_ok()) { fmt::print( stderr, "{} bulk load failed, error={} [hint:\"{}\"]\n", type_str, err, hint_msg); diff --git a/src/shell/commands/debugger.cpp b/src/shell/commands/debugger.cpp index 183814eb58..100c6ef75d 100644 --- a/src/shell/commands/debugger.cpp +++ b/src/shell/commands/debugger.cpp @@ -143,8 +143,10 @@ bool mlog_dump(command_executor *e, shell_context *sc, arguments args) std::function callback; if (detailed) { - callback = [&os, sc]( - int64_t decree, int64_t timestamp, dsn::message_ex **requests, int count) mutable { + callback = [&os, sc](int64_t decree, + int64_t timestamp, + dsn::message_ex **requests, + int count) mutable { for (int i = 0; i < count; ++i) { dsn::message_ex *request = requests[i]; CHECK_NOTNULL(request, ""); @@ -205,8 +207,8 @@ bool mlog_dump(command_executor *e, shell_context *sc, arguments args) } else if (msg->local_rpc_code == ::dsn::apps::RPC_RRDB_RRDB_CHECK_AND_SET) { dsn::apps::check_and_set_request update; dsn::unmarshall(request, update); - auto set_sort_key = - update.set_diff_sort_key ? update.set_sort_key : update.check_sort_key; + auto set_sort_key = update.set_diff_sort_key ? update.set_sort_key + : update.check_sort_key; std::string check_operand; if (pegasus::cas_is_check_operand_needed(update.check_type)) { check_operand = fmt::format( diff --git a/src/shell/commands/local_partition_split.cpp b/src/shell/commands/local_partition_split.cpp index 6cc4726fa8..f880f42464 100644 --- a/src/shell/commands/local_partition_split.cpp +++ b/src/shell/commands/local_partition_split.cpp @@ -240,11 +240,10 @@ bool split_file(const LocalPartitionSplitContext &lpsc, const auto &svalue = iter->value(); // Skip empty write, see: // https://pegasus.apache.org/zh/2018/03/07/last_flushed_decree.html. 
- if (skey.empty() && - pegasus::value_schema_manager::instance() - .get_value_schema(pegasus_data_version) - ->extract_user_data(svalue.ToString()) - .empty()) { + if (skey.empty() && pegasus::value_schema_manager::instance() + .get_value_schema(pegasus_data_version) + ->extract_user_data(svalue.ToString()) + .empty()) { continue; } @@ -737,8 +736,7 @@ bool local_partition_split(command_executor *e, shell_context *sc, arguments arg tp.add_column("key_count"); for (const auto &ddsr : ddsrs) { for (const auto &psr : ddsr.psrs) { - for (const auto & [ new_dst_replica_dir, key_count ] : - psr.key_count_by_dst_replica_dirs) { + for (const auto &[new_dst_replica_dir, key_count] : psr.key_count_by_dst_replica_dirs) { tp.add_row(psr.src_replica_dir); tp.append_data(new_dst_replica_dir); tp.append_data(psr.success); diff --git a/src/shell/main.cpp b/src/shell/main.cpp index cb333a1984..e8533d0d8d 100644 --- a/src/shell/main.cpp +++ b/src/shell/main.cpp @@ -60,10 +60,16 @@ bool help_info(command_executor *e, shell_context *sc, arguments args) static command_executor commands[] = { { - "help", "print help info", "", help_info, + "help", + "print help info", + "", + help_info, }, { - "version", "get the shell version", "", version, + "version", + "get the shell version", + "", + version, }, { "cluster_info", @@ -106,13 +112,22 @@ static command_executor commands[] = { create_app, }, { - "drop", "drop an app", " [-r|--reserve_seconds num]", drop_app, + "drop", + "drop an app", + " [-r|--reserve_seconds num]", + drop_app, }, { - "recall", "recall an app", " [new_app_name]", recall_app, + "recall", + "recall an app", + " [new_app_name]", + recall_app, }, { - "rename", "rename an app", " ", rename_app, + "rename", + "rename an app", + " ", + rename_app, }, { "set_meta_level", @@ -121,7 +136,10 @@ static command_executor commands[] = { set_meta_level, }, { - "get_meta_level", "get the meta function level", "", get_meta_level, + "get_meta_level", + "get the meta function level", + 
"", + get_meta_level, }, { "balance", @@ -145,7 +163,10 @@ static command_executor commands[] = { use_app_as_current, }, { - "cc", "change to the specified cluster", "[cluster_name]", cc_command, + "cc", + "change to the specified cluster", + "[cluster_name]", + cc_command, }, { "escape_all", @@ -166,7 +187,10 @@ static command_executor commands[] = { calculate_hash_value, }, { - "set", "set value", " [ttl_in_seconds]", data_operations, + "set", + "set value", + " [ttl_in_seconds]", + data_operations, }, { "multi_set", @@ -175,7 +199,10 @@ static command_executor commands[] = { data_operations, }, { - "get", "get value", " ", data_operations, + "get", + "get value", + " ", + data_operations, }, { "multi_get", @@ -200,7 +227,10 @@ static command_executor commands[] = { data_operations, }, { - "del", "delete a key", " ", data_operations, + "del", + "delete a key", + " ", + data_operations, }, { "multi_del", @@ -254,13 +284,22 @@ static command_executor commands[] = { data_operations, }, { - "exist", "check value exist", " ", data_operations, + "exist", + "check value exist", + " ", + data_operations, }, { - "count", "get sort key count for a single hash key", "", data_operations, + "count", + "get sort key count for a single hash key", + "", + data_operations, }, { - "ttl", "query ttl for a specific key", " ", data_operations, + "ttl", + "query ttl for a specific key", + " ", + data_operations, }, { "hash_scan", @@ -363,7 +402,10 @@ static command_executor commands[] = { flush_log, }, { - "local_get", "get value from local db", " ", local_get, + "local_get", + "get value from local db", + " ", + local_get, }, { "rdb_key_str2hex", @@ -461,16 +503,28 @@ static command_executor commands[] = { query_restore_status, }, { - "get_app_envs", "get current app envs", "[-j|--json]", get_app_envs, + "get_app_envs", + "get current app envs", + "[-j|--json]", + get_app_envs, }, { - "set_app_envs", "set current app envs", " [key value...]", set_app_envs, + "set_app_envs", + "set 
current app envs", + " [key value...]", + set_app_envs, }, { - "del_app_envs", "delete current app envs", " [key...]", del_app_envs, + "del_app_envs", + "delete current app envs", + " [key...]", + del_app_envs, }, { - "clear_app_envs", "clear current app envs", "[-a|--all] [-p|--prefix str]", clear_app_envs, + "clear_app_envs", + "clear current app envs", + "[-a|--all] [-p|--prefix str]", + clear_app_envs, }, { "ddd_diagnose", @@ -506,10 +560,16 @@ static command_executor commands[] = { query_bulk_load_status, }, { - "pause_bulk_load", "pause app bulk load", "<-a --app_name str>", pause_bulk_load, + "pause_bulk_load", + "pause app bulk load", + "<-a --app_name str>", + pause_bulk_load, }, { - "restart_bulk_load", "restart app bulk load", "<-a --app_name str>", restart_bulk_load, + "restart_bulk_load", + "restart app bulk load", + "<-a --app_name str>", + restart_bulk_load, }, { "cancel_bulk_load", @@ -518,7 +578,10 @@ static command_executor commands[] = { cancel_bulk_load, }, { - "clear_bulk_load", "clear app bulk load result", "<-a --app_name str>", clear_bulk_load, + "clear_bulk_load", + "clear app bulk load result", + "<-a --app_name str>", + clear_bulk_load, }, { "detect_hotkey", @@ -574,10 +637,16 @@ static command_executor commands[] = { local_partition_split, }, { - "exit", "exit shell", "", exit_shell, + "exit", + "exit shell", + "", + exit_shell, }, { - nullptr, nullptr, nullptr, nullptr, + nullptr, + nullptr, + nullptr, + nullptr, }}; void print_help(command_executor *e, size_t name_width, size_t option_width) diff --git a/src/test/function_test/base_api/test_basic.cpp b/src/test/function_test/base_api/test_basic.cpp index 674a348548..64ec71dcfc 100644 --- a/src/test/function_test/base_api/test_basic.cpp +++ b/src/test/function_test/base_api/test_basic.cpp @@ -309,7 +309,8 @@ TEST_F(basic, multi_get) std::map new_values; ASSERT_EQ(PERR_OK, client_->multi_get("basic_test_multi_get", "", "", options, new_values)); std::map expect_kvs({ - {"1", "1"}, 
{"1-abcdefg", "1-abcdefg"}, + {"1", "1"}, + {"1-abcdefg", "1-abcdefg"}, }); ASSERT_EQ(expect_kvs, new_values); } diff --git a/src/test/function_test/base_api/test_batch_get.cpp b/src/test/function_test/base_api/test_batch_get.cpp index f196fc7bde..ff0b15a2e2 100644 --- a/src/test/function_test/base_api/test_batch_get.cpp +++ b/src/test/function_test/base_api/test_batch_get.cpp @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ // IWYU pragma: no_include #include diff --git a/src/test/function_test/base_api/test_scan.cpp b/src/test/function_test/base_api/test_scan.cpp index bda35b25ec..0b6cd2ff1d 100644 --- a/src/test/function_test/base_api/test_scan.cpp +++ b/src/test/function_test/base_api/test_scan.cpp @@ -177,8 +177,8 @@ TEST_F(scan_test, OVERALL_COUNT_ONLY) data_count += kv_count; i++; } - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); delete scanner; } LOG_INFO("scan count {}", i); @@ -206,8 +206,8 @@ TEST_F(scan_test, ALL_SORT_KEY) check_and_put(data, expected_hash_key_, sort_key, value); } delete scanner; - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); ASSERT_NO_FATAL_FAILURE(compare(expect_kvs_[expected_hash_key_], data, expected_hash_key_)); } @@ -271,8 +271,8 @@ TEST_F(scan_test, BOUND_EXCLUSIVE) check_and_put(data, expected_hash_key_, sort_key, value); } delete scanner; - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); ++it1; ASSERT_NO_FATAL_FAILURE( compare(data, std::map(it1, it2), expected_hash_key_)); @@ -363,8 +363,8 @@ TEST_F(scan_test, OVERALL) while (PERR_OK == (ret = (scanner->next(hash_key, sort_key, value)))) { check_and_put(data, hash_key, sort_key, value); } - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. 
error=" << client_->get_error_string(ret); delete scanner; } ASSERT_NO_FATAL_FAILURE(compare(expect_kvs_, data)); @@ -406,8 +406,8 @@ TEST_F(scan_test, REQUEST_EXPIRE_TS) } else if (err == pegasus::PERR_SCAN_COMPLETE) { split_completed.store(true); } else { - ASSERT_TRUE(false) << "Error occurred when scan. error=" - << client_->get_error_string(err); + ASSERT_TRUE(false) + << "Error occurred when scan. error=" << client_->get_error_string(err); } op_completed.notify(); }); diff --git a/src/test/function_test/bulk_load/test_bulk_load.cpp b/src/test/function_test/bulk_load/test_bulk_load.cpp index 8e191264f7..825e7b8bf4 100644 --- a/src/test/function_test/bulk_load/test_bulk_load.cpp +++ b/src/test/function_test/bulk_load/test_bulk_load.cpp @@ -147,8 +147,9 @@ class bulk_load_test : public test_util // Find the generated files. std::vector src_files; - ASSERT_TRUE(dsn::utils::filesystem::get_subfiles( - partition_path, src_files, /* recursive */ false)); + ASSERT_TRUE(dsn::utils::filesystem::get_subfiles(partition_path, + src_files, + /* recursive */ false)); ASSERT_FALSE(src_files.empty()); bulk_load_metadata blm; diff --git a/src/test/function_test/restore/test_restore.cpp b/src/test/function_test/restore/test_restore.cpp index fc42593574..141e7c8ac0 100644 --- a/src/test/function_test/restore/test_restore.cpp +++ b/src/test/function_test/restore/test_restore.cpp @@ -101,10 +101,11 @@ class restore_test : public test_util { std::string pegasus_root_dir = global_env::instance()._pegasus_root; CHECK_EQ(0, ::chdir(pegasus_root_dir.c_str())); - std::string cmd = "cd " + backup_path_ + "; " - "ls -c > restore_app_from_backup_test_tmp; " - "tail -n 1 restore_app_from_backup_test_tmp; " - "rm restore_app_from_backup_test_tmp"; + std::string cmd = "cd " + backup_path_ + + "; " + "ls -c > restore_app_from_backup_test_tmp; " + "tail -n 1 restore_app_from_backup_test_tmp; " + "rm restore_app_from_backup_test_tmp"; std::stringstream ss; int ret = 
dsn::utils::pipe_execute(cmd.c_str(), ss); std::cout << cmd << " output: " << ss.str() << std::endl; diff --git a/src/test/function_test/utils/test_util.cpp b/src/test/function_test/utils/test_util.cpp index c3ca95b8cd..e574eb7046 100644 --- a/src/test/function_test/utils/test_util.cpp +++ b/src/test/function_test/utils/test_util.cpp @@ -1,20 +1,20 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ #include "test_util.h" @@ -55,9 +55,9 @@ class rpc_address; } // namespace dsn using dsn::partition_configuration; +using dsn::rpc_address; using dsn::replication::replica_helper; using dsn::replication::replication_ddl_client; -using dsn::rpc_address; using nlohmann::json; using std::map; using std::string; diff --git a/src/test/function_test/utils/test_util.h b/src/test/function_test/utils/test_util.h index 8e3a1663ce..e00397a82e 100644 --- a/src/test/function_test/utils/test_util.h +++ b/src/test/function_test/utils/test_util.h @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ #pragma once diff --git a/src/test/function_test/utils/utils.h b/src/test/function_test/utils/utils.h index 526bd3ab2e..1c1df69c22 100644 --- a/src/test/function_test/utils/utils.h +++ b/src/test/function_test/utils/utils.h @@ -176,9 +176,9 @@ inline void compare(const std::map &expect, { for (auto it1 = actual.begin(), it2 = expect.begin();; ++it1, ++it2) { if (it1 == actual.end()) { - ASSERT_EQ(expect.end(), it2) << "Only in expect: hash_key=" << hash_key - << ", sort_key=" << it2->first - << ", value=" << it2->second; + ASSERT_EQ(expect.end(), it2) + << "Only in expect: hash_key=" << hash_key << ", sort_key=" << it2->first + << ", value=" << it2->second; break; } ASSERT_NE(expect.end(), it2) << "Only in actual: hash_key=" << hash_key @@ -200,8 +200,8 @@ inline void compare(const T &expect, const U &actual) break; } ASSERT_NE(expect.end(), it2) << "Only in actual: hash_key=" << it1->first; - ASSERT_EQ(it1->first, it2->first) << "Diff: actual_hash_key=" << it1->first - << ", expected_hash_key=" << it2->first; + ASSERT_EQ(it1->first, it2->first) + << "Diff: actual_hash_key=" << it1->first << ", expected_hash_key=" << it2->first; ASSERT_NO_FATAL_FAILURE(compare(it1->second, it2->second, it1->first)); } } diff --git a/src/test/kill_test/job.cpp b/src/test/kill_test/job.cpp index b73789d943..ef0a18c55a 100644 --- a/src/test/kill_test/job.cpp +++ b/src/test/kill_test/job.cpp @@ -37,5 +37,5 @@ std::string job::get_addr_by_index(int index) } void job::set_name(const std::string &_name) { name = _name; } -} -} // end namespace +} // namespace test +} 
// namespace pegasus diff --git a/src/test/kill_test/job.h b/src/test/kill_test/job.h index 2c05f55edf..a1d7574d61 100644 --- a/src/test/kill_test/job.h +++ b/src/test/kill_test/job.h @@ -61,5 +61,5 @@ struct job std::string get_addr_by_index(int index); void set_name(const std::string &_name); }; -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/killer_handler.h b/src/test/kill_test/killer_handler.h index 4662663fd9..c895910fa9 100644 --- a/src/test/kill_test/killer_handler.h +++ b/src/test/kill_test/killer_handler.h @@ -73,5 +73,5 @@ class killer_handler return new T(); } }; -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/killer_handler_shell.cpp b/src/test/kill_test/killer_handler_shell.cpp index 764baba9a8..ecccb6f397 100644 --- a/src/test/kill_test/killer_handler_shell.cpp +++ b/src/test/kill_test/killer_handler_shell.cpp @@ -191,5 +191,5 @@ bool killer_handler_shell::check(const std::string &job, int index, const std::s // not implement, just return true return true; } -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/killer_handler_shell.h b/src/test/kill_test/killer_handler_shell.h index c3c797fb52..45c1fb5357 100644 --- a/src/test/kill_test/killer_handler_shell.h +++ b/src/test/kill_test/killer_handler_shell.h @@ -59,5 +59,5 @@ class killer_handler_shell : public killer_handler // check whether the command execute success. bool check(const std::string &job, int index, const std::string &type); }; -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/utils/TokenBucket.h b/src/utils/TokenBucket.h index 9a5a2327d8..c0f9e8e2f9 100644 --- a/src/utils/TokenBucket.h +++ b/src/utils/TokenBucket.h @@ -272,8 +272,8 @@ class BasicDynamicTokenBucket * * Thread-safe (but returned value may immediately be outdated). 
*/ - double available(double rate, double burstSize, double nowInSeconds = defaultClockNow()) const - noexcept + double + available(double rate, double burstSize, double nowInSeconds = defaultClockNow()) const noexcept { assert(rate > 0); assert(burstSize > 0); diff --git a/src/utils/binary_writer.cpp b/src/utils/binary_writer.cpp index 1796f425bb..9234ce9140 100644 --- a/src/utils/binary_writer.cpp +++ b/src/utils/binary_writer.cpp @@ -38,8 +38,8 @@ binary_writer::binary_writer(int reserveBufferSize) { _total_size = 0; _buffers.reserve(1); - _reserved_size_per_buffer = - (reserveBufferSize == 0) ? _reserved_size_per_buffer_static : reserveBufferSize; + _reserved_size_per_buffer = (reserveBufferSize == 0) ? _reserved_size_per_buffer_static + : reserveBufferSize; _current_buffer = nullptr; _current_offset = 0; _current_buffer_length = 0; @@ -200,4 +200,4 @@ bool binary_writer::backup(int count) _total_size -= count; return true; } -} +} // namespace dsn diff --git a/src/utils/binary_writer.h b/src/utils/binary_writer.h index eb33adf8ae..2640567f05 100644 --- a/src/utils/binary_writer.h +++ b/src/utils/binary_writer.h @@ -129,4 +129,4 @@ inline void binary_writer::write(const blob &val) if (len > 0) write((const char *)val.data(), len); } -} +} // namespace dsn diff --git a/src/utils/chrono_literals.h b/src/utils/chrono_literals.h index 250e73ed52..9c28f69b11 100644 --- a/src/utils/chrono_literals.h +++ b/src/utils/chrono_literals.h @@ -73,6 +73,6 @@ constexpr std::chrono::nanoseconds operator"" _ns(unsigned long long v) return std::chrono::nanoseconds{v}; } -} // inline namespace chrono_literals -} // inline namespace literals +} // namespace chrono_literals +} // namespace literals } // namespace dsn diff --git a/src/utils/command_manager.h b/src/utils/command_manager.h index 903ccd2900..b124388fd8 100644 --- a/src/utils/command_manager.h +++ b/src/utils/command_manager.h @@ -66,13 +66,14 @@ class command_manager : public ::dsn::utils::singleton // 'validator' is 
used to validate the new value. // The value is reset to 'default_value' if passing "DEFAULT" argument. template - WARN_UNUSED_RESULT std::unique_ptr - register_int_command(T &value, - T default_value, - const std::string &command, - const std::string &help, - std::function validator = - [](int64_t new_value) -> bool { return new_value >= 0; }) + WARN_UNUSED_RESULT std::unique_ptr register_int_command( + T &value, + T default_value, + const std::string &command, + const std::string &help, + std::function validator = [](int64_t new_value) -> bool { + return new_value >= 0; + }) { return register_single_command( command, diff --git a/src/utils/configuration.cpp b/src/utils/configuration.cpp index d9d4c1ac64..449d1d4c16 100644 --- a/src/utils/configuration.cpp +++ b/src/utils/configuration.cpp @@ -438,4 +438,4 @@ bool configuration::has_key(const char *section, const char *key) } return false; } -} +} // namespace dsn diff --git a/src/utils/crc.cpp b/src/utils/crc.cpp index b0d608eba8..c710870404 100644 --- a/src/utils/crc.cpp +++ b/src/utils/crc.cpp @@ -439,8 +439,8 @@ uint64_t crc64::_crc_table[sizeof(crc64::_crc_table) / sizeof(crc64::_crc_table[ #undef crc64_POLY #undef BIT64 #undef BIT32 -} -} +} // namespace utils +} // namespace dsn namespace dsn { namespace utils { @@ -477,5 +477,5 @@ uint64_t crc64_concat(uint32_t xy_init, return ::dsn::utils::crc64::concatenate( 0, x_init, x_final, (uint64_t)x_size, y_init, y_final, (uint64_t)y_size); } -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/crc.h b/src/utils/crc.h index e2e4ae100b..c18c2137a5 100644 --- a/src/utils/crc.h +++ b/src/utils/crc.h @@ -71,5 +71,5 @@ uint64_t crc64_concat(uint32_t xy_init, uint64_t y_init, uint64_t y_final, size_t y_size); -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/customizable_id.h b/src/utils/customizable_id.h index 46acfcbfe3..e0efae8863 100644 --- a/src/utils/customizable_id.h +++ b/src/utils/customizable_id.h @@ -207,5 +207,5 @@ int 
customized_id_mgr::register_id(const char *name) _names2.push_back(std::string(name)); return code; } -} -} // end namespace dsn::utils +} // namespace utils +} // namespace dsn diff --git a/src/utils/distributed_lock_service.h b/src/utils/distributed_lock_service.h index 0a8749021c..eb1858ef65 100644 --- a/src/utils/distributed_lock_service.h +++ b/src/utils/distributed_lock_service.h @@ -116,17 +116,17 @@ class distributed_lock_service const lock_options &opt) = 0; /* - * cancel the lock operation that is on pending - * cb_code: the task code specifies where to execute the callback - * lock_id should be valid, and cb should not be empty - * - * possible ec: - * ERR_INVALID_PARAMETERS - * ERR_OK, the pending lock is cancelled successfully - * ERR_OBJECT_NOT_FOUND, the caller is not found in pending list, check - * returned owner to see whether it already succeedes - * - */ + * cancel the lock operation that is on pending + * cb_code: the task code specifies where to execute the callback + * lock_id should be valid, and cb should not be empty + * + * possible ec: + * ERR_INVALID_PARAMETERS + * ERR_OK, the pending lock is cancelled successfully + * ERR_OBJECT_NOT_FOUND, the caller is not found in pending list, check + * returned owner to see whether it already succeedes + * + */ virtual task_ptr cancel_pending_lock(const std::string &lock_id, const std::string &myself_id, task_code cb_code, @@ -173,5 +173,5 @@ class distributed_lock_service /*out*/ std::string &owner, /*out*/ uint64_t &version) = 0; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/utils/error_code.cpp b/src/utils/error_code.cpp index 4b26fcc57d..e5930d4598 100644 --- a/src/utils/error_code.cpp +++ b/src/utils/error_code.cpp @@ -65,4 +65,4 @@ const char *error_code::to_string() const { return dsn::utils::customized_id_mgr::instance().get_name(_internal_code); } -} +} // namespace dsn diff --git a/src/utils/exp_delay.h b/src/utils/exp_delay.h index 09b5389348..b6d3af346d 100644 --- 
a/src/utils/exp_delay.h +++ b/src/utils/exp_delay.h @@ -119,4 +119,4 @@ class shared_exp_delay private: int _delay[DELAY_COUNT]; }; -} +} // namespace dsn diff --git a/src/utils/factory_store.h b/src/utils/factory_store.h index 72d203e0c7..2d1a2bef1a 100644 --- a/src/utils/factory_store.h +++ b/src/utils/factory_store.h @@ -157,5 +157,5 @@ class factory_store } }; }; -} -} // end namespace dsn::utils +} // namespace utils +} // namespace dsn diff --git a/src/utils/function_traits.h b/src/utils/function_traits.h index 1cc3c25103..f060a22fab 100644 --- a/src/utils/function_traits.h +++ b/src/utils/function_traits.h @@ -119,4 +119,4 @@ template struct function_traits : public function_traits { }; -} +} // namespace dsn diff --git a/src/utils/gpid.cpp b/src/utils/gpid.cpp index 1d3bb3e27c..18c5433efd 100644 --- a/src/utils/gpid.cpp +++ b/src/utils/gpid.cpp @@ -43,4 +43,4 @@ const char *gpid::to_string() const snprintf(b, bf.get_chunk_size(), "%d.%d", _value.u.app_id, _value.u.partition_index); return b; } -} +} // namespace dsn diff --git a/src/utils/je_ctl.cpp b/src/utils/je_ctl.cpp index 38f5bb1be0..3cfacf4abc 100644 --- a/src/utils/je_ctl.cpp +++ b/src/utils/je_ctl.cpp @@ -66,7 +66,10 @@ void je_dump_malloc_stats(const char *opts, size_t buf_sz, std::string &stats) const char *je_stats_type_to_opts(je_stats_type type) { static const char *opts_map[] = { - "gmdablxe", "mdablxe", "gblxe", "", + "gmdablxe", + "mdablxe", + "gblxe", + "", }; RETURN_ARRAY_ELEM_BY_ENUM_TYPE(type, opts_map); @@ -75,7 +78,10 @@ const char *je_stats_type_to_opts(je_stats_type type) size_t je_stats_type_to_default_buf_sz(je_stats_type type) { static const size_t buf_sz_map[] = { - 2 * 1024, 4 * 1024, 8 * 1024 * 1024, 8 * 1024 * 1024, + 2 * 1024, + 4 * 1024, + 8 * 1024 * 1024, + 8 * 1024 * 1024, }; RETURN_ARRAY_ELEM_BY_ENUM_TYPE(type, buf_sz_map); diff --git a/src/utils/lockp.std.h b/src/utils/lockp.std.h index b61b6f093e..64cc8095c1 100644 --- a/src/utils/lockp.std.h +++ 
b/src/utils/lockp.std.h @@ -93,5 +93,5 @@ class std_semaphore_provider : public semaphore_provider private: dsn::utils::semaphore _sema; }; -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/utils/metrics.h b/src/utils/metrics.h index e8a9cdc6a3..2990537203 100644 --- a/src/utils/metrics.h +++ b/src/utils/metrics.h @@ -377,7 +377,7 @@ class metric_entity : public ref_counter // `args` are the parameters that are used to construct the object of MetricType. template - ref_ptr find_or_create(const metric_prototype *prototype, Args &&... args); + ref_ptr find_or_create(const metric_prototype *prototype, Args &&...args); void take_snapshot(metric_json_writer &writer, const metric_filters &filters) const; @@ -890,7 +890,7 @@ class metric_prototype_with : public metric_prototype // Construct a metric object based on the instance of metric_entity. template - ref_ptr instantiate(const metric_entity_ptr &entity, Args &&... args) const + ref_ptr instantiate(const metric_entity_ptr &entity, Args &&...args) const { return entity->find_or_create(this, std::forward(args)...); } @@ -900,8 +900,7 @@ class metric_prototype_with : public metric_prototype }; template -ref_ptr metric_entity::find_or_create(const metric_prototype *prototype, - Args &&... args) +ref_ptr metric_entity::find_or_create(const metric_prototype *prototype, Args &&...args) { CHECK_STREQ_MSG(prototype->entity_type().data(), _prototype->name(), diff --git a/src/utils/optional.h b/src/utils/optional.h index 0f2f97d9ad..630e323907 100644 --- a/src/utils/optional.h +++ b/src/utils/optional.h @@ -62,7 +62,7 @@ class optional that.reset(); } template - /*implicit*/ optional(Args &&... args) : _is_some(true) + /*implicit*/ optional(Args &&...args) : _is_some(true) { new (_data_placeholder) T{std::forward(args)...}; } @@ -90,7 +90,7 @@ class optional } } template - void reset(Args &&... 
args) + void reset(Args &&...args) { if (_is_some) { reinterpret_cast(_data_placeholder)->~T(); @@ -101,4 +101,4 @@ class optional } ~optional() { reset(); } }; -} +} // namespace dsn diff --git a/src/utils/preloadable.h b/src/utils/preloadable.h index afca5b065d..c547337121 100644 --- a/src/utils/preloadable.h +++ b/src/utils/preloadable.h @@ -35,5 +35,5 @@ class preloadable template T preloadable::s_instance; -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/priority_queue.h b/src/utils/priority_queue.h index 1c46d11bc1..b149f3a242 100644 --- a/src/utils/priority_queue.h +++ b/src/utils/priority_queue.h @@ -138,5 +138,5 @@ class blocking_priority_queue : public priority_queue private: semaphore _sema; }; -} -} // end namespace +} // namespace utils +} // namespace dsn diff --git a/src/utils/process_utils.cpp b/src/utils/process_utils.cpp index 2859560796..eaa42b69a8 100644 --- a/src/utils/process_utils.cpp +++ b/src/utils/process_utils.cpp @@ -32,8 +32,8 @@ #include "utils/process_utils.h" #include "utils/time_utils.h" -using std::ios_base; using std::ifstream; +using std::ios_base; using std::string; namespace dsn { @@ -114,5 +114,5 @@ const char *process_start_date_time_mills() { return record_process_start_time::s_instance.date_time_mills; } -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/process_utils.h b/src/utils/process_utils.h index 74d65adcb2..611e3d0698 100644 --- a/src/utils/process_utils.h +++ b/src/utils/process_utils.h @@ -74,5 +74,5 @@ inline int get_current_tid() /// uint64_t process_start_millis(); const char *process_start_date_time_mills(); -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/safe_strerror_posix.cpp b/src/utils/safe_strerror_posix.cpp index 45ea67dd4e..a95496af50 100644 --- a/src/utils/safe_strerror_posix.cpp +++ b/src/utils/safe_strerror_posix.cpp @@ -113,5 +113,5 @@ std::string safe_strerror(int err) safe_strerror_r(err, buf, sizeof(buf)); return std::string(buf); 
} -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/safe_strerror_posix.h b/src/utils/safe_strerror_posix.h index 872b163104..05c0dbf56a 100644 --- a/src/utils/safe_strerror_posix.h +++ b/src/utils/safe_strerror_posix.h @@ -32,5 +32,5 @@ void safe_strerror_r(int err, char *buf, size_t len); // more robust in the case of heap corruption errors, since it doesn't need to // allocate a string. std::string safe_strerror(int err); -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/simple_logger.cpp b/src/utils/simple_logger.cpp index db0e95bf9e..86145d4a8e 100644 --- a/src/utils/simple_logger.cpp +++ b/src/utils/simple_logger.cpp @@ -341,7 +341,7 @@ void simple_logger::remove_redundant_files() matching_file_mtimes.resize(matching_file_mtimes.size() - max_matches); // Remove redundant log files. - for (const auto & [ _, matching_file ] : matching_file_mtimes) { + for (const auto &[_, matching_file] : matching_file_mtimes) { if (::remove(matching_file.c_str()) != 0) { // If remove failed, just print log and ignore it. fmt::print(stderr, diff --git a/src/utils/singleton_store.h b/src/utils/singleton_store.h index 1db4bb008c..7c4257b4ca 100644 --- a/src/utils/singleton_store.h +++ b/src/utils/singleton_store.h @@ -120,5 +120,5 @@ class safe_singleton_store }; //------------- inline implementation ---------- -} -} // end namespace dsn::utils +} // namespace utils +} // namespace dsn diff --git a/src/utils/strings.cpp b/src/utils/strings.cpp index c7c48a9c36..aa73ff520c 100644 --- a/src/utils/strings.cpp +++ b/src/utils/strings.cpp @@ -172,7 +172,7 @@ struct SequenceInserter // The new element is constructed through variadic template and appended at the end // of the sequence container. template - void emplace(SequenceContainer &container, Args &&... 
args) const + void emplace(SequenceContainer &container, Args &&...args) const { container.emplace_back(std::forward(args)...); } @@ -184,7 +184,7 @@ struct AssociativeInserter // The new element is constructed through variadic template and inserted into the associative // container. template - void emplace(AssociativeContainer &container, Args &&... args) const + void emplace(AssociativeContainer &container, Args &&...args) const { container.emplace(std::forward(args)...); } diff --git a/src/utils/synchronize.h b/src/utils/synchronize.h index 8f1a66cd20..3f863b2c02 100644 --- a/src/utils/synchronize.h +++ b/src/utils/synchronize.h @@ -41,6 +41,7 @@ class ex_lock __inline void lock() { _lock.lock(); } __inline bool try_lock() { return _lock.tryLock(); } __inline void unlock() { _lock.unlock(); } + private: RecursiveBenaphore _lock; }; @@ -51,6 +52,7 @@ class ex_lock_nr __inline void lock() { _lock.lock(); } __inline bool try_lock() { return _lock.tryLock(); } __inline void unlock() { _lock.unlock(); } + private: NonRecursiveBenaphore _lock; }; @@ -179,5 +181,5 @@ class auto_write_lock private: rw_lock_nr *_lock; }; -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/test/TokenBucketTest.cpp b/src/utils/test/TokenBucketTest.cpp index aea2e739de..685b43a0d8 100644 --- a/src/utils/test/TokenBucketTest.cpp +++ b/src/utils/test/TokenBucketTest.cpp @@ -72,7 +72,10 @@ TEST_P(TokenBucketTest, sanity) } static std::vector> rateToConsumeSize = { - {100, 1}, {1000, 1}, {10000, 1}, {10000, 5}, + {100, 1}, + {1000, 1}, + {10000, 1}, + {10000, 5}, }; INSTANTIATE_TEST_SUITE_P(TokenBucket, TokenBucketTest, ::testing::ValuesIn(rateToConsumeSize)); diff --git a/src/utils/test/file_system_test.cpp b/src/utils/test/file_system_test.cpp index dbe0b901a5..0a71272648 100644 --- a/src/utils/test/file_system_test.cpp +++ b/src/utils/test/file_system_test.cpp @@ -195,14 +195,17 @@ TEST(filesystem_test, glob_test) ASSERT_TRUE(create_directory(kTestDir)); std::vector 
filenames = {"fuzz", "fuzzy", "fuzzyiest", "buzz"}; std::vector> matchers = { - {"file", 0}, {"fuzz", 1}, {"fuzz*", 3}, {"?uzz", 2}, + {"file", 0}, + {"fuzz", 1}, + {"fuzz*", 3}, + {"?uzz", 2}, }; for (const auto &name : filenames) { ASSERT_TRUE(create_file(path_combine(kTestDir, name))); } - for (const auto & [ path_pattern, matched_count ] : matchers) { + for (const auto &[path_pattern, matched_count] : matchers) { std::vector matches; ASSERT_TRUE(glob(path_combine(kTestDir, path_pattern), matches)) << path_pattern; ASSERT_EQ(matched_count, matches.size()) << path_pattern; diff --git a/src/utils/test/long_adder_test.cpp b/src/utils/test/long_adder_test.cpp index a6a910a92a..0fe82e188b 100644 --- a/src/utils/test/long_adder_test.cpp +++ b/src/utils/test/long_adder_test.cpp @@ -150,7 +150,7 @@ class long_adder_test // Define runner to time each case auto runner = [num_operations, num_threads]( - const char *name, std::function func, int64_t &result) { + const char *name, std::function func, int64_t &result) { uint64_t start = dsn_now_ns(); func(result); uint64_t end = dsn_now_ns(); diff --git a/src/utils/test/output_utils_test.cpp b/src/utils/test/output_utils_test.cpp index cd3a9cdbfd..cd774579c5 100644 --- a/src/utils/test/output_utils_test.cpp +++ b/src/utils/test/output_utils_test.cpp @@ -32,9 +32,9 @@ #include "gtest/gtest.h" -using std::vector; -using std::string; using dsn::utils::table_printer; +using std::string; +using std::vector; namespace dsn { diff --git a/src/utils/thread_access_checker.cpp b/src/utils/thread_access_checker.cpp index 5f54188168..eaab4e2baf 100644 --- a/src/utils/thread_access_checker.cpp +++ b/src/utils/thread_access_checker.cpp @@ -45,4 +45,4 @@ void thread_access_checker::only_one_thread_access() _access_thread_id_inited = true; } } -} +} // namespace dsn diff --git a/src/utils/thread_access_checker.h b/src/utils/thread_access_checker.h index 623dfc0f85..929f92eff5 100644 --- a/src/utils/thread_access_checker.h +++ 
b/src/utils/thread_access_checker.h @@ -45,4 +45,4 @@ class thread_access_checker int _access_thread_id; bool _access_thread_id_inited; }; -} +} // namespace dsn diff --git a/src/utils/threadpool_code.h b/src/utils/threadpool_code.h index 383c1d5247..cd3eac2db7 100644 --- a/src/utils/threadpool_code.h +++ b/src/utils/threadpool_code.h @@ -66,6 +66,6 @@ class threadpool_code DEFINE_THREAD_POOL_CODE(THREAD_POOL_INVALID) DEFINE_THREAD_POOL_CODE(THREAD_POOL_DEFAULT) -} +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::threadpool_code); diff --git a/src/utils/threadpool_spec.h b/src/utils/threadpool_spec.h index 1a8506422e..3643c40ba5 100644 --- a/src/utils/threadpool_spec.h +++ b/src/utils/threadpool_spec.h @@ -121,4 +121,4 @@ CONFIG_FLD(bool, false, "throttling: whether to enable throttling with virtual queues") CONFIG_END -} +} // namespace dsn diff --git a/src/utils/uniq_timestamp_us.h b/src/utils/uniq_timestamp_us.h index 5094071f2e..c5208ab151 100644 --- a/src/utils/uniq_timestamp_us.h +++ b/src/utils/uniq_timestamp_us.h @@ -59,4 +59,4 @@ class uniq_timestamp_us return _last_ts; } }; -} +} // namespace dsn diff --git a/src/utils/work_queue.h b/src/utils/work_queue.h index 72f2a61575..5d4c78ca2d 100644 --- a/src/utils/work_queue.h +++ b/src/utils/work_queue.h @@ -98,4 +98,4 @@ class work_queue protected: slist _hdr; }; -} \ No newline at end of file +} // namespace dsn \ No newline at end of file diff --git a/src/utils/zlock_provider.h b/src/utils/zlock_provider.h index 0658066404..e9a1b5bd14 100644 --- a/src/utils/zlock_provider.h +++ b/src/utils/zlock_provider.h @@ -179,4 +179,4 @@ class semaphore_provider : public extensible_object private: semaphore_provider *_inner_provider; }; -} +} // namespace dsn diff --git a/src/utils/zlocks.h b/src/utils/zlocks.h index 472823ac3d..0a6f614291 100644 --- a/src/utils/zlocks.h +++ b/src/utils/zlocks.h @@ -111,7 +111,7 @@ class zevent std::atomic _signaled; bool _manualReset; }; -} +} // namespace dsn /// /// RAII 
wrapper of rdsn's synchronization objects @@ -188,7 +188,7 @@ class zauto_write_lock bool _locked; zrwlock_nr *_lock; }; -} +} // namespace dsn /// /// utils function used to check the lock safety @@ -197,5 +197,5 @@ namespace dsn { namespace lock_checker { void check_wait_safety(); void check_dangling_lock(); -} -} +} // namespace lock_checker +} // namespace dsn diff --git a/src/zookeeper/distributed_lock_service_zookeeper.cpp b/src/zookeeper/distributed_lock_service_zookeeper.cpp index 52d1f98f62..5b4f8b8269 100644 --- a/src/zookeeper/distributed_lock_service_zookeeper.cpp +++ b/src/zookeeper/distributed_lock_service_zookeeper.cpp @@ -282,5 +282,5 @@ void distributed_lock_service_zookeeper::on_zoo_session_evt(lock_srv_ptr _this, LOG_WARNING("get zoo state: {}, ignore it", zookeeper_session::string_zoo_state(zoo_state)); } } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/distributed_lock_service_zookeeper.h b/src/zookeeper/distributed_lock_service_zookeeper.h index 206e73459a..6d04e672ed 100644 --- a/src/zookeeper/distributed_lock_service_zookeeper.h +++ b/src/zookeeper/distributed_lock_service_zookeeper.h @@ -126,5 +126,5 @@ class distributed_lock_service_zookeeper : public distributed_lock_service, publ friend class lock_struct; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/lock_struct.cpp b/src/zookeeper/lock_struct.cpp index 56b0fd33be..d88837fe2d 100644 --- a/src/zookeeper/lock_struct.cpp +++ b/src/zookeeper/lock_struct.cpp @@ -81,11 +81,12 @@ static bool is_zookeeper_timeout(int zookeeper_error) zookeeper_session::string_zoo_operation(op->_optype), \ op->_input._path); \ zookeeper_session::add_ref(op); \ - tasking::enqueue(TASK_CODE_DLOCK, \ - nullptr, \ - [_this, op]() { _this->_dist_lock_service->session()->visit(op); }, \ - _this->hash(), \ - std::chrono::seconds(1)); + tasking::enqueue( \ + TASK_CODE_DLOCK, \ + nullptr, \ + [_this, op]() { _this->_dist_lock_service->session()->visit(op); }, \ + 
_this->hash(), \ + std::chrono::seconds(1)); #define IGNORE_CALLBACK true #define DONT_IGNORE_CALLBACK false @@ -798,5 +799,5 @@ void lock_struct::lock_expired(lock_struct_ptr _this) _this->_checker.only_one_thread_access(); _this->on_expire(); } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/lock_struct.h b/src/zookeeper/lock_struct.h index fa10f94398..bc65494e61 100644 --- a/src/zookeeper/lock_struct.h +++ b/src/zookeeper/lock_struct.h @@ -122,5 +122,5 @@ class lock_struct : public ref_counter thread_access_checker _checker; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/lock_types.h b/src/zookeeper/lock_types.h index d368fc514d..6cddd64844 100644 --- a/src/zookeeper/lock_types.h +++ b/src/zookeeper/lock_types.h @@ -43,5 +43,5 @@ class distributed_lock_service_zookeeper; class lock_struct; typedef ref_ptr lock_srv_ptr; typedef ref_ptr lock_struct_ptr; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/test/distributed_lock_zookeeper.cpp b/src/zookeeper/test/distributed_lock_zookeeper.cpp index 7da48ef433..75747a4da9 100644 --- a/src/zookeeper/test/distributed_lock_zookeeper.cpp +++ b/src/zookeeper/test/distributed_lock_zookeeper.cpp @@ -163,14 +163,14 @@ TEST(distributed_lock_service_zookeeper, abnormal_api_call) cb_pair.first->wait(); opt.create_if_not_exist = true; - cb_pair = - dlock_svc->lock(lock_id, - my_id, - DLOCK_CALLBACK, - [](error_code ec, const std::string &, int) { ASSERT_TRUE(ec == ERR_OK); }, - DLOCK_CALLBACK, - nullptr, - opt); + cb_pair = dlock_svc->lock( + lock_id, + my_id, + DLOCK_CALLBACK, + [](error_code ec, const std::string &, int) { ASSERT_TRUE(ec == ERR_OK); }, + DLOCK_CALLBACK, + nullptr, + opt); ASSERT_TRUE(cb_pair.first != nullptr && cb_pair.second != nullptr); cb_pair.first->wait(); @@ -208,16 +208,17 @@ TEST(distributed_lock_service_zookeeper, abnormal_api_call) }); tsk->wait(); - cb_pair2 = dlock_svc->lock(lock_id, - my_id2, - DLOCK_CALLBACK, - 
[my_id2](error_code ec, const std::string &name, int) { - ASSERT_TRUE(ec == ERR_OK); - ASSERT_TRUE(name == my_id2); - }, - DLOCK_CALLBACK, - nullptr, - opt); + cb_pair2 = dlock_svc->lock( + lock_id, + my_id2, + DLOCK_CALLBACK, + [my_id2](error_code ec, const std::string &name, int) { + ASSERT_TRUE(ec == ERR_OK); + ASSERT_TRUE(name == my_id2); + }, + DLOCK_CALLBACK, + nullptr, + opt); bool result = cb_pair2.first->wait(2000); ASSERT_FALSE(result); diff --git a/src/zookeeper/zookeeper_error.cpp b/src/zookeeper/zookeeper_error.cpp index 3504990a59..97f7d3851e 100644 --- a/src/zookeeper/zookeeper_error.cpp +++ b/src/zookeeper/zookeeper_error.cpp @@ -48,5 +48,5 @@ error_code from_zerror(int zerr) return ERR_INCONSISTENT_STATE; return ERR_ZOOKEEPER_OPERATION; } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/zookeeper_error.h b/src/zookeeper/zookeeper_error.h index a78f41be2d..0098a221f5 100644 --- a/src/zookeeper/zookeeper_error.h +++ b/src/zookeeper/zookeeper_error.h @@ -33,4 +33,4 @@ namespace dist { error_code from_zerror(int zerr); } -} +} // namespace dsn diff --git a/src/zookeeper/zookeeper_session.cpp b/src/zookeeper/zookeeper_session.cpp index ed4d6b9d22..380eafa0b0 100644 --- a/src/zookeeper/zookeeper_session.cpp +++ b/src/zookeeper/zookeeper_session.cpp @@ -467,5 +467,5 @@ void zookeeper_session::global_void_completion(int rc, const void *data) op_ctx->_callback_function(op_ctx); release_ref(op_ctx); } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/zookeeper_session.h b/src/zookeeper/zookeeper_session.h index b359f867b9..e46f6d6a25 100644 --- a/src/zookeeper/zookeeper_session.h +++ b/src/zookeeper/zookeeper_session.h @@ -197,7 +197,7 @@ class zookeeper_session global_strings_completion(int rc, const struct String_vector *strings, const void *data); static void global_void_completion(int rc, const void *data); }; -} -} +} // namespace dist +} // namespace dsn 
USER_DEFINED_STRUCTURE_FORMATTER(::dsn::dist::zookeeper_session); diff --git a/src/zookeeper/zookeeper_session_mgr.cpp b/src/zookeeper/zookeeper_session_mgr.cpp index ca40157205..ea44dbd5ff 100644 --- a/src/zookeeper/zookeeper_session_mgr.cpp +++ b/src/zookeeper/zookeeper_session_mgr.cpp @@ -57,5 +57,5 @@ zookeeper_session *zookeeper_session_mgr::get_session(const service_app_info &in } return ans; } -} -} +} // namespace dist +} // namespace dsn From 06312ca291e56f06b38765e2d793c8820d6b8bda Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Thu, 11 Jul 2024 15:47:41 +0800 Subject: [PATCH 22/29] chore(CI): Add envs in build_debug_on_centos7 to ensure job can be ran on centOS 7 (#2065) https://github.com/actions/runner/issues/2906 --- .github/workflows/lint_and_test_cpp.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/lint_and_test_cpp.yaml b/.github/workflows/lint_and_test_cpp.yaml index ab41a10cbb..1fcd505c75 100644 --- a/.github/workflows/lint_and_test_cpp.yaml +++ b/.github/workflows/lint_and_test_cpp.yaml @@ -418,6 +418,8 @@ jobs: USE_JEMALLOC: OFF BUILD_OPTIONS: -t debug --test --separate_servers PACK_OPTIONS: --separate_servers + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true container: image: apache/pegasus:thirdparties-bin-centos7-${{ github.base_ref }} steps: From 3ed77d97b11af80ce6e628b7882cd464395d9bb2 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Fri, 12 Jul 2024 21:33:23 +0800 Subject: [PATCH 23/29] feat(FQDN): Add host_port structures for clients and bump actions/checkout from v3 to v4 (#2062) After All GitHub Actions run on Node20 instead of Node16 by default [1], this patch bumps the actions/checkout version from v3 to v4. When the related yaml files changed, the clients CIs are triggered, then they expose that the new introduced structure `host_port` in IDL as unknow because it is not generated by thrift automatically, we have to implement it manually. 
So this patch also simplily implements the `host_port` structure in python-client and nodejs-client. It should be mentioned that only the `build_debug_on_centos7` is still using `actions/checkout@v3`, see [2] for details. 1. https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/ 2. https://github.com/apache/incubator-pegasus/pull/2065 --- .github/workflows/build-push-env-docker.yml | 2 +- .github/workflows/labeler.yml | 2 +- .github/workflows/lint_and_test_admin-cli.yml | 4 +- .github/workflows/lint_and_test_collector.yml | 6 +- .github/workflows/lint_and_test_cpp.yaml | 22 +++--- .github/workflows/lint_and_test_go-client.yml | 8 +- .../workflows/lint_and_test_java-client.yml | 6 +- .github/workflows/lint_and_test_pegic.yml | 4 +- .../workflows/lint_and_test_scala-client.yml | 10 +-- .github/workflows/regular-build.yml | 8 +- .github/workflows/standardization_lint.yaml | 6 +- .github/workflows/test_nodejs-client.yml | 4 +- .github/workflows/test_python-client.yml | 4 +- .github/workflows/thirdparty-regular-push.yml | 10 +-- nodejs-client/src/dsn/dsn_types.js | 36 +++++++++ python-client/pypegasus/base/ttypes.py | 75 ++++++++++++++++++- python-client/pypegasus/pgclient.py | 22 +++--- 17 files changed, 166 insertions(+), 63 deletions(-) diff --git a/.github/workflows/build-push-env-docker.yml b/.github/workflows/build-push-env-docker.yml index 1bf695e7ed..cdb88e96b7 100644 --- a/.github/workflows/build-push-env-docker.yml +++ b/.github/workflows/build-push-env-docker.yml @@ -44,7 +44,7 @@ jobs: - centos7 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index eb13a2db7b..a340145473 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -27,7 +27,7 @@ jobs: name: Module Labeler runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Assign GitHub labels uses: actions/labeler@v4 with: diff --git a/.github/workflows/lint_and_test_admin-cli.yml b/.github/workflows/lint_and_test_admin-cli.yml index 788ba8ba3e..4fef441bf0 100644 --- a/.github/workflows/lint_and_test_admin-cli.yml +++ b/.github/workflows/lint_and_test_admin-cli.yml @@ -41,7 +41,7 @@ jobs: name: Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: diff --git a/.github/workflows/lint_and_test_collector.yml b/.github/workflows/lint_and_test_collector.yml index e21ede1a87..6fc8308c2f 100644 --- a/.github/workflows/lint_and_test_collector.yml +++ b/.github/workflows/lint_and_test_collector.yml @@ -43,7 +43,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v2 with: @@ -60,7 +60,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 - name: Set up Go @@ -80,7 +80,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 - name: Set up Go diff --git a/.github/workflows/lint_and_test_cpp.yaml b/.github/workflows/lint_and_test_cpp.yaml index 1fcd505c75..799263566a 100644 --- a/.github/workflows/lint_and_test_cpp.yaml +++ b/.github/workflows/lint_and_test_cpp.yaml @@ -53,7 +53,7 @@ jobs: name: Lint runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: clang-format run: ./scripts/run-clang-format.py --clang-format-executable clang-format-14 -e ./src/shell/linenoise -e ./src/shell/sds -e 
./thirdparty -r . @@ -66,7 +66,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Free Disk Space (Ubuntu) run: | .github/workflows/free_disk_space.sh @@ -97,7 +97,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - name: Clone code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" - name: Build Pegasus @@ -162,7 +162,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} options: --cap-add=SYS_PTRACE steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: "./.github/actions/download_artifact" - name: Run server tests @@ -179,7 +179,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" - name: Build Pegasus @@ -246,7 +246,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} options: --cap-add=SYS_PTRACE steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: "./.github/actions/download_artifact" - name: Run server tests @@ -266,7 +266,7 @@ jobs: # container: # image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} # steps: -# - uses: actions/checkout@v3 +# - uses: actions/checkout@v4 # - name: Rebuild thirdparty if needed # uses: "./.github/actions/rebuild_thirdparty_if_needed" # - name: Build Pegasus @@ -329,7 +329,7 @@ jobs: # image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} # options: --cap-add=SYS_PTRACE # steps: -# - uses: actions/checkout@v3 +# - uses: actions/checkout@v4 # - name: Download 
artifact # uses: "./.github/actions/download_artifact" # - name: Run server tests @@ -346,7 +346,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-jemallc-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" # TODO(yingchun): Append "-m dsn_utils_tests" to the command if not needed to pack server or tools, for example, the dependencies are static linked. @@ -370,7 +370,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-jemallc-ubuntu2204-${{ github.base_ref }} options: --cap-add=SYS_PTRACE steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: "./.github/actions/download_artifact" - name: Run server tests @@ -386,7 +386,7 @@ jobs: # Preinstalled softwares: https://github.com/actions/virtual-environments/blob/main/images/macos/macos-12-Readme.md brew install ccache brew install openssl@1.1 - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup cache uses: actions/cache@v3 with: diff --git a/.github/workflows/lint_and_test_go-client.yml b/.github/workflows/lint_and_test_go-client.yml index 4e02501f14..691121b0f1 100644 --- a/.github/workflows/lint_and_test_go-client.yml +++ b/.github/workflows/lint_and_test_go-client.yml @@ -45,7 +45,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v2 with: @@ -66,7 +66,7 @@ jobs: - name: Install thrift run: sudo apt-get install -y thrift-compiler - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v2 with: @@ -91,7 +91,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: 
"./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -118,7 +118,7 @@ jobs: make install cd - && rm -rf thrift-${THRIFT_VERSION} v${THRIFT_VERSION}.tar.gz - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: diff --git a/.github/workflows/lint_and_test_java-client.yml b/.github/workflows/lint_and_test_java-client.yml index b71d1e021e..6d172bd910 100644 --- a/.github/workflows/lint_and_test_java-client.yml +++ b/.github/workflows/lint_and_test_java-client.yml @@ -39,7 +39,7 @@ jobs: name: Spotless runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-java@v1 with: java-version: 8 @@ -58,7 +58,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" - name: Build Pegasus @@ -79,7 +79,7 @@ jobs: matrix: java: [ '8', '11'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/cache@v2 with: path: ~/.m2/repository diff --git a/.github/workflows/lint_and_test_pegic.yml b/.github/workflows/lint_and_test_pegic.yml index 3788b44121..da94f9b4a8 100644 --- a/.github/workflows/lint_and_test_pegic.yml +++ b/.github/workflows/lint_and_test_pegic.yml @@ -41,7 +41,7 @@ jobs: name: Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: @@ -53,7 +53,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: diff --git a/.github/workflows/lint_and_test_scala-client.yml b/.github/workflows/lint_and_test_scala-client.yml index b261902e73..29925d051b 100644 --- 
a/.github/workflows/lint_and_test_scala-client.yml +++ b/.github/workflows/lint_and_test_scala-client.yml @@ -39,7 +39,7 @@ jobs: name: Format runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-java@v1 with: java-version: 8 @@ -57,7 +57,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -73,7 +73,7 @@ jobs: matrix: java: [ '8', '11'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/cache@v2 with: path: ~/.m2/repository @@ -89,9 +89,9 @@ jobs: source /github/home/.sdkman/bin/sdkman-init.sh sdk install sbt sbt -V - - name: Recompile thrift + - name: Download thrift working-directory: ./java-client/scripts - run: ./recompile_thrift.sh + run: ./download_thrift.sh - name: Build Java client working-directory: ./java-client run: | diff --git a/.github/workflows/regular-build.yml b/.github/workflows/regular-build.yml index d4c3fb1820..4bc36a32cb 100644 --- a/.github/workflows/regular-build.yml +++ b/.github/workflows/regular-build.yml @@ -39,7 +39,7 @@ jobs: name: Lint Cpp runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: clang-format run: ./scripts/run-clang-format.py --clang-format-executable clang-format-14 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . @@ -70,7 +70,7 @@ jobs: working-directory: /root/incubator-pegasus steps: - name: Clone Apache Pegasus Source - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Unpack prebuilt third-parties uses: "./.github/actions/unpack_prebuilt_thirdparties" - name: Build Pegasus @@ -85,7 +85,7 @@ jobs: # to generate code as well. 
The thrift-compiler version on ubuntu-20.04 is 0.13.0 run: sudo apt-get install -y thrift-compiler - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: @@ -128,7 +128,7 @@ jobs: key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-maven- - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-java@v1 with: java-version: ${{ matrix.java }} diff --git a/.github/workflows/standardization_lint.yaml b/.github/workflows/standardization_lint.yaml index 66bcd16276..a88f13e66a 100644 --- a/.github/workflows/standardization_lint.yaml +++ b/.github/workflows/standardization_lint.yaml @@ -47,14 +47,14 @@ jobs: name: Check Markdown links runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 dockerfile_linter: name: Lint Dockerfile runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: hadolint/hadolint-action@v3.1.0 with: recursive: true @@ -65,7 +65,7 @@ jobs: runs-on: ubuntu-latest steps: - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )" - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check License Header uses: apache/skywalking-eyes@main env: diff --git a/.github/workflows/test_nodejs-client.yml b/.github/workflows/test_nodejs-client.yml index 371263e80e..e662e2458f 100644 --- a/.github/workflows/test_nodejs-client.yml +++ b/.github/workflows/test_nodejs-client.yml @@ -46,7 +46,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -58,7 +58,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ 
github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install nodejs uses: actions/setup-node@v3 with: diff --git a/.github/workflows/test_python-client.yml b/.github/workflows/test_python-client.yml index c1eaaa7b70..f846be60f0 100644 --- a/.github/workflows/test_python-client.yml +++ b/.github/workflows/test_python-client.yml @@ -46,7 +46,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -58,7 +58,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: '3.11' diff --git a/.github/workflows/thirdparty-regular-push.yml b/.github/workflows/thirdparty-regular-push.yml index 59d0430ea2..a4ea746653 100644 --- a/.github/workflows/thirdparty-regular-push.yml +++ b/.github/workflows/thirdparty-regular-push.yml @@ -43,7 +43,7 @@ jobs: build_push_src_docker_images: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx @@ -78,7 +78,7 @@ jobs: - ubuntu2204 - centos7 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx @@ -114,7 +114,7 @@ jobs: - ubuntu2204 - centos7 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx @@ -148,7 +148,7 @@ jobs: osversion: - ubuntu2204 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker 
Buildx @@ -184,7 +184,7 @@ jobs: osversion: - ubuntu2204 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx diff --git a/nodejs-client/src/dsn/dsn_types.js b/nodejs-client/src/dsn/dsn_types.js index 46eb508ffd..4f2ef0a6f0 100644 --- a/nodejs-client/src/dsn/dsn_types.js +++ b/nodejs-client/src/dsn/dsn_types.js @@ -256,6 +256,41 @@ rpc_address.prototype.equals = function(other){ return false; }; +// TODO(yingchun): host_port is now just a place holder and not well implemented, need improve it +var host_port_type = { + HOST_TYPE_INVALID : 0, + HOST_TYPE_IPV4 : 1, + HOST_TYPE_GROUP : 2 +}; + +var host_port = function(args) { + this.host = null; + this.port = 0; + this.type = host_port_type.HOST_TYPE_INVALID; + if(args && args.host){ + this.host = args.host; + } + if(args && args.port){ + this.port = args.port; + } + if(args && args.type){ + this.type = args.type; + } +}; + +host_port.prototype = {}; +host_port.prototype.read = function(input){ + this.host = input.readBinary(); + this.port = input.readI16(); + this.type = input.readByte(); +}; + +host_port.prototype.write = function(output){ + output.writeBinary(this.host); + output.writeI16(this.port); + output.writeByte(this.type); +}; + //value, calculate by app_id and partition index var gpid = function(args) { this.value = 0; @@ -298,6 +333,7 @@ module.exports = { error_code : error_code, task_code : task_code, rpc_address : rpc_address, + host_port : host_port, gpid : gpid, }; diff --git a/python-client/pypegasus/base/ttypes.py b/python-client/pypegasus/base/ttypes.py index def909a0d6..6ed2e88dee 100644 --- a/python-client/pypegasus/base/ttypes.py +++ b/python-client/pypegasus/base/ttypes.py @@ -265,13 +265,13 @@ def __init__(self): def is_valid(self): return self.address == 0 - def from_string(self, host_port): - host, port = host_port.split(':') - self.address = socket.ntohl(struct.unpack("I", 
socket.inet_aton(host))[0]) + def from_string(self, ip_port): + ip, port = ip_port.split(':') + self.address = socket.ntohl(struct.unpack("I", socket.inet_aton(ip))[0]) self.address = (self.address << 32) + (int(port) << 16) + 1 # TODO why + 1? return True - def to_host_port(self): + def to_ip_port(self): s = [] address = self.address port = (address >> 16) & 0xFFFF @@ -305,6 +305,73 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + +# TODO(yingchun): host_port is now just a place holder and not well implemented, need improve it +class host_port_types(Enum): + kHostTypeInvalid = 0 + kHostTypeIpv4 = 1 + kHostTypeGroup = 2 + + +class host_port: + + thrift_spec = ( + (1, TType.STRING, 'host', None, None, ), # 1 + (2, TType.I16, 'port', None, None, ), # 2 + (3, TType.I08, 'type', None, None, ), # 3 + ) + + def __init__(self): + self.host = "" + self.port = 0 + self.type = host_port_types.kHostTypeInvalid + + def is_valid(self): + return self.type != host_port_types.kHostTypeInvalid + + def from_string(self, host_port_str): + host_and_port = host_port_str.split(':') + if len(host_and_port) != 2: + return False + self.host = host_and_port[0] + self.port = int(host_and_port[1]) + # TODO(yingchun): Maybe it's not true, improve it + self.type = host_port_types.kHostTypeIpv4 + return True + + def to_host_port(self): + if not self.is_valid(): + return None, None + return self.host, self.port + + def read(self, iprot): + self.host = iprot.readString() + self.port = iprot.readI16() + self.type = iprot.readByte() + + def write(self, oprot): + oprot.writeString(self.host) + oprot.writeI16(self.port) + oprot.writeByte(self.type) + + def validate(self): + return + + def __hash__(self): + return hash(self.host) ^ self.port ^ self.type + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return other.__class__.__name__ 
== "host_port" and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + class gpid: thrift_spec = ( diff --git a/python-client/pypegasus/pgclient.py b/python-client/pypegasus/pgclient.py index 457e99c8a1..2438483ba0 100644 --- a/python-client/pypegasus/pgclient.py +++ b/python-client/pypegasus/pgclient.py @@ -249,12 +249,12 @@ def __init__(self, table_name, timeout): def add_meta_server(self, meta_addr): rpc_addr = rpc_address() if rpc_addr.from_string(meta_addr): - host_port_list = meta_addr.split(':') - if not len(host_port_list) == 2: + ip_port = meta_addr.split(':') + if not len(ip_port) == 2: return False - host, port = host_port_list[0], int(host_port_list[1]) - self.addr_list.append((host, port)) + ip, port = ip_port[0], int(ip_port[1]) + self.addr_list.append((ip, port)) return True else: @@ -281,9 +281,9 @@ def got_results(self, res): def query(self): ds = [] - for (host, port) in self.addr_list: + for (ip, port) in self.addr_list: rpc_addr = rpc_address() - rpc_addr.from_string(host + ':' + str(port)) + rpc_addr.from_string(ip + ':' + str(port)) if rpc_addr in self.session_dict: self.session_dict[rpc_addr].close() @@ -294,7 +294,7 @@ def query(self): None, self, self.timeout - ).connectTCP(host, port, self.timeout) + ).connectTCP(ip, port, self.timeout) d.addCallbacks(self.got_conn, self.got_err) d.addCallbacks(self.query_one, self.got_err) ds.append(d) @@ -345,7 +345,7 @@ def update_cfg(self, resp): if rpc_addr in connected_rpc_addrs or rpc_addr.address == 0: continue - host, port = rpc_addr.to_host_port() + ip, port = rpc_addr.to_ip_port() if rpc_addr in self.session_dict: self.session_dict[rpc_addr].close() @@ -356,7 +356,7 @@ def update_cfg(self, resp): None, self.container, self.timeout - ).connectTCP(host, port, self.timeout) + ).connectTCP(ip, port, self.timeout) connected_rpc_addrs[rpc_addr] = 1 d.addCallbacks(self.got_conn, self.got_err) ds.append(d) @@ -642,8 +642,8 @@ def __init__(self, meta_addrs=None, 
table_name='', self.table = Table(table_name, self, timeout) self.meta_session_manager = MetaSessionManager(table_name, timeout) if isinstance(meta_addrs, list): - for host_port in meta_addrs: - self.meta_session_manager.add_meta_server(host_port) + for meta_addr in meta_addrs: + self.meta_session_manager.add_meta_server(meta_addr) PegasusHash.populate_table() self.timeout_times = 0 self.update_partition = False From 1d0fdf5d9d919df2e50c7fed2bad6bbe01191100 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Fri, 12 Jul 2024 21:33:57 +0800 Subject: [PATCH 24/29] chore(CI): Force CI to run on node16 (#2070) The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so we need to force the node version to 16 when running CI. See more details: https://github.com/actions/checkout/issues/1809 --- .github/workflows/build-push-env-docker.yml | 6 ++++++ .github/workflows/regular-build.yml | 6 ++++++ .github/workflows/thirdparty-regular-push.yml | 12 ++++++++++++ 3 files changed, 24 insertions(+) diff --git a/.github/workflows/build-push-env-docker.yml b/.github/workflows/build-push-env-docker.yml index cdb88e96b7..3f9b4f7101 100644 --- a/.github/workflows/build-push-env-docker.yml +++ b/.github/workflows/build-push-env-docker.yml @@ -34,6 +34,12 @@ on: jobs: build_compilation_env_docker_images: runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. 
+ # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true strategy: fail-fast: false matrix: diff --git a/.github/workflows/regular-build.yml b/.github/workflows/regular-build.yml index 4bc36a32cb..9beb7cb342 100644 --- a/.github/workflows/regular-build.yml +++ b/.github/workflows/regular-build.yml @@ -46,6 +46,12 @@ jobs: build_cpp: name: Build Cpp runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. + # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true strategy: fail-fast: false matrix: diff --git a/.github/workflows/thirdparty-regular-push.yml b/.github/workflows/thirdparty-regular-push.yml index a4ea746653..795f4bdb52 100644 --- a/.github/workflows/thirdparty-regular-push.yml +++ b/.github/workflows/thirdparty-regular-push.yml @@ -68,6 +68,12 @@ jobs: build_push_bin_docker_images: runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. + # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true needs: build_push_src_docker_images strategy: fail-fast: false @@ -104,6 +110,12 @@ jobs: build_push_bin_jemalloc_docker_images: runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. 
+ # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true needs: build_push_src_docker_images strategy: fail-fast: false From 602b80b1c772e4f74c566d2446d65c0613721714 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Mon, 15 Jul 2024 11:29:28 +0800 Subject: [PATCH 25/29] chore(CI): Force to use actions/checkout@v3 when OS maybe CentOS 7 or Ubuntu 18.04 (#2072) To solve problem in GitHub actions: ``` Run actions/checkout@v4 /usr/bin/docker exec e63787d641b0351b6c65ad895ccd98db84d6796141ad087c4952bc7f68b03753 sh -c "cat /etc/*release | grep ^ID" /__e/node[20](https://github.com/apache/incubator-pegasus/actions/runs/9908766114/job/27375256228#step:3:21)/bin/node: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.28' not found (required by /__e/node20/bin/node) ``` --- .github/workflows/build-push-env-docker.yml | 4 +++- .github/workflows/regular-build.yml | 4 +++- .github/workflows/thirdparty-regular-push.yml | 8 ++++++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-push-env-docker.yml b/.github/workflows/build-push-env-docker.yml index 3f9b4f7101..f72af174a6 100644 --- a/.github/workflows/build-push-env-docker.yml +++ b/.github/workflows/build-push-env-docker.yml @@ -50,7 +50,9 @@ jobs: - centos7 steps: - name: Checkout - uses: actions/checkout@v4 + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. 
+ uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx diff --git a/.github/workflows/regular-build.yml b/.github/workflows/regular-build.yml index 9beb7cb342..68acc5e7e8 100644 --- a/.github/workflows/regular-build.yml +++ b/.github/workflows/regular-build.yml @@ -76,7 +76,9 @@ jobs: working-directory: /root/incubator-pegasus steps: - name: Clone Apache Pegasus Source - uses: actions/checkout@v4 + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. + uses: actions/checkout@v3 - name: Unpack prebuilt third-parties uses: "./.github/actions/unpack_prebuilt_thirdparties" - name: Build Pegasus diff --git a/.github/workflows/thirdparty-regular-push.yml b/.github/workflows/thirdparty-regular-push.yml index 795f4bdb52..ce8f5d5dc9 100644 --- a/.github/workflows/thirdparty-regular-push.yml +++ b/.github/workflows/thirdparty-regular-push.yml @@ -84,7 +84,9 @@ jobs: - ubuntu2204 - centos7 steps: - - uses: actions/checkout@v4 + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. + - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx @@ -126,7 +128,9 @@ jobs: - ubuntu2204 - centos7 steps: - - uses: actions/checkout@v4 + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. + - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx From c10baa95e59eb99213980c4f0c2666cfcf2586a5 Mon Sep 17 00:00:00 2001 From: "shalk(xiao kun)" Date: Mon, 15 Jul 2024 12:00:30 +0800 Subject: [PATCH 26/29] fix(CI): fix java regular workflow (#2074) Fix https://github.com/apache/incubator-pegasus/issues/2073. 
--- .github/workflows/regular-build.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/regular-build.yml b/.github/workflows/regular-build.yml index 68acc5e7e8..04468e4d39 100644 --- a/.github/workflows/regular-build.yml +++ b/.github/workflows/regular-build.yml @@ -140,9 +140,11 @@ jobs: - uses: actions/setup-java@v1 with: java-version: ${{ matrix.java }} + - name: Download thrift + working-directory: ./java-client/scripts + run: ./download_thrift.sh - name: Build working-directory: ./java-client run: | - cd scripts && bash recompile_thrift.sh && cd - mvn spotless:apply mvn clean package -DskipTests From 8e09a457755fc02ea509750676fa663cbd1b78d0 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Mon, 15 Jul 2024 15:02:05 +0800 Subject: [PATCH 27/29] refactor(FQDN): update some logs in cpp-shell CLI (#2064) --- src/shell/command_utils.cpp | 20 +++++++++++--------- src/shell/command_utils.h | 2 +- src/shell/commands/detect_hotkey.cpp | 6 +++--- src/shell/commands/node_management.cpp | 2 +- src/shell/commands/recovery.cpp | 4 ++-- src/shell/main.cpp | 8 ++++---- 6 files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/shell/command_utils.cpp b/src/shell/command_utils.cpp index fb3be3bced..96e5235a0b 100644 --- a/src/shell/command_utils.cpp +++ b/src/shell/command_utils.cpp @@ -26,30 +26,32 @@ #include "utils/error_code.h" bool validate_ip(shell_context *sc, - const std::string &ip_str, + const std::string &target_hp_str, dsn::host_port &target_hp, std::string &err_info) { - target_hp = dsn::host_port::from_string(ip_str); + target_hp = dsn::host_port::from_string(target_hp_str); if (!target_hp) { - err_info = fmt::format("invalid ip:port={}, can't transform it into host_port", ip_str); + err_info = + fmt::format("invalid host:port '{}', can't transform it into host_port", target_hp_str); return false; } - std::map nodes; - auto error = sc->ddl_client->list_nodes(dsn::replication::node_status::NS_INVALID, nodes); + 
std::map ns_by_nodes; + const auto error = + sc->ddl_client->list_nodes(dsn::replication::node_status::NS_INVALID, ns_by_nodes); if (error != dsn::ERR_OK) { - err_info = fmt::format("list nodes failed, error={}", error.to_string()); + err_info = fmt::format("list nodes failed, error={}", error); return false; } - for (const auto &node : nodes) { - if (target_hp == node.first) { + for (const auto &[node, _] : ns_by_nodes) { + if (target_hp == node) { return true; } } - err_info = fmt::format("invalid ip:port={}, can't find it in the cluster", ip_str); + err_info = fmt::format("invalid host:port '{}', can't find it in the cluster", target_hp_str); return false; } diff --git a/src/shell/command_utils.h b/src/shell/command_utils.h index 5e1095d9a1..e076c3d32d 100644 --- a/src/shell/command_utils.h +++ b/src/shell/command_utils.h @@ -66,7 +66,7 @@ inline bool validate_cmd(const argh::parser &cmd, } bool validate_ip(shell_context *sc, - const std::string &ip_str, + const std::string &host_port_str, /*out*/ dsn::host_port &target_hp, /*out*/ std::string &err_info); diff --git a/src/shell/commands/detect_hotkey.cpp b/src/shell/commands/detect_hotkey.cpp index c78f906c67..05acc7cd04 100644 --- a/src/shell/commands/detect_hotkey.cpp +++ b/src/shell/commands/detect_hotkey.cpp @@ -102,8 +102,8 @@ bool detect_hotkey(command_executor *e, shell_context *sc, arguments args) dsn::host_port target_hp; std::string err_info; - std::string ip_str = cmd({"-d", "--address"}).str(); - if (!validate_ip(sc, ip_str, target_hp, err_info)) { + const auto &target_hp_str = cmd({"-d", "--address"}).str(); + if (!validate_ip(sc, target_hp_str, target_hp, err_info)) { fmt::print(stderr, "{}\n", err_info); return false; } @@ -145,7 +145,7 @@ bool detect_hotkey(command_executor *e, shell_context *sc, arguments args) app_id, partition_index, hotkey_type, - ip_str); + target_hp_str); break; case dsn::replication::detect_action::STOP: fmt::print("Hotkey detection is stopped now\n"); diff --git 
a/src/shell/commands/node_management.cpp b/src/shell/commands/node_management.cpp index abf4c7f750..a18b9ef8d6 100644 --- a/src/shell/commands/node_management.cpp +++ b/src/shell/commands/node_management.cpp @@ -638,7 +638,7 @@ bool remote_command(command_executor *e, shell_context *sc, arguments args) for (std::string &token : tokens) { const auto node = dsn::host_port::from_string(token); if (!node) { - fprintf(stderr, "parse %s as a ip:port node failed\n", token.c_str()); + fprintf(stderr, "parse %s as a host:port node failed\n", token.c_str()); return true; } node_list.emplace_back("user-specified", node); diff --git a/src/shell/commands/recovery.cpp b/src/shell/commands/recovery.cpp index b7c8ce1063..dfd82122b9 100644 --- a/src/shell/commands/recovery.cpp +++ b/src/shell/commands/recovery.cpp @@ -118,7 +118,7 @@ bool recover(command_executor *e, shell_context *sc, arguments args) for (std::string &token : tokens) { const auto node = dsn::host_port::from_string(token); if (!node) { - fprintf(stderr, "parse %s as a ip:port node failed\n", token.c_str()); + fprintf(stderr, "parse %s as a host:port node failed\n", token.c_str()); return true; } node_list.push_back(node); @@ -140,7 +140,7 @@ bool recover(command_executor *e, shell_context *sc, arguments args) const auto node = dsn::host_port::from_string(str); if (!node) { fprintf(stderr, - "parse %s at file %s line %d as ip:port failed\n", + "parse %s at file %s line %d as host:port failed\n", str.c_str(), node_list_file.c_str(), lineno); diff --git a/src/shell/main.cpp b/src/shell/main.cpp index e8533d0d8d..a6df779fe9 100644 --- a/src/shell/main.cpp +++ b/src/shell/main.cpp @@ -372,20 +372,20 @@ static command_executor commands[] = { { "remote_command", "send remote command to servers", - "[-t all|meta-server|replica-server] [-r|--resolve_ip] [-l ip:port,ip:port...]" + "[-t all|meta-server|replica-server] [-r|--resolve_ip] [-l host:port,host:port...]" " [arguments...]", remote_command, }, { "server_info", "get 
info of servers", - "[-t all|meta-server|replica-server] [-l ip:port,ip:port...] [-r|--resolve_ip]", + "[-t all|meta-server|replica-server] [-l host:port,host:port...] [-r|--resolve_ip]", server_info, }, { "server_stat", "get stat of servers", - "[-t all|meta-server|replica-server] [-l ip:port,ip:port...] [-r|--resolve_ip]", + "[-t all|meta-server|replica-server] [-l host:port,host:port...] [-r|--resolve_ip]", server_stat, }, { @@ -398,7 +398,7 @@ static command_executor commands[] = { { "flush_log", "flush log of servers", - "[-t all|meta-server|replica-server] [-l ip:port,ip:port...][-r|--resolve_ip]", + "[-t all|meta-server|replica-server] [-l host:port,host:port...][-r|--resolve_ip]", flush_log, }, { From 5b70f5b8f1d2b97c62fb73cc84de6581e05bcbc5 Mon Sep 17 00:00:00 2001 From: Yingchun Lai Date: Mon, 15 Jul 2024 19:50:10 +0800 Subject: [PATCH 28/29] fix(Dockerfile): Fix the yum install failure caused by its EOL (#2075) Resolve https://github.com/apache/incubator-pegasus/issues/2071. --- docker/pegasus-build-env/centos7/Dockerfile | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/docker/pegasus-build-env/centos7/Dockerfile b/docker/pegasus-build-env/centos7/Dockerfile index 790268202e..12db0544f8 100644 --- a/docker/pegasus-build-env/centos7/Dockerfile +++ b/docker/pegasus-build-env/centos7/Dockerfile @@ -19,10 +19,16 @@ FROM centos:7.5.1804 LABEL maintainer=wutao -RUN yum -y install centos-release-scl \ +RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo && \ + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo && \ + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo && \ + yum -y install centos-release-scl \ scl-utils \ - epel-release; \ - yum -y install devtoolset-7-gcc \ + epel-release && \ + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo && \ + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo && \ + sed -i 
s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo && \ + yum -y install devtoolset-7-gcc \ devtoolset-7-gcc-c++ \ java-1.8.0-openjdk-devel.x86_64 \ python3 \ @@ -48,10 +54,10 @@ RUN yum -y install centos-release-scl \ flex \ krb5-devel \ cyrus-sasl-devel \ - patch; \ - yum -y install ca-certificates; \ - yum clean all; \ - rm -rf /var/cache/yum; + patch && \ + yum -y install ca-certificates && \ + yum clean all && \ + rm -rf /var/cache/yum; ENV PATH="/opt/rh/devtoolset-7/root/bin/:${PATH}" From a6f777f79f406a8cfb317759100c3d10e88b9747 Mon Sep 17 00:00:00 2001 From: lu_peng_fan Date: Thu, 30 May 2024 09:54:23 +0800 Subject: [PATCH 29/29] fix(bulkload) bulkload downloading may cause many node coredump Resolve https://github.com/apache/incubator-pegasus/issues/2006 --- src/replica/bulk_load/replica_bulk_loader.cpp | 39 +++++++++++++++++-- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/src/replica/bulk_load/replica_bulk_loader.cpp b/src/replica/bulk_load/replica_bulk_loader.cpp index a3c27e99e4..b071bb9496 100644 --- a/src/replica/bulk_load/replica_bulk_loader.cpp +++ b/src/replica/bulk_load/replica_bulk_loader.cpp @@ -533,7 +533,30 @@ void replica_bulk_loader::download_sst_file(const std::string &remote_dir, int32_t file_index, dist::block_service::block_filesystem *fs) { - const file_meta &f_meta = _metadata.files[file_index]; + if (_status != bulk_load_status::BLS_DOWNLOADING) { + LOG_WARNING_PREFIX("Cancel download_sst_file task, because bulk_load local_status is {}. 
" + "local_dir: {} , file_index is {}.", + enum_to_string(_status), + local_dir, + file_index); + return; + } + file_meta f_meta; + bool get_f_meta = true; + { + zauto_read_lock l(_lock); + if (file_index < _metadata.files.size()) { + f_meta = _metadata.files[file_index]; + } else { + get_f_meta = false; + } + } + if (!get_f_meta) { + LOG_WARNING_PREFIX("sst file index {} exceeds number of bulkload sst files, Cancel " + "download_sst_file task.", + file_index); + return; + } uint64_t f_size = 0; std::string f_md5; error_code ec = _stub->_block_service_manager.download_file( @@ -589,9 +612,17 @@ void replica_bulk_loader::download_sst_file(const std::string &remote_dir, METRIC_VAR_INCREMENT_BY(bulk_load_download_file_bytes, f_size); // download next file - if (file_index + 1 < _metadata.files.size()) { - const file_meta &next_f_meta = _metadata.files[file_index + 1]; - _download_files_task[next_f_meta.name] = + get_f_meta = true; + { + zauto_read_lock l(_lock); + if (file_index + 1 < _metadata.files.size()) { + f_meta = _metadata.files[file_index + 1]; + } else { + get_f_meta = false; + } + } + if (get_f_meta) { + _download_files_task[f_meta.name] = tasking::enqueue(LPC_BACKGROUND_BULK_LOAD, tracker(), std::bind(&replica_bulk_loader::download_sst_file,