diff --git a/.github/workflows/python_build.yml b/.github/workflows/python_build.yml index cbee5167b..e68b1d7aa 100644 --- a/.github/workflows/python_build.yml +++ b/.github/workflows/python_build.yml @@ -15,7 +15,7 @@ jobs: - run: python -m pip install flake8 - name: flake8 run: | - flake8 --ignore=E501,W503 --exclude python/rocketmq/protocol python + flake8 --ignore=E501,W503 --exclude python/rocketmq/grpc_protocol python isort: runs-on: ubuntu-latest steps: @@ -28,7 +28,7 @@ jobs: - run: python -m pip install isort - name: isort run: | - isort --check --diff --skip python/rocketmq/protocol python + isort --check --diff --skip python/rocketmq/grpc_protocol python black: runs-on: ubuntu-latest steps: diff --git a/README-CN.md b/README-CN.md index 08dfc6d74..25131b36d 100644 --- a/README-CN.md +++ b/README-CN.md @@ -19,11 +19,11 @@ | 特性 | Java | C/C++ | C# | Golang | Rust | Python | Node.js | PHP | | ---------------------------------------------- | :---: | :---: | :---: | :----: | :---: | :----: | :-----: | :---: | -| Producer with standard messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Producer with FIFO messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Producer with timed/delay messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Producer with transactional messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Simple consumer | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | +| Producer with standard messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Producer with FIFO messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Producer with timed/delay messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Producer with transactional messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Simple consumer | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | | Push consumer with concurrent message listener | ✅ | ✅ | 🚧 | 🚧 | ✅ | 🚧 | 🚧 | 🚧 | | Push consumer with FIFO message listener | ✅ | ✅ | 🚧 | 🚧 | ✅ | 🚧 | 🚧 | 🚧 | diff --git a/README.md b/README.md index a9edcc848..0772a1726 100644 --- a/README.md +++ b/README.md @@ -19,11 +19,11 @@ Provide cloud-native 
and robust solutions for Java, C++, C#, Golang, Rust and al | Feature | Java | C/C++ | C# | Golang | Rust | Python | Node.js | PHP | | ---------------------------------------------- | :---: | :---: | :---: | :----: | :---: | :----: | :-----: | :---: | -| Producer with standard messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Producer with FIFO messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Producer with timed/delay messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Producer with transactional messages | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | -| Simple consumer | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | ✅ | 🚧 | +| Producer with standard messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Producer with FIFO messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Producer with timed/delay messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Producer with transactional messages | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | +| Simple consumer | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | 🚧 | | Push consumer with concurrent message listener | ✅ | ✅ | 🚧 | 🚧 | ✅ | 🚧 | 🚧 | 🚧 | | Push consumer with FIFO message listener | ✅ | ✅ | 🚧 | 🚧 | ✅ | 🚧 | 🚧 | 🚧 | diff --git a/python/LICENSE b/python/LICENSE new file mode 100644 index 000000000..989e2c59e --- /dev/null +++ b/python/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/python/NOTICE b/python/NOTICE new file mode 100644 index 000000000..a8b92cb79 --- /dev/null +++ b/python/NOTICE @@ -0,0 +1,28 @@ +========================================================================= +== Apache Notice == +========================================================================= + +Apache RocketMQ +Copyright 2016-2022 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). +========================================================================= +== Protocol Buffers Notice == +========================================================================= + +This product includes software developed by the Protocol Buffers +project (https://github.com/protocolbuffers/protobuf). + +========================================================================= +== gRPC Notice == +========================================================================= + +This product includes software developed by grpc + (https://github.com/grpc/grpc). +========================================================================= +== Opentelemetry Notice == +========================================================================= + +This product includes software developed by Opentelemetry + (https://github.com/open-telemetry). 
diff --git a/python/README-CN.md b/python/README-CN.md deleted file mode 100644 index 3832808b9..000000000 --- a/python/README-CN.md +++ /dev/null @@ -1,43 +0,0 @@ -# Python 客户端 - -[English](README.md) | 简体中文 | [RocketMQ 官网](https://rocketmq.apache.org/) - -## 概述 - -不同于[基于 RemotingCommand 协议的版本](https://github.com/apache/rocketmq/tree/develop/client),当前的客户端基于 RocektMQ 5.0 存算分离架构进行设计开发,是 RocketMQ 社区目前推荐的接入方式,也是未来客户端演进的主要方向。 - -在开始客户端的部分之前,所需的一些前期工作(或者参照[这里](https://rocketmq.apache.org/zh/docs/quickStart/02quickstart/)): - -1. 准备 Python 环境。Python 3.7 是确保客户端运行的最小版本,Python 3.10 是推荐版本; -2. 部署 namesrv,broker 以及 [proxy](https://github.com/apache/rocketmq/tree/develop/proxy) 组件。 - -## 快速开始 - -我们使用 Poetry 作为依赖管理和发布的工具。你可以在 Poetry 的[官方网站]((https://python-poetry.org/))了解到关于它的更多信息。这里是一些在开发阶段你会使用到的 Poetry 命令: - -```shell -# 创建并激活 python3 的虚拟环境 -poetry env use python3 -# 自动安装工程相关的依赖 -poetry install -# 进入虚拟环境中的 shell -poetry shell -``` - -我们使用 pytest 来作为当前项目的测试框架,你可以通过直接执行 `pytest` 命令来运行所有的测试。 - -## 发布步骤 - -我们使用 PyPi 来帮助用户更好地在自己的工程中引入并使用客户端。为了将客户端发布到 PyPi,可以执行以下命令: - -```shell -# 构建包 -poetry build -# 将包发布到远程仓库 -poetry publish -u username -p password -``` - -## 目前进展 - -* 协议层代码生成完毕。 -* `rpc_client.py` 完成部分。 diff --git a/python/README.md b/python/README.md deleted file mode 100644 index 431fd439c..000000000 --- a/python/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# The Python Implementation of Apache RocketMQ Client - -English | [简体中文](README-CN.md) | [RocketMQ Website](https://rocketmq.apache.org/) - -## Overview - -Here is the Python implementation of the client for [Apache RocketMQ](https://rocketmq.apache.org/). Different from the [remoting-based client](https://github.com/apache/rocketmq/tree/develop/client), the current implementation is based on separating architecture for computing and storage, which is the more recommended way to access the RocketMQ service. 
- -Here are some preparations you may need to know (or refer to [here](https://rocketmq.apache.org/docs/quickStart/02quickstart/)). - -1. Python 3.7 is the minimum version required, Python 3.10 is the recommended version. -2. Setup namesrv, broker, and [proxy](https://github.com/apache/rocketmq/tree/develop/proxy). - -## Getting Started - -We are using Poetry as the dependency management & publishing tool. You can find out more details about Poetry from its [website](https://python-poetry.org/). Here is the related command of Poetry you may use for development. - -```shell -# Create a virtual environment and activate it. -poetry env use python3 -# Installs the project dependencies. -poetry install -# Spawns a shell within the virtual environment. -poetry shell -``` - -We use pytest as the testing framework for the current project, and you can execute `pytest` directly to run all tests. - -## Publishing Steps - -We utilize PyPI to help users easily introduce and use the Python client in their projects. To publish a package to PyPI, please register an account in advance, then execute the following command. - -```shell -# Builds a package, as a tarball and a wheel by default. -poetry build -# Publishes a package to a remote repository. -poetry publish -u username -p password -``` - -## Current Progress - -* Protocol layer code generation is completed. -* Partial completion of `rpc_client.py`. diff --git a/python/example/async_producer_example.py b/python/example/async_producer_example.py new file mode 100644 index 000000000..2c0b7948b --- /dev/null +++ b/python/example/async_producer_example.py @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rocketmq import ClientConfiguration, Credentials, Message, Producer + + +def handle_send_result(result_future): + try: + res = result_future.result() + print(f"async send message success, {res}") + except Exception as exception: + print(f"async send message failed, raise exception: {exception}") + + +if __name__ == '__main__': + endpoints = "endpoints" + credentials = Credentials("ak", "sk") + config = ClientConfiguration(endpoints, credentials) + topic = "topic" + try: + producer = Producer(config, (topic,)) + producer.startup() + msg = Message() + msg.topic = topic + msg.body = "hello, rocketmq.".encode('utf-8') + msg.tag = "rocketmq-send-message" + msg.keys = "send_async" + msg.add_property("send", "async") + send_result_future = producer.send_async(msg) + send_result_future.add_done_callback(handle_send_result) + except Exception as e: + print(f"async producer example raise exception: {e}") diff --git a/python/example/async_simple_consumer_example.py b/python/example/async_simple_consumer_example.py new file mode 100644 index 000000000..a518ee7e3 --- /dev/null +++ b/python/example/async_simple_consumer_example.py @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import time + +from rocketmq import ClientConfiguration, Credentials, SimpleConsumer + + +def receive_callback(receive_result_future, consumer): + messages = receive_result_future.result() + for msg in messages: + print(f"{consumer.__str__()} receive {len(messages)} messages in callback.") + try: + consumer.ack(msg) + print(f"receive and ack message:{msg.message_id} in callback.") + except Exception as exception: + print(f"receive message callback raise exception: {exception}") + + +if __name__ == '__main__': + endpoints = "endpoints" + credentials = Credentials("ak", "sk") + config = ClientConfiguration(endpoints, credentials) + topic = "topic" + try: + simple_consumer = SimpleConsumer(config, "consumer_group") + simple_consumer.startup() + simple_consumer.subscribe(topic) + while True: + time.sleep(5) + future = simple_consumer.receive_async(32, 15) + future.add_done_callback(functools.partial(receive_callback, consumer=simple_consumer)) + except Exception as e: + print(f"simple consumer example raise exception: {e}") diff --git a/python/example/normal_producer_example.py b/python/example/normal_producer_example.py new file mode 100644 index 000000000..3ffee0175 --- /dev/null +++ b/python/example/normal_producer_example.py @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rocketmq import ClientConfiguration, Credentials, Message, Producer + +if __name__ == '__main__': + endpoints = "endpoints" + credentials = Credentials("ak", "sk") + config = ClientConfiguration(endpoints, credentials) + topic = "topic" + try: + producer = Producer(config, (topic,)) + producer.startup() + msg = Message() + msg.topic = topic + msg.body = "hello, rocketmq.".encode('utf-8') + msg.tag = "rocketmq-send-message" + msg.keys = "send_sync" + msg.add_property("send", "sync") + res = producer.send(msg) + print(f"{producer.__str__()} send message success. {res}") + producer.shutdown() + print(f"{producer.__str__()} shutdown.") + except Exception as e: + print(f"normal producer example raise exception: {e}") diff --git a/python/example/simple_consumer_example.py b/python/example/simple_consumer_example.py new file mode 100644 index 000000000..19120eade --- /dev/null +++ b/python/example/simple_consumer_example.py @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rocketmq import ClientConfiguration, Credentials, SimpleConsumer + +if __name__ == '__main__': + endpoints = "endpoints" + credentials = Credentials("ak", "sk") + config = ClientConfiguration(endpoints, credentials) + topic = "topic" + try: + simple_consumer = SimpleConsumer(config, "consumer-group") + simple_consumer.startup() + simple_consumer.subscribe(topic) + while True: + try: + messages = simple_consumer.receive(32, 15) + if messages is not None: + print(f"{simple_consumer.__str__()} receive {len(messages)} messages.") + for msg in messages: + simple_consumer.ack(msg) + print(f"{simple_consumer.__str__()} ack message:[{msg.message_id}].") + except Exception as e: + print(f"receive or ack message raise exception: {e}") + except Exception as e: + print(f"simple consumer example raise exception: {e}") diff --git a/python/example/transaction_producer_example.py b/python/example/transaction_producer_example.py new file mode 100644 index 000000000..7296ab115 --- /dev/null +++ b/python/example/transaction_producer_example.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rocketmq import (ClientConfiguration, Credentials, Message, Producer, + TransactionChecker, TransactionResolution) + + +class TestChecker(TransactionChecker): + + def check(self, message: Message) -> TransactionResolution: + print(f"do TestChecker check. message_id: {message.message_id}, commit message.") + return TransactionResolution.COMMIT + + +if __name__ == '__main__': + endpoints = "endpoints" + credentials = Credentials("ak", "sk") + config = ClientConfiguration(endpoints, credentials) + topic = "topic" + try: + producer = Producer(config, (topic,), TestChecker()) + producer.startup() + transaction = producer.begin_transaction() + msg = Message() + msg.topic = topic + msg.body = "hello, rocketmq.".encode('utf-8') + msg.tag = "rocketmq-send-transaction-message" + msg.keys = "send_transaction" + msg.add_property("send", "transaction") + res = producer.send(msg, transaction) + print(f"{producer.__str__()} send half message. {res}") + except Exception as e: + print(f"transaction producer example raise exception: {e}") diff --git a/python/examples/simple_consumer_example.py b/python/examples/simple_consumer_example.py deleted file mode 100644 index 07ff20b92..000000000 --- a/python/examples/simple_consumer_example.py +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import asyncio - -from rocketmq.client_config import ClientConfig -from rocketmq.filter_expression import FilterExpression -from rocketmq.log import logger -from rocketmq.protocol.definition_pb2 import Resource -from rocketmq.rpc_client import Endpoints -from rocketmq.session_credentials import (SessionCredentials, - SessionCredentialsProvider) -from rocketmq.simple_consumer import SimpleConsumer - - -async def test(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "normal_topic" - - consumer_group = "yourConsumerGroup" - subscription = {topic.name: FilterExpression("*")} - simple_consumer = (await SimpleConsumer.Builder() - .set_client_config(client_config) - .set_consumer_group(consumer_group) - .set_await_duration(15) - .set_subscription_expression(subscription) - .build()) - logger.info(simple_consumer) - # while True: - message_views = await simple_consumer.receive(16, 15) - logger.info(message_views) - for message in message_views: - logger.info(message.body) - logger.info(f"Received a message, topic={message.topic}, 
message-id={message.message_id}, body-size={len(message.body)}") - await simple_consumer.ack(message) - logger.info(f"Message is acknowledged successfully, message-id={message.message_id}") - -if __name__ == "__main__": - asyncio.run(test()) diff --git a/python/poetry.lock b/python/poetry.lock deleted file mode 100644 index 14b1e262b..000000000 --- a/python/poetry.lock +++ /dev/null @@ -1,331 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. - -[[package]] -name = "certifi" -version = "2023.5.7" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, - {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.1.1" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, - {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "grpcio" -version = "1.54.2" -description = "HTTP/2-based RPC framework" -optional = false -python-versions = ">=3.7" -files = [ - {file = "grpcio-1.54.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:40e1cbf69d6741b40f750f3cccc64326f927ac6145a9914d33879e586002350c"}, - {file = "grpcio-1.54.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2288d76e4d4aa7ef3fe7a73c1c470b66ea68e7969930e746a8cd8eca6ef2a2ea"}, - {file = "grpcio-1.54.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:c0e3155fc5335ec7b3b70f15230234e529ca3607b20a562b6c75fb1b1218874c"}, - {file = "grpcio-1.54.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bf88004fe086c786dc56ef8dd6cb49c026833fdd6f42cb853008bce3f907148"}, - {file = "grpcio-1.54.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2be88c081e33f20630ac3343d8ad9f1125f32987968e9c8c75c051c9800896e8"}, - {file = "grpcio-1.54.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:33d40954199bddbb6a78f8f6f2b2082660f381cd2583ec860a6c2fa7c8400c08"}, - {file = "grpcio-1.54.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:b52d00d1793d290c81ad6a27058f5224a7d5f527867e5b580742e1bd211afeee"}, - {file = "grpcio-1.54.2-cp310-cp310-win32.whl", hash = "sha256:881d058c5ccbea7cc2c92085a11947b572498a27ef37d3eef4887f499054dca8"}, - {file = "grpcio-1.54.2-cp310-cp310-win_amd64.whl", hash = "sha256:0212e2f7fdf7592e4b9d365087da30cb4d71e16a6f213120c89b4f8fb35a3ab3"}, - {file = "grpcio-1.54.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:1e623e0cf99a0ac114f091b3083a1848dbc64b0b99e181473b5a4a68d4f6f821"}, - {file = "grpcio-1.54.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:66233ccd2a9371158d96e05d082043d47dadb18cbb294dc5accfdafc2e6b02a7"}, - {file = "grpcio-1.54.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:4cb283f630624ebb16c834e5ac3d7880831b07cbe76cb08ab7a271eeaeb8943e"}, - {file = "grpcio-1.54.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a1e601ee31ef30a9e2c601d0867e236ac54c922d32ed9f727b70dd5d82600d5"}, - {file = "grpcio-1.54.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8da84bbc61a4e92af54dc96344f328e5822d574f767e9b08e1602bb5ddc254a"}, - {file = "grpcio-1.54.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5008964885e8d23313c8e5ea0d44433be9bfd7e24482574e8cc43c02c02fc796"}, - {file = "grpcio-1.54.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2f5a1f1080ccdc7cbaf1171b2cf384d852496fe81ddedeb882d42b85727f610"}, - {file = "grpcio-1.54.2-cp311-cp311-win32.whl", hash = "sha256:b74ae837368cfffeb3f6b498688a123e6b960951be4dec0e869de77e7fa0439e"}, - {file = "grpcio-1.54.2-cp311-cp311-win_amd64.whl", hash = "sha256:8cdbcbd687e576d48f7886157c95052825ca9948c0ed2afdc0134305067be88b"}, - {file = "grpcio-1.54.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:782f4f8662a2157c4190d0f99eaaebc602899e84fb1e562a944e5025929e351c"}, - {file = "grpcio-1.54.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:714242ad0afa63a2e6dabd522ae22e1d76e07060b5af2ddda5474ba4f14c2c94"}, - {file = 
"grpcio-1.54.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:f900ed4ad7a0f1f05d35f955e0943944d5a75f607a836958c6b8ab2a81730ef2"}, - {file = "grpcio-1.54.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96a41817d2c763b1d0b32675abeb9179aa2371c72aefdf74b2d2b99a1b92417b"}, - {file = "grpcio-1.54.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70fcac7b94f4c904152809a050164650ac81c08e62c27aa9f156ac518029ebbe"}, - {file = "grpcio-1.54.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd6c6c29717724acf9fc1847c4515d57e4dc12762452457b9cb37461f30a81bb"}, - {file = "grpcio-1.54.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c2392f5b5d84b71d853918687d806c1aa4308109e5ca158a16e16a6be71041eb"}, - {file = "grpcio-1.54.2-cp37-cp37m-win_amd64.whl", hash = "sha256:51630c92591d6d3fe488a7c706bd30a61594d144bac7dee20c8e1ce78294f474"}, - {file = "grpcio-1.54.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:b04202453941a63b36876a7172b45366dc0cde10d5fd7855c0f4a4e673c0357a"}, - {file = "grpcio-1.54.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:89dde0ac72a858a44a2feb8e43dc68c0c66f7857a23f806e81e1b7cc7044c9cf"}, - {file = "grpcio-1.54.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:09d4bfd84686cd36fd11fd45a0732c7628308d094b14d28ea74a81db0bce2ed3"}, - {file = "grpcio-1.54.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7fc2b4edb938c8faa4b3c3ea90ca0dd89b7565a049e8e4e11b77e60e4ed2cc05"}, - {file = "grpcio-1.54.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61f7203e2767800edee7a1e1040aaaf124a35ce0c7fe0883965c6b762defe598"}, - {file = "grpcio-1.54.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e416c8baf925b5a1aff31f7f5aecc0060b25d50cce3a5a7255dc5cf2f1d4e5eb"}, - {file = "grpcio-1.54.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dc80c9c6b608bf98066a038e0172013a49cfa9a08d53335aefefda2c64fc68f4"}, - {file = "grpcio-1.54.2-cp38-cp38-win32.whl", hash = 
"sha256:8d6192c37a30a115f4663592861f50e130caed33efc4eec24d92ec881c92d771"}, - {file = "grpcio-1.54.2-cp38-cp38-win_amd64.whl", hash = "sha256:46a057329938b08e5f0e12ea3d7aed3ecb20a0c34c4a324ef34e00cecdb88a12"}, - {file = "grpcio-1.54.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:2296356b5c9605b73ed6a52660b538787094dae13786ba53080595d52df13a98"}, - {file = "grpcio-1.54.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:c72956972e4b508dd39fdc7646637a791a9665b478e768ffa5f4fe42123d5de1"}, - {file = "grpcio-1.54.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:9bdbb7624d65dc0ed2ed8e954e79ab1724526f09b1efa88dcd9a1815bf28be5f"}, - {file = "grpcio-1.54.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c44e1a765b31e175c391f22e8fc73b2a2ece0e5e6ff042743d8109b5d2eff9f"}, - {file = "grpcio-1.54.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cc928cfe6c360c1df636cf7991ab96f059666ac7b40b75a769410cc6217df9c"}, - {file = "grpcio-1.54.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a08920fa1a97d4b8ee5db2f31195de4a9def1a91bc003544eb3c9e6b8977960a"}, - {file = "grpcio-1.54.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4864f99aac207e3e45c5e26c6cbb0ad82917869abc2f156283be86c05286485c"}, - {file = "grpcio-1.54.2-cp39-cp39-win32.whl", hash = "sha256:b38b3de8cff5bc70f8f9c615f51b48eff7313fc9aca354f09f81b73036e7ddfa"}, - {file = "grpcio-1.54.2-cp39-cp39-win_amd64.whl", hash = "sha256:be48496b0e00460717225e7680de57c38be1d8629dc09dadcd1b3389d70d942b"}, - {file = "grpcio-1.54.2.tar.gz", hash = "sha256:50a9f075eeda5097aa9a182bb3877fe1272875e45370368ac0ee16ab9e22d019"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.54.2)"] - -[[package]] -name = "grpcio-tools" -version = "1.54.2" -description = "Protobuf code generator for gRPC" -optional = false -python-versions = ">=3.7" -files = [ - {file = "grpcio-tools-1.54.2.tar.gz", hash = "sha256:e11c2c2aee53f340992e8e4d6a59172cbbbd0193f1351de98c4f810a5041d5ca"}, - {file 
= "grpcio_tools-1.54.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:2b96f5f17d3156058be247fd25b062b4768138665694c00b056659618b8fb418"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:11939c9a8a39bd4815c7e88cb2fee48e1948775b59dbb06de8fcae5991e84f9e"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:129de5579f95d6a55dde185f188b4cbe19d1e2f1471425431d9930c31d300d70"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4128c01cd6f5ea8f7c2db405dbfd8582cd967d36e6fa0952565436633b0e591"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5c7292dd899ad8fa09a2be96719648cee37b17909fe8c12007e3bff58ebee61"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5ef30c2dbc63c1e0a462423ca4f95001814d26ef4fe66208e53fcf220ea3b717"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4abfc1892380abe6cef381eab86f9350cbd703bfe5d834095aa66fd91c886b6d"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-win32.whl", hash = "sha256:9acf443dcf6f68fbea3b7fb519e1716e014db1a561939f5aecc4abda74e4015d"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-win_amd64.whl", hash = "sha256:21b9d2dee80f3f77e4097252e7f0db89772335a7300b72ab3d2e5c280872b1db"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:7b24fbab9e7598518ce4549e066df00aab79c2bf9bedcdde23fb5ef6a3cf532f"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:7baa210c20f71a242d9ae0e02734628f6948e8bee3bf538647894af427d28800"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:e3d0e5188ff8dbaddac2ee44731d36f09c4eccd3eac7328e547862c44f75cacd"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:27671c68c7e0e3c5ff9967f5500799f65a04e7b153b8ce10243c87c43199039d"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f39d8e8806b8857fb473ca6a9c7bd800b0673dfdb7283ff569af0345a222f32c"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8e4c5a48f7b2e8798ce381498ee7b9a83c65b87ae66ee5022387394e5eb51771"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f285f8ef3de422717a36bd372239ae778b8cc112ce780ca3c7fe266dadc49fb"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-win32.whl", hash = "sha256:0f952c8a5c47e9204fe8959f7e9add149e660f6579d67cf65024c32736d34caf"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-win_amd64.whl", hash = "sha256:3237149beec39e897fd62cef4aa1e1cd9422d7a95661d24bd0a79200b167e730"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:0ab1b323905d449298523db5d34fa5bf5fffd645bd872b25598e2f8a01f0ea39"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:7d7e6e8d62967b3f037f952620cb7381cc39a4bd31790c75fcfba56cc975d70b"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7f4624ef2e76a3a5313c4e61a81be38bcc16b59a68a85d30758b84cd2102b161"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e543f457935ba7b763b121f1bf893974393b4d30065042f947f85a8d81081b80"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0239b929eb8b3b30b2397eef3b9abb245087754d77c3721e3be43c44796de87d"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0de05c7698c655e9a240dc34ae91d6017b93143ac89e5b20046d7ca3bd09c27c"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a3ce0b98fb581c471424d2cda45120f57658ed97677c6fec4d6decf5d7c1b976"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-win_amd64.whl", hash = 
"sha256:37393ef90674964175923afe3859fc5a208e1ece565f642b4f76a8c0224a0993"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:8e4531267736d88fde1022b36dd42ed8163e3575bcbd12bfed96662872aa93fe"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:a0b7049814442f918b522d66b1d015286afbeb9e6d141af54bbfafe31710a3c8"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:b80585e06c4f0082327eb5c9ad96fbdb2b0e7c14971ea5099fe78c22f4608451"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39fd530cfdf58dc05125775cc233b05554d553d27478f14ae5fd8a6306f0cb28"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bb9ec4aea0f2b3006fb002fa59e5c10f92b48fc374619fbffd14d2b0e388c3e"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d512de051342a576bb89777476d13c5266d9334cf4badb6468aed9dc8f5bdec1"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1b8ee3099c51ce987fa8a08e6b93fc342b10228415dd96b5c0caa0387f636a6f"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-win32.whl", hash = "sha256:6037f123905dc0141f7c8383ca616ef0195e79cd3b4d82faaee789d4045e891b"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-win_amd64.whl", hash = "sha256:10dd41862f579d185c60f629b5ee89103e216f63b576079d258d974d980bad87"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:f6787d07fdab31a32c433c1ba34883dea6559d8a3fbe08fb93d834ca34136b71"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:21b1467e31e44429d2a78b50135c9cdbd4b8f6d3b5cd548bc98985d3bdc352d0"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:30a49b8b168aced2a4ff40959e6c4383ad6cfd7a20839a47a215e9837eb722dc"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:8742122782953d2fd038f0a199f047a24e941cc9718b1aac90876dbdb7167739"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503ef1351c62fb1d6747eaf74932b609d8fdd4345b3591ef910adef8fa9969d0"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:72d15de4c4b6a764a76c4ae69d99c35f7a0751223688c3f7e62dfa95eb4f61be"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:df079479fb1b9e488334312e35ebbf30cbf5ecad6c56599f1a961800b33ab7c1"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-win32.whl", hash = "sha256:49c2846dcc4803476e839d8bd4db8845e928f19130e0ea86121f2d1f43d2b452"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-win_amd64.whl", hash = "sha256:b82ca472db9c914c44e39a41e9e8bd3ed724523dd7aff5ce37592b8d16920ed9"}, -] - -[package.dependencies] -grpcio = ">=1.54.2" -protobuf = ">=4.21.6,<5.0dev" -setuptools = "*" - -[[package]] -name = "importlib-metadata" -version = "6.6.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"}, - {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"}, -] - -[package.dependencies] -typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} -zipp = ">=0.5" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = 
"brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "packaging" -version = "23.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, -] - -[[package]] -name = "pluggy" -version = "1.0.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, - {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, -] - -[package.dependencies] -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "protobuf" -version = "4.23.2" -description = "" -optional = false -python-versions = ">=3.7" -files = [ - {file = "protobuf-4.23.2-cp310-abi3-win32.whl", hash = "sha256:384dd44cb4c43f2ccddd3645389a23ae61aeb8cfa15ca3a0f60e7c3ea09b28b3"}, - {file = "protobuf-4.23.2-cp310-abi3-win_amd64.whl", hash = "sha256:09310bce43353b46d73ba7e3bca78273b9bc50349509b9698e64d288c6372c2a"}, - {file = "protobuf-4.23.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2cfab63a230b39ae603834718db74ac11e52bccaaf19bf20f5cce1a84cf76df"}, - {file = "protobuf-4.23.2-cp37-abi3-manylinux2014_aarch64.whl", hash = 
"sha256:c52cfcbfba8eb791255edd675c1fe6056f723bf832fa67f0442218f8817c076e"}, - {file = "protobuf-4.23.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:86df87016d290143c7ce3be3ad52d055714ebaebb57cc659c387e76cfacd81aa"}, - {file = "protobuf-4.23.2-cp37-cp37m-win32.whl", hash = "sha256:281342ea5eb631c86697e1e048cb7e73b8a4e85f3299a128c116f05f5c668f8f"}, - {file = "protobuf-4.23.2-cp37-cp37m-win_amd64.whl", hash = "sha256:ce744938406de1e64b91410f473736e815f28c3b71201302612a68bf01517fea"}, - {file = "protobuf-4.23.2-cp38-cp38-win32.whl", hash = "sha256:6c081863c379bb1741be8f8193e893511312b1d7329b4a75445d1ea9955be69e"}, - {file = "protobuf-4.23.2-cp38-cp38-win_amd64.whl", hash = "sha256:25e3370eda26469b58b602e29dff069cfaae8eaa0ef4550039cc5ef8dc004511"}, - {file = "protobuf-4.23.2-cp39-cp39-win32.whl", hash = "sha256:efabbbbac1ab519a514579ba9ec52f006c28ae19d97915951f69fa70da2c9e91"}, - {file = "protobuf-4.23.2-cp39-cp39-win_amd64.whl", hash = "sha256:54a533b971288af3b9926e53850c7eb186886c0c84e61daa8444385a4720297f"}, - {file = "protobuf-4.23.2-py3-none-any.whl", hash = "sha256:8da6070310d634c99c0db7df48f10da495cc283fd9e9234877f0cd182d43ab7f"}, - {file = "protobuf-4.23.2.tar.gz", hash = "sha256:20874e7ca4436f683b64ebdbee2129a5a2c301579a67d1a7dda2cdf62fb7f5f7"}, -] - -[[package]] -name = "pytest" -version = "7.3.1" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-7.3.1-py3-none-any.whl", hash = "sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362"}, - {file = "pytest-7.3.1.tar.gz", hash = "sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} -iniconfig = "*" -packaging = "*" -pluggy = 
">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] - -[[package]] -name = "setuptools" -version = "67.8.0" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"}, - {file = "setuptools-67.8.0.tar.gz", hash = "sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = 
"sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "typing-extensions" -version = "4.6.2" -description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false -python-versions = ">=3.7" -files = [ - {file = "typing_extensions-4.6.2-py3-none-any.whl", hash = "sha256:3a8b36f13dd5fdc5d1b16fe317f5668545de77fa0b8e02006381fd49d731ab98"}, - {file = "typing_extensions-4.6.2.tar.gz", hash = "sha256:06006244c70ac8ee83fa8282cb188f697b8db25bc8b4df07be1873c43897060c"}, -] - -[[package]] -name = "zipp" -version = "3.15.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.7" -files = [ - {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, - {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] - -[metadata] -lock-version = "2.0" -python-versions = "^3.7" -content-hash = "8452cc04f16254585b86b05703ed525c20f7c471f180da580e95f26324df7f55" diff --git a/python/pyproject.toml b/python/pyproject.toml deleted file mode 100644 index ff017178a..000000000 --- a/python/pyproject.toml +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -[tool.poetry] -name = "rocketmq" -version = "0.1.0rc1" -description = "RocketMQ Python Client" -authors = ["RocketMQ Authors"] -readme = "README.md" -packages = [{ include = "rocketmq" }] - -[tool.poetry.dependencies] -python = "^3.7" -grpcio = "^1.50.0" -grpcio-tools = "^1.50.0" -certifi = "^2023.5.7" - -[tool.poetry.dev-dependencies] -pytest = "^7.0.07" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - -[tool.black] -line-length = 88 -target-version = ['py37'] -include = '\.pyi?$' -# 'extend-exclude' excludes files or directories in addition to the defaults -extend-exclude = ''' -# A regex preceded with ^/ will apply only to files and directories -# in the root of the project. -( - ^/foo.py # exclude a file named foo.py in the root of the project - | .*_pb2.* # exclude autogenerated Protocol Buffer files anywhere in the project -) -''' diff --git a/python/rocketmq/__init__.py b/python/rocketmq/__init__.py new file mode 100644 index 000000000..47375ad1d --- /dev/null +++ b/python/rocketmq/__init__.py @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rocketmq.grpc_protocol import TransactionResolution + +from .v5.client import ClientConfiguration, Credentials +from .v5.consumer import SimpleConsumer +from .v5.model import FilterExpression, Message, SendReceipt +from .v5.producer import Producer, Transaction, TransactionChecker + +__all__ = [ + "Producer", + "TransactionChecker", + "Transaction", + "TransactionResolution", # noqa + "SimpleConsumer", + "Message", + "FilterExpression", + "SendReceipt", + "ClientConfiguration", + "Credentials", +] diff --git a/python/rocketmq/client.py b/python/rocketmq/client.py deleted file mode 100644 index fd6a7f79f..000000000 --- a/python/rocketmq/client.py +++ /dev/null @@ -1,524 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import asyncio -import threading - -from protocol import definition_pb2, service_pb2 -from protocol.definition_pb2 import Code as ProtoCode -from protocol.service_pb2 import HeartbeatRequest, QueryRouteRequest -from rocketmq.client_config import ClientConfig -from rocketmq.client_id_encoder import ClientIdEncoder -from rocketmq.definition import Resource, TopicRouteData -from rocketmq.log import logger -from rocketmq.rpc_client import Endpoints, RpcClient -from rocketmq.session import Session -from rocketmq.signature import Signature - - -class ScheduleWithFixedDelay: - def __init__(self, action, delay, period): - self.action = action - self.delay = delay - self.period = period - self.task = None - - async def start(self): - await asyncio.sleep(self.delay) - while True: - try: - await self.action() - except Exception as e: - logger.error(e, "Failed to execute scheduled task") - finally: - await asyncio.sleep(self.period) - - def schedule(self): - loop1 = asyncio.new_event_loop() - asyncio.set_event_loop(loop1) - self.task = asyncio.create_task(self.start()) - - def cancel(self): - if self.task: - self.task.cancel() - - -class Client: - """ - Main client class which handles interaction with the server. - """ - def __init__(self, client_config: ClientConfig): - """ - Initialization method for the Client class. - - :param client_config: Client configuration. - :param topics: Set of topics that the client is subscribed to. - """ - self.client_config = client_config - self.client_id = ClientIdEncoder.generate() - self.endpoints = client_config.endpoints - - #: A cache to store topic routes. - self.topic_route_cache = {} - - #: A table to store session information. - self.sessions_table = {} - self.sessionsLock = threading.Lock() - self.client_manager = ClientManager(self) - - #: A dictionary to store isolated items. 
- self.isolated = dict() - - def get_topics(self): - raise NotImplementedError("This method should be implemented by the subclass.") - - async def start(self): - """ - Start method which initiates fetching of topic routes and schedules heartbeats. - """ - # get topic route - logger.debug(f"Begin to start the rocketmq client, client_id={self.client_id}") - for topic in self.get_topics(): - self.topic_route_cache[topic] = await self.fetch_topic_route(topic) - scheduler = ScheduleWithFixedDelay(self.heartbeat, 3, 12) - scheduler_sync_settings = ScheduleWithFixedDelay(self.sync_settings, 3, 12) - scheduler.schedule() - scheduler_sync_settings.schedule() - logger.debug(f"Start the rocketmq client successfully, client_id={self.client_id}") - - async def shutdown(self): - logger.debug(f"Begin to shutdown rocketmq client, client_id={self.client_id}") - - logger.debug(f"Shutdown the rocketmq client successfully, client_id={self.client_id}") - - async def heartbeat(self): - """ - Asynchronous method that sends a heartbeat to the server. - """ - try: - endpoints = self.get_total_route_endpoints() - request = HeartbeatRequest() - request.client_type = definition_pb2.PRODUCER - topic = Resource() - topic.name = "normal_topic" - # Collect task into a map. 
- for item in endpoints: - try: - - task = await self.client_manager.heartbeat(item, request, self.client_config.request_timeout) - code = task.status.code - if code == ProtoCode.OK: - logger.info(f"Send heartbeat successfully, endpoints={item}, client_id={self.client_id}") - - if item in self.isolated: - self.isolated.pop(item) - logger.info(f"Rejoin endpoints which was isolated before, endpoints={item}, " - + f"client_id={self.client_id}") - return - status_message = task.status.message - logger.info(f"Failed to send heartbeat, endpoints={item}, code={code}, " - + f"status_message={status_message}, client_id={self.client_id}") - except Exception: - logger.error(f"Failed to send heartbeat, endpoints={item}") - except Exception as e: - logger.error(f"[Bug] unexpected exception raised during heartbeat, client_id={self.client_id}, Exception: {str(e)}") - - def get_total_route_endpoints(self): - """ - Method that returns all route endpoints. - """ - endpoints = set() - for item in self.topic_route_cache.items(): - for endpoint in [mq.broker.endpoints for mq in item[1].message_queues]: - endpoints.add(endpoint) - return endpoints - - async def get_route_data(self, topic): - """ - Asynchronous method that fetches route data for a given topic. - - :param topic: The topic to fetch route data for. - """ - if topic in self.topic_route_cache: - return self.topic_route_cache[topic] - topic_route_data = await self.fetch_topic_route(topic=topic) - return topic_route_data - - def get_client_config(self): - """ - Method to return client configuration. 
- """ - return self.client_config - - async def sync_settings(self): - total_route_endpoints = self.get_total_route_endpoints() - - for endpoints in total_route_endpoints: - created, session = await self.get_session(endpoints) - await session.sync_settings(True) - logger.info(f"Sync settings to remote, endpoints={endpoints}") - - def stats(self): - # TODO: stats implement - pass - - async def notify_client_termination(self): - pass - - async def on_recover_orphaned_transaction_command(self, endpoints, command): - pass - - async def on_verify_message_command(self, endpoints, command): - logger.warn(f"Ignore verify message command from remote, which is not expected, clientId={self.client_id}, " - + f"endpoints={endpoints}, command={command}") - pass - - async def on_print_thread_stack_trace_command(self, endpoints, command): - pass - - async def on_settings_command(self, endpoints, settings): - pass - - async def on_topic_route_data_fetched(self, topic, topic_route_data): - """ - Asynchronous method that handles the process once the topic route data is fetched. - - :param topic: The topic for which the route data is fetched. - :param topic_route_data: The fetched topic route data. - """ - route_endpoints = set() - for mq in topic_route_data.message_queues: - route_endpoints.add(mq.broker.endpoints) - - existed_route_endpoints = self.get_total_route_endpoints() - new_endpoints = route_endpoints.difference(existed_route_endpoints) - - for endpoints in new_endpoints: - created, session = await self.get_session(endpoints) - if not created: - continue - logger.info(f"Begin to establish session for endpoints={endpoints}, client_id={self.client_id}") - await session.sync_settings(True) - logger.info(f"Establish session for endpoints={endpoints} successfully, client_id={self.client_id}") - - self.topic_route_cache[topic] = topic_route_data - - async def fetch_topic_route0(self, topic): - """ - Asynchronous method that fetches the topic route. 
- - :param topic: The topic to fetch the route for. - """ - try: - req = QueryRouteRequest() - req.topic.name = topic - address = req.endpoints.addresses.add() - address.host = self.endpoints.Addresses[0].host - address.port = self.endpoints.Addresses[0].port - req.endpoints.scheme = self.endpoints.scheme.to_protobuf(self.endpoints.scheme) - response = await self.client_manager.query_route(self.endpoints, req, 10) - code = response.status.code - if code != ProtoCode.OK: - logger.error(f"Failed to fetch topic route, client_id={self.client_id}, topic={topic}, code={code}, " - + f"statusMessage={response.status.message}") - message_queues = response.message_queues - return TopicRouteData(message_queues) - except Exception as e: - logger.error(e, f"Failed to fetch topic route, client_id={self.client_id}, topic={topic}") - raise - - async def fetch_topic_route(self, topic): - """ - Asynchronous method that fetches the topic route and updates the data. - - :param topic: The topic to fetch the route for. - """ - topic_route_data = await self.fetch_topic_route0(topic) - await self.on_topic_route_data_fetched(topic, topic_route_data) - logger.info(f"Fetch topic route successfully, client_id={self.client_id}, topic={topic}, topicRouteData={topic_route_data}") - return topic_route_data - - async def get_session(self, endpoints): - """ - Asynchronous method that gets the session for a given endpoint. - - :param endpoints: The endpoints to get the session for. - """ - self.sessionsLock.acquire() - try: - # Session exists, return in advance. - if endpoints in self.sessions_table: - return (False, self.sessions_table[endpoints]) - finally: - self.sessionsLock.release() - - self.sessionsLock.acquire() - try: - # Session exists, return in advance. 
- if endpoints in self.sessions_table: - return (False, self.sessions_table[endpoints]) - - stream = self.client_manager.telemetry(endpoints, 10000000) - created = Session(endpoints, stream, self) - self.sessions_table[endpoints] = created - return (True, created) - finally: - self.sessionsLock.release() - - def get_client_id(self): - return self.client_id - - -class ClientManager: - """Manager class for RPC Clients in a thread-safe manner. - Each instance is created by a specific client and can manage - multiple RPC clients. - """ - - def __init__(self, client: Client): - #: The client that instantiated this manager. - self.__client = client - - #: A dictionary that maps endpoints to the corresponding RPC clients. - self.__rpc_clients = {} - - #: A lock used to ensure thread safety when accessing __rpc_clients. - self.__rpc_clients_lock = threading.Lock() - - def __get_rpc_client(self, endpoints: Endpoints, ssl_enabled: bool): - """Retrieve the RPC client corresponding to the given endpoints. - If not present, a new RPC client is created and stored in __rpc_clients. - - :param endpoints: The endpoints associated with the RPC client. - :param ssl_enabled: A flag indicating whether SSL is enabled. - :return: The RPC client associated with the given endpoints. - """ - with self.__rpc_clients_lock: - rpc_client = self.__rpc_clients.get(endpoints) - if rpc_client: - return rpc_client - rpc_client = RpcClient(endpoints.grpc_target(True), ssl_enabled) - self.__rpc_clients[endpoints] = rpc_client - return rpc_client - - async def query_route( - self, - endpoints: Endpoints, - request: service_pb2.QueryRouteRequest, - timeout_seconds: int, - ): - """Query the routing information. - - :param endpoints: The endpoints to query. - :param request: The request containing the details of the query. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the query. 
- """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.query_route(request, metadata, timeout_seconds) - - async def heartbeat( - self, - endpoints: Endpoints, - request: service_pb2.HeartbeatRequest, - timeout_seconds: int, - ): - """Send a heartbeat to the server to indicate that the client is still alive. - - :param endpoints: The endpoints to send the heartbeat to. - :param request: The request containing the details of the heartbeat. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the heartbeat. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.heartbeat(request, metadata, timeout_seconds) - - async def send_message( - self, - endpoints: Endpoints, - request: service_pb2.SendMessageRequest, - timeout_seconds: int, - ): - """Send a message to the server. - - :param endpoints: The endpoints to send the message to. - :param request: The request containing the details of the message. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the message sending operation. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.send_message(request, metadata, timeout_seconds) - - async def query_assignment( - self, - endpoints: Endpoints, - request: service_pb2.QueryAssignmentRequest, - timeout_seconds: int, - ): - """Query the assignment information. - - :param endpoints: The endpoints to query. - :param request: The request containing the details of the query. - :param timeout_seconds: The maximum time to wait for a response. 
- :return: The result of the query. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.query_assignment(request, metadata, timeout_seconds) - - async def ack_message( - self, - endpoints: Endpoints, - request: service_pb2.AckMessageRequest, - timeout_seconds: int, - ): - """Send an acknowledgment for a message to the server. - - :param endpoints: The endpoints to send the acknowledgment to. - :param request: The request containing the details of the acknowledgment. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the acknowledgment. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.ack_message(request, metadata, timeout_seconds) - - async def forward_message_to_dead_letter_queue( - self, - endpoints: Endpoints, - request: service_pb2.ForwardMessageToDeadLetterQueueRequest, - timeout_seconds: int, - ): - """Forward a message to the dead letter queue. - - :param endpoints: The endpoints to send the request to. - :param request: The request containing the details of the message to forward. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the forward operation. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.forward_message_to_dead_letter_queue( - request, metadata, timeout_seconds - ) - - async def end_transaction( - self, - endpoints: Endpoints, - request: service_pb2.EndTransactionRequest, - timeout_seconds: int, - ): - """Ends a transaction. - - :param endpoints: The endpoints to send the request to. 
- :param request: The request to end the transaction. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the end transaction operation. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.end_transaction(request, metadata, timeout_seconds) - - async def notify_client_termination( - self, - endpoints: Endpoints, - request: service_pb2.NotifyClientTerminationRequest, - timeout_seconds: int, - ): - """Notify server about client termination. - - :param endpoints: The endpoints to send the notification to. - :param request: The request containing the details of the termination. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the notification operation. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.notify_client_termination( - request, metadata, timeout_seconds - ) - - async def change_invisible_duration( - self, - endpoints: Endpoints, - request: service_pb2.ChangeInvisibleDurationRequest, - timeout_seconds: int, - ): - """Change the invisible duration of a message. - - :param endpoints: The endpoints to send the request to. - :param request: The request containing the new invisible duration. - :param timeout_seconds: The maximum time to wait for a response. - :return: The result of the change operation. 
- """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return await rpc_client.change_invisible_duration( - request, metadata, timeout_seconds - ) - - async def receive_message( - self, - endpoints: Endpoints, - request: service_pb2.ReceiveMessageRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - - response = await rpc_client.receive_message( - request, metadata, timeout_seconds - ) - return response - - def telemetry( - self, - endpoints: Endpoints, - timeout_seconds: int, - ): - """Fetch telemetry information. - - :param endpoints: The endpoints to send the request to. - :param timeout_seconds: The maximum time to wait for a response. - :return: The telemetry information. - """ - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - metadata = Signature.sign(self.__client.client_config, self.__client.client_id) - return rpc_client.telemetry(metadata, timeout_seconds) diff --git a/python/rocketmq/client_config.py b/python/rocketmq/client_config.py deleted file mode 100644 index 1ccd5e0b7..000000000 --- a/python/rocketmq/client_config.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from rocketmq.rpc_client import Endpoints -from rocketmq.session_credentials import SessionCredentialsProvider - - -class ClientConfig: - """Client configuration class which holds the settings for a client. - The settings include endpoint configurations, session credential provider and SSL settings. - An instance of this class is used to setup the client with necessary configurations. - """ - - def __init__( - self, - endpoints: Endpoints, - session_credentials_provider: SessionCredentialsProvider, - ssl_enabled: bool, - ): - #: The endpoints for the client to connect to. - self.__endpoints = endpoints - - #: The session credentials provider to authenticate the client. - self.__session_credentials_provider = session_credentials_provider - - #: A flag indicating if SSL is enabled for the client. - self.__ssl_enabled = ssl_enabled - - #: The request timeout for the client in seconds. - self.request_timeout = 10 - - @property - def session_credentials_provider(self) -> SessionCredentialsProvider: - """The session credentials provider for the client. - - :return: the session credentials provider - """ - return self.__session_credentials_provider - - @property - def endpoints(self) -> Endpoints: - """The endpoints for the client to connect to. - - :return: the endpoints - """ - return self.__endpoints - - @property - def ssl_enabled(self) -> bool: - """A flag indicating if SSL is enabled for the client. 
- - :return: True if SSL is enabled, False otherwise - """ - return self.__ssl_enabled diff --git a/python/rocketmq/client_id_encoder.py b/python/rocketmq/client_id_encoder.py deleted file mode 100644 index b6f3ca3fa..000000000 --- a/python/rocketmq/client_id_encoder.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import socket -import threading -import time - -import rocketmq.utils - - -class ClientIdEncoder: - """This class generates a unique client ID for each client based on - hostname, process id, index and the monotonic clock time. - """ - - #: The current index for client id generation. - __INDEX = 0 - - #: The lock used for thread-safe incrementing of the index. - __INDEX_LOCK = threading.Lock() - - #: The separator used in the client id string. - __CLIENT_ID_SEPARATOR = "@" - - @staticmethod - def __get_and_increment_sequence() -> int: - """Increment and return the current index in a thread-safe manner. - - :return: the current index after incrementing it. - """ - with ClientIdEncoder.__INDEX_LOCK: - temp = ClientIdEncoder.__INDEX - ClientIdEncoder.__INDEX += 1 - return temp - - @staticmethod - def generate() -> str: - """Generate a unique client ID. 
- - :return: the generated client id - """ - index = ClientIdEncoder.__get_and_increment_sequence() - return ( - socket.gethostname() - + ClientIdEncoder.__CLIENT_ID_SEPARATOR - + str(os.getpid()) - + ClientIdEncoder.__CLIENT_ID_SEPARATOR - + str(index) - + ClientIdEncoder.__CLIENT_ID_SEPARATOR - + str(rocketmq.utils.number_to_base(time.monotonic_ns(), 36)) - ) diff --git a/python/rocketmq/client_manager.py b/python/rocketmq/client_manager.py deleted file mode 100644 index a1a87b91a..000000000 --- a/python/rocketmq/client_manager.py +++ /dev/null @@ -1,148 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import threading - -from protocol import service_pb2 -from rocketmq.client import Client -from rocketmq.rpc_client import Endpoints, RpcClient - - -class ClientManager: - def __init__(self, client: Client): - self.__client = client - self.__rpc_clients = {} - self.__rpc_clients_lock = threading.Lock() - - def __get_rpc_client(self, endpoints: Endpoints, ssl_enabled: bool): - with self.__rpc_clients_lock: - rpc_client = self.__rpc_clients.get(endpoints) - if rpc_client: - return rpc_client - rpc_client = RpcClient(endpoints, ssl_enabled) - self.__rpc_clients[endpoints] = rpc_client - return rpc_client - - async def query_route( - self, - endpoints: Endpoints, - request: service_pb2.QueryRouteRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.query_route(request, timeout_seconds) - - async def heartbeat( - self, - endpoints: Endpoints, - request: service_pb2.HeartbeatRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.heartbeat(request, timeout_seconds) - - async def send_message( - self, - endpoints: Endpoints, - request: service_pb2.SendMessageRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.send_message(request, timeout_seconds) - - async def query_assignment( - self, - endpoints: Endpoints, - request: service_pb2.QueryAssignmentRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.query_assignment(request, timeout_seconds) - - async def ack_message( - self, - endpoints: Endpoints, - request: service_pb2.AckMessageRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, 
self.__client.client_config.ssl_enabled - ) - return await rpc_client.ack_message(request, timeout_seconds) - - async def forward_message_to_dead_letter_queue( - self, - endpoints: Endpoints, - request: service_pb2.ForwardMessageToDeadLetterQueueRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.forward_message_to_dead_letter_queue( - request, timeout_seconds - ) - - async def end_transaction( - self, - endpoints: Endpoints, - request: service_pb2.EndTransactionRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.end_transaction(request, timeout_seconds) - - async def notify_client_termination( - self, - endpoints: Endpoints, - request: service_pb2.NotifyClientTerminationRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.notify_client_termination(request, timeout_seconds) - - async def change_invisible_duration( - self, - endpoints: Endpoints, - request: service_pb2.ChangeInvisibleDurationRequest, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.change_invisible_duration(request, timeout_seconds) - - async def telemetry( - self, - endpoints: Endpoints, - request: service_pb2.TelemetryCommand, - timeout_seconds: int, - ): - rpc_client = self.__get_rpc_client( - endpoints, self.__client.client_config.ssl_enabled - ) - return await rpc_client.telemetry() diff --git a/python/rocketmq/consumer.py b/python/rocketmq/consumer.py deleted file mode 100644 index d81d89725..000000000 --- a/python/rocketmq/consumer.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re -from typing import List - -from filter_expression import ExpressionType -from google.protobuf.duration_pb2 import Duration -from message import MessageView -from rocketmq.client import Client -from rocketmq.protocol.definition_pb2 import \ - FilterExpression as ProtoFilterExpression -from rocketmq.protocol.definition_pb2 import FilterType -from rocketmq.protocol.definition_pb2 import Resource as ProtoResource -from rocketmq.protocol.service_pb2 import \ - ReceiveMessageRequest as ProtoReceiveMessageRequest - - -class ReceiveMessageResult: - def __init__(self, endpoints, messages: List['MessageView']): - self.endpoints = endpoints - self.messages = messages - - -class Consumer(Client): - CONSUMER_GROUP_REGEX = re.compile(r"^[%a-zA-Z0-9_-]+$") - - def __init__(self, client_config, consumer_group): - super().__init__(client_config) - self.consumer_group = consumer_group - - async def receive_message(self, request, mq, await_duration): - tolerance = self.client_config.request_timeout - timeout = tolerance + await_duration - results = await self.client_manager.receive_message(mq.broker.endpoints, request, timeout) - - messages = [MessageView.from_protobuf(message, mq) for message in results] - return ReceiveMessageResult(mq.broker.endpoints, messages) - - @staticmethod - def 
_wrap_filter_expression(filter_expression): - filter_type = FilterType.TAG - if filter_expression.type == ExpressionType.Sql92: - filter_type = FilterType.SQL - return ProtoFilterExpression( - type=filter_type, - expression=filter_expression.expression - ) - - def wrap_receive_message_request(self, batch_size, mq, filter_expression, await_duration, invisible_duration): - group = ProtoResource() - group.name = self.consumer_group - return ProtoReceiveMessageRequest( - group=group, - message_queue=mq.to_protobuf(), - filter_expression=self._wrap_filter_expression(filter_expression), - long_polling_timeout=Duration(seconds=await_duration), - batch_size=batch_size, - auto_renew=False, - invisible_duration=Duration(seconds=invisible_duration) - ) diff --git a/python/rocketmq/definition.py b/python/rocketmq/definition.py deleted file mode 100644 index 498fc6d9f..000000000 --- a/python/rocketmq/definition.py +++ /dev/null @@ -1,285 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from enum import Enum -from typing import List - -from protocol.definition_pb2 import Broker as ProtoBroker -from protocol.definition_pb2 import Encoding as ProtoEncoding -from protocol.definition_pb2 import MessageQueue as ProtoMessageQueue -from protocol.definition_pb2 import MessageType as ProtoMessageType -from protocol.definition_pb2 import Permission as ProtoPermission -from protocol.definition_pb2 import Resource as ProtoResource -from rocketmq.protocol import definition_pb2 -from rocketmq.rpc_client import Endpoints - - -class Encoding(Enum): - """Enumeration of supported encoding types.""" - IDENTITY = 0 - GZIP = 1 - - -class EncodingHelper: - """Helper class for converting encoding types to protobuf.""" - - @staticmethod - def to_protobuf(mq_encoding): - """Convert encoding type to protobuf. - - :param mq_encoding: The encoding to be converted. - :return: The corresponding protobuf encoding. - """ - if mq_encoding == Encoding.IDENTITY: - return ProtoEncoding.IDENTITY - elif mq_encoding == Encoding.GZIP: - return ProtoEncoding.GZIP - - -class Broker: - """Represent a broker entity.""" - - def __init__(self, broker): - self.name = broker.name - self.id = broker.id - self.endpoints = Endpoints(broker.endpoints) - - def to_protobuf(self): - """Convert the broker to its protobuf representation. - - :return: The protobuf representation of the broker. - """ - return ProtoBroker( - name=self.name, id=self.id, endpoints=self.endpoints.to_protobuf() - ) - - -class Resource: - """Represent a resource entity.""" - - def __init__(self, name=None, resource=None): - """Initialize a resource. - - :param name: The name of the resource. - :param resource: The resource object. - """ - if resource is not None: - self.namespace = resource.resource_namespace - self.name = resource.name - else: - self.namespace = "" - self.name = name - - def to_protobuf(self): - """Convert the resource to its protobuf representation. - - :return: The protobuf representation of the resource. 
- """ - resource = ProtoResource() - resource.name = self.name - resource.resource_namespace = self.namespace - return resource - - def __str__(self): - return f"{self.namespace}.{self.name}" if self.namespace else self.name - - -class Permission(Enum): - """Enumeration of supported permission types.""" - NONE = 0 - READ = 1 - WRITE = 2 - READ_WRITE = 3 - - -class PermissionHelper: - """Helper class for converting permission types to protobuf and vice versa.""" - - @staticmethod - def from_protobuf(permission): - """Convert protobuf permission to Permission enum. - - :param permission: The protobuf permission to be converted. - :return: The corresponding Permission enum. - """ - if permission == ProtoPermission.READ: - return Permission.READ - elif permission == ProtoPermission.WRITE: - return Permission.WRITE - elif permission == ProtoPermission.READ_WRITE: - return Permission.READ_WRITE - elif permission == ProtoPermission.NONE: - return Permission.NONE - else: - pass - - @staticmethod - def to_protobuf(permission): - """Convert Permission enum to protobuf permission. - - :param permission: The Permission enum to be converted. - :return: The corresponding protobuf permission. - """ - if permission == Permission.READ: - return ProtoPermission.READ - elif permission == Permission.WRITE: - return ProtoPermission.WRITE - elif permission == Permission.READ_WRITE: - return ProtoPermission.READ_WRITE - else: - pass - - @staticmethod - def is_writable(permission): - """Check if the permission is writable. - - :param permission: The Permission enum to be checked. - :return: True if the permission is writable, False otherwise. - """ - if permission in [Permission.WRITE, Permission.READ_WRITE]: - return True - else: - return False - - @staticmethod - def is_readable(permission): - """Check if the permission is readable. - - :param permission: The Permission enum to be checked. - :return: True if the permission is readable, False otherwise. 
- """ - if permission in [Permission.READ, Permission.READ_WRITE]: - return True - else: - return False - - -class MessageType(Enum): - """Enumeration of supported message types.""" - NORMAL = 0 - FIFO = 1 - DELAY = 2 - TRANSACTION = 3 - - -class MessageTypeHelper: - """Helper class for converting message types to protobuf and vice versa.""" - - @staticmethod - def from_protobuf(message_type): - """Convert protobuf message type to MessageType enum. - - :param message_type: The protobuf message type to be converted. - :return: The corresponding MessageType enum. - """ - if message_type == ProtoMessageType.NORMAL: - return MessageType.NORMAL - elif message_type == ProtoMessageType.FIFO: - return MessageType.FIFO - elif message_type == ProtoMessageType.DELAY: - return MessageType.DELAY - elif message_type == ProtoMessageType.TRANSACTION: - return MessageType.TRANSACTION - else: - pass - - @staticmethod - def to_protobuf(message_type): - """Convert MessageType enum to protobuf message type. - - :param message_type: The MessageType enum to be converted. - :return: The corresponding protobuf message type. - """ - if message_type == MessageType.NORMAL: - return ProtoMessageType.NORMAL - elif message_type == MessageType.FIFO: - return ProtoMessageType.FIFO - elif message_type == MessageType.DELAY: - return ProtoMessageType.DELAY - elif message_type == MessageType.TRANSACTION: - return ProtoMessageType.TRANSACTION - else: - return ProtoMessageType.UNSPECIFIED - - -class MessageQueue: - """A class that encapsulates a message queue entity.""" - - def __init__(self, message_queue): - """Initialize a MessageQueue instance. - - :param message_queue: The initial message queue to be encapsulated. 
- """ - self._topic_resource = Resource(message_queue.topic.name, message_queue.topic) - self.queue_id = message_queue.id - self.permission = PermissionHelper.from_protobuf(message_queue.permission) - self.accept_message_types = [ - MessageTypeHelper.from_protobuf(mt) - for mt in message_queue.accept_message_types - ] - self.broker = Broker(message_queue.broker) - - @property - def topic(self): - """The topic resource name. - - :return: The name of the topic resource. - """ - return self._topic_resource.name - - def __str__(self): - """Get a string representation of the MessageQueue instance. - - :return: A string that represents the MessageQueue instance. - """ - return f"{self.broker.name}.{self._topic_resource}.{self.queue_id}" - - def to_protobuf(self): - """Convert the MessageQueue instance to protobuf message queue. - - :return: A protobuf message queue that represents the MessageQueue instance. - """ - message_types = [ - MessageTypeHelper.to_protobuf(mt) for mt in self.accept_message_types - ] - return ProtoMessageQueue( - topic=self._topic_resource.to_protobuf(), - id=self.queue_id, - permission=PermissionHelper.to_protobuf(self.permission), - broker=self.broker.to_protobuf(), - accept_message_types=message_types, - ) - - -class TopicRouteData: - """A class that encapsulates a list of message queues.""" - - def __init__(self, message_queues: List[definition_pb2.MessageQueue]): - """Initialize a TopicRouteData instance. - - :param message_queues: The initial list of message queues to be encapsulated. - """ - message_queue_list = [] - for mq in message_queues: - message_queue_list.append(MessageQueue(mq)) - self.__message_queue_list = message_queue_list - - @property - def message_queues(self) -> List[MessageQueue]: - """The list of MessageQueue instances. - - :return: The list of MessageQueue instances that the TopicRouteData instance encapsulates. 
- """ - return self.__message_queue_list diff --git a/python/rocketmq/exponential_backoff_retry_policy.py b/python/rocketmq/exponential_backoff_retry_policy.py deleted file mode 100644 index 4dc87cd8d..000000000 --- a/python/rocketmq/exponential_backoff_retry_policy.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from datetime import timedelta - -from google.protobuf.duration_pb2 import Duration - - -class ExponentialBackoffRetryPolicy: - """A class implementing exponential backoff retry policy.""" - - def __init__(self, max_attempts, initial_backoff, max_backoff, backoff_multiplier): - """Initialize an ExponentialBackoffRetryPolicy instance. - - :param max_attempts: Maximum number of retry attempts. - :param initial_backoff: Initial delay duration before the first retry. - :param max_backoff: Maximum delay duration between retries. - :param backoff_multiplier: Multiplier that determines the delay factor between retries. - """ - self._max_attempts = max_attempts - self.initial_backoff = initial_backoff - self.max_backoff = max_backoff - self.backoff_multiplier = backoff_multiplier - - def get_max_attempts(self): - """Get maximum number of retry attempts. 
- - :return: Maximum number of retry attempts. - """ - return self._max_attempts - - def inherit_backoff(self, retry_policy): - """Inherit backoff parameters from another retry policy. - - :param retry_policy: The retry policy to inherit from. - :return: An instance of ExponentialBackoffRetryPolicy with inherited parameters. - :raise ValueError: If the strategy of the retry policy is not ExponentialBackoff. - """ - if retry_policy.strategy_case != "ExponentialBackoff": - raise ValueError("Strategy must be exponential backoff") - return self._inherit_backoff(retry_policy.exponential_backoff) - - def _inherit_backoff(self, retry_policy): - """Inherit backoff parameters from another retry policy. - - :param retry_policy: The retry policy to inherit from. - :return: An instance of ExponentialBackoffRetryPolicy with inherited parameters. - """ - return ExponentialBackoffRetryPolicy(self._max_attempts, - retry_policy.initial.ToTimedelta(), - retry_policy.max.ToTimedelta(), - retry_policy.multiplier) - - def get_next_attempt_delay(self, attempt): - """Calculate the delay before the next retry attempt. - - :param attempt: The number of the current attempt. - :return: The delay before the next attempt. - """ - delay_seconds = min( - self.initial_backoff.total_seconds() * math.pow(self.backoff_multiplier, 1.0 * (attempt - 1)), - self.max_backoff.total_seconds()) - return timedelta(seconds=delay_seconds) if delay_seconds >= 0 else timedelta(seconds=0) - - @staticmethod - def immediately_retry_policy(max_attempts): - """Create a retry policy that makes immediate retries. - - :param max_attempts: Maximum number of retry attempts. - :return: An instance of ExponentialBackoffRetryPolicy with no delay between retries. - """ - return ExponentialBackoffRetryPolicy(max_attempts, timedelta(seconds=0), timedelta(seconds=0), 1) - - def to_protobuf(self): - """Convert the ExponentialBackoffRetryPolicy instance to protobuf. 
- - :return: A protobuf message that represents the ExponentialBackoffRetryPolicy instance. - """ - exponential_backoff = { - 'Multiplier': self.backoff_multiplier, - 'Max': Duration.FromTimedelta(self.max_backoff), - 'Initial': Duration.FromTimedelta(self.initial_backoff) - } - return { - 'MaxAttempts': self._max_attempts, - 'ExponentialBackoff': exponential_backoff - } diff --git a/python/rocketmq/grpc_protocol/__init__.py b/python/rocketmq/grpc_protocol/__init__.py new file mode 100644 index 000000000..c8555455e --- /dev/null +++ b/python/rocketmq/grpc_protocol/__init__.py @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .definition_pb2 import (Address, AddressScheme, Broker, # noqa + ClientType, Code, DigestType, Encoding, Endpoints, + FilterType, Language, MessageType, Metric, + Permission, Publishing, Resource, Settings, + Status, Subscription, TransactionResolution, + TransactionSource) +from .service_pb2 import (AckMessageEntry, AckMessageRequest, # noqa + ChangeInvisibleDurationRequest, + EndTransactionRequest, HeartbeatRequest, + NotifyClientTerminationRequest, QueryRouteRequest, + ReceiveMessageRequest, SendMessageRequest, + TelemetryCommand) +from .service_pb2_grpc import MessagingServiceStub + +__all__ = [ + "Address", # noqa + "AddressScheme", # noqa + "ClientType", # noqa + "Resource", # noqa + "Broker", # noqa + "Code", # noqa + "DigestType", # noqa + "Encoding", # noqa + "Endpoints", # noqa + "FilterType", # noqa + "Language", # noqa + "MessageType", # noqa + "Metric", # noqa + "Permission", # noqa + "Publishing", # noqa + "Settings", # noqa + "Status", # noqa + "Subscription", # noqa + "TransactionResolution", # noqa + "TransactionSource", # noqa + "AckMessageEntry", # noqa + "AckMessageRequest", # noqa + "ChangeInvisibleDurationRequest", # noqa + "EndTransactionRequest", # noqa + "HeartbeatRequest", # noqa + "NotifyClientTerminationRequest", # noqa + "QueryRouteRequest", # noqa + "ReceiveMessageRequest", # noqa + "SendMessageRequest", # noqa + "TelemetryCommand", # noqa + "MessagingServiceStub", +] diff --git a/python/rocketmq/grpc_protocol/admin_pb2.py b/python/rocketmq/grpc_protocol/admin_pb2.py new file mode 100644 index 000000000..abaa8a489 --- /dev/null +++ b/python/rocketmq/grpc_protocol/admin_pb2.py @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +# Generated by the grpc_protocol buffer compiler. DO NOT EDIT! +# source: admin.proto +# Protobuf Python Version: 5.26.1 +"""Generated grpc_protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x61\x64min.proto\x12\x12\x61pache.rocketmq.v2\"\x95\x01\n\x15\x43hangeLogLevelRequest\x12>\n\x05level\x18\x01 \x01(\x0e\x32/.apache.rocketmq.v2.ChangeLogLevelRequest.Level\"<\n\x05Level\x12\t\n\x05TRACE\x10\x00\x12\t\n\x05\x44\x45\x42UG\x10\x01\x12\x08\n\x04INFO\x10\x02\x12\x08\n\x04WARN\x10\x03\x12\t\n\x05\x45RROR\x10\x04\"(\n\x16\x43hangeLogLevelResponse\x12\x0e\n\x06remark\x18\x01 \x01(\t2r\n\x05\x41\x64min\x12i\n\x0e\x43hangeLogLevel\x12).apache.rocketmq.v2.ChangeLogLevelRequest\x1a*.apache.rocketmq.v2.ChangeLogLevelResponse\"\x00\x42=\n\x12\x61pache.rocketmq.v2B\x07MQAdminP\x01\xa0\x01\x01\xd8\x01\x01\xf8\x01\x01\xaa\x02\x12\x41pache.Rocketmq.V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'admin_pb2', 
_globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\022apache.rocketmq.v2B\007MQAdminP\001\240\001\001\330\001\001\370\001\001\252\002\022Apache.Rocketmq.V2' + _globals['_CHANGELOGLEVELREQUEST']._serialized_start=36 + _globals['_CHANGELOGLEVELREQUEST']._serialized_end=185 + _globals['_CHANGELOGLEVELREQUEST_LEVEL']._serialized_start=125 + _globals['_CHANGELOGLEVELREQUEST_LEVEL']._serialized_end=185 + _globals['_CHANGELOGLEVELRESPONSE']._serialized_start=187 + _globals['_CHANGELOGLEVELRESPONSE']._serialized_end=227 + _globals['_ADMIN']._serialized_start=229 + _globals['_ADMIN']._serialized_end=343 +# @@protoc_insertion_point(module_scope) diff --git a/python/rocketmq/protocol/admin_pb2_grpc.py b/python/rocketmq/grpc_protocol/admin_pb2_grpc.py similarity index 76% rename from python/rocketmq/protocol/admin_pb2_grpc.py rename to python/rocketmq/grpc_protocol/admin_pb2_grpc.py index 6905975f1..058e6844c 100644 --- a/python/rocketmq/protocol/admin_pb2_grpc.py +++ b/python/rocketmq/grpc_protocol/admin_pb2_grpc.py @@ -13,12 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +# Generated by the gRPC Python grpc_protocol compiler plugin. DO NOT EDIT! 
"""Client and server classes corresponding to protobuf-defined services.""" +import admin_pb2 as admin__pb2 import grpc -from protocol import admin_pb2 as apache_dot_rocketmq_dot_v2_dot_admin__pb2 - class AdminStub(object): """Missing associated documentation comment in .proto file.""" @@ -31,8 +30,8 @@ def __init__(self, channel): """ self.ChangeLogLevel = channel.unary_unary( '/apache.rocketmq.v2.Admin/ChangeLogLevel', - request_serializer=apache_dot_rocketmq_dot_v2_dot_admin__pb2.ChangeLogLevelRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_admin__pb2.ChangeLogLevelResponse.FromString, + request_serializer=admin__pb2.ChangeLogLevelRequest.SerializeToString, + response_deserializer=admin__pb2.ChangeLogLevelResponse.FromString, ) @@ -50,8 +49,8 @@ def add_AdminServicer_to_server(servicer, server): rpc_method_handlers = { 'ChangeLogLevel': grpc.unary_unary_rpc_method_handler( servicer.ChangeLogLevel, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_admin__pb2.ChangeLogLevelRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_admin__pb2.ChangeLogLevelResponse.SerializeToString, + request_deserializer=admin__pb2.ChangeLogLevelRequest.FromString, + response_serializer=admin__pb2.ChangeLogLevelResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -75,7 +74,7 @@ def ChangeLogLevel(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.Admin/ChangeLogLevel', - apache_dot_rocketmq_dot_v2_dot_admin__pb2.ChangeLogLevelRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_admin__pb2.ChangeLogLevelResponse.FromString, + admin__pb2.ChangeLogLevelRequest.SerializeToString, + admin__pb2.ChangeLogLevelResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/rocketmq/grpc_protocol/definition_pb2.py 
b/python/rocketmq/grpc_protocol/definition_pb2.py new file mode 100644 index 000000000..159556c0c --- /dev/null +++ b/python/rocketmq/grpc_protocol/definition_pb2.py @@ -0,0 +1,114 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +# Generated by the grpc_protocol buffer compiler. DO NOT EDIT! 
+# source: definition.proto +# Protobuf Python Version: 5.26.1 +"""Generated grpc_protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import \ + duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import \ + timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x64\x65\x66inition.proto\x12\x12\x61pache.rocketmq.v2\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"T\n\x10\x46ilterExpression\x12,\n\x04type\x18\x01 \x01(\x0e\x32\x1e.apache.rocketmq.v2.FilterType\x12\x12\n\nexpression\x18\x02 \x01(\t\"\xbb\x01\n\x0bRetryPolicy\x12\x14\n\x0cmax_attempts\x18\x01 \x01(\x05\x12\x45\n\x13\x65xponential_backoff\x18\x02 \x01(\x0b\x32&.apache.rocketmq.v2.ExponentialBackoffH\x00\x12\x43\n\x12\x63ustomized_backoff\x18\x03 \x01(\x0b\x32%.apache.rocketmq.v2.CustomizedBackoffH\x00\x42\n\n\x08strategy\"|\n\x12\x45xponentialBackoff\x12*\n\x07initial\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12&\n\x03max\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nmultiplier\x18\x03 \x01(\x02\"<\n\x11\x43ustomizedBackoff\x12\'\n\x04next\x18\x01 \x03(\x0b\x32\x19.google.protobuf.Duration\"4\n\x08Resource\x12\x1a\n\x12resource_namespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"z\n\x11SubscriptionEntry\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x38\n\nexpression\x18\x02 \x01(\x0b\x32$.apache.rocketmq.v2.FilterExpression\"%\n\x07\x41\x64\x64ress\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\"n\n\tEndpoints\x12\x31\n\x06scheme\x18\x01 
\x01(\x0e\x32!.apache.rocketmq.v2.AddressScheme\x12.\n\taddresses\x18\x02 \x03(\x0b\x32\x1b.apache.rocketmq.v2.Address\"T\n\x06\x42roker\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x30\n\tendpoints\x18\x03 \x01(\x0b\x32\x1d.apache.rocketmq.v2.Endpoints\"\xe6\x01\n\x0cMessageQueue\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x32\n\npermission\x18\x03 \x01(\x0e\x32\x1e.apache.rocketmq.v2.Permission\x12*\n\x06\x62roker\x18\x04 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Broker\x12=\n\x14\x61\x63\x63\x65pt_message_types\x18\x05 \x03(\x0e\x32\x1f.apache.rocketmq.v2.MessageType\"H\n\x06\x44igest\x12,\n\x04type\x18\x01 \x01(\x0e\x32\x1e.apache.rocketmq.v2.DigestType\x12\x10\n\x08\x63hecksum\x18\x02 \x01(\t\"\x8f\x08\n\x10SystemProperties\x12\x10\n\x03tag\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12/\n\x0b\x62ody_digest\x18\x04 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Digest\x12\x33\n\rbody_encoding\x18\x05 \x01(\x0e\x32\x1c.apache.rocketmq.v2.Encoding\x12\x35\n\x0cmessage_type\x18\x06 \x01(\x0e\x32\x1f.apache.rocketmq.v2.MessageType\x12\x32\n\x0e\x62orn_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\tborn_host\x18\x08 \x01(\t\x12\x38\n\x0fstore_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x88\x01\x01\x12\x12\n\nstore_host\x18\n \x01(\t\x12;\n\x12\x64\x65livery_timestamp\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x02\x88\x01\x01\x12\x1b\n\x0ereceipt_handle\x18\x0c \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08queue_id\x18\r \x01(\x05\x12\x19\n\x0cqueue_offset\x18\x0e \x01(\x03H\x04\x88\x01\x01\x12:\n\x12invisible_duration\x18\x0f \x01(\x0b\x32\x19.google.protobuf.DurationH\x05\x88\x01\x01\x12\x1d\n\x10\x64\x65livery_attempt\x18\x10 \x01(\x05H\x06\x88\x01\x01\x12\x1a\n\rmessage_group\x18\x11 \x01(\tH\x07\x88\x01\x01\x12\x1a\n\rtrace_context\x18\x12 
\x01(\tH\x08\x88\x01\x01\x12N\n&orphaned_transaction_recovery_duration\x18\x13 \x01(\x0b\x32\x19.google.protobuf.DurationH\t\x88\x01\x01\x12\x43\n\x11\x64\x65\x61\x64_letter_queue\x18\x14 \x01(\x0b\x32#.apache.rocketmq.v2.DeadLetterQueueH\n\x88\x01\x01\x42\x06\n\x04_tagB\x12\n\x10_store_timestampB\x15\n\x13_delivery_timestampB\x11\n\x0f_receipt_handleB\x0f\n\r_queue_offsetB\x15\n\x13_invisible_durationB\x13\n\x11_delivery_attemptB\x10\n\x0e_message_groupB\x10\n\x0e_trace_contextB)\n\'_orphaned_transaction_recovery_durationB\x14\n\x12_dead_letter_queue\"4\n\x0f\x44\x65\x61\x64LetterQueue\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x12\n\nmessage_id\x18\x02 \x01(\t\"\x86\x02\n\x07Message\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12H\n\x0fuser_properties\x18\x02 \x03(\x0b\x32/.apache.rocketmq.v2.Message.UserPropertiesEntry\x12?\n\x11system_properties\x18\x03 \x01(\x0b\x32$.apache.rocketmq.v2.SystemProperties\x12\x0c\n\x04\x62ody\x18\x04 \x01(\x0c\x1a\x35\n\x13UserPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"E\n\nAssignment\x12\x37\n\rmessage_queue\x18\x01 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\"A\n\x06Status\x12&\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x18.apache.rocketmq.v2.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"i\n\x02UA\x12.\n\x08language\x18\x01 \x01(\x0e\x32\x1c.apache.rocketmq.v2.Language\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x10\n\x08platform\x18\x03 \x01(\t\x12\x10\n\x08hostname\x18\x04 \x01(\t\"\x90\x04\n\x08Settings\x12\x38\n\x0b\x63lient_type\x18\x01 \x01(\x0e\x32\x1e.apache.rocketmq.v2.ClientTypeH\x01\x88\x01\x01\x12\x38\n\x0c\x61\x63\x63\x65ss_point\x18\x02 \x01(\x0b\x32\x1d.apache.rocketmq.v2.EndpointsH\x02\x88\x01\x01\x12<\n\x0e\x62\x61\x63koff_policy\x18\x03 \x01(\x0b\x32\x1f.apache.rocketmq.v2.RetryPolicyH\x03\x88\x01\x01\x12\x37\n\x0frequest_timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationH\x04\x88\x01\x01\x12\x34\n\npublishing\x18\x05 
\x01(\x0b\x32\x1e.apache.rocketmq.v2.PublishingH\x00\x12\x38\n\x0csubscription\x18\x06 \x01(\x0b\x32 .apache.rocketmq.v2.SubscriptionH\x00\x12*\n\nuser_agent\x18\x07 \x01(\x0b\x32\x16.apache.rocketmq.v2.UA\x12*\n\x06metric\x18\x08 \x01(\x0b\x32\x1a.apache.rocketmq.v2.MetricB\t\n\x07pub_subB\x0e\n\x0c_client_typeB\x0f\n\r_access_pointB\x11\n\x0f_backoff_policyB\x12\n\x10_request_timeout\"p\n\nPublishing\x12,\n\x06topics\x18\x01 \x03(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x15\n\rmax_body_size\x18\x02 \x01(\x05\x12\x1d\n\x15validate_message_type\x18\x03 \x01(\x08\"\xb3\x02\n\x0cSubscription\x12\x30\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.ResourceH\x00\x88\x01\x01\x12<\n\rsubscriptions\x18\x02 \x03(\x0b\x32%.apache.rocketmq.v2.SubscriptionEntry\x12\x11\n\x04\x66ifo\x18\x03 \x01(\x08H\x01\x88\x01\x01\x12\x1f\n\x12receive_batch_size\x18\x04 \x01(\x05H\x02\x88\x01\x01\x12<\n\x14long_polling_timeout\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x03\x88\x01\x01\x42\x08\n\x06_groupB\x07\n\x05_fifoB\x15\n\x13_receive_batch_sizeB\x17\n\x15_long_polling_timeout\"Y\n\x06Metric\x12\n\n\x02on\x18\x01 \x01(\x08\x12\x35\n\tendpoints\x18\x02 
\x01(\x0b\x32\x1d.apache.rocketmq.v2.EndpointsH\x00\x88\x01\x01\x42\x0c\n\n_endpoints*Y\n\x15TransactionResolution\x12&\n\"TRANSACTION_RESOLUTION_UNSPECIFIED\x10\x00\x12\n\n\x06\x43OMMIT\x10\x01\x12\x0c\n\x08ROLLBACK\x10\x02*W\n\x11TransactionSource\x12\x16\n\x12SOURCE_UNSPECIFIED\x10\x00\x12\x11\n\rSOURCE_CLIENT\x10\x01\x12\x17\n\x13SOURCE_SERVER_CHECK\x10\x02*W\n\nPermission\x12\x1a\n\x16PERMISSION_UNSPECIFIED\x10\x00\x12\x08\n\x04NONE\x10\x01\x12\x08\n\x04READ\x10\x02\x12\t\n\x05WRITE\x10\x03\x12\x0e\n\nREAD_WRITE\x10\x04*;\n\nFilterType\x12\x1b\n\x17\x46ILTER_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03TAG\x10\x01\x12\x07\n\x03SQL\x10\x02*T\n\rAddressScheme\x12\x1e\n\x1a\x41\x44\x44RESS_SCHEME_UNSPECIFIED\x10\x00\x12\x08\n\x04IPv4\x10\x01\x12\x08\n\x04IPv6\x10\x02\x12\x0f\n\x0b\x44OMAIN_NAME\x10\x03*]\n\x0bMessageType\x12\x1c\n\x18MESSAGE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06NORMAL\x10\x01\x12\x08\n\x04\x46IFO\x10\x02\x12\t\n\x05\x44\x45LAY\x10\x03\x12\x0f\n\x0bTRANSACTION\x10\x04*G\n\nDigestType\x12\x1b\n\x17\x44IGEST_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x43RC32\x10\x01\x12\x07\n\x03MD5\x10\x02\x12\x08\n\x04SHA1\x10\x03*r\n\nClientType\x12\x1b\n\x17\x43LIENT_TYPE_UNSPECIFIED\x10\x00\x12\x0c\n\x08PRODUCER\x10\x01\x12\x11\n\rPUSH_CONSUMER\x10\x02\x12\x13\n\x0fSIMPLE_CONSUMER\x10\x03\x12\x11\n\rPULL_CONSUMER\x10\x04*<\n\x08\x45ncoding\x12\x18\n\x14\x45NCODING_UNSPECIFIED\x10\x00\x12\x0c\n\x08IDENTITY\x10\x01\x12\x08\n\x04GZIP\x10\x02*\xac\n\n\x04\x43ode\x12\x14\n\x10\x43ODE_UNSPECIFIED\x10\x00\x12\x08\n\x02OK\x10\xa0\x9c\x01\x12\x16\n\x10MULTIPLE_RESULTS\x10\xb0\xea\x01\x12\x11\n\x0b\x42\x41\x44_REQUEST\x10\xc0\xb8\x02\x12\x1a\n\x14ILLEGAL_ACCESS_POINT\x10\xc1\xb8\x02\x12\x13\n\rILLEGAL_TOPIC\x10\xc2\xb8\x02\x12\x1c\n\x16ILLEGAL_CONSUMER_GROUP\x10\xc3\xb8\x02\x12\x19\n\x13ILLEGAL_MESSAGE_TAG\x10\xc4\xb8\x02\x12\x19\n\x13ILLEGAL_MESSAGE_KEY\x10\xc5\xb8\x02\x12\x1b\n\x15ILLEGAL_MESSAGE_GROUP\x10\xc6\xb8\x02\x12\"\n\x1cILLEGAL_MESSAGE_PROPERTY_KEY\x10\xc7\xb8\x02\x12\x1c\n\
x16INVALID_TRANSACTION_ID\x10\xc8\xb8\x02\x12\x18\n\x12ILLEGAL_MESSAGE_ID\x10\xc9\xb8\x02\x12\x1f\n\x19ILLEGAL_FILTER_EXPRESSION\x10\xca\xb8\x02\x12\x1c\n\x16ILLEGAL_INVISIBLE_TIME\x10\xcb\xb8\x02\x12\x1b\n\x15ILLEGAL_DELIVERY_TIME\x10\xcc\xb8\x02\x12\x1c\n\x16INVALID_RECEIPT_HANDLE\x10\xcd\xb8\x02\x12)\n#MESSAGE_PROPERTY_CONFLICT_WITH_TYPE\x10\xce\xb8\x02\x12\x1e\n\x18UNRECOGNIZED_CLIENT_TYPE\x10\xcf\xb8\x02\x12\x17\n\x11MESSAGE_CORRUPTED\x10\xd0\xb8\x02\x12\x18\n\x12\x43LIENT_ID_REQUIRED\x10\xd1\xb8\x02\x12\x1a\n\x14ILLEGAL_POLLING_TIME\x10\xd2\xb8\x02\x12\x14\n\x0eILLEGAL_OFFSET\x10\xd3\xb8\x02\x12\x12\n\x0cUNAUTHORIZED\x10\xa4\xb9\x02\x12\x16\n\x10PAYMENT_REQUIRED\x10\x88\xba\x02\x12\x0f\n\tFORBIDDEN\x10\xec\xba\x02\x12\x0f\n\tNOT_FOUND\x10\xd0\xbb\x02\x12\x17\n\x11MESSAGE_NOT_FOUND\x10\xd1\xbb\x02\x12\x15\n\x0fTOPIC_NOT_FOUND\x10\xd2\xbb\x02\x12\x1e\n\x18\x43ONSUMER_GROUP_NOT_FOUND\x10\xd3\xbb\x02\x12\x16\n\x10OFFSET_NOT_FOUND\x10\xd4\xbb\x02\x12\x15\n\x0fREQUEST_TIMEOUT\x10\xe0\xbe\x02\x12\x17\n\x11PAYLOAD_TOO_LARGE\x10\xd4\xc2\x02\x12\x1c\n\x16MESSAGE_BODY_TOO_LARGE\x10\xd5\xc2\x02\x12\x19\n\x13PRECONDITION_FAILED\x10\xb0\xce\x02\x12\x17\n\x11TOO_MANY_REQUESTS\x10\x94\xcf\x02\x12%\n\x1fREQUEST_HEADER_FIELDS_TOO_LARGE\x10\xdc\xd0\x02\x12\"\n\x1cMESSAGE_PROPERTIES_TOO_LARGE\x10\xdd\xd0\x02\x12\x14\n\x0eINTERNAL_ERROR\x10\xd0\x86\x03\x12\x1b\n\x15INTERNAL_SERVER_ERROR\x10\xd1\x86\x03\x12\x16\n\x10HA_NOT_AVAILABLE\x10\xd2\x86\x03\x12\x15\n\x0fNOT_IMPLEMENTED\x10\xb4\x87\x03\x12\x13\n\rPROXY_TIMEOUT\x10\xe0\x89\x03\x12 
\n\x1aMASTER_PERSISTENCE_TIMEOUT\x10\xe1\x89\x03\x12\x1f\n\x19SLAVE_PERSISTENCE_TIMEOUT\x10\xe2\x89\x03\x12\x11\n\x0bUNSUPPORTED\x10\xc4\x8a\x03\x12\x19\n\x13VERSION_UNSUPPORTED\x10\xc5\x8a\x03\x12%\n\x1fVERIFY_FIFO_MESSAGE_UNSUPPORTED\x10\xc6\x8a\x03\x12\x1f\n\x19\x46\x41ILED_TO_CONSUME_MESSAGE\x10\xe0\xd4\x03*\xad\x01\n\x08Language\x12\x18\n\x14LANGUAGE_UNSPECIFIED\x10\x00\x12\x08\n\x04JAVA\x10\x01\x12\x07\n\x03\x43PP\x10\x02\x12\x0b\n\x07\x44OT_NET\x10\x03\x12\n\n\x06GOLANG\x10\x04\x12\x08\n\x04RUST\x10\x05\x12\n\n\x06PYTHON\x10\x06\x12\x07\n\x03PHP\x10\x07\x12\x0b\n\x07NODE_JS\x10\x08\x12\x08\n\x04RUBY\x10\t\x12\x0f\n\x0bOBJECTIVE_C\x10\n\x12\x08\n\x04\x44\x41RT\x10\x0b\x12\n\n\x06KOTLIN\x10\x0c*:\n\x11QueryOffsetPolicy\x12\r\n\tBEGINNING\x10\x00\x12\x07\n\x03\x45ND\x10\x01\x12\r\n\tTIMESTAMP\x10\x02\x42;\n\x12\x61pache.rocketmq.v2B\x08MQDomainP\x01\xa0\x01\x01\xd8\x01\x01\xaa\x02\x12\x41pache.Rocketmq.V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'definition_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\022apache.rocketmq.v2B\010MQDomainP\001\240\001\001\330\001\001\252\002\022Apache.Rocketmq.V2' + _globals['_MESSAGE_USERPROPERTIESENTRY']._loaded_options = None + _globals['_MESSAGE_USERPROPERTIESENTRY']._serialized_options = b'8\001' + _globals['_TRANSACTIONRESOLUTION']._serialized_start=3943 + _globals['_TRANSACTIONRESOLUTION']._serialized_end=4032 + _globals['_TRANSACTIONSOURCE']._serialized_start=4034 + _globals['_TRANSACTIONSOURCE']._serialized_end=4121 + _globals['_PERMISSION']._serialized_start=4123 + _globals['_PERMISSION']._serialized_end=4210 + _globals['_FILTERTYPE']._serialized_start=4212 + _globals['_FILTERTYPE']._serialized_end=4271 + _globals['_ADDRESSSCHEME']._serialized_start=4273 + 
_globals['_ADDRESSSCHEME']._serialized_end=4357 + _globals['_MESSAGETYPE']._serialized_start=4359 + _globals['_MESSAGETYPE']._serialized_end=4452 + _globals['_DIGESTTYPE']._serialized_start=4454 + _globals['_DIGESTTYPE']._serialized_end=4525 + _globals['_CLIENTTYPE']._serialized_start=4527 + _globals['_CLIENTTYPE']._serialized_end=4641 + _globals['_ENCODING']._serialized_start=4643 + _globals['_ENCODING']._serialized_end=4703 + _globals['_CODE']._serialized_start=4706 + _globals['_CODE']._serialized_end=6030 + _globals['_LANGUAGE']._serialized_start=6033 + _globals['_LANGUAGE']._serialized_end=6206 + _globals['_QUERYOFFSETPOLICY']._serialized_start=6208 + _globals['_QUERYOFFSETPOLICY']._serialized_end=6266 + _globals['_FILTEREXPRESSION']._serialized_start=105 + _globals['_FILTEREXPRESSION']._serialized_end=189 + _globals['_RETRYPOLICY']._serialized_start=192 + _globals['_RETRYPOLICY']._serialized_end=379 + _globals['_EXPONENTIALBACKOFF']._serialized_start=381 + _globals['_EXPONENTIALBACKOFF']._serialized_end=505 + _globals['_CUSTOMIZEDBACKOFF']._serialized_start=507 + _globals['_CUSTOMIZEDBACKOFF']._serialized_end=567 + _globals['_RESOURCE']._serialized_start=569 + _globals['_RESOURCE']._serialized_end=621 + _globals['_SUBSCRIPTIONENTRY']._serialized_start=623 + _globals['_SUBSCRIPTIONENTRY']._serialized_end=745 + _globals['_ADDRESS']._serialized_start=747 + _globals['_ADDRESS']._serialized_end=784 + _globals['_ENDPOINTS']._serialized_start=786 + _globals['_ENDPOINTS']._serialized_end=896 + _globals['_BROKER']._serialized_start=898 + _globals['_BROKER']._serialized_end=982 + _globals['_MESSAGEQUEUE']._serialized_start=985 + _globals['_MESSAGEQUEUE']._serialized_end=1215 + _globals['_DIGEST']._serialized_start=1217 + _globals['_DIGEST']._serialized_end=1289 + _globals['_SYSTEMPROPERTIES']._serialized_start=1292 + _globals['_SYSTEMPROPERTIES']._serialized_end=2331 + _globals['_DEADLETTERQUEUE']._serialized_start=2333 + 
_globals['_DEADLETTERQUEUE']._serialized_end=2385 + _globals['_MESSAGE']._serialized_start=2388 + _globals['_MESSAGE']._serialized_end=2650 + _globals['_MESSAGE_USERPROPERTIESENTRY']._serialized_start=2597 + _globals['_MESSAGE_USERPROPERTIESENTRY']._serialized_end=2650 + _globals['_ASSIGNMENT']._serialized_start=2652 + _globals['_ASSIGNMENT']._serialized_end=2721 + _globals['_STATUS']._serialized_start=2723 + _globals['_STATUS']._serialized_end=2788 + _globals['_UA']._serialized_start=2790 + _globals['_UA']._serialized_end=2895 + _globals['_SETTINGS']._serialized_start=2898 + _globals['_SETTINGS']._serialized_end=3426 + _globals['_PUBLISHING']._serialized_start=3428 + _globals['_PUBLISHING']._serialized_end=3540 + _globals['_SUBSCRIPTION']._serialized_start=3543 + _globals['_SUBSCRIPTION']._serialized_end=3850 + _globals['_METRIC']._serialized_start=3852 + _globals['_METRIC']._serialized_end=3941 +# @@protoc_insertion_point(module_scope) diff --git a/python/rocketmq/protocol/definition_pb2_grpc.py b/python/rocketmq/grpc_protocol/definition_pb2_grpc.py similarity index 92% rename from python/rocketmq/protocol/definition_pb2_grpc.py rename to python/rocketmq/grpc_protocol/definition_pb2_grpc.py index 03079090e..8bcb736b8 100644 --- a/python/rocketmq/protocol/definition_pb2_grpc.py +++ b/python/rocketmq/grpc_protocol/definition_pb2_grpc.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +# Generated by the gRPC Python grpc_protocol compiler plugin. DO NOT EDIT! 
"""Client and server classes corresponding to protobuf-defined services.""" import grpc diff --git a/python/rocketmq/grpc_protocol/proto/admin.proto b/python/rocketmq/grpc_protocol/proto/admin.proto new file mode 100644 index 000000000..7dbb7027d --- /dev/null +++ b/python/rocketmq/grpc_protocol/proto/admin.proto @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package apache.rocketmq.v2; + +option cc_enable_arenas = true; +option csharp_namespace = "Apache.Rocketmq.V2"; +option java_multiple_files = true; +option java_package = "apache.rocketmq.v2"; +option java_generate_equals_and_hash = true; +option java_string_check_utf8 = true; +option java_outer_classname = "MQAdmin"; + +message ChangeLogLevelRequest { + enum Level { + TRACE = 0; + DEBUG = 1; + INFO = 2; + WARN = 3; + ERROR = 4; + } + Level level = 1; +} + +message ChangeLogLevelResponse { string remark = 1; } + +service Admin { + rpc ChangeLogLevel(ChangeLogLevelRequest) returns (ChangeLogLevelResponse) {} +} \ No newline at end of file diff --git a/python/rocketmq/grpc_protocol/proto/definition.proto b/python/rocketmq/grpc_protocol/proto/definition.proto new file mode 100644 index 000000000..02cbc4ce4 --- /dev/null +++ b/python/rocketmq/grpc_protocol/proto/definition.proto @@ -0,0 +1,568 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +package apache.rocketmq.v2; + +option csharp_namespace = "Apache.Rocketmq.V2"; +option java_multiple_files = true; +option java_package = "apache.rocketmq.v2"; +option java_generate_equals_and_hash = true; +option java_string_check_utf8 = true; +option java_outer_classname = "MQDomain"; + +enum TransactionResolution { + TRANSACTION_RESOLUTION_UNSPECIFIED = 0; + COMMIT = 1; + ROLLBACK = 2; +} + +enum TransactionSource { + SOURCE_UNSPECIFIED = 0; + SOURCE_CLIENT = 1; + SOURCE_SERVER_CHECK = 2; +} + +enum Permission { + PERMISSION_UNSPECIFIED = 0; + NONE = 1; + READ = 2; + WRITE = 3; + READ_WRITE = 4; +} + +enum FilterType { + FILTER_TYPE_UNSPECIFIED = 0; + TAG = 1; + SQL = 2; +} + +message FilterExpression { + FilterType type = 1; + string expression = 2; +} + +message RetryPolicy { + int32 max_attempts = 1; + oneof strategy { + ExponentialBackoff exponential_backoff = 2; + CustomizedBackoff customized_backoff = 3; + } +} + +// https://en.wikipedia.org/wiki/Exponential_backoff +message ExponentialBackoff { + google.protobuf.Duration initial = 1; + google.protobuf.Duration max = 2; + float multiplier = 3; +} + +message CustomizedBackoff { + // To support classic backoff strategy which is arbitrary defined by end users. + // Typical values are: `1s 5s 10s 30s 1m 2m 3m 4m 5m 6m 7m 8m 9m 10m 20m 30m 1h 2h` + repeated google.protobuf.Duration next = 1; +} + +message Resource { + string resource_namespace = 1; + + // Resource name identifier, which remains unique within the abstract resource + // namespace. 
+ string name = 2; +} + +message SubscriptionEntry { + Resource topic = 1; + FilterExpression expression = 2; +} + +enum AddressScheme { + ADDRESS_SCHEME_UNSPECIFIED = 0; + IPv4 = 1; + IPv6 = 2; + DOMAIN_NAME = 3; +} + +message Address { + string host = 1; + int32 port = 2; +} + +message Endpoints { + AddressScheme scheme = 1; + repeated Address addresses = 2; +} + +message Broker { + // Name of the broker + string name = 1; + + // Broker index. Canonically, index = 0 implies that the broker is playing + // leader role while brokers with index > 0 play follower role. + int32 id = 2; + + // Address of the broker, complying with the following scheme + // 1. dns:[//authority/]host[:port] + // 2. ipv4:address[:port][,address[:port],...] – IPv4 addresses + // 3. ipv6:address[:port][,address[:port],...] – IPv6 addresses + Endpoints endpoints = 3; +} + +message MessageQueue { + Resource topic = 1; + int32 id = 2; + Permission permission = 3; + Broker broker = 4; + repeated MessageType accept_message_types = 5; +} + +enum MessageType { + MESSAGE_TYPE_UNSPECIFIED = 0; + + NORMAL = 1; + + // Sequenced message + FIFO = 2; + + // Messages that are delivered after the specified duration. + DELAY = 3; + + // Messages that are transactional. Only committed messages are delivered to + // subscribers. + TRANSACTION = 4; +} + +enum DigestType { + DIGEST_TYPE_UNSPECIFIED = 0; + + // CRC algorithm achieves goal of detecting random data error with lowest + // computation overhead. + CRC32 = 1; + + // MD5 algorithm achieves good balance between collision rate and computation + // overhead. + MD5 = 2; + + // SHA-family has substantially fewer collision with fair amount of + // computation. + SHA1 = 3; +} + +// When publishing messages to or subscribing messages from brokers, clients +// shall include or validate digests of message body to ensure data integrity. +// +// For message publishing, when an invalid digest were detected, brokers need +// respond client with BAD_REQUEST. 
+// +// For messages subscription, when an invalid digest were detected, consumers +// need to handle this case according to message type: +// 1) Standard messages should be negatively acknowledged instantly, causing +// immediate re-delivery; 2) FIFO messages require special RPC, to re-fetch +// previously acquired messages batch; +message Digest { + DigestType type = 1; + string checksum = 2; +} + +enum ClientType { + CLIENT_TYPE_UNSPECIFIED = 0; + PRODUCER = 1; + PUSH_CONSUMER = 2; + SIMPLE_CONSUMER = 3; + PULL_CONSUMER = 4; +} + +enum Encoding { + ENCODING_UNSPECIFIED = 0; + + IDENTITY = 1; + + GZIP = 2; +} + +message SystemProperties { + // Tag, which is optional. + optional string tag = 1; + + // Message keys + repeated string keys = 2; + + // Message identifier, client-side generated, remains unique. + // if message_id is empty, the send message request will be aborted with + // status `INVALID_ARGUMENT` + string message_id = 3; + + // Message body digest + Digest body_digest = 4; + + // Message body encoding. Candidate options are identity, gzip, snappy etc. + Encoding body_encoding = 5; + + // Message type, normal, FIFO or transactional. + MessageType message_type = 6; + + // Message born time-point. + google.protobuf.Timestamp born_timestamp = 7; + + // Message born host. Valid options are IPv4, IPv6 or client host domain name. + string born_host = 8; + + // Time-point at which the message is stored in the broker, which is absent + // for message publishing. + optional google.protobuf.Timestamp store_timestamp = 9; + + // The broker that stores this message. It may be broker name, IP or arbitrary + // identifier that uniquely identify the server. + string store_host = 10; + + // Time-point at which broker delivers to clients, which is optional. + optional google.protobuf.Timestamp delivery_timestamp = 11; + + // If a message is acquired by way of POP, this field holds the receipt, + // which is absent for message publishing. 
+ // Clients use the receipt to acknowledge or negatively acknowledge the + // message. + optional string receipt_handle = 12; + + // Message queue identifier in which a message is physically stored. + int32 queue_id = 13; + + // Message-queue offset at which a message is stored, which is absent for + // message publishing. + optional int64 queue_offset = 14; + + // Period of time servers would remain invisible once a message is acquired. + optional google.protobuf.Duration invisible_duration = 15; + + // Business code may failed to process messages for the moment. Hence, clients + // may request servers to deliver them again using certain back-off strategy, + // the attempt is 1 not 0 if message is delivered first time, and it is absent + // for message publishing. + optional int32 delivery_attempt = 16; + + // Define the group name of message in the same topic, which is optional. + optional string message_group = 17; + + // Trace context for each message, which is optional. + optional string trace_context = 18; + + // If a transactional message stay unresolved for more than + // `transaction_orphan_threshold`, it would be regarded as an + // orphan. Servers that manages orphan messages would pick up + // a capable publisher to resolve + optional google.protobuf.Duration orphaned_transaction_recovery_duration = 19; + + // Information to identify whether this message is from dead letter queue. + optional DeadLetterQueue dead_letter_queue = 20; +} + +message DeadLetterQueue { + // Original topic for this DLQ message. + string topic = 1; + // Original message id for this DLQ message. + string message_id = 2; +} + +message Message { + + Resource topic = 1; + + // User defined key-value pairs. + // If user_properties contain the reserved keys by RocketMQ, + // the send message request will be aborted with status `INVALID_ARGUMENT`. 
+ // See below links for the reserved keys + // https://github.com/apache/rocketmq/blob/master/common/src/main/java/org/apache/rocketmq/common/message/MessageConst.java#L58 + map user_properties = 2; + + SystemProperties system_properties = 3; + + bytes body = 4; +} + +message Assignment { + MessageQueue message_queue = 1; +} + +enum Code { + CODE_UNSPECIFIED = 0; + + // Generic code for success. + OK = 20000; + + // Generic code for multiple return results. + MULTIPLE_RESULTS = 30000; + + // Generic code for bad request, indicating that required fields or headers are missing. + BAD_REQUEST = 40000; + // Format of access point is illegal. + ILLEGAL_ACCESS_POINT = 40001; + // Format of topic is illegal. + ILLEGAL_TOPIC = 40002; + // Format of consumer group is illegal. + ILLEGAL_CONSUMER_GROUP = 40003; + // Format of message tag is illegal. + ILLEGAL_MESSAGE_TAG = 40004; + // Format of message key is illegal. + ILLEGAL_MESSAGE_KEY = 40005; + // Format of message group is illegal. + ILLEGAL_MESSAGE_GROUP = 40006; + // Format of message property key is illegal. + ILLEGAL_MESSAGE_PROPERTY_KEY = 40007; + // Transaction id is invalid. + INVALID_TRANSACTION_ID = 40008; + // Format of message id is illegal. + ILLEGAL_MESSAGE_ID = 40009; + // Format of filter expression is illegal. + ILLEGAL_FILTER_EXPRESSION = 40010; + // The invisible time of request is invalid. + ILLEGAL_INVISIBLE_TIME = 40011; + // The delivery timestamp of message is invalid. + ILLEGAL_DELIVERY_TIME = 40012; + // Receipt handle of message is invalid. + INVALID_RECEIPT_HANDLE = 40013; + // Message property conflicts with its type. + MESSAGE_PROPERTY_CONFLICT_WITH_TYPE = 40014; + // Client type could not be recognized. + UNRECOGNIZED_CLIENT_TYPE = 40015; + // Message is corrupted. + MESSAGE_CORRUPTED = 40016; + // Request is rejected due to missing of x-mq-client-id header. + CLIENT_ID_REQUIRED = 40017; + // Polling time is illegal. + ILLEGAL_POLLING_TIME = 40018; + // Offset is illegal. 
+ ILLEGAL_OFFSET = 40019; + + // Generic code indicates that the client request lacks valid authentication + // credentials for the requested resource. + UNAUTHORIZED = 40100; + + // Generic code indicates that the account is suspended due to overdue of payment. + PAYMENT_REQUIRED = 40200; + + // Generic code for the case that user does not have the permission to operate. + FORBIDDEN = 40300; + + // Generic code for resource not found. + NOT_FOUND = 40400; + // Message not found from server. + MESSAGE_NOT_FOUND = 40401; + // Topic resource does not exist. + TOPIC_NOT_FOUND = 40402; + // Consumer group resource does not exist. + CONSUMER_GROUP_NOT_FOUND = 40403; + // Offset not found from server. + OFFSET_NOT_FOUND = 40404; + + // Generic code representing client side timeout when connecting to, reading data from, or write data to server. + REQUEST_TIMEOUT = 40800; + + // Generic code represents that the request entity is larger than limits defined by server. + PAYLOAD_TOO_LARGE = 41300; + // Message body size exceeds the threshold. + MESSAGE_BODY_TOO_LARGE = 41301; + + // Generic code for use cases where pre-conditions are not met. + // For example, if a producer instance is used to publish messages without prior start() invocation, + // this error code will be raised. + PRECONDITION_FAILED = 42800; + + // Generic code indicates that too many requests are made in short period of duration. + // Requests are throttled. + TOO_MANY_REQUESTS = 42900; + + // Generic code for the case that the server is unwilling to process the request because its header fields are too large. + // The request may be resubmitted after reducing the size of the request header fields. + REQUEST_HEADER_FIELDS_TOO_LARGE = 43100; + // Message properties total size exceeds the threshold. + MESSAGE_PROPERTIES_TOO_LARGE = 43101; + + // Generic code indicates that server/client encountered an unexpected + // condition that prevented it from fulfilling the request. 
+ INTERNAL_ERROR = 50000; + // Code indicates that the server encountered an unexpected condition + // that prevented it from fulfilling the request. + // This error response is a generic "catch-all" response. + // Usually, this indicates the server cannot find a better alternative + // error code to response. Sometimes, server administrators log error + // responses like the 500 status code with more details about the request + // to prevent the error from happening again in the future. + // + // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500 + INTERNAL_SERVER_ERROR = 50001; + // The HA-mechanism is not working now. + HA_NOT_AVAILABLE = 50002; + + // Generic code means that the server or client does not support the + // functionality required to fulfill the request. + NOT_IMPLEMENTED = 50100; + + // Generic code represents that the server, which acts as a gateway or proxy, + // does not get an satisfied response in time from its upstream servers. + // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/504 + PROXY_TIMEOUT = 50400; + // Message persistence timeout. + MASTER_PERSISTENCE_TIMEOUT = 50401; + // Slave persistence timeout. + SLAVE_PERSISTENCE_TIMEOUT = 50402; + + // Generic code for unsupported operation. + UNSUPPORTED = 50500; + // Operation is not allowed in current version. + VERSION_UNSUPPORTED = 50501; + // Not allowed to verify message. Chances are that you are verifying + // a FIFO message, as is violating FIFO semantics. + VERIFY_FIFO_MESSAGE_UNSUPPORTED = 50502; + + // Generic code for failed message consumption. 
+ FAILED_TO_CONSUME_MESSAGE = 60000; +} + +message Status { + Code code = 1; + string message = 2; +} + +enum Language { + LANGUAGE_UNSPECIFIED = 0; + JAVA = 1; + CPP = 2; + DOT_NET = 3; + GOLANG = 4; + RUST = 5; + PYTHON = 6; + PHP = 7; + NODE_JS = 8; + RUBY = 9; + OBJECTIVE_C = 10; + DART = 11; + KOTLIN = 12; +} + +// User Agent +message UA { + // SDK language + Language language = 1; + + // SDK version + string version = 2; + + // Platform details, including OS name, version, arch etc. + string platform = 3; + + // Hostname of the node + string hostname = 4; +} + +message Settings { + // Configurations for all clients. + optional ClientType client_type = 1; + + optional Endpoints access_point = 2; + + // If publishing of messages encounters throttling or server internal errors, + // publishers should implement automatic retries after progressive longer + // back-offs for consecutive errors. + // + // When processing message fails, `backoff_policy` describes an interval + // after which the message should be available to consume again. + // + // For FIFO messages, the interval should be relatively small because + // messages of the same message group would not be readily available until + // the prior one depletes its lifecycle. + optional RetryPolicy backoff_policy = 3; + + // Request timeout for RPCs excluding long-polling. + optional google.protobuf.Duration request_timeout = 4; + + oneof pub_sub { + Publishing publishing = 5; + + Subscription subscription = 6; + } + + // User agent details + UA user_agent = 7; + + Metric metric = 8; +} + +message Publishing { + // Publishing settings below here is appointed by client, thus it is + // unnecessary for server to push at present. + // + // List of topics to which messages will publish to. + repeated Resource topics = 1; + + // If the message body size exceeds `max_body_size`, broker servers would + // reject the request. As a result, it is advisable that Producer performs + // client-side check validation. 
+ int32 max_body_size = 2; + + // When `validate_message_type` flag set `false`, no need to validate message's type + // with messageQueue's `accept_message_types` before publishing. + bool validate_message_type = 3; +} + +message Subscription { + // Subscription settings below here is appointed by client, thus it is + // unnecessary for server to push at present. + // + // Consumer group. + optional Resource group = 1; + + // Subscription for consumer. + repeated SubscriptionEntry subscriptions = 2; + + // Subscription settings below here are from server, it is essential for + // server to push. + // + // When FIFO flag is `true`, messages of the same message group are processed + // in first-in-first-out manner. + // + // Brokers will not deliver further messages of the same group until prior + // ones are completely acknowledged. + optional bool fifo = 3; + + // Message receive batch size here is essential for push consumer. + optional int32 receive_batch_size = 4; + + // Long-polling timeout for `ReceiveMessageRequest`, which is essential for + // push consumer. + optional google.protobuf.Duration long_polling_timeout = 5; +} + +message Metric { + // Indicates that if client should export local metrics to server. + bool on = 1; + + // The endpoint that client metrics should be exported to, which is required if the switch is on. + optional Endpoints endpoints = 2; +} + +enum QueryOffsetPolicy { + // Use this option if client wishes to playback all existing messages. + BEGINNING = 0; + + // Use this option if client wishes to skip all existing messages. + END = 1; + + // Use this option if time-based seek is targeted. 
+ TIMESTAMP = 2; +} \ No newline at end of file diff --git a/python/rocketmq/grpc_protocol/proto/service.proto b/python/rocketmq/grpc_protocol/proto/service.proto new file mode 100644 index 000000000..5c6def3b0 --- /dev/null +++ b/python/rocketmq/grpc_protocol/proto/service.proto @@ -0,0 +1,419 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "definition.proto"; + +package apache.rocketmq.v2; + +option csharp_namespace = "Apache.Rocketmq.V2"; +option java_multiple_files = true; +option java_package = "apache.rocketmq.v2"; +option java_generate_equals_and_hash = true; +option java_string_check_utf8 = true; +option java_outer_classname = "MQService"; + +// Topics are destination of messages to publish to or subscribe from. Similar +// to domain names, they will be addressable after resolution through the +// provided access point. +// +// Access points are usually the addresses of name servers, which fulfill +// service discovery, load-balancing and other auxiliary services. Name servers +// receive periodic heartbeats from affiliate brokers and erase those which +// failed to maintain alive status. 
+// +// Name servers answer queries of QueryRouteRequest, responding clients with +// addressable message-queues, which they may directly publish messages to or +// subscribe messages from. +// +// QueryRouteRequest shall include source endpoints, aka, configured +// access-point, which annotates tenant-id, instance-id or other +// vendor-specific settings. Purpose-built name servers may respond customized +// results based on these particular requirements. +message QueryRouteRequest { + Resource topic = 1; + Endpoints endpoints = 2; +} + +message QueryRouteResponse { + Status status = 1; + + repeated MessageQueue message_queues = 2; +} + +message SendMessageRequest { + repeated Message messages = 1; +} + +message SendResultEntry { + Status status = 1; + string message_id = 2; + string transaction_id = 3; + int64 offset = 4; +} + +message SendMessageResponse { + Status status = 1; + + // Some implementation may have partial failure issues. Client SDK developers are expected to inspect + // each entry for best certainty. + repeated SendResultEntry entries = 2; +} + +message QueryAssignmentRequest { + Resource topic = 1; + Resource group = 2; + Endpoints endpoints = 3; +} + +message QueryAssignmentResponse { + Status status = 1; + repeated Assignment assignments = 2; +} + +message ReceiveMessageRequest { + Resource group = 1; + MessageQueue message_queue = 2; + FilterExpression filter_expression = 3; + int32 batch_size = 4; + // Required if client type is simple consumer. + optional google.protobuf.Duration invisible_duration = 5; + // For message auto renew and clean + bool auto_renew = 6; + optional google.protobuf.Duration long_polling_timeout = 7; + optional string attempt_id = 8; +} + +message ReceiveMessageResponse { + oneof content { + Status status = 1; + Message message = 2; + // The timestamp that brokers start to deliver status line or message. 
+ google.protobuf.Timestamp delivery_timestamp = 3; + } +} + +message AckMessageEntry { + string message_id = 1; + string receipt_handle = 2; +} + +message AckMessageRequest { + Resource group = 1; + Resource topic = 2; + repeated AckMessageEntry entries = 3; +} + +message AckMessageResultEntry { + string message_id = 1; + string receipt_handle = 2; + + // Acknowledge result may be acquired through inspecting + // `status.code`; In case acknowledgement failed, `status.message` + // is the explanation of the failure. + Status status = 3; +} + +message AckMessageResponse { + + // RPC tier status, which is used to represent RPC-level errors including + // authentication, authorization, throttling and other general failures. + Status status = 1; + + repeated AckMessageResultEntry entries = 2; +} + +message ForwardMessageToDeadLetterQueueRequest { + Resource group = 1; + Resource topic = 2; + string receipt_handle = 3; + string message_id = 4; + int32 delivery_attempt = 5; + int32 max_delivery_attempts = 6; +} + +message ForwardMessageToDeadLetterQueueResponse { Status status = 1; } + +message HeartbeatRequest { + optional Resource group = 1; + ClientType client_type = 2; +} + +message HeartbeatResponse { Status status = 1; } + +message EndTransactionRequest { + Resource topic = 1; + string message_id = 2; + string transaction_id = 3; + TransactionResolution resolution = 4; + TransactionSource source = 5; + string trace_context = 6; +} + +message EndTransactionResponse { Status status = 1; } + +message PrintThreadStackTraceCommand { string nonce = 1; } + +message ThreadStackTrace { + string nonce = 1; + optional string thread_stack_trace = 2; +} + +message VerifyMessageCommand { + string nonce = 1; + Message message = 2; +} + +message VerifyMessageResult { + string nonce = 1; +} + +message RecoverOrphanedTransactionCommand { + Message message = 1; + string transaction_id = 2; +} + +message TelemetryCommand { + optional Status status = 1; + + oneof command { + // Client 
settings + Settings settings = 2; + + // These messages are from client. + // + // Report thread stack trace to server. + ThreadStackTrace thread_stack_trace = 3; + + // Report message verify result to server. + VerifyMessageResult verify_message_result = 4; + + // There messages are from server. + // + // Request client to recover the orphaned transaction message. + RecoverOrphanedTransactionCommand recover_orphaned_transaction_command = 5; + + // Request client to print thread stack trace. + PrintThreadStackTraceCommand print_thread_stack_trace_command = 6; + + // Request client to verify the consumption of the appointed message. + VerifyMessageCommand verify_message_command = 7; + } +} + +message NotifyClientTerminationRequest { + // Consumer group, which is absent for producer. + optional Resource group = 1; +} + +message NotifyClientTerminationResponse { Status status = 1; } + +message ChangeInvisibleDurationRequest { + Resource group = 1; + Resource topic = 2; + + // Unique receipt handle to identify message to change + string receipt_handle = 3; + + // New invisible duration + google.protobuf.Duration invisible_duration = 4; + + // For message tracing + string message_id = 5; +} + +message ChangeInvisibleDurationResponse { + Status status = 1; + + // Server may generate a new receipt handle for the message. 
+ string receipt_handle = 2; +} + +message PullMessageRequest { + Resource group = 1; + MessageQueue message_queue = 2; + int64 offset = 3; + int32 batch_size = 4; + FilterExpression filter_expression = 5; + google.protobuf.Duration long_polling_timeout = 6; +} + +message PullMessageResponse { + oneof content { + Status status = 1; + Message message = 2; + int64 next_offset = 3; + } +} + +message UpdateOffsetRequest { + Resource group = 1; + MessageQueue message_queue = 2; + int64 offset = 3; +} + +message UpdateOffsetResponse { + Status status = 1; +} + +message GetOffsetRequest { + Resource group = 1; + MessageQueue message_queue = 2; +} + +message GetOffsetResponse { + Status status = 1; + int64 offset = 2; +} + +message QueryOffsetRequest { + MessageQueue message_queue = 1; + QueryOffsetPolicy query_offset_policy = 2; + optional google.protobuf.Timestamp timestamp = 3; +} + +message QueryOffsetResponse { + Status status = 1; + int64 offset = 2; +} + +// For all the RPCs in MessagingService, the following error handling policies +// apply: +// +// If the request doesn't bear a valid authentication credential, return a +// response with common.status.code == `UNAUTHENTICATED`. If the authenticated +// user is not granted with sufficient permission to execute the requested +// operation, return a response with common.status.code == `PERMISSION_DENIED`. +// If the per-user-resource-based quota is exhausted, return a response with +// common.status.code == `RESOURCE_EXHAUSTED`. If any unexpected server-side +// errors raise, return a response with common.status.code == `INTERNAL`. +service MessagingService { + + // Queries the route entries of the requested topic in the perspective of the + // given endpoints. On success, servers should return a collection of + // addressable message-queues. Note servers may return customized route + // entries based on endpoints provided. + // + // If the requested topic doesn't exist, returns `NOT_FOUND`. 
+ // If the specific endpoints is empty, returns `INVALID_ARGUMENT`. + rpc QueryRoute(QueryRouteRequest) returns (QueryRouteResponse) {} + + // Producer or consumer sends HeartbeatRequest to servers periodically to + // keep-alive. Additionally, it also reports client-side configuration, + // including topic subscription, load-balancing group name, etc. + // + // Returns `OK` if success. + // + // If a client specifies a language that is not yet supported by servers, + // returns `INVALID_ARGUMENT` + rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {} + + // Delivers messages to brokers. + // Clients may further: + // 1. Refine a message destination to message-queues which fulfills parts of + // FIFO semantic; + // 2. Flag a message as transactional, which keeps it invisible to consumers + // until it commits; + // 3. Time a message, making it invisible to consumers till specified + // time-point; + // 4. And more... + // + // Returns message-id or transaction-id with status `OK` on success. + // + // If the destination topic doesn't exist, returns `NOT_FOUND`. + rpc SendMessage(SendMessageRequest) returns (SendMessageResponse) {} + + // Queries the assigned route info of a topic for current consumer, + // the returned assignment result is decided by server-side load balancer. + // + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // If the specific endpoints is empty, returns `INVALID_ARGUMENT`. + rpc QueryAssignment(QueryAssignmentRequest) returns (QueryAssignmentResponse) { + } + + // Receives messages from the server in batch manner, returns a set of + // messages if success. The received messages should be acked or redelivered + // after processed. + // + // If the pending concurrent receive requests exceed the quota of the given + // consumer group, returns `UNAVAILABLE`. If the upstream store server hangs, + // return `DEADLINE_EXCEEDED` in a timely manner. 
If the corresponding topic + // or consumer group doesn't exist, returns `NOT_FOUND`. If there is no new + // message in the specific topic, returns `OK` with an empty message set. + // Please note that client may suffer from false empty responses. + // + // If failed to receive message from remote, server must return only one + // `ReceiveMessageResponse` as the reply to the request, whose `Status` indicates + // the specific reason of failure, otherwise, the reply is considered successful. + rpc ReceiveMessage(ReceiveMessageRequest) returns (stream ReceiveMessageResponse) { + } + + // Acknowledges the message associated with the `receipt_handle` or `offset` + // in the `AckMessageRequest`, it means the message has been successfully + // processed. Returns `OK` if the message server remove the relevant message + // successfully. + // + // If the given receipt_handle is illegal or out of date, returns + // `INVALID_ARGUMENT`. + rpc AckMessage(AckMessageRequest) returns (AckMessageResponse) {} + + // Forwards one message to dead letter queue if the max delivery attempts is + // exceeded by this message at client-side, return `OK` if success. + rpc ForwardMessageToDeadLetterQueue(ForwardMessageToDeadLetterQueueRequest) + returns (ForwardMessageToDeadLetterQueueResponse) {} + + // PullMessage and ReceiveMessage RPCs serve a similar purpose, + // which is to attempt to get messages from the server, but with different semantics. + rpc PullMessage(PullMessageRequest) returns (stream PullMessageResponse) {} + + // Update the consumption progress of the designated queue of the + // consumer group to the remote. + rpc UpdateOffset(UpdateOffsetRequest) returns (UpdateOffsetResponse) {} + + // Query the consumption progress of the designated queue of the + // consumer group to the remote. + rpc GetOffset(GetOffsetRequest) returns (GetOffsetResponse) {} + + // Query the offset of the designated queue by the query offset policy. 
+ rpc QueryOffset(QueryOffsetRequest) returns (QueryOffsetResponse) {} + + // Commits or rollback one transactional message. + rpc EndTransaction(EndTransactionRequest) returns (EndTransactionResponse) {} + + // Once a client starts, it would immediately establishes bi-lateral stream + // RPCs with brokers, reporting its settings as the initiative command. + // + // When servers have need of inspecting client status, they would issue + // telemetry commands to clients. After executing received instructions, + // clients shall report command execution results through client-side streams. + rpc Telemetry(stream TelemetryCommand) returns (stream TelemetryCommand) {} + + // Notify the server that the client is terminated. + rpc NotifyClientTermination(NotifyClientTerminationRequest) returns (NotifyClientTerminationResponse) { + } + + // Once a message is retrieved from consume queue on behalf of the group, it + // will be kept invisible to other clients of the same group for a period of + // time. The message is supposed to be processed within the invisible + // duration. If the client, which is in charge of the invisible message, is + // not capable of processing the message timely, it may use + // ChangeInvisibleDuration to lengthen invisible duration. + rpc ChangeInvisibleDuration(ChangeInvisibleDurationRequest) returns (ChangeInvisibleDurationResponse) { + } +} diff --git a/python/rocketmq/grpc_protocol/service_pb2.py b/python/rocketmq/grpc_protocol/service_pb2.py new file mode 100644 index 000000000..30abc388d --- /dev/null +++ b/python/rocketmq/grpc_protocol/service_pb2.py @@ -0,0 +1,114 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +# Generated by the grpc_protocol buffer compiler. DO NOT EDIT! +# source: service.proto +# Protobuf Python Version: 5.26.1 +"""Generated grpc_protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rservice.proto\x12\x12\x61pache.rocketmq.v2\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x10\x64\x65\x66inition.proto\"r\n\x11QueryRouteRequest\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x30\n\tendpoints\x18\x02 \x01(\x0b\x32\x1d.apache.rocketmq.v2.Endpoints\"z\n\x12QueryRouteResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x38\n\x0emessage_queues\x18\x02 \x03(\x0b\x32 .apache.rocketmq.v2.MessageQueue\"C\n\x12SendMessageRequest\x12-\n\x08messages\x18\x01 \x03(\x0b\x32\x1b.apache.rocketmq.v2.Message\"y\n\x0fSendResultEntry\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x16\n\x0etransaction_id\x18\x03 \x01(\t\x12\x0e\n\x06offset\x18\x04 \x01(\x03\"w\n\x13SendMessageResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x34\n\x07\x65ntries\x18\x02 
\x03(\x0b\x32#.apache.rocketmq.v2.SendResultEntry\"\xa4\x01\n\x16QueryAssignmentRequest\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05group\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x30\n\tendpoints\x18\x03 \x01(\x0b\x32\x1d.apache.rocketmq.v2.Endpoints\"z\n\x17QueryAssignmentResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x33\n\x0b\x61ssignments\x18\x02 \x03(\x0b\x32\x1e.apache.rocketmq.v2.Assignment\"\xb8\x03\n\x15ReceiveMessageRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x37\n\rmessage_queue\x18\x02 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\x12?\n\x11\x66ilter_expression\x18\x03 \x01(\x0b\x32$.apache.rocketmq.v2.FilterExpression\x12\x12\n\nbatch_size\x18\x04 \x01(\x05\x12:\n\x12invisible_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x88\x01\x01\x12\x12\n\nauto_renew\x18\x06 \x01(\x08\x12<\n\x14long_polling_timeout\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationH\x01\x88\x01\x01\x12\x17\n\nattempt_id\x18\x08 \x01(\tH\x02\x88\x01\x01\x42\x15\n\x13_invisible_durationB\x17\n\x15_long_polling_timeoutB\r\n\x0b_attempt_id\"\xbb\x01\n\x16ReceiveMessageResponse\x12,\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.StatusH\x00\x12.\n\x07message\x18\x02 \x01(\x0b\x32\x1b.apache.rocketmq.v2.MessageH\x00\x12\x38\n\x12\x64\x65livery_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\t\n\x07\x63ontent\"=\n\x0f\x41\x63kMessageEntry\x12\x12\n\nmessage_id\x18\x01 \x01(\t\x12\x16\n\x0ereceipt_handle\x18\x02 \x01(\t\"\xa3\x01\n\x11\x41\x63kMessageRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05topic\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x34\n\x07\x65ntries\x18\x03 \x03(\x0b\x32#.apache.rocketmq.v2.AckMessageEntry\"o\n\x15\x41\x63kMessageResultEntry\x12\x12\n\nmessage_id\x18\x01 \x01(\t\x12\x16\n\x0ereceipt_handle\x18\x02 
\x01(\t\x12*\n\x06status\x18\x03 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"|\n\x12\x41\x63kMessageResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12:\n\x07\x65ntries\x18\x02 \x03(\x0b\x32).apache.rocketmq.v2.AckMessageResultEntry\"\xe7\x01\n&ForwardMessageToDeadLetterQueueRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05topic\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x16\n\x0ereceipt_handle\x18\x03 \x01(\t\x12\x12\n\nmessage_id\x18\x04 \x01(\t\x12\x18\n\x10\x64\x65livery_attempt\x18\x05 \x01(\x05\x12\x1d\n\x15max_delivery_attempts\x18\x06 \x01(\x05\"U\n\'ForwardMessageToDeadLetterQueueResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"\x83\x01\n\x10HeartbeatRequest\x12\x30\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.ResourceH\x00\x88\x01\x01\x12\x33\n\x0b\x63lient_type\x18\x02 \x01(\x0e\x32\x1e.apache.rocketmq.v2.ClientTypeB\x08\n\x06_group\"?\n\x11HeartbeatResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"\xfd\x01\n\x15\x45ndTransactionRequest\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x16\n\x0etransaction_id\x18\x03 \x01(\t\x12=\n\nresolution\x18\x04 \x01(\x0e\x32).apache.rocketmq.v2.TransactionResolution\x12\x35\n\x06source\x18\x05 \x01(\x0e\x32%.apache.rocketmq.v2.TransactionSource\x12\x15\n\rtrace_context\x18\x06 \x01(\t\"D\n\x16\x45ndTransactionResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"-\n\x1cPrintThreadStackTraceCommand\x12\r\n\x05nonce\x18\x01 \x01(\t\"Y\n\x10ThreadStackTrace\x12\r\n\x05nonce\x18\x01 \x01(\t\x12\x1f\n\x12thread_stack_trace\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x15\n\x13_thread_stack_trace\"S\n\x14VerifyMessageCommand\x12\r\n\x05nonce\x18\x01 \x01(\t\x12,\n\x07message\x18\x02 \x01(\x0b\x32\x1b.apache.rocketmq.v2.Message\"$\n\x13VerifyMessageResult\x12\r\n\x05nonce\x18\x01 
\x01(\t\"i\n!RecoverOrphanedTransactionCommand\x12,\n\x07message\x18\x01 \x01(\x0b\x32\x1b.apache.rocketmq.v2.Message\x12\x16\n\x0etransaction_id\x18\x02 \x01(\t\"\xaa\x04\n\x10TelemetryCommand\x12/\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.StatusH\x01\x88\x01\x01\x12\x30\n\x08settings\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.SettingsH\x00\x12\x42\n\x12thread_stack_trace\x18\x03 \x01(\x0b\x32$.apache.rocketmq.v2.ThreadStackTraceH\x00\x12H\n\x15verify_message_result\x18\x04 \x01(\x0b\x32\'.apache.rocketmq.v2.VerifyMessageResultH\x00\x12\x65\n$recover_orphaned_transaction_command\x18\x05 \x01(\x0b\x32\x35.apache.rocketmq.v2.RecoverOrphanedTransactionCommandH\x00\x12\\\n print_thread_stack_trace_command\x18\x06 \x01(\x0b\x32\x30.apache.rocketmq.v2.PrintThreadStackTraceCommandH\x00\x12J\n\x16verify_message_command\x18\x07 \x01(\x0b\x32(.apache.rocketmq.v2.VerifyMessageCommandH\x00\x42\t\n\x07\x63ommandB\t\n\x07_status\"\\\n\x1eNotifyClientTerminationRequest\x12\x30\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.ResourceH\x00\x88\x01\x01\x42\x08\n\x06_group\"M\n\x1fNotifyClientTerminationResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"\xdd\x01\n\x1e\x43hangeInvisibleDurationRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05topic\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x16\n\x0ereceipt_handle\x18\x03 \x01(\t\x12\x35\n\x12invisible_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nmessage_id\x18\x05 \x01(\t\"e\n\x1f\x43hangeInvisibleDurationResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x16\n\x0ereceipt_handle\x18\x02 \x01(\t\"\x98\x02\n\x12PullMessageRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x37\n\rmessage_queue\x18\x02 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\x12\x0e\n\x06offset\x18\x03 \x01(\x03\x12\x12\n\nbatch_size\x18\x04 
\x01(\x05\x12?\n\x11\x66ilter_expression\x18\x05 \x01(\x0b\x32$.apache.rocketmq.v2.FilterExpression\x12\x37\n\x14long_polling_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x95\x01\n\x13PullMessageResponse\x12,\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.StatusH\x00\x12.\n\x07message\x18\x02 \x01(\x0b\x32\x1b.apache.rocketmq.v2.MessageH\x00\x12\x15\n\x0bnext_offset\x18\x03 \x01(\x03H\x00\x42\t\n\x07\x63ontent\"\x8b\x01\n\x13UpdateOffsetRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x37\n\rmessage_queue\x18\x02 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\x12\x0e\n\x06offset\x18\x03 \x01(\x03\"B\n\x14UpdateOffsetResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"x\n\x10GetOffsetRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x37\n\rmessage_queue\x18\x02 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\"O\n\x11GetOffsetResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x0e\n\x06offset\x18\x02 \x01(\x03\"\xd3\x01\n\x12QueryOffsetRequest\x12\x37\n\rmessage_queue\x18\x01 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\x12\x42\n\x13query_offset_policy\x18\x02 \x01(\x0e\x32%.apache.rocketmq.v2.QueryOffsetPolicy\x12\x32\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x42\x0c\n\n_timestamp\"Q\n\x13QueryOffsetResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x0e\n\x06offset\x18\x02 
\x01(\x03\x32\xe7\x0c\n\x10MessagingService\x12]\n\nQueryRoute\x12%.apache.rocketmq.v2.QueryRouteRequest\x1a&.apache.rocketmq.v2.QueryRouteResponse\"\x00\x12Z\n\tHeartbeat\x12$.apache.rocketmq.v2.HeartbeatRequest\x1a%.apache.rocketmq.v2.HeartbeatResponse\"\x00\x12`\n\x0bSendMessage\x12&.apache.rocketmq.v2.SendMessageRequest\x1a\'.apache.rocketmq.v2.SendMessageResponse\"\x00\x12l\n\x0fQueryAssignment\x12*.apache.rocketmq.v2.QueryAssignmentRequest\x1a+.apache.rocketmq.v2.QueryAssignmentResponse\"\x00\x12k\n\x0eReceiveMessage\x12).apache.rocketmq.v2.ReceiveMessageRequest\x1a*.apache.rocketmq.v2.ReceiveMessageResponse\"\x00\x30\x01\x12]\n\nAckMessage\x12%.apache.rocketmq.v2.AckMessageRequest\x1a&.apache.rocketmq.v2.AckMessageResponse\"\x00\x12\x9c\x01\n\x1f\x46orwardMessageToDeadLetterQueue\x12:.apache.rocketmq.v2.ForwardMessageToDeadLetterQueueRequest\x1a;.apache.rocketmq.v2.ForwardMessageToDeadLetterQueueResponse\"\x00\x12\x62\n\x0bPullMessage\x12&.apache.rocketmq.v2.PullMessageRequest\x1a\'.apache.rocketmq.v2.PullMessageResponse\"\x00\x30\x01\x12\x63\n\x0cUpdateOffset\x12\'.apache.rocketmq.v2.UpdateOffsetRequest\x1a(.apache.rocketmq.v2.UpdateOffsetResponse\"\x00\x12Z\n\tGetOffset\x12$.apache.rocketmq.v2.GetOffsetRequest\x1a%.apache.rocketmq.v2.GetOffsetResponse\"\x00\x12`\n\x0bQueryOffset\x12&.apache.rocketmq.v2.QueryOffsetRequest\x1a\'.apache.rocketmq.v2.QueryOffsetResponse\"\x00\x12i\n\x0e\x45ndTransaction\x12).apache.rocketmq.v2.EndTransactionRequest\x1a*.apache.rocketmq.v2.EndTransactionResponse\"\x00\x12]\n\tTelemetry\x12$.apache.rocketmq.v2.TelemetryCommand\x1a$.apache.rocketmq.v2.TelemetryCommand\"\x00(\x01\x30\x01\x12\x84\x01\n\x17NotifyClientTermination\x12\x32.apache.rocketmq.v2.NotifyClientTerminationRequest\x1a\x33.apache.rocketmq.v2.NotifyClientTerminationResponse\"\x00\x12\x84\x01\n\x17\x43hangeInvisibleDuration\x12\x32.apache.rocketmq.v2.ChangeInvisibleDurationRequest\x1a\x33.apache.rocketmq.v2.ChangeInvisibleDurationResponse\"\x00\x42<\n\x12\x61pache.
rocketmq.v2B\tMQServiceP\x01\xa0\x01\x01\xd8\x01\x01\xaa\x02\x12\x41pache.Rocketmq.V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\022apache.rocketmq.v2B\tMQServiceP\001\240\001\001\330\001\001\252\002\022Apache.Rocketmq.V2' + _globals['_QUERYROUTEREQUEST']._serialized_start=120 + _globals['_QUERYROUTEREQUEST']._serialized_end=234 + _globals['_QUERYROUTERESPONSE']._serialized_start=236 + _globals['_QUERYROUTERESPONSE']._serialized_end=358 + _globals['_SENDMESSAGEREQUEST']._serialized_start=360 + _globals['_SENDMESSAGEREQUEST']._serialized_end=427 + _globals['_SENDRESULTENTRY']._serialized_start=429 + _globals['_SENDRESULTENTRY']._serialized_end=550 + _globals['_SENDMESSAGERESPONSE']._serialized_start=552 + _globals['_SENDMESSAGERESPONSE']._serialized_end=671 + _globals['_QUERYASSIGNMENTREQUEST']._serialized_start=674 + _globals['_QUERYASSIGNMENTREQUEST']._serialized_end=838 + _globals['_QUERYASSIGNMENTRESPONSE']._serialized_start=840 + _globals['_QUERYASSIGNMENTRESPONSE']._serialized_end=962 + _globals['_RECEIVEMESSAGEREQUEST']._serialized_start=965 + _globals['_RECEIVEMESSAGEREQUEST']._serialized_end=1405 + _globals['_RECEIVEMESSAGERESPONSE']._serialized_start=1408 + _globals['_RECEIVEMESSAGERESPONSE']._serialized_end=1595 + _globals['_ACKMESSAGEENTRY']._serialized_start=1597 + _globals['_ACKMESSAGEENTRY']._serialized_end=1658 + _globals['_ACKMESSAGEREQUEST']._serialized_start=1661 + _globals['_ACKMESSAGEREQUEST']._serialized_end=1824 + _globals['_ACKMESSAGERESULTENTRY']._serialized_start=1826 + _globals['_ACKMESSAGERESULTENTRY']._serialized_end=1937 + _globals['_ACKMESSAGERESPONSE']._serialized_start=1939 + _globals['_ACKMESSAGERESPONSE']._serialized_end=2063 + 
_globals['_FORWARDMESSAGETODEADLETTERQUEUEREQUEST']._serialized_start=2066 + _globals['_FORWARDMESSAGETODEADLETTERQUEUEREQUEST']._serialized_end=2297 + _globals['_FORWARDMESSAGETODEADLETTERQUEUERESPONSE']._serialized_start=2299 + _globals['_FORWARDMESSAGETODEADLETTERQUEUERESPONSE']._serialized_end=2384 + _globals['_HEARTBEATREQUEST']._serialized_start=2387 + _globals['_HEARTBEATREQUEST']._serialized_end=2518 + _globals['_HEARTBEATRESPONSE']._serialized_start=2520 + _globals['_HEARTBEATRESPONSE']._serialized_end=2583 + _globals['_ENDTRANSACTIONREQUEST']._serialized_start=2586 + _globals['_ENDTRANSACTIONREQUEST']._serialized_end=2839 + _globals['_ENDTRANSACTIONRESPONSE']._serialized_start=2841 + _globals['_ENDTRANSACTIONRESPONSE']._serialized_end=2909 + _globals['_PRINTTHREADSTACKTRACECOMMAND']._serialized_start=2911 + _globals['_PRINTTHREADSTACKTRACECOMMAND']._serialized_end=2956 + _globals['_THREADSTACKTRACE']._serialized_start=2958 + _globals['_THREADSTACKTRACE']._serialized_end=3047 + _globals['_VERIFYMESSAGECOMMAND']._serialized_start=3049 + _globals['_VERIFYMESSAGECOMMAND']._serialized_end=3132 + _globals['_VERIFYMESSAGERESULT']._serialized_start=3134 + _globals['_VERIFYMESSAGERESULT']._serialized_end=3170 + _globals['_RECOVERORPHANEDTRANSACTIONCOMMAND']._serialized_start=3172 + _globals['_RECOVERORPHANEDTRANSACTIONCOMMAND']._serialized_end=3277 + _globals['_TELEMETRYCOMMAND']._serialized_start=3280 + _globals['_TELEMETRYCOMMAND']._serialized_end=3834 + _globals['_NOTIFYCLIENTTERMINATIONREQUEST']._serialized_start=3836 + _globals['_NOTIFYCLIENTTERMINATIONREQUEST']._serialized_end=3928 + _globals['_NOTIFYCLIENTTERMINATIONRESPONSE']._serialized_start=3930 + _globals['_NOTIFYCLIENTTERMINATIONRESPONSE']._serialized_end=4007 + _globals['_CHANGEINVISIBLEDURATIONREQUEST']._serialized_start=4010 + _globals['_CHANGEINVISIBLEDURATIONREQUEST']._serialized_end=4231 + _globals['_CHANGEINVISIBLEDURATIONRESPONSE']._serialized_start=4233 + 
_globals['_CHANGEINVISIBLEDURATIONRESPONSE']._serialized_end=4334 + _globals['_PULLMESSAGEREQUEST']._serialized_start=4337 + _globals['_PULLMESSAGEREQUEST']._serialized_end=4617 + _globals['_PULLMESSAGERESPONSE']._serialized_start=4620 + _globals['_PULLMESSAGERESPONSE']._serialized_end=4769 + _globals['_UPDATEOFFSETREQUEST']._serialized_start=4772 + _globals['_UPDATEOFFSETREQUEST']._serialized_end=4911 + _globals['_UPDATEOFFSETRESPONSE']._serialized_start=4913 + _globals['_UPDATEOFFSETRESPONSE']._serialized_end=4979 + _globals['_GETOFFSETREQUEST']._serialized_start=4981 + _globals['_GETOFFSETREQUEST']._serialized_end=5101 + _globals['_GETOFFSETRESPONSE']._serialized_start=5103 + _globals['_GETOFFSETRESPONSE']._serialized_end=5182 + _globals['_QUERYOFFSETREQUEST']._serialized_start=5185 + _globals['_QUERYOFFSETREQUEST']._serialized_end=5396 + _globals['_QUERYOFFSETRESPONSE']._serialized_start=5398 + _globals['_QUERYOFFSETRESPONSE']._serialized_end=5479 + _globals['_MESSAGINGSERVICE']._serialized_start=5482 + _globals['_MESSAGINGSERVICE']._serialized_end=7121 +# @@protoc_insertion_point(module_scope) diff --git a/python/rocketmq/protocol/service_pb2_grpc.py b/python/rocketmq/grpc_protocol/service_pb2_grpc.py similarity index 61% rename from python/rocketmq/protocol/service_pb2_grpc.py rename to python/rocketmq/grpc_protocol/service_pb2_grpc.py index 028aed49e..e3796fe21 100644 --- a/python/rocketmq/protocol/service_pb2_grpc.py +++ b/python/rocketmq/grpc_protocol/service_pb2_grpc.py @@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +# Generated by the gRPC Python grpc_protocol compiler plugin. DO NOT EDIT! 
"""Client and server classes corresponding to protobuf-defined services.""" import grpc -from protocol import service_pb2 as apache_dot_rocketmq_dot_v2_dot_service__pb2 +from rocketmq.grpc_protocol import service_pb2 as service__pb2 class MessagingServiceStub(object): @@ -41,58 +41,78 @@ def __init__(self, channel): """ self.QueryRoute = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/QueryRoute', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryRouteRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryRouteResponse.FromString, + request_serializer=service__pb2.QueryRouteRequest.SerializeToString, + response_deserializer=service__pb2.QueryRouteResponse.FromString, ) self.Heartbeat = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/Heartbeat', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.HeartbeatRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.HeartbeatResponse.FromString, + request_serializer=service__pb2.HeartbeatRequest.SerializeToString, + response_deserializer=service__pb2.HeartbeatResponse.FromString, ) self.SendMessage = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/SendMessage', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.SendMessageRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.SendMessageResponse.FromString, + request_serializer=service__pb2.SendMessageRequest.SerializeToString, + response_deserializer=service__pb2.SendMessageResponse.FromString, ) self.QueryAssignment = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/QueryAssignment', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryAssignmentRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryAssignmentResponse.FromString, + 
request_serializer=service__pb2.QueryAssignmentRequest.SerializeToString, + response_deserializer=service__pb2.QueryAssignmentResponse.FromString, ) self.ReceiveMessage = channel.unary_stream( '/apache.rocketmq.v2.MessagingService/ReceiveMessage', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ReceiveMessageRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ReceiveMessageResponse.FromString, + request_serializer=service__pb2.ReceiveMessageRequest.SerializeToString, + response_deserializer=service__pb2.ReceiveMessageResponse.FromString, ) self.AckMessage = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/AckMessage', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.AckMessageRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.AckMessageResponse.FromString, + request_serializer=service__pb2.AckMessageRequest.SerializeToString, + response_deserializer=service__pb2.AckMessageResponse.FromString, ) self.ForwardMessageToDeadLetterQueue = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/ForwardMessageToDeadLetterQueue', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ForwardMessageToDeadLetterQueueRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ForwardMessageToDeadLetterQueueResponse.FromString, + request_serializer=service__pb2.ForwardMessageToDeadLetterQueueRequest.SerializeToString, + response_deserializer=service__pb2.ForwardMessageToDeadLetterQueueResponse.FromString, + ) + self.PullMessage = channel.unary_stream( + '/apache.rocketmq.v2.MessagingService/PullMessage', + request_serializer=service__pb2.PullMessageRequest.SerializeToString, + response_deserializer=service__pb2.PullMessageResponse.FromString, + ) + self.UpdateOffset = channel.unary_unary( + '/apache.rocketmq.v2.MessagingService/UpdateOffset', + 
request_serializer=service__pb2.UpdateOffsetRequest.SerializeToString, + response_deserializer=service__pb2.UpdateOffsetResponse.FromString, + ) + self.GetOffset = channel.unary_unary( + '/apache.rocketmq.v2.MessagingService/GetOffset', + request_serializer=service__pb2.GetOffsetRequest.SerializeToString, + response_deserializer=service__pb2.GetOffsetResponse.FromString, + ) + self.QueryOffset = channel.unary_unary( + '/apache.rocketmq.v2.MessagingService/QueryOffset', + request_serializer=service__pb2.QueryOffsetRequest.SerializeToString, + response_deserializer=service__pb2.QueryOffsetResponse.FromString, ) self.EndTransaction = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/EndTransaction', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.EndTransactionRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.EndTransactionResponse.FromString, + request_serializer=service__pb2.EndTransactionRequest.SerializeToString, + response_deserializer=service__pb2.EndTransactionResponse.FromString, ) self.Telemetry = channel.stream_stream( '/apache.rocketmq.v2.MessagingService/Telemetry', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.TelemetryCommand.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.TelemetryCommand.FromString, + request_serializer=service__pb2.TelemetryCommand.SerializeToString, + response_deserializer=service__pb2.TelemetryCommand.FromString, ) self.NotifyClientTermination = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/NotifyClientTermination', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.NotifyClientTerminationRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.NotifyClientTerminationResponse.FromString, + request_serializer=service__pb2.NotifyClientTerminationRequest.SerializeToString, + 
response_deserializer=service__pb2.NotifyClientTerminationResponse.FromString, ) self.ChangeInvisibleDuration = channel.unary_unary( '/apache.rocketmq.v2.MessagingService/ChangeInvisibleDuration', - request_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ChangeInvisibleDurationRequest.SerializeToString, - response_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ChangeInvisibleDurationResponse.FromString, + request_serializer=service__pb2.ChangeInvisibleDurationRequest.SerializeToString, + response_deserializer=service__pb2.ChangeInvisibleDurationResponse.FromString, ) @@ -207,6 +227,37 @@ def ForwardMessageToDeadLetterQueue(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def PullMessage(self, request, context): + """PullMessage and ReceiveMessage RPCs serve a similar purpose, + which is to attempt to get messages from the server, but with different semantics. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateOffset(self, request, context): + """Update the consumption progress of the designated queue of the + consumer group to the remote. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetOffset(self, request, context): + """Query the consumption progress of the designated queue of the + consumer group to the remote. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def QueryOffset(self, request, context): + """Query the offset of the designated queue by the query offset policy. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def EndTransaction(self, request, context): """Commits or rollback one transactional message. """ @@ -250,58 +301,78 @@ def add_MessagingServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'QueryRoute': grpc.unary_unary_rpc_method_handler( servicer.QueryRoute, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryRouteRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryRouteResponse.SerializeToString, + request_deserializer=service__pb2.QueryRouteRequest.FromString, + response_serializer=service__pb2.QueryRouteResponse.SerializeToString, ), 'Heartbeat': grpc.unary_unary_rpc_method_handler( servicer.Heartbeat, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.HeartbeatRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.HeartbeatResponse.SerializeToString, + request_deserializer=service__pb2.HeartbeatRequest.FromString, + response_serializer=service__pb2.HeartbeatResponse.SerializeToString, ), 'SendMessage': grpc.unary_unary_rpc_method_handler( servicer.SendMessage, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.SendMessageRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.SendMessageResponse.SerializeToString, + request_deserializer=service__pb2.SendMessageRequest.FromString, + response_serializer=service__pb2.SendMessageResponse.SerializeToString, ), 'QueryAssignment': grpc.unary_unary_rpc_method_handler( servicer.QueryAssignment, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryAssignmentRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryAssignmentResponse.SerializeToString, + request_deserializer=service__pb2.QueryAssignmentRequest.FromString, + 
response_serializer=service__pb2.QueryAssignmentResponse.SerializeToString, ), 'ReceiveMessage': grpc.unary_stream_rpc_method_handler( servicer.ReceiveMessage, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ReceiveMessageRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ReceiveMessageResponse.SerializeToString, + request_deserializer=service__pb2.ReceiveMessageRequest.FromString, + response_serializer=service__pb2.ReceiveMessageResponse.SerializeToString, ), 'AckMessage': grpc.unary_unary_rpc_method_handler( servicer.AckMessage, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.AckMessageRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.AckMessageResponse.SerializeToString, + request_deserializer=service__pb2.AckMessageRequest.FromString, + response_serializer=service__pb2.AckMessageResponse.SerializeToString, ), 'ForwardMessageToDeadLetterQueue': grpc.unary_unary_rpc_method_handler( servicer.ForwardMessageToDeadLetterQueue, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ForwardMessageToDeadLetterQueueRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ForwardMessageToDeadLetterQueueResponse.SerializeToString, + request_deserializer=service__pb2.ForwardMessageToDeadLetterQueueRequest.FromString, + response_serializer=service__pb2.ForwardMessageToDeadLetterQueueResponse.SerializeToString, + ), + 'PullMessage': grpc.unary_stream_rpc_method_handler( + servicer.PullMessage, + request_deserializer=service__pb2.PullMessageRequest.FromString, + response_serializer=service__pb2.PullMessageResponse.SerializeToString, + ), + 'UpdateOffset': grpc.unary_unary_rpc_method_handler( + servicer.UpdateOffset, + request_deserializer=service__pb2.UpdateOffsetRequest.FromString, + response_serializer=service__pb2.UpdateOffsetResponse.SerializeToString, + ), + 'GetOffset': grpc.unary_unary_rpc_method_handler( + 
servicer.GetOffset, + request_deserializer=service__pb2.GetOffsetRequest.FromString, + response_serializer=service__pb2.GetOffsetResponse.SerializeToString, + ), + 'QueryOffset': grpc.unary_unary_rpc_method_handler( + servicer.QueryOffset, + request_deserializer=service__pb2.QueryOffsetRequest.FromString, + response_serializer=service__pb2.QueryOffsetResponse.SerializeToString, ), 'EndTransaction': grpc.unary_unary_rpc_method_handler( servicer.EndTransaction, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.EndTransactionRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.EndTransactionResponse.SerializeToString, + request_deserializer=service__pb2.EndTransactionRequest.FromString, + response_serializer=service__pb2.EndTransactionResponse.SerializeToString, ), 'Telemetry': grpc.stream_stream_rpc_method_handler( servicer.Telemetry, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.TelemetryCommand.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.TelemetryCommand.SerializeToString, + request_deserializer=service__pb2.TelemetryCommand.FromString, + response_serializer=service__pb2.TelemetryCommand.SerializeToString, ), 'NotifyClientTermination': grpc.unary_unary_rpc_method_handler( servicer.NotifyClientTermination, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.NotifyClientTerminationRequest.FromString, - response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.NotifyClientTerminationResponse.SerializeToString, + request_deserializer=service__pb2.NotifyClientTerminationRequest.FromString, + response_serializer=service__pb2.NotifyClientTerminationResponse.SerializeToString, ), 'ChangeInvisibleDuration': grpc.unary_unary_rpc_method_handler( servicer.ChangeInvisibleDuration, - request_deserializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ChangeInvisibleDurationRequest.FromString, - 
response_serializer=apache_dot_rocketmq_dot_v2_dot_service__pb2.ChangeInvisibleDurationResponse.SerializeToString, + request_deserializer=service__pb2.ChangeInvisibleDurationRequest.FromString, + response_serializer=service__pb2.ChangeInvisibleDurationResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -335,8 +406,8 @@ def QueryRoute(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/QueryRoute', - apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryRouteRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryRouteResponse.FromString, + service__pb2.QueryRouteRequest.SerializeToString, + service__pb2.QueryRouteResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -352,8 +423,8 @@ def Heartbeat(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/Heartbeat', - apache_dot_rocketmq_dot_v2_dot_service__pb2.HeartbeatRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.HeartbeatResponse.FromString, + service__pb2.HeartbeatRequest.SerializeToString, + service__pb2.HeartbeatResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -369,8 +440,8 @@ def SendMessage(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/SendMessage', - apache_dot_rocketmq_dot_v2_dot_service__pb2.SendMessageRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.SendMessageResponse.FromString, + service__pb2.SendMessageRequest.SerializeToString, + service__pb2.SendMessageResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -386,8 +457,8 @@ def 
QueryAssignment(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/QueryAssignment', - apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryAssignmentRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.QueryAssignmentResponse.FromString, + service__pb2.QueryAssignmentRequest.SerializeToString, + service__pb2.QueryAssignmentResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -403,8 +474,8 @@ def ReceiveMessage(request, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/apache.rocketmq.v2.MessagingService/ReceiveMessage', - apache_dot_rocketmq_dot_v2_dot_service__pb2.ReceiveMessageRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.ReceiveMessageResponse.FromString, + service__pb2.ReceiveMessageRequest.SerializeToString, + service__pb2.ReceiveMessageResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -420,8 +491,8 @@ def AckMessage(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/AckMessage', - apache_dot_rocketmq_dot_v2_dot_service__pb2.AckMessageRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.AckMessageResponse.FromString, + service__pb2.AckMessageRequest.SerializeToString, + service__pb2.AckMessageResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -437,8 +508,76 @@ def ForwardMessageToDeadLetterQueue(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/ForwardMessageToDeadLetterQueue', - apache_dot_rocketmq_dot_v2_dot_service__pb2.ForwardMessageToDeadLetterQueueRequest.SerializeToString, - 
apache_dot_rocketmq_dot_v2_dot_service__pb2.ForwardMessageToDeadLetterQueueResponse.FromString, + service__pb2.ForwardMessageToDeadLetterQueueRequest.SerializeToString, + service__pb2.ForwardMessageToDeadLetterQueueResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PullMessage(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/apache.rocketmq.v2.MessagingService/PullMessage', + service__pb2.PullMessageRequest.SerializeToString, + service__pb2.PullMessageResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateOffset(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/UpdateOffset', + service__pb2.UpdateOffsetRequest.SerializeToString, + service__pb2.UpdateOffsetResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetOffset(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/GetOffset', + service__pb2.GetOffsetRequest.SerializeToString, + service__pb2.GetOffsetResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def QueryOffset(request, + 
target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/QueryOffset', + service__pb2.QueryOffsetRequest.SerializeToString, + service__pb2.QueryOffsetResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -454,8 +593,8 @@ def EndTransaction(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/EndTransaction', - apache_dot_rocketmq_dot_v2_dot_service__pb2.EndTransactionRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.EndTransactionResponse.FromString, + service__pb2.EndTransactionRequest.SerializeToString, + service__pb2.EndTransactionResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -471,8 +610,8 @@ def Telemetry(request_iterator, timeout=None, metadata=None): return grpc.experimental.stream_stream(request_iterator, target, '/apache.rocketmq.v2.MessagingService/Telemetry', - apache_dot_rocketmq_dot_v2_dot_service__pb2.TelemetryCommand.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.TelemetryCommand.FromString, + service__pb2.TelemetryCommand.SerializeToString, + service__pb2.TelemetryCommand.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -488,8 +627,8 @@ def NotifyClientTermination(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/NotifyClientTermination', - apache_dot_rocketmq_dot_v2_dot_service__pb2.NotifyClientTerminationRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.NotifyClientTerminationResponse.FromString, + 
service__pb2.NotifyClientTerminationRequest.SerializeToString, + service__pb2.NotifyClientTerminationResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @@ -505,7 +644,7 @@ def ChangeInvisibleDuration(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/apache.rocketmq.v2.MessagingService/ChangeInvisibleDuration', - apache_dot_rocketmq_dot_v2_dot_service__pb2.ChangeInvisibleDurationRequest.SerializeToString, - apache_dot_rocketmq_dot_v2_dot_service__pb2.ChangeInvisibleDurationResponse.FromString, + service__pb2.ChangeInvisibleDurationRequest.SerializeToString, + service__pb2.ChangeInvisibleDurationResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/python/rocketmq/message.py b/python/rocketmq/message.py deleted file mode 100644 index a315a1967..000000000 --- a/python/rocketmq/message.py +++ /dev/null @@ -1,210 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import binascii -import gzip -import hashlib -import zlib -from typing import Dict, List - -from rocketmq.definition import MessageQueue -from rocketmq.protocol.definition_pb2 import DigestType as ProtoDigestType -from rocketmq.protocol.definition_pb2 import Encoding as ProtoEncoding - - -class Message: - def __init__( - self, - topic: str, - body: bytes, - properties: map = None, - tag: str = None, - keys: str = None, - message_group: str = None, - delivery_timestamp: int = None, - ): - if properties is None: - properties = {} - self.__topic = topic - self.__body = body - self.__properties = properties - self.__tag = tag - self.__keys = keys - self.__message_group = message_group - self.__delivery_timestamp = delivery_timestamp - - @property - def topic(self): - return self.__topic - - @property - def body(self): - return self.__body - - @property - def properties(self): - return self.__properties - - @property - def tag(self): - return self.__tag - - @property - def keys(self): - return self.__keys - - @property - def message_group(self): - return self.__message_group - - @property - def delivery_timestamp(self): - return self.__delivery_timestamp - - -class MessageView: - def __init__( - self, - message_id: str, - topic: str, - body: bytes, - tag: str, - message_group: str, - delivery_timestamp: int, - keys: List[str], - properties: Dict[str, str], - born_host: str, - born_time: int, - delivery_attempt: int, - message_queue: MessageQueue, - receipt_handle: str, - offset: int, - corrupted: bool - ): - self.__message_id = message_id - self.__topic = topic - self.__body = body - self.__properties = properties - self.__tag = tag - self.__keys = keys - self.__message_group = message_group - self.__delivery_timestamp = delivery_timestamp - self.__born_host = born_host - self.__delivery_attempt = delivery_attempt - self.__receipt_handle = receipt_handle - self.__born_time = born_time - self.__message_queue = message_queue - self.__offset = offset - 
self.__corrupted = corrupted - - @property - def message_queue(self): - return self.__message_queue - - @property - def receipt_handle(self): - return self.__receipt_handle - - @property - def topic(self): - return self.__topic - - @property - def body(self): - return self.__body - - @property - def message_id(self): - return self.__message_id - - @property - def born_host(self): - return self.__born_host - - @property - def keys(self): - return self.__keys - - @property - def properties(self): - return self.__properties - - @property - def tag(self): - return self.__tag - - @property - def message_group(self): - return self.__message_group - - @property - def delivery_timestamp(self): - return self.__delivery_timestamp - - @classmethod - def from_protobuf(cls, message, message_queue=None): - topic = message.topic.name - system_properties = message.system_properties - message_id = system_properties.message_id - body_digest = system_properties.body_digest - check_sum = body_digest.checksum - raw = message.body - corrupted = False - digest_type = body_digest.type - - # Digest Type check - if digest_type == ProtoDigestType.CRC32: - expected_check_sum = format(binascii.crc32(raw) & 0xFFFFFFFF, '08X') - if not expected_check_sum == check_sum: - corrupted = True - elif digest_type == ProtoDigestType.MD5: - expected_check_sum = hashlib.md5(raw).hexdigest() - if not expected_check_sum == check_sum: - corrupted = True - elif digest_type == ProtoDigestType.SHA1: - expected_check_sum = hashlib.sha1(raw).hexdigest() - if not expected_check_sum == check_sum: - corrupted = True - elif digest_type in [ProtoDigestType.unspecified, None]: - print(f"Unsupported message body digest algorithm, digestType={digest_type}, topic={topic}, messageId={message_id}") - - # Body Encoding check - body_encoding = system_properties.body_encoding - body = raw - if body_encoding == ProtoEncoding.GZIP: - if message.body and message.body[:2] == b'\x1f\x8b': # Standard Gzip format - body = 
gzip.decompress(message.body) - else: # deflate zip - body = zlib.decompress(message.body) - elif body_encoding in [ProtoEncoding.IDENTITY, None]: - pass - else: - print(f"Unsupported message encoding algorithm, topic={topic}, messageId={message_id}, bodyEncoding={body_encoding}") - - tag = system_properties.tag - message_group = system_properties.message_group - delivery_time = system_properties.delivery_timestamp - keys = list(system_properties.keys) - - born_host = system_properties.born_host - born_time = system_properties.born_timestamp - delivery_attempt = system_properties.delivery_attempt - queue_offset = system_properties.queue_offset - properties = {key: value for key, value in message.user_properties.items()} - receipt_handle = system_properties.receipt_handle - - return cls(message_id, topic, body, tag, message_group, delivery_time, keys, properties, born_host, - born_time, delivery_attempt, message_queue, receipt_handle, queue_offset, corrupted) diff --git a/python/rocketmq/message_id_codec.py b/python/rocketmq/message_id_codec.py deleted file mode 100644 index 0a52c831a..000000000 --- a/python/rocketmq/message_id_codec.py +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import math -import os -import threading -import time -import uuid -from datetime import datetime, timezone - - -class MessageIdCodec: - __MESSAGE_ID_VERSION_V1 = "01" - - @staticmethod - def __get_process_fixed_string(): - mac = uuid.getnode() - mac = format(mac, "012x") - mac_bytes = bytes.fromhex(mac[-12:]) - pid = os.getpid() % 65536 - pid_bytes = pid.to_bytes(2, "big") - return mac_bytes.hex().upper() + pid_bytes.hex().upper() - - @staticmethod - def __get_seconds_since_custom_epoch(): - custom_epoch = datetime(2021, 1, 1, tzinfo=timezone.utc) - now = datetime.now(timezone.utc) - return int((now - custom_epoch).total_seconds()) - - __PROCESS_FIXED_STRING_V1 = __get_process_fixed_string() - __SECONDS_SINCE_CUSTOM_EPOCH = __get_seconds_since_custom_epoch() - __SECONDS_START_TIMESTAMP = int(time.time()) - - @staticmethod - def __delta_seconds(): - return ( - int(time.time()) - - MessageIdCodec.__SECONDS_START_TIMESTAMP - + MessageIdCodec.__SECONDS_SINCE_CUSTOM_EPOCH - ) - - @staticmethod - def __int_to_bytes_with_big_endian(number: int, min_bytes: int): - num_bytes = max(math.ceil(number.bit_length() / 8), min_bytes) - return number.to_bytes(num_bytes, "big") - - __SEQUENCE = 0 - __SEQUENCE_LOCK = threading.Lock() - - @staticmethod - def __get_and_increment_sequence(): - with MessageIdCodec.__SEQUENCE_LOCK: - temp = MessageIdCodec.__SEQUENCE - MessageIdCodec.__SEQUENCE += 1 - return temp - - @staticmethod - def next_message_id(): - seconds = MessageIdCodec.__delta_seconds() - seconds_bytes = MessageIdCodec.__int_to_bytes_with_big_endian(seconds, 4)[-4:] - sequence_bytes = MessageIdCodec.__int_to_bytes_with_big_endian( - MessageIdCodec.__get_and_increment_sequence(), 4 - )[-4:] - return ( - MessageIdCodec.__MESSAGE_ID_VERSION_V1 - + MessageIdCodec.__PROCESS_FIXED_STRING_V1 - + seconds_bytes.hex().upper() - + sequence_bytes.hex().upper() - ) diff --git a/python/rocketmq/producer.py b/python/rocketmq/producer.py deleted file mode 100644 index 378bf8ebb..000000000 
--- a/python/rocketmq/producer.py +++ /dev/null @@ -1,512 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import asyncio -import threading -import time -# from status_checker import StatusChecker -from datetime import datetime, timedelta -from threading import RLock -from typing import Set -from unittest.mock import MagicMock, patch - -import rocketmq -from publishing_message import MessageType -from rocketmq.client import Client -from rocketmq.client_config import ClientConfig -from rocketmq.definition import PermissionHelper, TopicRouteData -from rocketmq.exponential_backoff_retry_policy import \ - ExponentialBackoffRetryPolicy -from rocketmq.log import logger -from rocketmq.message import Message -from rocketmq.message_id_codec import MessageIdCodec -from rocketmq.protocol.definition_pb2 import Message as ProtoMessage -from rocketmq.protocol.definition_pb2 import Resource -from rocketmq.protocol.definition_pb2 import Resource as ProtoResource -from rocketmq.protocol.definition_pb2 import SystemProperties -from rocketmq.protocol.definition_pb2 import \ - TransactionResolution as ProtoTransactionResolution -from rocketmq.protocol.service_pb2 import (EndTransactionRequest, - SendMessageRequest) -from rocketmq.publish_settings 
import PublishingSettings -from rocketmq.publishing_message import PublishingMessage -from rocketmq.rpc_client import Endpoints -from rocketmq.send_receipt import SendReceipt -from rocketmq.session_credentials import (SessionCredentials, - SessionCredentialsProvider) -from status_checker import TooManyRequestsException -from utils import get_positive_mod - - -class Transaction: - MAX_MESSAGE_NUM = 1 - - def __init__(self, producer): - self.producer = producer - self.messages = set() - self.messages_lock = RLock() - self.message_send_receipt_dict = {} - - def try_add_message(self, message): - with self.messages_lock: - if len(self.messages) > self.MAX_MESSAGE_NUM: - raise ValueError(f"Message in transaction has exceed the threshold: {self.MAX_MESSAGE_NUM}") - - publishing_message = PublishingMessage(message, self.producer.publish_settings, True) - self.messages.add(publishing_message) - return publishing_message - - def try_add_receipt(self, publishing_message, send_receipt): - with self.messages_lock: - if publishing_message not in self.messages: - raise ValueError("Message is not in the transaction") - - self.message_send_receipt_dict[publishing_message] = send_receipt - - async def commit(self): - # if self.producer.state != "Running": - # raise Exception("Producer is not running") - - if not self.message_send_receipt_dict: - raise ValueError("Transactional message has not been sent yet") - - for publishing_message, send_receipt in self.message_send_receipt_dict.items(): - await self.producer.end_transaction(send_receipt.endpoints, publishing_message.message.topic, send_receipt.message_id, send_receipt.transaction_id, "Commit") - - async def rollback(self): - # if self.producer.state != "Running": - # raise Exception("Producer is not running") - - if not self.message_send_receipt_dict: - raise ValueError("Transactional message has not been sent yet") - - for publishing_message, send_receipt in self.message_send_receipt_dict.items(): - await 
self.producer.end_transaction(send_receipt.endpoints, publishing_message.message.topic, send_receipt.message_id, send_receipt.transaction_id, "Rollback") - - -class PublishingLoadBalancer: - """This class serves as a load balancer for message publishing. - It keeps track of a rotating index to help distribute the load evenly. - """ - - def __init__(self, topic_route_data: TopicRouteData, index: int = 0): - #: current index for message queue selection - self.__index = index - #: thread lock to ensure atomic update to the index - self.__index_lock = threading.Lock() - - #: filter the message queues which are writable and from the master broker - message_queues = [] - for mq in topic_route_data.message_queues: - if ( - not PermissionHelper().is_writable(mq.permission) - or mq.broker.id is not rocketmq.utils.master_broker_id - ): - continue - message_queues.append(mq) - self.__message_queues = message_queues - - @property - def index(self): - """Property to fetch the current index""" - return self.__index - - def get_and_increment_index(self): - """Thread safe method to get the current index and increment it by one""" - with self.__index_lock: - temp = self.__index - self.__index += 1 - return temp - - def take_message_queues(self, excluded: Set[Endpoints], count: int): - """Fetch a specified number of message queues, excluding the ones provided. - It will first try to fetch from non-excluded brokers and if insufficient, - it will select from the excluded ones. 
- """ - next_index = self.get_and_increment_index() - candidates = [] - candidate_broker_name = set() - - queue_num = len(self.__message_queues) - for i in range(queue_num): - mq = self.__message_queues[next_index % queue_num] - next_index = next_index + 1 - if ( - mq.broker.endpoints not in excluded - and mq.broker.name not in candidate_broker_name - ): - candidate_broker_name.add(mq.broker.name) - candidates.append(mq) - if len(candidates) >= count: - return candidates - # if all endpoints are isolated - if candidates: - return candidates - for i in range(queue_num): - mq = self.__message_queues[next_index % queue_num] - if mq.broker.name not in candidate_broker_name: - candidate_broker_name.add(mq.broker.name) - candidates.append(mq) - if len(candidates) >= count: - return candidates - return candidates - - def take_message_queue_by_message_group(self, message_group): - index = get_positive_mod(hash(message_group), len(self.__message_queues)) - return self.__message_queues[index] - - -class Producer(Client): - """The Producer class extends the Client class and is used to publish - messages to specific topics in RocketMQ. - """ - - def __init__(self, client_config: ClientConfig, topics: Set[str]): - """Create a new Producer. - - :param client_config: The configuration for the client. - :param topics: The set of topics to which the producer can send messages. - """ - super().__init__(client_config) - self.publish_topics = topics - retry_policy = ExponentialBackoffRetryPolicy.immediately_retry_policy(10) - #: Set up the publishing settings with the given parameters. - self.publish_settings = PublishingSettings( - self.client_id, self.endpoints, retry_policy, 10, topics - ) - #: Initialize the routedata cache. 
- self.publish_routedata_cache = {} - - async def __aenter__(self): - """Provide an asynchronous context manager for the producer.""" - await self.start() - - async def __aexit__(self, exc_type, exc_val, exc_tb): - """Provide an asynchronous context manager for the producer.""" - await self.shutdown() - - def get_topics(self): - return self.publish_topics - - async def start(self): - """Start the RocketMQ producer and log the operation.""" - logger.info(f"Begin to start the rocketmq producer, client_id={self.client_id}") - await super().start() - logger.info(f"The rocketmq producer starts successfully, client_id={self.client_id}") - - async def shutdown(self): - """Shutdown the RocketMQ producer and log the operation.""" - logger.info(f"Begin to shutdown the rocketmq producer, client_id={self.client_id}") - await super().shutdown() - logger.info(f"Shutdown the rocketmq producer successfully, client_id={self.client_id}") - - @staticmethod - def wrap_send_message_request(message, message_queue): - """Wrap the send message request for the RocketMQ producer. - - :param message: The message to be sent. - :param message_queue: The queue to which the message will be sent. - :return: The SendMessageRequest with the message and queue details. - """ - req = SendMessageRequest() - req.messages.extend([message.to_protobuf(message_queue.queue_id)]) - return req - - async def send(self, message, transaction: Transaction = None): - tx_enabled = True - if transaction is None: - tx_enabled = False - if tx_enabled: - logger.debug("Transaction send") - publishing_message = transaction.try_add_message(message) - send_receipt = await self.send_message(message, tx_enabled) - transaction.try_add_receipt(publishing_message, send_receipt) - return send_receipt - else: - return await self.send_message(message) - - async def send_message(self, message, tx_enabled=False): - """Send a message using a load balancer, retrying as needed according to the retry policy. 
- - :param message: The message to be sent. - """ - publish_load_balancer = await self.get_publish_load_balancer(message.topic) - publishing_message = PublishingMessage(message, self.publish_settings, tx_enabled) - retry_policy = self.get_retry_policy() - max_attempts = retry_policy.get_max_attempts() - - exception = None - logger.debug(publishing_message.message.message_group) - candidates = ( - publish_load_balancer.take_message_queues(set(self.isolated.keys()), max_attempts) - if publishing_message.message.message_group is None else - [publish_load_balancer.take_message_queue_by_message_group(publishing_message.message.message_group)]) - for attempt in range(1, max_attempts + 1): - start_time = time.time() - candidate_index = (attempt - 1) % len(candidates) - mq = candidates[candidate_index] - logger.debug(mq.accept_message_types) - if self.publish_settings.is_validate_message_type() and publishing_message.message_type.value != mq.accept_message_types[0].value: - raise ValueError( - "Current message type does not match with the accept message types," - + f" topic={message.topic}, actualMessageType={publishing_message.message_type}" - + f" acceptMessageType={','}") - - send_message_request = self.wrap_send_message_request(publishing_message, mq) - # topic_data = self.topic_route_cache["normal_topic"] - endpoints = mq.broker.endpoints - - try: - invocation = await self.client_manager.send_message(endpoints, send_message_request, self.client_config.request_timeout) - logger.debug(invocation) - send_recepits = SendReceipt.process_send_message_response(mq, invocation) - send_recepit = send_recepits[0] - if attempt > 1: - logger.info( - f"Re-send message successfully, topic={message.topic}," - + f" max_attempts={max_attempts}, endpoints={str(endpoints)}, clientId={self.client_id}") - return send_recepit - except Exception as e: - exception = e - self.isolated[endpoints] = True - if attempt >= max_attempts: - logger.error("Failed to send message finally, run out of 
attempt times, " - + f"topic={message.topic}, maxAttempt={max_attempts}, attempt={attempt}, " - + f"endpoints={endpoints}, messageId={publishing_message.message_id}, clientId={self.client_id}") - raise - if publishing_message.message_type == MessageType.TRANSACTION: - logger.error("Failed to send transaction message, run out of attempt times, " - + f"topic={message.topic}, maxAttempt=1, attempt={attempt}, " - + f"endpoints={endpoints}, messageId={publishing_message.message_id}, clientId={self.client_id}") - raise - if not isinstance(exception, TooManyRequestsException): - logger.error(f"Failed to send message, topic={message.topic}, max_attempts={max_attempts}, " - + f"attempt={attempt}, endpoints={endpoints}, messageId={publishing_message.message_id}," - + f" clientId={self.client_id}") - continue - - nextAttempt = 1 + attempt - delay = retry_policy.get_next_attempt_delay(nextAttempt) - await asyncio.sleep(delay.total_seconds()) - logger.warning(f"Failed to send message due to too many requests, would attempt to resend after {delay},\ - topic={message.topic}, max_attempts={max_attempts}, attempt={attempt}, endpoints={endpoints},\ - message_id={publishing_message.message_id}, client_id={self.client_id}") - finally: - elapsed_time = time.time() - start_time - logger.info(f"send time: {elapsed_time}") - - def update_publish_load_balancer(self, topic, topic_route_data): - """Update the load balancer used for publishing messages to a topic. - - :param topic: The topic for which to update the load balancer. - :param topic_route_data: The new route data for the topic. - :return: The updated load balancer. 
- """ - publishing_load_balancer = None - if topic in self.publish_routedata_cache: - publishing_load_balancer = self.publish_routedata_cache[topic] - else: - publishing_load_balancer = PublishingLoadBalancer(topic_route_data) - self.publish_routedata_cache[topic] = publishing_load_balancer - return publishing_load_balancer - - async def get_publish_load_balancer(self, topic): - """Get the load balancer used for publishing messages to a topic. - - :param topic: The topic for which to get the load balancer. - :return: The load balancer for the topic. - """ - if topic in self.publish_routedata_cache: - return self.publish_routedata_cache[topic] - topic_route_data = await self.get_route_data(topic) - return self.update_publish_load_balancer(topic, topic_route_data) - - def get_settings(self): - """Get the publishing settings for this producer. - - :return: The publishing settings for this producer. - """ - return self.publish_settings - - def get_retry_policy(self): - """Get the retry policy for this producer. - - :return: The retry policy for this producer. 
- """ - return self.publish_settings.GetRetryPolicy() - - def begin_transaction(self): - """Start a new transaction.""" - return Transaction(self) - - async def end_transaction(self, endpoints, topic, message_id, transaction_id, resolution): - """End a transaction based on its resolution (commit or rollback).""" - topic_resource = ProtoResource(name=topic) - request = EndTransactionRequest( - transaction_id=transaction_id, - message_id=message_id, - topic=topic_resource, - resolution=ProtoTransactionResolution.COMMIT if resolution == "Commit" else ProtoTransactionResolution.ROLLBACK - ) - await self.client_manager.end_transaction(endpoints, request, self.client_config.request_timeout) - # StatusChecker.check(invocation.response.status, request, invocation.request_id) - - -async def test(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "normal_topic" - msg = ProtoMessage() - msg.topic.CopyFrom(topic) - msg.body = b"My Normal Message Body" - sysperf = SystemProperties() - sysperf.message_id = MessageIdCodec.next_message_id() - sysperf.message_group = "yourConsumerGroup" - msg.system_properties.CopyFrom(sysperf) - producer = Producer(client_config, topics={"normal_topic"}) - message = Message(topic.name, msg.body) - await producer.start() - await asyncio.sleep(10) - send_receipt = await producer.send(message) - logger.info(f"Send message successfully, {send_receipt}") - - -async def test_delay_message(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = 
"delay_topic" - msg = ProtoMessage() - msg.topic.CopyFrom(topic) - msg.body = b"My Delay Message Body" - sysperf = SystemProperties() - sysperf.message_id = MessageIdCodec.next_message_id() - msg.system_properties.CopyFrom(sysperf) - logger.debug(f"{msg}") - producer = Producer(client_config, topics={"delay_topic"}) - current_time_millis = int(round(time.time() * 1000)) - message_delay_time = timedelta(seconds=10) - result_time_millis = current_time_millis + int(message_delay_time.total_seconds() * 1000) - result_time_datetime = datetime.fromtimestamp(result_time_millis / 1000.0) - message = Message(topic.name, msg.body, delivery_timestamp=result_time_datetime) - await producer.start() - await asyncio.sleep(10) - send_receipt = await producer.send(message) - logger.info(f"Send message successfully, {send_receipt}") - - -async def test_fifo_message(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "fifo_topic" - msg = ProtoMessage() - msg.topic.CopyFrom(topic) - msg.body = b"My FIFO Message Body" - sysperf = SystemProperties() - sysperf.message_id = MessageIdCodec.next_message_id() - msg.system_properties.CopyFrom(sysperf) - logger.debug(f"{msg}") - producer = Producer(client_config, topics={"fifo_topic"}) - message = Message(topic.name, msg.body, message_group="yourConsumerGroup") - await producer.start() - await asyncio.sleep(10) - send_receipt = await producer.send(message) - logger.info(f"Send message successfully, {send_receipt}") - - -async def test_transaction_message(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - 
session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "transaction_topic" - msg = ProtoMessage() - msg.topic.CopyFrom(topic) - msg.body = b"My Transaction Message Body" - sysperf = SystemProperties() - sysperf.message_id = MessageIdCodec.next_message_id() - msg.system_properties.CopyFrom(sysperf) - logger.debug(f"{msg}") - producer = Producer(client_config, topics={"transaction_topic"}) - message = Message(topic.name, msg.body) - await producer.start() - # await asyncio.sleep(10) - transaction = producer.begin_transaction() - send_receipt = await producer.send(message, transaction) - logger.info(f"Send message successfully, {send_receipt}") - await transaction.commit() - - -async def test_retry_and_isolation(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "normal_topic" - msg = ProtoMessage() - msg.topic.CopyFrom(topic) - msg.body = b"My Message Body" - sysperf = SystemProperties() - sysperf.message_id = MessageIdCodec.next_message_id() - msg.system_properties.CopyFrom(sysperf) - logger.info(f"{msg}") - producer = Producer(client_config, topics={"normal_topic"}) - message = Message(topic.name, msg.body) - with patch.object(producer.client_manager, 'send_message', new_callable=MagicMock) as mock_send: - mock_send.side_effect = Exception("Forced Exception for Testing") - await producer.start() - - try: - await producer.send(message) - except Exception: - logger.info("Exception occurred as expected") - - assert mock_send.call_count == producer.get_retry_policy().get_max_attempts(), "Number of attempts should equal max_attempts." - logger.debug(producer.isolated) - assert producer.isolated, "Endpoint should be marked as isolated after an error." 
- - logger.info("Test completed successfully.") - -if __name__ == "__main__": - asyncio.run(test()) - asyncio.run(test_delay_message()) - asyncio.run(test_fifo_message()) - asyncio.run(test_transaction_message()) - asyncio.run(test_retry_and_isolation()) diff --git a/python/rocketmq/protocol/admin_pb2.py b/python/rocketmq/protocol/admin_pb2.py deleted file mode 100644 index bb49bfc18..000000000 --- a/python/rocketmq/protocol/admin_pb2.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: apache/rocketmq/v2/admin.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x61pache/rocketmq/v2/admin.proto\x12\x12\x61pache.rocketmq.v2\"\x95\x01\n\x15\x43hangeLogLevelRequest\x12>\n\x05level\x18\x01 \x01(\x0e\x32/.apache.rocketmq.v2.ChangeLogLevelRequest.Level\"<\n\x05Level\x12\t\n\x05TRACE\x10\x00\x12\t\n\x05\x44\x45\x42UG\x10\x01\x12\x08\n\x04INFO\x10\x02\x12\x08\n\x04WARN\x10\x03\x12\t\n\x05\x45RROR\x10\x04\"(\n\x16\x43hangeLogLevelResponse\x12\x0e\n\x06remark\x18\x01 \x01(\t2r\n\x05\x41\x64min\x12i\n\x0e\x43hangeLogLevel\x12).apache.rocketmq.v2.ChangeLogLevelRequest\x1a*.apache.rocketmq.v2.ChangeLogLevelResponse\"\x00\x42=\n\x12\x61pache.rocketmq.v2B\x07MQAdminP\x01\xa0\x01\x01\xd8\x01\x01\xf8\x01\x01\xaa\x02\x12\x41pache.Rocketmq.V2b\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'apache.rocketmq.v2.admin_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\022apache.rocketmq.v2B\007MQAdminP\001\240\001\001\330\001\001\370\001\001\252\002\022Apache.Rocketmq.V2' - _CHANGELOGLEVELREQUEST._serialized_start=55 - _CHANGELOGLEVELREQUEST._serialized_end=204 - _CHANGELOGLEVELREQUEST_LEVEL._serialized_start=144 - _CHANGELOGLEVELREQUEST_LEVEL._serialized_end=204 - _CHANGELOGLEVELRESPONSE._serialized_start=206 - _CHANGELOGLEVELRESPONSE._serialized_end=246 - _ADMIN._serialized_start=248 - _ADMIN._serialized_end=362 -# @@protoc_insertion_point(module_scope) diff --git a/python/rocketmq/protocol/admin_pb2.pyi 
b/python/rocketmq/protocol/admin_pb2.pyi deleted file mode 100644 index b286d4a4b..000000000 --- a/python/rocketmq/protocol/admin_pb2.pyi +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class ChangeLogLevelRequest(_message.Message): - __slots__ = ["level"] - class Level(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - DEBUG: ChangeLogLevelRequest.Level - ERROR: ChangeLogLevelRequest.Level - INFO: ChangeLogLevelRequest.Level - LEVEL_FIELD_NUMBER: _ClassVar[int] - TRACE: ChangeLogLevelRequest.Level - WARN: ChangeLogLevelRequest.Level - level: ChangeLogLevelRequest.Level - def __init__(self, level: _Optional[_Union[ChangeLogLevelRequest.Level, str]] = ...) -> None: ... - -class ChangeLogLevelResponse(_message.Message): - __slots__ = ["remark"] - REMARK_FIELD_NUMBER: _ClassVar[int] - remark: str - def __init__(self, remark: _Optional[str] = ...) -> None: ... 
diff --git a/python/rocketmq/protocol/definition_pb2.py b/python/rocketmq/protocol/definition_pb2.py deleted file mode 100644 index 9ad8bc6a5..000000000 --- a/python/rocketmq/protocol/definition_pb2.py +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: apache/rocketmq/v2/definition.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#apache/rocketmq/v2/definition.proto\x12\x12\x61pache.rocketmq.v2\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\"T\n\x10\x46ilterExpression\x12,\n\x04type\x18\x01 \x01(\x0e\x32\x1e.apache.rocketmq.v2.FilterType\x12\x12\n\nexpression\x18\x02 \x01(\t\"\xbb\x01\n\x0bRetryPolicy\x12\x14\n\x0cmax_attempts\x18\x01 \x01(\x05\x12\x45\n\x13\x65xponential_backoff\x18\x02 \x01(\x0b\x32&.apache.rocketmq.v2.ExponentialBackoffH\x00\x12\x43\n\x12\x63ustomized_backoff\x18\x03 \x01(\x0b\x32%.apache.rocketmq.v2.CustomizedBackoffH\x00\x42\n\n\x08strategy\"|\n\x12\x45xponentialBackoff\x12*\n\x07initial\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12&\n\x03max\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nmultiplier\x18\x03 \x01(\x02\"<\n\x11\x43ustomizedBackoff\x12\'\n\x04next\x18\x01 \x03(\x0b\x32\x19.google.protobuf.Duration\"4\n\x08Resource\x12\x1a\n\x12resource_namespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"z\n\x11SubscriptionEntry\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x38\n\nexpression\x18\x02 \x01(\x0b\x32$.apache.rocketmq.v2.FilterExpression\"%\n\x07\x41\x64\x64ress\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\"n\n\tEndpoints\x12\x31\n\x06scheme\x18\x01 
\x01(\x0e\x32!.apache.rocketmq.v2.AddressScheme\x12.\n\taddresses\x18\x02 \x03(\x0b\x32\x1b.apache.rocketmq.v2.Address\"T\n\x06\x42roker\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x30\n\tendpoints\x18\x03 \x01(\x0b\x32\x1d.apache.rocketmq.v2.Endpoints\"\xe6\x01\n\x0cMessageQueue\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\n\n\x02id\x18\x02 \x01(\x05\x12\x32\n\npermission\x18\x03 \x01(\x0e\x32\x1e.apache.rocketmq.v2.Permission\x12*\n\x06\x62roker\x18\x04 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Broker\x12=\n\x14\x61\x63\x63\x65pt_message_types\x18\x05 \x03(\x0e\x32\x1f.apache.rocketmq.v2.MessageType\"H\n\x06\x44igest\x12,\n\x04type\x18\x01 \x01(\x0e\x32\x1e.apache.rocketmq.v2.DigestType\x12\x10\n\x08\x63hecksum\x18\x02 \x01(\t\"\x8f\x08\n\x10SystemProperties\x12\x10\n\x03tag\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x12\n\nmessage_id\x18\x03 \x01(\t\x12/\n\x0b\x62ody_digest\x18\x04 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Digest\x12\x33\n\rbody_encoding\x18\x05 \x01(\x0e\x32\x1c.apache.rocketmq.v2.Encoding\x12\x35\n\x0cmessage_type\x18\x06 \x01(\x0e\x32\x1f.apache.rocketmq.v2.MessageType\x12\x32\n\x0e\x62orn_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x11\n\tborn_host\x18\x08 \x01(\t\x12\x38\n\x0fstore_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x88\x01\x01\x12\x12\n\nstore_host\x18\n \x01(\t\x12;\n\x12\x64\x65livery_timestamp\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x02\x88\x01\x01\x12\x1b\n\x0ereceipt_handle\x18\x0c \x01(\tH\x03\x88\x01\x01\x12\x10\n\x08queue_id\x18\r \x01(\x05\x12\x19\n\x0cqueue_offset\x18\x0e \x01(\x03H\x04\x88\x01\x01\x12:\n\x12invisible_duration\x18\x0f \x01(\x0b\x32\x19.google.protobuf.DurationH\x05\x88\x01\x01\x12\x1d\n\x10\x64\x65livery_attempt\x18\x10 \x01(\x05H\x06\x88\x01\x01\x12\x1a\n\rmessage_group\x18\x11 \x01(\tH\x07\x88\x01\x01\x12\x1a\n\rtrace_context\x18\x12 
\x01(\tH\x08\x88\x01\x01\x12N\n&orphaned_transaction_recovery_duration\x18\x13 \x01(\x0b\x32\x19.google.protobuf.DurationH\t\x88\x01\x01\x12\x43\n\x11\x64\x65\x61\x64_letter_queue\x18\x14 \x01(\x0b\x32#.apache.rocketmq.v2.DeadLetterQueueH\n\x88\x01\x01\x42\x06\n\x04_tagB\x12\n\x10_store_timestampB\x15\n\x13_delivery_timestampB\x11\n\x0f_receipt_handleB\x0f\n\r_queue_offsetB\x15\n\x13_invisible_durationB\x13\n\x11_delivery_attemptB\x10\n\x0e_message_groupB\x10\n\x0e_trace_contextB)\n\'_orphaned_transaction_recovery_durationB\x14\n\x12_dead_letter_queue\"4\n\x0f\x44\x65\x61\x64LetterQueue\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x12\n\nmessage_id\x18\x02 \x01(\t\"\x86\x02\n\x07Message\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12H\n\x0fuser_properties\x18\x02 \x03(\x0b\x32/.apache.rocketmq.v2.Message.UserPropertiesEntry\x12?\n\x11system_properties\x18\x03 \x01(\x0b\x32$.apache.rocketmq.v2.SystemProperties\x12\x0c\n\x04\x62ody\x18\x04 \x01(\x0c\x1a\x35\n\x13UserPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"E\n\nAssignment\x12\x37\n\rmessage_queue\x18\x01 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\"A\n\x06Status\x12&\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x18.apache.rocketmq.v2.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"i\n\x02UA\x12.\n\x08language\x18\x01 \x01(\x0e\x32\x1c.apache.rocketmq.v2.Language\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x10\n\x08platform\x18\x03 \x01(\t\x12\x10\n\x08hostname\x18\x04 \x01(\t\"\x90\x04\n\x08Settings\x12\x38\n\x0b\x63lient_type\x18\x01 \x01(\x0e\x32\x1e.apache.rocketmq.v2.ClientTypeH\x01\x88\x01\x01\x12\x38\n\x0c\x61\x63\x63\x65ss_point\x18\x02 \x01(\x0b\x32\x1d.apache.rocketmq.v2.EndpointsH\x02\x88\x01\x01\x12<\n\x0e\x62\x61\x63koff_policy\x18\x03 \x01(\x0b\x32\x1f.apache.rocketmq.v2.RetryPolicyH\x03\x88\x01\x01\x12\x37\n\x0frequest_timeout\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationH\x04\x88\x01\x01\x12\x34\n\npublishing\x18\x05 
\x01(\x0b\x32\x1e.apache.rocketmq.v2.PublishingH\x00\x12\x38\n\x0csubscription\x18\x06 \x01(\x0b\x32 .apache.rocketmq.v2.SubscriptionH\x00\x12*\n\nuser_agent\x18\x07 \x01(\x0b\x32\x16.apache.rocketmq.v2.UA\x12*\n\x06metric\x18\x08 \x01(\x0b\x32\x1a.apache.rocketmq.v2.MetricB\t\n\x07pub_subB\x0e\n\x0c_client_typeB\x0f\n\r_access_pointB\x11\n\x0f_backoff_policyB\x12\n\x10_request_timeout\"p\n\nPublishing\x12,\n\x06topics\x18\x01 \x03(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x15\n\rmax_body_size\x18\x02 \x01(\x05\x12\x1d\n\x15validate_message_type\x18\x03 \x01(\x08\"\xb3\x02\n\x0cSubscription\x12\x30\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.ResourceH\x00\x88\x01\x01\x12<\n\rsubscriptions\x18\x02 \x03(\x0b\x32%.apache.rocketmq.v2.SubscriptionEntry\x12\x11\n\x04\x66ifo\x18\x03 \x01(\x08H\x01\x88\x01\x01\x12\x1f\n\x12receive_batch_size\x18\x04 \x01(\x05H\x02\x88\x01\x01\x12<\n\x14long_polling_timeout\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x03\x88\x01\x01\x42\x08\n\x06_groupB\x07\n\x05_fifoB\x15\n\x13_receive_batch_sizeB\x17\n\x15_long_polling_timeout\"Y\n\x06Metric\x12\n\n\x02on\x18\x01 \x01(\x08\x12\x35\n\tendpoints\x18\x02 
\x01(\x0b\x32\x1d.apache.rocketmq.v2.EndpointsH\x00\x88\x01\x01\x42\x0c\n\n_endpoints*Y\n\x15TransactionResolution\x12&\n\"TRANSACTION_RESOLUTION_UNSPECIFIED\x10\x00\x12\n\n\x06\x43OMMIT\x10\x01\x12\x0c\n\x08ROLLBACK\x10\x02*W\n\x11TransactionSource\x12\x16\n\x12SOURCE_UNSPECIFIED\x10\x00\x12\x11\n\rSOURCE_CLIENT\x10\x01\x12\x17\n\x13SOURCE_SERVER_CHECK\x10\x02*W\n\nPermission\x12\x1a\n\x16PERMISSION_UNSPECIFIED\x10\x00\x12\x08\n\x04NONE\x10\x01\x12\x08\n\x04READ\x10\x02\x12\t\n\x05WRITE\x10\x03\x12\x0e\n\nREAD_WRITE\x10\x04*;\n\nFilterType\x12\x1b\n\x17\x46ILTER_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03TAG\x10\x01\x12\x07\n\x03SQL\x10\x02*T\n\rAddressScheme\x12\x1e\n\x1a\x41\x44\x44RESS_SCHEME_UNSPECIFIED\x10\x00\x12\x08\n\x04IPv4\x10\x01\x12\x08\n\x04IPv6\x10\x02\x12\x0f\n\x0b\x44OMAIN_NAME\x10\x03*]\n\x0bMessageType\x12\x1c\n\x18MESSAGE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06NORMAL\x10\x01\x12\x08\n\x04\x46IFO\x10\x02\x12\t\n\x05\x44\x45LAY\x10\x03\x12\x0f\n\x0bTRANSACTION\x10\x04*G\n\nDigestType\x12\x1b\n\x17\x44IGEST_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x43RC32\x10\x01\x12\x07\n\x03MD5\x10\x02\x12\x08\n\x04SHA1\x10\x03*_\n\nClientType\x12\x1b\n\x17\x43LIENT_TYPE_UNSPECIFIED\x10\x00\x12\x0c\n\x08PRODUCER\x10\x01\x12\x11\n\rPUSH_CONSUMER\x10\x02\x12\x13\n\x0fSIMPLE_CONSUMER\x10\x03*<\n\x08\x45ncoding\x12\x18\n\x14\x45NCODING_UNSPECIFIED\x10\x00\x12\x0c\n\x08IDENTITY\x10\x01\x12\x08\n\x04GZIP\x10\x02*\xfe\t\n\x04\x43ode\x12\x14\n\x10\x43ODE_UNSPECIFIED\x10\x00\x12\x08\n\x02OK\x10\xa0\x9c\x01\x12\x16\n\x10MULTIPLE_RESULTS\x10\xb0\xea\x01\x12\x11\n\x0b\x42\x41\x44_REQUEST\x10\xc0\xb8\x02\x12\x1a\n\x14ILLEGAL_ACCESS_POINT\x10\xc1\xb8\x02\x12\x13\n\rILLEGAL_TOPIC\x10\xc2\xb8\x02\x12\x1c\n\x16ILLEGAL_CONSUMER_GROUP\x10\xc3\xb8\x02\x12\x19\n\x13ILLEGAL_MESSAGE_TAG\x10\xc4\xb8\x02\x12\x19\n\x13ILLEGAL_MESSAGE_KEY\x10\xc5\xb8\x02\x12\x1b\n\x15ILLEGAL_MESSAGE_GROUP\x10\xc6\xb8\x02\x12\"\n\x1cILLEGAL_MESSAGE_PROPERTY_KEY\x10\xc7\xb8\x02\x12\x1c\n\x16INVALID_TRANSACTION_ID\x10\xc8
\xb8\x02\x12\x18\n\x12ILLEGAL_MESSAGE_ID\x10\xc9\xb8\x02\x12\x1f\n\x19ILLEGAL_FILTER_EXPRESSION\x10\xca\xb8\x02\x12\x1c\n\x16ILLEGAL_INVISIBLE_TIME\x10\xcb\xb8\x02\x12\x1b\n\x15ILLEGAL_DELIVERY_TIME\x10\xcc\xb8\x02\x12\x1c\n\x16INVALID_RECEIPT_HANDLE\x10\xcd\xb8\x02\x12)\n#MESSAGE_PROPERTY_CONFLICT_WITH_TYPE\x10\xce\xb8\x02\x12\x1e\n\x18UNRECOGNIZED_CLIENT_TYPE\x10\xcf\xb8\x02\x12\x17\n\x11MESSAGE_CORRUPTED\x10\xd0\xb8\x02\x12\x18\n\x12\x43LIENT_ID_REQUIRED\x10\xd1\xb8\x02\x12\x1a\n\x14ILLEGAL_POLLING_TIME\x10\xd2\xb8\x02\x12\x12\n\x0cUNAUTHORIZED\x10\xa4\xb9\x02\x12\x16\n\x10PAYMENT_REQUIRED\x10\x88\xba\x02\x12\x0f\n\tFORBIDDEN\x10\xec\xba\x02\x12\x0f\n\tNOT_FOUND\x10\xd0\xbb\x02\x12\x17\n\x11MESSAGE_NOT_FOUND\x10\xd1\xbb\x02\x12\x15\n\x0fTOPIC_NOT_FOUND\x10\xd2\xbb\x02\x12\x1e\n\x18\x43ONSUMER_GROUP_NOT_FOUND\x10\xd3\xbb\x02\x12\x15\n\x0fREQUEST_TIMEOUT\x10\xe0\xbe\x02\x12\x17\n\x11PAYLOAD_TOO_LARGE\x10\xd4\xc2\x02\x12\x1c\n\x16MESSAGE_BODY_TOO_LARGE\x10\xd5\xc2\x02\x12\x19\n\x13PRECONDITION_FAILED\x10\xb0\xce\x02\x12\x17\n\x11TOO_MANY_REQUESTS\x10\x94\xcf\x02\x12%\n\x1fREQUEST_HEADER_FIELDS_TOO_LARGE\x10\xdc\xd0\x02\x12\"\n\x1cMESSAGE_PROPERTIES_TOO_LARGE\x10\xdd\xd0\x02\x12\x14\n\x0eINTERNAL_ERROR\x10\xd0\x86\x03\x12\x1b\n\x15INTERNAL_SERVER_ERROR\x10\xd1\x86\x03\x12\x16\n\x10HA_NOT_AVAILABLE\x10\xd2\x86\x03\x12\x15\n\x0fNOT_IMPLEMENTED\x10\xb4\x87\x03\x12\x13\n\rPROXY_TIMEOUT\x10\xe0\x89\x03\x12 
\n\x1aMASTER_PERSISTENCE_TIMEOUT\x10\xe1\x89\x03\x12\x1f\n\x19SLAVE_PERSISTENCE_TIMEOUT\x10\xe2\x89\x03\x12\x11\n\x0bUNSUPPORTED\x10\xc4\x8a\x03\x12\x19\n\x13VERSION_UNSUPPORTED\x10\xc5\x8a\x03\x12%\n\x1fVERIFY_FIFO_MESSAGE_UNSUPPORTED\x10\xc6\x8a\x03\x12\x1f\n\x19\x46\x41ILED_TO_CONSUME_MESSAGE\x10\xe0\xd4\x03*\xad\x01\n\x08Language\x12\x18\n\x14LANGUAGE_UNSPECIFIED\x10\x00\x12\x08\n\x04JAVA\x10\x01\x12\x07\n\x03\x43PP\x10\x02\x12\x0b\n\x07\x44OT_NET\x10\x03\x12\n\n\x06GOLANG\x10\x04\x12\x08\n\x04RUST\x10\x05\x12\n\n\x06PYTHON\x10\x06\x12\x07\n\x03PHP\x10\x07\x12\x0b\n\x07NODE_JS\x10\x08\x12\x08\n\x04RUBY\x10\t\x12\x0f\n\x0bOBJECTIVE_C\x10\n\x12\x08\n\x04\x44\x41RT\x10\x0b\x12\n\n\x06KOTLIN\x10\x0c\x42;\n\x12\x61pache.rocketmq.v2B\x08MQDomainP\x01\xa0\x01\x01\xd8\x01\x01\xaa\x02\x12\x41pache.Rocketmq.V2b\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'apache.rocketmq.v2.definition_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\022apache.rocketmq.v2B\010MQDomainP\001\240\001\001\330\001\001\252\002\022Apache.Rocketmq.V2' - _MESSAGE_USERPROPERTIESENTRY._options = None - _MESSAGE_USERPROPERTIESENTRY._serialized_options = b'8\001' - _TRANSACTIONRESOLUTION._serialized_start=3962 - _TRANSACTIONRESOLUTION._serialized_end=4051 - _TRANSACTIONSOURCE._serialized_start=4053 - _TRANSACTIONSOURCE._serialized_end=4140 - _PERMISSION._serialized_start=4142 - _PERMISSION._serialized_end=4229 - _FILTERTYPE._serialized_start=4231 - _FILTERTYPE._serialized_end=4290 - _ADDRESSSCHEME._serialized_start=4292 - _ADDRESSSCHEME._serialized_end=4376 - _MESSAGETYPE._serialized_start=4378 - _MESSAGETYPE._serialized_end=4471 - _DIGESTTYPE._serialized_start=4473 - _DIGESTTYPE._serialized_end=4544 - _CLIENTTYPE._serialized_start=4546 - _CLIENTTYPE._serialized_end=4641 - _ENCODING._serialized_start=4643 - 
_ENCODING._serialized_end=4703 - _CODE._serialized_start=4706 - _CODE._serialized_end=5984 - _LANGUAGE._serialized_start=5987 - _LANGUAGE._serialized_end=6160 - _FILTEREXPRESSION._serialized_start=124 - _FILTEREXPRESSION._serialized_end=208 - _RETRYPOLICY._serialized_start=211 - _RETRYPOLICY._serialized_end=398 - _EXPONENTIALBACKOFF._serialized_start=400 - _EXPONENTIALBACKOFF._serialized_end=524 - _CUSTOMIZEDBACKOFF._serialized_start=526 - _CUSTOMIZEDBACKOFF._serialized_end=586 - _RESOURCE._serialized_start=588 - _RESOURCE._serialized_end=640 - _SUBSCRIPTIONENTRY._serialized_start=642 - _SUBSCRIPTIONENTRY._serialized_end=764 - _ADDRESS._serialized_start=766 - _ADDRESS._serialized_end=803 - _ENDPOINTS._serialized_start=805 - _ENDPOINTS._serialized_end=915 - _BROKER._serialized_start=917 - _BROKER._serialized_end=1001 - _MESSAGEQUEUE._serialized_start=1004 - _MESSAGEQUEUE._serialized_end=1234 - _DIGEST._serialized_start=1236 - _DIGEST._serialized_end=1308 - _SYSTEMPROPERTIES._serialized_start=1311 - _SYSTEMPROPERTIES._serialized_end=2350 - _DEADLETTERQUEUE._serialized_start=2352 - _DEADLETTERQUEUE._serialized_end=2404 - _MESSAGE._serialized_start=2407 - _MESSAGE._serialized_end=2669 - _MESSAGE_USERPROPERTIESENTRY._serialized_start=2616 - _MESSAGE_USERPROPERTIESENTRY._serialized_end=2669 - _ASSIGNMENT._serialized_start=2671 - _ASSIGNMENT._serialized_end=2740 - _STATUS._serialized_start=2742 - _STATUS._serialized_end=2807 - _UA._serialized_start=2809 - _UA._serialized_end=2914 - _SETTINGS._serialized_start=2917 - _SETTINGS._serialized_end=3445 - _PUBLISHING._serialized_start=3447 - _PUBLISHING._serialized_end=3559 - _SUBSCRIPTION._serialized_start=3562 - _SUBSCRIPTION._serialized_end=3869 - _METRIC._serialized_start=3871 - _METRIC._serialized_end=3960 -# @@protoc_insertion_point(module_scope) diff --git a/python/rocketmq/protocol/definition_pb2.pyi b/python/rocketmq/protocol/definition_pb2.pyi deleted file mode 100644 index dcd0286bb..000000000 --- 
a/python/rocketmq/protocol/definition_pb2.pyi +++ /dev/null @@ -1,398 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from google.protobuf import timestamp_pb2 as _timestamp_pb2 -from google.protobuf import duration_pb2 as _duration_pb2 -from google.protobuf.internal import containers as _containers -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -ADDRESS_SCHEME_UNSPECIFIED: AddressScheme -BAD_REQUEST: Code -CLIENT_ID_REQUIRED: Code -CLIENT_TYPE_UNSPECIFIED: ClientType -CODE_UNSPECIFIED: Code -COMMIT: TransactionResolution -CONSUMER_GROUP_NOT_FOUND: Code -CPP: Language -CRC32: DigestType -DART: Language -DELAY: MessageType -DESCRIPTOR: _descriptor.FileDescriptor -DIGEST_TYPE_UNSPECIFIED: DigestType -DOMAIN_NAME: AddressScheme -DOT_NET: Language -ENCODING_UNSPECIFIED: Encoding -FAILED_TO_CONSUME_MESSAGE: Code -FIFO: MessageType -FILTER_TYPE_UNSPECIFIED: FilterType -FORBIDDEN: Code -GOLANG: Language -GZIP: Encoding -HA_NOT_AVAILABLE: Code -IDENTITY: Encoding 
-ILLEGAL_ACCESS_POINT: Code -ILLEGAL_CONSUMER_GROUP: Code -ILLEGAL_DELIVERY_TIME: Code -ILLEGAL_FILTER_EXPRESSION: Code -ILLEGAL_INVISIBLE_TIME: Code -ILLEGAL_MESSAGE_GROUP: Code -ILLEGAL_MESSAGE_ID: Code -ILLEGAL_MESSAGE_KEY: Code -ILLEGAL_MESSAGE_PROPERTY_KEY: Code -ILLEGAL_MESSAGE_TAG: Code -ILLEGAL_POLLING_TIME: Code -ILLEGAL_TOPIC: Code -INTERNAL_ERROR: Code -INTERNAL_SERVER_ERROR: Code -INVALID_RECEIPT_HANDLE: Code -INVALID_TRANSACTION_ID: Code -IPv4: AddressScheme -IPv6: AddressScheme -JAVA: Language -KOTLIN: Language -LANGUAGE_UNSPECIFIED: Language -MASTER_PERSISTENCE_TIMEOUT: Code -MD5: DigestType -MESSAGE_BODY_TOO_LARGE: Code -MESSAGE_CORRUPTED: Code -MESSAGE_NOT_FOUND: Code -MESSAGE_PROPERTIES_TOO_LARGE: Code -MESSAGE_PROPERTY_CONFLICT_WITH_TYPE: Code -MESSAGE_TYPE_UNSPECIFIED: MessageType -MULTIPLE_RESULTS: Code -NODE_JS: Language -NONE: Permission -NORMAL: MessageType -NOT_FOUND: Code -NOT_IMPLEMENTED: Code -OBJECTIVE_C: Language -OK: Code -PAYLOAD_TOO_LARGE: Code -PAYMENT_REQUIRED: Code -PERMISSION_UNSPECIFIED: Permission -PHP: Language -PRECONDITION_FAILED: Code -PRODUCER: ClientType -PROXY_TIMEOUT: Code -PUSH_CONSUMER: ClientType -PYTHON: Language -READ: Permission -READ_WRITE: Permission -REQUEST_HEADER_FIELDS_TOO_LARGE: Code -REQUEST_TIMEOUT: Code -ROLLBACK: TransactionResolution -RUBY: Language -RUST: Language -SHA1: DigestType -SIMPLE_CONSUMER: ClientType -SLAVE_PERSISTENCE_TIMEOUT: Code -SOURCE_CLIENT: TransactionSource -SOURCE_SERVER_CHECK: TransactionSource -SOURCE_UNSPECIFIED: TransactionSource -SQL: FilterType -TAG: FilterType -TOO_MANY_REQUESTS: Code -TOPIC_NOT_FOUND: Code -TRANSACTION: MessageType -TRANSACTION_RESOLUTION_UNSPECIFIED: TransactionResolution -UNAUTHORIZED: Code -UNRECOGNIZED_CLIENT_TYPE: Code -UNSUPPORTED: Code -VERIFY_FIFO_MESSAGE_UNSUPPORTED: Code -VERSION_UNSUPPORTED: Code -WRITE: Permission - -class Address(_message.Message): - __slots__ = ["host", "port"] - HOST_FIELD_NUMBER: _ClassVar[int] - PORT_FIELD_NUMBER: 
_ClassVar[int] - host: str - port: int - def __init__(self, host: _Optional[str] = ..., port: _Optional[int] = ...) -> None: ... - -class Assignment(_message.Message): - __slots__ = ["message_queue"] - MESSAGE_QUEUE_FIELD_NUMBER: _ClassVar[int] - message_queue: MessageQueue - def __init__(self, message_queue: _Optional[_Union[MessageQueue, _Mapping]] = ...) -> None: ... - -class Broker(_message.Message): - __slots__ = ["endpoints", "id", "name"] - ENDPOINTS_FIELD_NUMBER: _ClassVar[int] - ID_FIELD_NUMBER: _ClassVar[int] - NAME_FIELD_NUMBER: _ClassVar[int] - endpoints: Endpoints - id: int - name: str - def __init__(self, name: _Optional[str] = ..., id: _Optional[int] = ..., endpoints: _Optional[_Union[Endpoints, _Mapping]] = ...) -> None: ... - -class CustomizedBackoff(_message.Message): - __slots__ = ["next"] - NEXT_FIELD_NUMBER: _ClassVar[int] - next: _containers.RepeatedCompositeFieldContainer[_duration_pb2.Duration] - def __init__(self, next: _Optional[_Iterable[_Union[_duration_pb2.Duration, _Mapping]]] = ...) -> None: ... - -class DeadLetterQueue(_message.Message): - __slots__ = ["message_id", "topic"] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - message_id: str - topic: str - def __init__(self, topic: _Optional[str] = ..., message_id: _Optional[str] = ...) -> None: ... - -class Digest(_message.Message): - __slots__ = ["checksum", "type"] - CHECKSUM_FIELD_NUMBER: _ClassVar[int] - TYPE_FIELD_NUMBER: _ClassVar[int] - checksum: str - type: DigestType - def __init__(self, type: _Optional[_Union[DigestType, str]] = ..., checksum: _Optional[str] = ...) -> None: ... 
- -class Endpoints(_message.Message): - __slots__ = ["addresses", "scheme"] - ADDRESSES_FIELD_NUMBER: _ClassVar[int] - SCHEME_FIELD_NUMBER: _ClassVar[int] - addresses: _containers.RepeatedCompositeFieldContainer[Address] - scheme: AddressScheme - def __init__(self, scheme: _Optional[_Union[AddressScheme, str]] = ..., addresses: _Optional[_Iterable[_Union[Address, _Mapping]]] = ...) -> None: ... - -class ExponentialBackoff(_message.Message): - __slots__ = ["initial", "max", "multiplier"] - INITIAL_FIELD_NUMBER: _ClassVar[int] - MAX_FIELD_NUMBER: _ClassVar[int] - MULTIPLIER_FIELD_NUMBER: _ClassVar[int] - initial: _duration_pb2.Duration - max: _duration_pb2.Duration - multiplier: float - def __init__(self, initial: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., max: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., multiplier: _Optional[float] = ...) -> None: ... - -class FilterExpression(_message.Message): - __slots__ = ["expression", "type"] - EXPRESSION_FIELD_NUMBER: _ClassVar[int] - TYPE_FIELD_NUMBER: _ClassVar[int] - expression: str - type: FilterType - def __init__(self, type: _Optional[_Union[FilterType, str]] = ..., expression: _Optional[str] = ...) -> None: ... - -class Message(_message.Message): - __slots__ = ["body", "system_properties", "topic", "user_properties"] - class UserPropertiesEntry(_message.Message): - __slots__ = ["key", "value"] - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
- BODY_FIELD_NUMBER: _ClassVar[int] - SYSTEM_PROPERTIES_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - USER_PROPERTIES_FIELD_NUMBER: _ClassVar[int] - body: bytes - system_properties: SystemProperties - topic: Resource - user_properties: _containers.ScalarMap[str, str] - def __init__(self, topic: _Optional[_Union[Resource, _Mapping]] = ..., user_properties: _Optional[_Mapping[str, str]] = ..., system_properties: _Optional[_Union[SystemProperties, _Mapping]] = ..., body: _Optional[bytes] = ...) -> None: ... - -class MessageQueue(_message.Message): - __slots__ = ["accept_message_types", "broker", "id", "permission", "topic"] - ACCEPT_MESSAGE_TYPES_FIELD_NUMBER: _ClassVar[int] - BROKER_FIELD_NUMBER: _ClassVar[int] - ID_FIELD_NUMBER: _ClassVar[int] - PERMISSION_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - accept_message_types: _containers.RepeatedScalarFieldContainer[MessageType] - broker: Broker - id: int - permission: Permission - topic: Resource - def __init__(self, topic: _Optional[_Union[Resource, _Mapping]] = ..., id: _Optional[int] = ..., permission: _Optional[_Union[Permission, str]] = ..., broker: _Optional[_Union[Broker, _Mapping]] = ..., accept_message_types: _Optional[_Iterable[_Union[MessageType, str]]] = ...) -> None: ... - -class Metric(_message.Message): - __slots__ = ["endpoints", "on"] - ENDPOINTS_FIELD_NUMBER: _ClassVar[int] - ON_FIELD_NUMBER: _ClassVar[int] - endpoints: Endpoints - on: bool - def __init__(self, on: bool = ..., endpoints: _Optional[_Union[Endpoints, _Mapping]] = ...) -> None: ... 
- -class Publishing(_message.Message): - __slots__ = ["max_body_size", "topics", "validate_message_type"] - MAX_BODY_SIZE_FIELD_NUMBER: _ClassVar[int] - TOPICS_FIELD_NUMBER: _ClassVar[int] - VALIDATE_MESSAGE_TYPE_FIELD_NUMBER: _ClassVar[int] - max_body_size: int - topics: _containers.RepeatedCompositeFieldContainer[Resource] - validate_message_type: bool - def __init__(self, topics: _Optional[_Iterable[_Union[Resource, _Mapping]]] = ..., max_body_size: _Optional[int] = ..., validate_message_type: bool = ...) -> None: ... - -class Resource(_message.Message): - __slots__ = ["name", "resource_namespace"] - NAME_FIELD_NUMBER: _ClassVar[int] - RESOURCE_NAMESPACE_FIELD_NUMBER: _ClassVar[int] - name: str - resource_namespace: str - def __init__(self, resource_namespace: _Optional[str] = ..., name: _Optional[str] = ...) -> None: ... - -class RetryPolicy(_message.Message): - __slots__ = ["customized_backoff", "exponential_backoff", "max_attempts"] - CUSTOMIZED_BACKOFF_FIELD_NUMBER: _ClassVar[int] - EXPONENTIAL_BACKOFF_FIELD_NUMBER: _ClassVar[int] - MAX_ATTEMPTS_FIELD_NUMBER: _ClassVar[int] - customized_backoff: CustomizedBackoff - exponential_backoff: ExponentialBackoff - max_attempts: int - def __init__(self, max_attempts: _Optional[int] = ..., exponential_backoff: _Optional[_Union[ExponentialBackoff, _Mapping]] = ..., customized_backoff: _Optional[_Union[CustomizedBackoff, _Mapping]] = ...) -> None: ... 
- -class Settings(_message.Message): - __slots__ = ["access_point", "backoff_policy", "client_type", "metric", "publishing", "request_timeout", "subscription", "user_agent"] - ACCESS_POINT_FIELD_NUMBER: _ClassVar[int] - BACKOFF_POLICY_FIELD_NUMBER: _ClassVar[int] - CLIENT_TYPE_FIELD_NUMBER: _ClassVar[int] - METRIC_FIELD_NUMBER: _ClassVar[int] - PUBLISHING_FIELD_NUMBER: _ClassVar[int] - REQUEST_TIMEOUT_FIELD_NUMBER: _ClassVar[int] - SUBSCRIPTION_FIELD_NUMBER: _ClassVar[int] - USER_AGENT_FIELD_NUMBER: _ClassVar[int] - access_point: Endpoints - backoff_policy: RetryPolicy - client_type: ClientType - metric: Metric - publishing: Publishing - request_timeout: _duration_pb2.Duration - subscription: Subscription - user_agent: UA - def __init__(self, client_type: _Optional[_Union[ClientType, str]] = ..., access_point: _Optional[_Union[Endpoints, _Mapping]] = ..., backoff_policy: _Optional[_Union[RetryPolicy, _Mapping]] = ..., request_timeout: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., publishing: _Optional[_Union[Publishing, _Mapping]] = ..., subscription: _Optional[_Union[Subscription, _Mapping]] = ..., user_agent: _Optional[_Union[UA, _Mapping]] = ..., metric: _Optional[_Union[Metric, _Mapping]] = ...) -> None: ... - -class Status(_message.Message): - __slots__ = ["code", "message"] - CODE_FIELD_NUMBER: _ClassVar[int] - MESSAGE_FIELD_NUMBER: _ClassVar[int] - code: Code - message: str - def __init__(self, code: _Optional[_Union[Code, str]] = ..., message: _Optional[str] = ...) -> None: ... 
- -class Subscription(_message.Message): - __slots__ = ["fifo", "group", "long_polling_timeout", "receive_batch_size", "subscriptions"] - FIFO_FIELD_NUMBER: _ClassVar[int] - GROUP_FIELD_NUMBER: _ClassVar[int] - LONG_POLLING_TIMEOUT_FIELD_NUMBER: _ClassVar[int] - RECEIVE_BATCH_SIZE_FIELD_NUMBER: _ClassVar[int] - SUBSCRIPTIONS_FIELD_NUMBER: _ClassVar[int] - fifo: bool - group: Resource - long_polling_timeout: _duration_pb2.Duration - receive_batch_size: int - subscriptions: _containers.RepeatedCompositeFieldContainer[SubscriptionEntry] - def __init__(self, group: _Optional[_Union[Resource, _Mapping]] = ..., subscriptions: _Optional[_Iterable[_Union[SubscriptionEntry, _Mapping]]] = ..., fifo: bool = ..., receive_batch_size: _Optional[int] = ..., long_polling_timeout: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... - -class SubscriptionEntry(_message.Message): - __slots__ = ["expression", "topic"] - EXPRESSION_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - expression: FilterExpression - topic: Resource - def __init__(self, topic: _Optional[_Union[Resource, _Mapping]] = ..., expression: _Optional[_Union[FilterExpression, _Mapping]] = ...) -> None: ... 
- -class SystemProperties(_message.Message): - __slots__ = ["body_digest", "body_encoding", "born_host", "born_timestamp", "dead_letter_queue", "delivery_attempt", "delivery_timestamp", "invisible_duration", "keys", "message_group", "message_id", "message_type", "orphaned_transaction_recovery_duration", "queue_id", "queue_offset", "receipt_handle", "store_host", "store_timestamp", "tag", "trace_context"] - BODY_DIGEST_FIELD_NUMBER: _ClassVar[int] - BODY_ENCODING_FIELD_NUMBER: _ClassVar[int] - BORN_HOST_FIELD_NUMBER: _ClassVar[int] - BORN_TIMESTAMP_FIELD_NUMBER: _ClassVar[int] - DEAD_LETTER_QUEUE_FIELD_NUMBER: _ClassVar[int] - DELIVERY_ATTEMPT_FIELD_NUMBER: _ClassVar[int] - DELIVERY_TIMESTAMP_FIELD_NUMBER: _ClassVar[int] - INVISIBLE_DURATION_FIELD_NUMBER: _ClassVar[int] - KEYS_FIELD_NUMBER: _ClassVar[int] - MESSAGE_GROUP_FIELD_NUMBER: _ClassVar[int] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - MESSAGE_TYPE_FIELD_NUMBER: _ClassVar[int] - ORPHANED_TRANSACTION_RECOVERY_DURATION_FIELD_NUMBER: _ClassVar[int] - QUEUE_ID_FIELD_NUMBER: _ClassVar[int] - QUEUE_OFFSET_FIELD_NUMBER: _ClassVar[int] - RECEIPT_HANDLE_FIELD_NUMBER: _ClassVar[int] - STORE_HOST_FIELD_NUMBER: _ClassVar[int] - STORE_TIMESTAMP_FIELD_NUMBER: _ClassVar[int] - TAG_FIELD_NUMBER: _ClassVar[int] - TRACE_CONTEXT_FIELD_NUMBER: _ClassVar[int] - body_digest: Digest - body_encoding: Encoding - born_host: str - born_timestamp: _timestamp_pb2.Timestamp - dead_letter_queue: DeadLetterQueue - delivery_attempt: int - delivery_timestamp: _timestamp_pb2.Timestamp - invisible_duration: _duration_pb2.Duration - keys: _containers.RepeatedScalarFieldContainer[str] - message_group: str - message_id: str - message_type: MessageType - orphaned_transaction_recovery_duration: _duration_pb2.Duration - queue_id: int - queue_offset: int - receipt_handle: str - store_host: str - store_timestamp: _timestamp_pb2.Timestamp - tag: str - trace_context: str - def __init__(self, tag: _Optional[str] = ..., keys: _Optional[_Iterable[str]] = 
..., message_id: _Optional[str] = ..., body_digest: _Optional[_Union[Digest, _Mapping]] = ..., body_encoding: _Optional[_Union[Encoding, str]] = ..., message_type: _Optional[_Union[MessageType, str]] = ..., born_timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., born_host: _Optional[str] = ..., store_timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., store_host: _Optional[str] = ..., delivery_timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., receipt_handle: _Optional[str] = ..., queue_id: _Optional[int] = ..., queue_offset: _Optional[int] = ..., invisible_duration: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., delivery_attempt: _Optional[int] = ..., message_group: _Optional[str] = ..., trace_context: _Optional[str] = ..., orphaned_transaction_recovery_duration: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., dead_letter_queue: _Optional[_Union[DeadLetterQueue, _Mapping]] = ...) -> None: ... - -class UA(_message.Message): - __slots__ = ["hostname", "language", "platform", "version"] - HOSTNAME_FIELD_NUMBER: _ClassVar[int] - LANGUAGE_FIELD_NUMBER: _ClassVar[int] - PLATFORM_FIELD_NUMBER: _ClassVar[int] - VERSION_FIELD_NUMBER: _ClassVar[int] - hostname: str - language: Language - platform: str - version: str - def __init__(self, language: _Optional[_Union[Language, str]] = ..., version: _Optional[str] = ..., platform: _Optional[str] = ..., hostname: _Optional[str] = ...) -> None: ... 
- -class TransactionResolution(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class TransactionSource(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class Permission(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class FilterType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class AddressScheme(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class MessageType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class DigestType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class ClientType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class Encoding(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class Code(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] - -class Language(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] diff --git a/python/rocketmq/protocol/service_pb2.py b/python/rocketmq/protocol/service_pb2.py deleted file mode 100644 index 2b4ec62da..000000000 --- a/python/rocketmq/protocol/service_pb2.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: apache/rocketmq/v2/service.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from protocol import definition_pb2 as apache_dot_rocketmq_dot_v2_dot_definition__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n apache/rocketmq/v2/service.proto\x12\x12\x61pache.rocketmq.v2\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a#apache/rocketmq/v2/definition.proto\"r\n\x11QueryRouteRequest\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x30\n\tendpoints\x18\x02 \x01(\x0b\x32\x1d.apache.rocketmq.v2.Endpoints\"z\n\x12QueryRouteResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x38\n\x0emessage_queues\x18\x02 \x03(\x0b\x32 .apache.rocketmq.v2.MessageQueue\"C\n\x12SendMessageRequest\x12-\n\x08messages\x18\x01 \x03(\x0b\x32\x1b.apache.rocketmq.v2.Message\"y\n\x0fSendResultEntry\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x16\n\x0etransaction_id\x18\x03 \x01(\t\x12\x0e\n\x06offset\x18\x04 \x01(\x03\"w\n\x13SendMessageResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x34\n\x07\x65ntries\x18\x02 
\x03(\x0b\x32#.apache.rocketmq.v2.SendResultEntry\"\xa4\x01\n\x16QueryAssignmentRequest\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05group\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x30\n\tendpoints\x18\x03 \x01(\x0b\x32\x1d.apache.rocketmq.v2.Endpoints\"z\n\x17QueryAssignmentResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x33\n\x0b\x61ssignments\x18\x02 \x03(\x0b\x32\x1e.apache.rocketmq.v2.Assignment\"\x90\x03\n\x15ReceiveMessageRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x37\n\rmessage_queue\x18\x02 \x01(\x0b\x32 .apache.rocketmq.v2.MessageQueue\x12?\n\x11\x66ilter_expression\x18\x03 \x01(\x0b\x32$.apache.rocketmq.v2.FilterExpression\x12\x12\n\nbatch_size\x18\x04 \x01(\x05\x12:\n\x12invisible_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x88\x01\x01\x12\x12\n\nauto_renew\x18\x06 \x01(\x08\x12<\n\x14long_polling_timeout\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationH\x01\x88\x01\x01\x42\x15\n\x13_invisible_durationB\x17\n\x15_long_polling_timeout\"\xbb\x01\n\x16ReceiveMessageResponse\x12,\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.StatusH\x00\x12.\n\x07message\x18\x02 \x01(\x0b\x32\x1b.apache.rocketmq.v2.MessageH\x00\x12\x38\n\x12\x64\x65livery_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\t\n\x07\x63ontent\"=\n\x0f\x41\x63kMessageEntry\x12\x12\n\nmessage_id\x18\x01 \x01(\t\x12\x16\n\x0ereceipt_handle\x18\x02 \x01(\t\"\xa3\x01\n\x11\x41\x63kMessageRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05topic\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x34\n\x07\x65ntries\x18\x03 \x03(\x0b\x32#.apache.rocketmq.v2.AckMessageEntry\"o\n\x15\x41\x63kMessageResultEntry\x12\x12\n\nmessage_id\x18\x01 \x01(\t\x12\x16\n\x0ereceipt_handle\x18\x02 \x01(\t\x12*\n\x06status\x18\x03 
\x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"|\n\x12\x41\x63kMessageResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12:\n\x07\x65ntries\x18\x02 \x03(\x0b\x32).apache.rocketmq.v2.AckMessageResultEntry\"\xe7\x01\n&ForwardMessageToDeadLetterQueueRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05topic\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x16\n\x0ereceipt_handle\x18\x03 \x01(\t\x12\x12\n\nmessage_id\x18\x04 \x01(\t\x12\x18\n\x10\x64\x65livery_attempt\x18\x05 \x01(\x05\x12\x1d\n\x15max_delivery_attempts\x18\x06 \x01(\x05\"U\n\'ForwardMessageToDeadLetterQueueResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"\x83\x01\n\x10HeartbeatRequest\x12\x30\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.ResourceH\x00\x88\x01\x01\x12\x33\n\x0b\x63lient_type\x18\x02 \x01(\x0e\x32\x1e.apache.rocketmq.v2.ClientTypeB\x08\n\x06_group\"?\n\x11HeartbeatResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"\xfd\x01\n\x15\x45ndTransactionRequest\x12+\n\x05topic\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x16\n\x0etransaction_id\x18\x03 \x01(\t\x12=\n\nresolution\x18\x04 \x01(\x0e\x32).apache.rocketmq.v2.TransactionResolution\x12\x35\n\x06source\x18\x05 \x01(\x0e\x32%.apache.rocketmq.v2.TransactionSource\x12\x15\n\rtrace_context\x18\x06 \x01(\t\"D\n\x16\x45ndTransactionResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"-\n\x1cPrintThreadStackTraceCommand\x12\r\n\x05nonce\x18\x01 \x01(\t\"Y\n\x10ThreadStackTrace\x12\r\n\x05nonce\x18\x01 \x01(\t\x12\x1f\n\x12thread_stack_trace\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x15\n\x13_thread_stack_trace\"S\n\x14VerifyMessageCommand\x12\r\n\x05nonce\x18\x01 \x01(\t\x12,\n\x07message\x18\x02 \x01(\x0b\x32\x1b.apache.rocketmq.v2.Message\"$\n\x13VerifyMessageResult\x12\r\n\x05nonce\x18\x01 
\x01(\t\"i\n!RecoverOrphanedTransactionCommand\x12,\n\x07message\x18\x01 \x01(\x0b\x32\x1b.apache.rocketmq.v2.Message\x12\x16\n\x0etransaction_id\x18\x02 \x01(\t\"\xaa\x04\n\x10TelemetryCommand\x12/\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.StatusH\x01\x88\x01\x01\x12\x30\n\x08settings\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.SettingsH\x00\x12\x42\n\x12thread_stack_trace\x18\x03 \x01(\x0b\x32$.apache.rocketmq.v2.ThreadStackTraceH\x00\x12H\n\x15verify_message_result\x18\x04 \x01(\x0b\x32\'.apache.rocketmq.v2.VerifyMessageResultH\x00\x12\x65\n$recover_orphaned_transaction_command\x18\x05 \x01(\x0b\x32\x35.apache.rocketmq.v2.RecoverOrphanedTransactionCommandH\x00\x12\\\n print_thread_stack_trace_command\x18\x06 \x01(\x0b\x32\x30.apache.rocketmq.v2.PrintThreadStackTraceCommandH\x00\x12J\n\x16verify_message_command\x18\x07 \x01(\x0b\x32(.apache.rocketmq.v2.VerifyMessageCommandH\x00\x42\t\n\x07\x63ommandB\t\n\x07_status\"\\\n\x1eNotifyClientTerminationRequest\x12\x30\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.ResourceH\x00\x88\x01\x01\x42\x08\n\x06_group\"M\n\x1fNotifyClientTerminationResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\"\xdd\x01\n\x1e\x43hangeInvisibleDurationRequest\x12+\n\x05group\x18\x01 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12+\n\x05topic\x18\x02 \x01(\x0b\x32\x1c.apache.rocketmq.v2.Resource\x12\x16\n\x0ereceipt_handle\x18\x03 \x01(\t\x12\x35\n\x12invisible_duration\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nmessage_id\x18\x05 \x01(\t\"e\n\x1f\x43hangeInvisibleDurationResponse\x12*\n\x06status\x18\x01 \x01(\x0b\x32\x1a.apache.rocketmq.v2.Status\x12\x16\n\x0ereceipt_handle\x18\x02 
\x01(\t2\xe0\t\n\x10MessagingService\x12]\n\nQueryRoute\x12%.apache.rocketmq.v2.QueryRouteRequest\x1a&.apache.rocketmq.v2.QueryRouteResponse\"\x00\x12Z\n\tHeartbeat\x12$.apache.rocketmq.v2.HeartbeatRequest\x1a%.apache.rocketmq.v2.HeartbeatResponse\"\x00\x12`\n\x0bSendMessage\x12&.apache.rocketmq.v2.SendMessageRequest\x1a\'.apache.rocketmq.v2.SendMessageResponse\"\x00\x12l\n\x0fQueryAssignment\x12*.apache.rocketmq.v2.QueryAssignmentRequest\x1a+.apache.rocketmq.v2.QueryAssignmentResponse\"\x00\x12k\n\x0eReceiveMessage\x12).apache.rocketmq.v2.ReceiveMessageRequest\x1a*.apache.rocketmq.v2.ReceiveMessageResponse\"\x00\x30\x01\x12]\n\nAckMessage\x12%.apache.rocketmq.v2.AckMessageRequest\x1a&.apache.rocketmq.v2.AckMessageResponse\"\x00\x12\x9c\x01\n\x1f\x46orwardMessageToDeadLetterQueue\x12:.apache.rocketmq.v2.ForwardMessageToDeadLetterQueueRequest\x1a;.apache.rocketmq.v2.ForwardMessageToDeadLetterQueueResponse\"\x00\x12i\n\x0e\x45ndTransaction\x12).apache.rocketmq.v2.EndTransactionRequest\x1a*.apache.rocketmq.v2.EndTransactionResponse\"\x00\x12]\n\tTelemetry\x12$.apache.rocketmq.v2.TelemetryCommand\x1a$.apache.rocketmq.v2.TelemetryCommand\"\x00(\x01\x30\x01\x12\x84\x01\n\x17NotifyClientTermination\x12\x32.apache.rocketmq.v2.NotifyClientTerminationRequest\x1a\x33.apache.rocketmq.v2.NotifyClientTerminationResponse\"\x00\x12\x84\x01\n\x17\x43hangeInvisibleDuration\x12\x32.apache.rocketmq.v2.ChangeInvisibleDurationRequest\x1a\x33.apache.rocketmq.v2.ChangeInvisibleDurationResponse\"\x00\x42<\n\x12\x61pache.rocketmq.v2B\tMQServiceP\x01\xa0\x01\x01\xd8\x01\x01\xaa\x02\x12\x41pache.Rocketmq.V2b\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'apache.rocketmq.v2.service_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\022apache.rocketmq.v2B\tMQServiceP\001\240\001\001\330\001\001\252\002\022Apache.Rocketmq.V2' - 
_QUERYROUTEREQUEST._serialized_start=158 - _QUERYROUTEREQUEST._serialized_end=272 - _QUERYROUTERESPONSE._serialized_start=274 - _QUERYROUTERESPONSE._serialized_end=396 - _SENDMESSAGEREQUEST._serialized_start=398 - _SENDMESSAGEREQUEST._serialized_end=465 - _SENDRESULTENTRY._serialized_start=467 - _SENDRESULTENTRY._serialized_end=588 - _SENDMESSAGERESPONSE._serialized_start=590 - _SENDMESSAGERESPONSE._serialized_end=709 - _QUERYASSIGNMENTREQUEST._serialized_start=712 - _QUERYASSIGNMENTREQUEST._serialized_end=876 - _QUERYASSIGNMENTRESPONSE._serialized_start=878 - _QUERYASSIGNMENTRESPONSE._serialized_end=1000 - _RECEIVEMESSAGEREQUEST._serialized_start=1003 - _RECEIVEMESSAGEREQUEST._serialized_end=1403 - _RECEIVEMESSAGERESPONSE._serialized_start=1406 - _RECEIVEMESSAGERESPONSE._serialized_end=1593 - _ACKMESSAGEENTRY._serialized_start=1595 - _ACKMESSAGEENTRY._serialized_end=1656 - _ACKMESSAGEREQUEST._serialized_start=1659 - _ACKMESSAGEREQUEST._serialized_end=1822 - _ACKMESSAGERESULTENTRY._serialized_start=1824 - _ACKMESSAGERESULTENTRY._serialized_end=1935 - _ACKMESSAGERESPONSE._serialized_start=1937 - _ACKMESSAGERESPONSE._serialized_end=2061 - _FORWARDMESSAGETODEADLETTERQUEUEREQUEST._serialized_start=2064 - _FORWARDMESSAGETODEADLETTERQUEUEREQUEST._serialized_end=2295 - _FORWARDMESSAGETODEADLETTERQUEUERESPONSE._serialized_start=2297 - _FORWARDMESSAGETODEADLETTERQUEUERESPONSE._serialized_end=2382 - _HEARTBEATREQUEST._serialized_start=2385 - _HEARTBEATREQUEST._serialized_end=2516 - _HEARTBEATRESPONSE._serialized_start=2518 - _HEARTBEATRESPONSE._serialized_end=2581 - _ENDTRANSACTIONREQUEST._serialized_start=2584 - _ENDTRANSACTIONREQUEST._serialized_end=2837 - _ENDTRANSACTIONRESPONSE._serialized_start=2839 - _ENDTRANSACTIONRESPONSE._serialized_end=2907 - _PRINTTHREADSTACKTRACECOMMAND._serialized_start=2909 - _PRINTTHREADSTACKTRACECOMMAND._serialized_end=2954 - _THREADSTACKTRACE._serialized_start=2956 - _THREADSTACKTRACE._serialized_end=3045 - 
_VERIFYMESSAGECOMMAND._serialized_start=3047 - _VERIFYMESSAGECOMMAND._serialized_end=3130 - _VERIFYMESSAGERESULT._serialized_start=3132 - _VERIFYMESSAGERESULT._serialized_end=3168 - _RECOVERORPHANEDTRANSACTIONCOMMAND._serialized_start=3170 - _RECOVERORPHANEDTRANSACTIONCOMMAND._serialized_end=3275 - _TELEMETRYCOMMAND._serialized_start=3278 - _TELEMETRYCOMMAND._serialized_end=3832 - _NOTIFYCLIENTTERMINATIONREQUEST._serialized_start=3834 - _NOTIFYCLIENTTERMINATIONREQUEST._serialized_end=3926 - _NOTIFYCLIENTTERMINATIONRESPONSE._serialized_start=3928 - _NOTIFYCLIENTTERMINATIONRESPONSE._serialized_end=4005 - _CHANGEINVISIBLEDURATIONREQUEST._serialized_start=4008 - _CHANGEINVISIBLEDURATIONREQUEST._serialized_end=4229 - _CHANGEINVISIBLEDURATIONRESPONSE._serialized_start=4231 - _CHANGEINVISIBLEDURATIONRESPONSE._serialized_end=4332 - _MESSAGINGSERVICE._serialized_start=4335 - _MESSAGINGSERVICE._serialized_end=5583 -# @@protoc_insertion_point(module_scope) diff --git a/python/rocketmq/protocol/service_pb2.pyi b/python/rocketmq/protocol/service_pb2.pyi deleted file mode 100644 index 7418dcb07..000000000 --- a/python/rocketmq/protocol/service_pb2.pyi +++ /dev/null @@ -1,294 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from google.protobuf import duration_pb2 as _duration_pb2 -from google.protobuf import timestamp_pb2 as _timestamp_pb2 -from protocol import definition_pb2 as _definition_pb2 -from google.protobuf.internal import containers as _containers -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class AckMessageEntry(_message.Message): - __slots__ = ["message_id", "receipt_handle"] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - RECEIPT_HANDLE_FIELD_NUMBER: _ClassVar[int] - message_id: str - receipt_handle: str - def __init__(self, message_id: _Optional[str] = ..., receipt_handle: _Optional[str] = ...) -> None: ... - -class AckMessageRequest(_message.Message): - __slots__ = ["entries", "group", "topic"] - ENTRIES_FIELD_NUMBER: _ClassVar[int] - GROUP_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - entries: _containers.RepeatedCompositeFieldContainer[AckMessageEntry] - group: _definition_pb2.Resource - topic: _definition_pb2.Resource - def __init__(self, group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., topic: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., entries: _Optional[_Iterable[_Union[AckMessageEntry, _Mapping]]] = ...) -> None: ... - -class AckMessageResponse(_message.Message): - __slots__ = ["entries", "status"] - ENTRIES_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - entries: _containers.RepeatedCompositeFieldContainer[AckMessageResultEntry] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., entries: _Optional[_Iterable[_Union[AckMessageResultEntry, _Mapping]]] = ...) -> None: ... 
- -class AckMessageResultEntry(_message.Message): - __slots__ = ["message_id", "receipt_handle", "status"] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - RECEIPT_HANDLE_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - message_id: str - receipt_handle: str - status: _definition_pb2.Status - def __init__(self, message_id: _Optional[str] = ..., receipt_handle: _Optional[str] = ..., status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ...) -> None: ... - -class ChangeInvisibleDurationRequest(_message.Message): - __slots__ = ["group", "invisible_duration", "message_id", "receipt_handle", "topic"] - GROUP_FIELD_NUMBER: _ClassVar[int] - INVISIBLE_DURATION_FIELD_NUMBER: _ClassVar[int] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - RECEIPT_HANDLE_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - group: _definition_pb2.Resource - invisible_duration: _duration_pb2.Duration - message_id: str - receipt_handle: str - topic: _definition_pb2.Resource - def __init__(self, group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., topic: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., receipt_handle: _Optional[str] = ..., invisible_duration: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., message_id: _Optional[str] = ...) -> None: ... - -class ChangeInvisibleDurationResponse(_message.Message): - __slots__ = ["receipt_handle", "status"] - RECEIPT_HANDLE_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - receipt_handle: str - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., receipt_handle: _Optional[str] = ...) -> None: ... 
- -class EndTransactionRequest(_message.Message): - __slots__ = ["message_id", "resolution", "source", "topic", "trace_context", "transaction_id"] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - RESOLUTION_FIELD_NUMBER: _ClassVar[int] - SOURCE_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - TRACE_CONTEXT_FIELD_NUMBER: _ClassVar[int] - TRANSACTION_ID_FIELD_NUMBER: _ClassVar[int] - message_id: str - resolution: _definition_pb2.TransactionResolution - source: _definition_pb2.TransactionSource - topic: _definition_pb2.Resource - trace_context: str - transaction_id: str - def __init__(self, topic: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., message_id: _Optional[str] = ..., transaction_id: _Optional[str] = ..., resolution: _Optional[_Union[_definition_pb2.TransactionResolution, str]] = ..., source: _Optional[_Union[_definition_pb2.TransactionSource, str]] = ..., trace_context: _Optional[str] = ...) -> None: ... - -class EndTransactionResponse(_message.Message): - __slots__ = ["status"] - STATUS_FIELD_NUMBER: _ClassVar[int] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ...) -> None: ... 
- -class ForwardMessageToDeadLetterQueueRequest(_message.Message): - __slots__ = ["delivery_attempt", "group", "max_delivery_attempts", "message_id", "receipt_handle", "topic"] - DELIVERY_ATTEMPT_FIELD_NUMBER: _ClassVar[int] - GROUP_FIELD_NUMBER: _ClassVar[int] - MAX_DELIVERY_ATTEMPTS_FIELD_NUMBER: _ClassVar[int] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - RECEIPT_HANDLE_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - delivery_attempt: int - group: _definition_pb2.Resource - max_delivery_attempts: int - message_id: str - receipt_handle: str - topic: _definition_pb2.Resource - def __init__(self, group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., topic: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., receipt_handle: _Optional[str] = ..., message_id: _Optional[str] = ..., delivery_attempt: _Optional[int] = ..., max_delivery_attempts: _Optional[int] = ...) -> None: ... - -class ForwardMessageToDeadLetterQueueResponse(_message.Message): - __slots__ = ["status"] - STATUS_FIELD_NUMBER: _ClassVar[int] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ...) -> None: ... - -class HeartbeatRequest(_message.Message): - __slots__ = ["client_type", "group"] - CLIENT_TYPE_FIELD_NUMBER: _ClassVar[int] - GROUP_FIELD_NUMBER: _ClassVar[int] - client_type: _definition_pb2.ClientType - group: _definition_pb2.Resource - def __init__(self, group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., client_type: _Optional[_Union[_definition_pb2.ClientType, str]] = ...) -> None: ... - -class HeartbeatResponse(_message.Message): - __slots__ = ["status"] - STATUS_FIELD_NUMBER: _ClassVar[int] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ...) -> None: ... 
- -class NotifyClientTerminationRequest(_message.Message): - __slots__ = ["group"] - GROUP_FIELD_NUMBER: _ClassVar[int] - group: _definition_pb2.Resource - def __init__(self, group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ...) -> None: ... - -class NotifyClientTerminationResponse(_message.Message): - __slots__ = ["status"] - STATUS_FIELD_NUMBER: _ClassVar[int] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ...) -> None: ... - -class PrintThreadStackTraceCommand(_message.Message): - __slots__ = ["nonce"] - NONCE_FIELD_NUMBER: _ClassVar[int] - nonce: str - def __init__(self, nonce: _Optional[str] = ...) -> None: ... - -class QueryAssignmentRequest(_message.Message): - __slots__ = ["endpoints", "group", "topic"] - ENDPOINTS_FIELD_NUMBER: _ClassVar[int] - GROUP_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - endpoints: _definition_pb2.Endpoints - group: _definition_pb2.Resource - topic: _definition_pb2.Resource - def __init__(self, topic: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., endpoints: _Optional[_Union[_definition_pb2.Endpoints, _Mapping]] = ...) -> None: ... - -class QueryAssignmentResponse(_message.Message): - __slots__ = ["assignments", "status"] - ASSIGNMENTS_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - assignments: _containers.RepeatedCompositeFieldContainer[_definition_pb2.Assignment] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., assignments: _Optional[_Iterable[_Union[_definition_pb2.Assignment, _Mapping]]] = ...) -> None: ... 
- -class QueryRouteRequest(_message.Message): - __slots__ = ["endpoints", "topic"] - ENDPOINTS_FIELD_NUMBER: _ClassVar[int] - TOPIC_FIELD_NUMBER: _ClassVar[int] - endpoints: _definition_pb2.Endpoints - topic: _definition_pb2.Resource - def __init__(self, topic: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., endpoints: _Optional[_Union[_definition_pb2.Endpoints, _Mapping]] = ...) -> None: ... - -class QueryRouteResponse(_message.Message): - __slots__ = ["message_queues", "status"] - MESSAGE_QUEUES_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - message_queues: _containers.RepeatedCompositeFieldContainer[_definition_pb2.MessageQueue] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., message_queues: _Optional[_Iterable[_Union[_definition_pb2.MessageQueue, _Mapping]]] = ...) -> None: ... - -class ReceiveMessageRequest(_message.Message): - __slots__ = ["auto_renew", "batch_size", "filter_expression", "group", "invisible_duration", "long_polling_timeout", "message_queue"] - AUTO_RENEW_FIELD_NUMBER: _ClassVar[int] - BATCH_SIZE_FIELD_NUMBER: _ClassVar[int] - FILTER_EXPRESSION_FIELD_NUMBER: _ClassVar[int] - GROUP_FIELD_NUMBER: _ClassVar[int] - INVISIBLE_DURATION_FIELD_NUMBER: _ClassVar[int] - LONG_POLLING_TIMEOUT_FIELD_NUMBER: _ClassVar[int] - MESSAGE_QUEUE_FIELD_NUMBER: _ClassVar[int] - auto_renew: bool - batch_size: int - filter_expression: _definition_pb2.FilterExpression - group: _definition_pb2.Resource - invisible_duration: _duration_pb2.Duration - long_polling_timeout: _duration_pb2.Duration - message_queue: _definition_pb2.MessageQueue - def __init__(self, group: _Optional[_Union[_definition_pb2.Resource, _Mapping]] = ..., message_queue: _Optional[_Union[_definition_pb2.MessageQueue, _Mapping]] = ..., filter_expression: _Optional[_Union[_definition_pb2.FilterExpression, _Mapping]] = ..., batch_size: _Optional[int] = ..., invisible_duration: 
_Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., auto_renew: bool = ..., long_polling_timeout: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... - -class ReceiveMessageResponse(_message.Message): - __slots__ = ["delivery_timestamp", "message", "status"] - DELIVERY_TIMESTAMP_FIELD_NUMBER: _ClassVar[int] - MESSAGE_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - delivery_timestamp: _timestamp_pb2.Timestamp - message: _definition_pb2.Message - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., message: _Optional[_Union[_definition_pb2.Message, _Mapping]] = ..., delivery_timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... - -class RecoverOrphanedTransactionCommand(_message.Message): - __slots__ = ["message", "transaction_id"] - MESSAGE_FIELD_NUMBER: _ClassVar[int] - TRANSACTION_ID_FIELD_NUMBER: _ClassVar[int] - message: _definition_pb2.Message - transaction_id: str - def __init__(self, message: _Optional[_Union[_definition_pb2.Message, _Mapping]] = ..., transaction_id: _Optional[str] = ...) -> None: ... - -class SendMessageRequest(_message.Message): - __slots__ = ["messages"] - MESSAGES_FIELD_NUMBER: _ClassVar[int] - messages: _containers.RepeatedCompositeFieldContainer[_definition_pb2.Message] - def __init__(self, messages: _Optional[_Iterable[_Union[_definition_pb2.Message, _Mapping]]] = ...) -> None: ... - -class SendMessageResponse(_message.Message): - __slots__ = ["entries", "status"] - ENTRIES_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - entries: _containers.RepeatedCompositeFieldContainer[SendResultEntry] - status: _definition_pb2.Status - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., entries: _Optional[_Iterable[_Union[SendResultEntry, _Mapping]]] = ...) -> None: ... 
- -class SendResultEntry(_message.Message): - __slots__ = ["message_id", "offset", "status", "transaction_id"] - MESSAGE_ID_FIELD_NUMBER: _ClassVar[int] - OFFSET_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - TRANSACTION_ID_FIELD_NUMBER: _ClassVar[int] - message_id: str - offset: int - status: _definition_pb2.Status - transaction_id: str - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., message_id: _Optional[str] = ..., transaction_id: _Optional[str] = ..., offset: _Optional[int] = ...) -> None: ... - -class TelemetryCommand(_message.Message): - __slots__ = ["print_thread_stack_trace_command", "recover_orphaned_transaction_command", "settings", "status", "thread_stack_trace", "verify_message_command", "verify_message_result"] - PRINT_THREAD_STACK_TRACE_COMMAND_FIELD_NUMBER: _ClassVar[int] - RECOVER_ORPHANED_TRANSACTION_COMMAND_FIELD_NUMBER: _ClassVar[int] - SETTINGS_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - THREAD_STACK_TRACE_FIELD_NUMBER: _ClassVar[int] - VERIFY_MESSAGE_COMMAND_FIELD_NUMBER: _ClassVar[int] - VERIFY_MESSAGE_RESULT_FIELD_NUMBER: _ClassVar[int] - print_thread_stack_trace_command: PrintThreadStackTraceCommand - recover_orphaned_transaction_command: RecoverOrphanedTransactionCommand - settings: _definition_pb2.Settings - status: _definition_pb2.Status - thread_stack_trace: ThreadStackTrace - verify_message_command: VerifyMessageCommand - verify_message_result: VerifyMessageResult - def __init__(self, status: _Optional[_Union[_definition_pb2.Status, _Mapping]] = ..., settings: _Optional[_Union[_definition_pb2.Settings, _Mapping]] = ..., thread_stack_trace: _Optional[_Union[ThreadStackTrace, _Mapping]] = ..., verify_message_result: _Optional[_Union[VerifyMessageResult, _Mapping]] = ..., recover_orphaned_transaction_command: _Optional[_Union[RecoverOrphanedTransactionCommand, _Mapping]] = ..., print_thread_stack_trace_command: 
_Optional[_Union[PrintThreadStackTraceCommand, _Mapping]] = ..., verify_message_command: _Optional[_Union[VerifyMessageCommand, _Mapping]] = ...) -> None: ... - -class ThreadStackTrace(_message.Message): - __slots__ = ["nonce", "thread_stack_trace"] - NONCE_FIELD_NUMBER: _ClassVar[int] - THREAD_STACK_TRACE_FIELD_NUMBER: _ClassVar[int] - nonce: str - thread_stack_trace: str - def __init__(self, nonce: _Optional[str] = ..., thread_stack_trace: _Optional[str] = ...) -> None: ... - -class VerifyMessageCommand(_message.Message): - __slots__ = ["message", "nonce"] - MESSAGE_FIELD_NUMBER: _ClassVar[int] - NONCE_FIELD_NUMBER: _ClassVar[int] - message: _definition_pb2.Message - nonce: str - def __init__(self, nonce: _Optional[str] = ..., message: _Optional[_Union[_definition_pb2.Message, _Mapping]] = ...) -> None: ... - -class VerifyMessageResult(_message.Message): - __slots__ = ["nonce"] - NONCE_FIELD_NUMBER: _ClassVar[int] - nonce: str - def __init__(self, nonce: _Optional[str] = ...) -> None: ... diff --git a/python/rocketmq/publish_settings.py b/python/rocketmq/publish_settings.py deleted file mode 100644 index 4f09cb7ca..000000000 --- a/python/rocketmq/publish_settings.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import platform -import socket -from typing import Dict - -from rocketmq.exponential_backoff_retry_policy import \ - ExponentialBackoffRetryPolicy -from rocketmq.protocol.definition_pb2 import UA -from rocketmq.protocol.definition_pb2 import Publishing as ProtoPublishing -from rocketmq.protocol.definition_pb2 import Resource as ProtoResource -from rocketmq.protocol.definition_pb2 import Settings as ProtoSettings -from rocketmq.rpc_client import Endpoints -from rocketmq.settings import ClientType, ClientTypeHelper, Settings -from rocketmq.signature import Signature - - -class UserAgent: - def __init__(self): - self._version = Signature()._Signature__CLIENT_VERSION_KEY - self._platform = platform.platform() - self._hostname = socket.gethostname() - - def to_protobuf(self) -> UA: - return UA( - version=self._version, hostname=self._hostname, platform=self._platform - ) - - -class PublishingSettings(Settings): - def __init__( - self, - client_id: str, - endpoints: Endpoints, - retry_policy: ExponentialBackoffRetryPolicy, - request_timeout: int, - topics: Dict[str, bool], - ): - super().__init__( - client_id, ClientType.Producer, endpoints, retry_policy, request_timeout - ) - self._max_body_size_bytes = 4 * 1024 * 1024 - self._validate_message_type = True - self._topics = topics - - def get_max_body_size_bytes(self) -> int: - return self._max_body_size_bytes - - def is_validate_message_type(self) -> bool: - return self._validate_message_type - - def sync(self, settings: ProtoSettings) -> None: - if settings.pub_sub_case != ProtoSettings.PubSubOneofCase.PUBLISHING: - return - - self.retry_policy = self.retry_policy.inherit_backoff(settings.backoff_policy) - self._validate_message_type = settings.publishing.validate_message_type - self._max_body_size_bytes = settings.publishing.max_body_size - - def to_protobuf(self): - topics = [ProtoResource(name=topic_name) for 
topic_name in self._topics] - - publishing = ProtoPublishing( - topics=topics, - validate_message_type=self._validate_message_type, - max_body_size=self._max_body_size_bytes, - ) - return ProtoSettings( - publishing=publishing, - access_point=self.Endpoints.to_protobuf(), - client_type=ClientTypeHelper.to_protobuf(self.ClientType), - user_agent=UserAgent().to_protobuf(), - ) diff --git a/python/rocketmq/publishing_message.py b/python/rocketmq/publishing_message.py deleted file mode 100644 index 195399cb0..000000000 --- a/python/rocketmq/publishing_message.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import socket - -from definition import Encoding, EncodingHelper, MessageType, MessageTypeHelper -from google.protobuf.timestamp_pb2 import Timestamp -from message import Message -from message_id_codec import MessageIdCodec -from protocol.definition_pb2 import Message as ProtoMessage -from protocol.definition_pb2 import Resource, SystemProperties -from rocketmq.log import logger - - -class PublishingMessage(Message): - def __init__(self, message, publishing_settings, tx_enabled=False): - self.message = message - self.publishing_settings = publishing_settings - self.tx_enabled = tx_enabled - self.message_type = None - - max_body_size_bytes = publishing_settings.get_max_body_size_bytes() - if len(message.body) > max_body_size_bytes: - raise IOError(f"Message body size exceed the threshold, max size={max_body_size_bytes} bytes") - - self.message_id = MessageIdCodec.next_message_id() - - if not message.message_group and not message.delivery_timestamp and not tx_enabled: - self.message_type = MessageType.NORMAL - return - - if message.message_group and not tx_enabled: - self.message_type = MessageType.FIFO - return - - if message.delivery_timestamp and not tx_enabled: - self.message_type = MessageType.DELAY - return - - if message.message_group or message.delivery_timestamp or not tx_enabled: - pass - - self.message_type = MessageType.TRANSACTION - logger.debug(self.message_type) - - def to_protobuf(self, queue_id): - system_properties = SystemProperties( - keys=self.message.keys, - message_id=self.message_id, - # born_timestamp=Timestamp.FromDatetime(dt=datetime.datetime.utcnow()), - born_host=socket.gethostname(), - body_encoding=EncodingHelper.to_protobuf(Encoding.IDENTITY), - queue_id=queue_id, - message_type=MessageTypeHelper.to_protobuf(self.message_type) - ) - if self.message.tag: - system_properties.tag = self.message.tag - - if self.message.delivery_timestamp: - timestamp = Timestamp() - timestamp.FromDatetime(self.message.delivery_timestamp) - 
system_properties.delivery_timestamp.CopyFrom(timestamp) - - if self.message.message_group: - system_properties.message_group = self.message.message_group - - topic_resource = Resource(name=self.message.topic) - - return ProtoMessage( - topic=topic_resource, - body=self.message.body, - system_properties=system_properties, - user_properties=self.message.properties - ) diff --git a/python/rocketmq/rpc_client.py b/python/rocketmq/rpc_client.py deleted file mode 100644 index d907632a2..000000000 --- a/python/rocketmq/rpc_client.py +++ /dev/null @@ -1,296 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import asyncio -import operator -import socket -import time -from datetime import timedelta -from enum import Enum -from functools import reduce - -import certifi -from grpc import aio, ssl_channel_credentials -from rocketmq.log import logger -from rocketmq.protocol import service_pb2, service_pb2_grpc -from rocketmq.protocol.definition_pb2 import Address as ProtoAddress -from rocketmq.protocol.definition_pb2 import \ - AddressScheme as ProtoAddressScheme -from rocketmq.protocol.definition_pb2 import Endpoints as ProtoEndpoints - - -class AddressScheme(Enum): - Unspecified = 0 - Ipv4 = 1 - Ipv6 = 2 - DomainName = 3 - - @staticmethod - def to_protobuf(scheme): - if scheme == AddressScheme.Ipv4: - return ProtoAddressScheme.IPV4 - elif scheme == AddressScheme.Ipv6: - return ProtoAddressScheme.IPV6 - elif scheme == AddressScheme.DomainName: - return ProtoAddressScheme.DOMAIN_NAME - else: # Unspecified or other cases - return ProtoAddressScheme.ADDRESS_SCHEME_UNSPECIFIED - - -class Address: - def __init__(self, host, port): - self.host = host - self.port = port - - def to_protobuf(self): - proto_address = ProtoAddress() - proto_address.host = self.host - proto_address.port = self.port - return proto_address - - -class Endpoints: - HttpPrefix = "http://" - HttpsPrefix = "https://" - DefaultPort = 80 - EndpointSeparator = ":" - - def __init__(self, endpoints): - self.Addresses = [] - - self.scheme = AddressScheme.Unspecified - self._hash = None - - if type(endpoints) == str: - if endpoints.startswith(self.HttpPrefix): - endpoints = endpoints[len(self.HttpPrefix):] - if endpoints.startswith(self.HttpsPrefix): - endpoints = endpoints[len(self.HttpsPrefix):] - - index = endpoints.find(self.EndpointSeparator) - port = int(endpoints[index + 1:]) if index > 0 else 80 - host = endpoints[:index] if index > 0 else endpoints - address = Address(host, port) - self.Addresses.append(address) - try: - socket.inet_pton(socket.AF_INET, host) - self.scheme = AddressScheme.IPv4 - except 
socket.error: - try: - socket.inet_pton(socket.AF_INET6, host) - self.scheme = AddressScheme.IPv6 - except socket.error: - self.scheme = AddressScheme.DomainName - self.Addresses.append(address) - - # Assuming AddressListEqualityComparer exists - self._hash = 17 - self._hash = (self._hash * 31) + reduce( - operator.xor, (hash(address) for address in self.Addresses) - ) - self._hash = (self._hash * 31) + hash(self.scheme) - else: - self.Addresses = [ - Address(addr.host, addr.port) for addr in endpoints.addresses - ] - if not self.Addresses: - raise Exception("No available address") - - if endpoints.scheme == "Ipv4": - self.scheme = AddressScheme.Ipv4 - elif endpoints.scheme == "Ipv6": - self.scheme = AddressScheme.Ipv6 - else: - self.scheme = AddressScheme.DomainName - if len(self.Addresses) > 1: - raise Exception( - "Multiple addresses are\ - not allowed in domain scheme" - ) - - self._hash = self._calculate_hash() - - def _calculate_hash(self): - hash_value = 17 - for address in self.Addresses: - hash_value = (hash_value * 31) + hash(address) - hash_value = (hash_value * 31) + hash(self.scheme) - return hash_value - - def __str__(self): - for address in self.Addresses: - return str(address.host) + str(address.port) - - def grpc_target(self, sslEnabled): - for address in self.Addresses: - return address.host + ":" + str(address.port) - raise ValueError("No available address") - - def __eq__(self, other): - if other is None: - return False - if self is other: - return True - res = self.Addresses == other.Addresses and self.Scheme == other.Scheme - return res - - def __hash__(self): - return self._hash - - def to_protobuf(self): - proto_endpoints = ProtoEndpoints() - proto_endpoints.scheme = self.scheme.to_protobuf(self.scheme) - proto_endpoints.addresses.extend([i.to_protobuf() for i in self.Addresses]) - return proto_endpoints - - -class RpcClient: - channel_options = [ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - 
("grpc.connect_timeout_ms", 3000), - ] - - def __init__(self, endpoints: str, ssl_enabled: bool = True): - self.__endpoints = endpoints - self.__cert = certifi.contents().encode(encoding="utf-8") - if ssl_enabled: - self.__channel = aio.secure_channel( - endpoints, - ssl_channel_credentials(root_certificates=self.__cert), - options=RpcClient.channel_options, - ) - else: - self.__channel = aio.insecure_channel( - endpoints, options=RpcClient.channel_options - ) - self.__stub = service_pb2_grpc.MessagingServiceStub(self.__channel) - self.activity_nano_time = time.monotonic_ns() - - def idle_duration(self): - return timedelta( - microseconds=(time.monotonic_ns() - self.activity_nano_time) / 1000 - ) - - async def query_route( - self, request: service_pb2.QueryRouteRequest, metadata, timeout_seconds: int - ): - # metadata = [('x-mq-client-id', 'value1')] - return await self.__stub.QueryRoute( - request, timeout=timeout_seconds, metadata=metadata - ) - - async def heartbeat( - self, request: service_pb2.HeartbeatRequest, metadata, timeout_seconds: int - ): - return await self.__stub.Heartbeat( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def send_message( - self, request: service_pb2.SendMessageRequest, metadata, timeout_seconds: int - ): - return await self.__stub.SendMessage( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def receive_message( - self, request: service_pb2.ReceiveMessageRequest, metadata, timeout_seconds: int - ): - results = self.__stub.ReceiveMessage( - request, metadata=metadata, timeout=timeout_seconds - ) - response = [] - try: - async for result in results: - if result.HasField("message"): - response.append(result.message) - except Exception as e: - logger.info("An error occurred: %s", e) - # Handle error as appropriate for your use case - return response - - async def query_assignment( - self, - request: service_pb2.QueryAssignmentRequest, - metadata, - timeout_seconds: int, - ): - return await 
self.__stub.QueryAssignment( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def ack_message( - self, request: service_pb2.AckMessageRequest, metadata, timeout_seconds: int - ): - return await self.__stub.AckMessage( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def forward_message_to_dead_letter_queue( - self, - request: service_pb2.ForwardMessageToDeadLetterQueueRequest, - metadata, - timeout_seconds: int, - ): - return await self.__stub.ForwardMessageToDeadLetterQueue( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def end_transaction( - self, request: service_pb2.EndTransactionRequest, metadata, timeout_seconds: int - ): - return await self.__stub.EndTransaction( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def notify_client_termination( - self, - request: service_pb2.NotifyClientTerminationRequest, - metadata, - timeout_seconds: int, - ): - return await self.__stub.NotifyClientTermination( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def change_invisible_duration( - self, - request: service_pb2.ChangeInvisibleDurationRequest, - metadata, - timeout_seconds: int, - ): - return await self.__stub.ChangeInvisibleDuration( - request, metadata=metadata, timeout=timeout_seconds - ) - - async def send_requests(self, requests, stream): - for request in requests: - await stream.send_message(request) - - def telemetry(self, metadata, timeout_seconds: int): - stream = self.__stub.Telemetry(metadata=metadata, timeout=timeout_seconds) - return stream - - -async def test(): - client = RpcClient("rmq-cn-jaj390gga04.cn-hangzhou.rmq.aliyuncs.com:8081") - request = service_pb2.SendMessageRequest() - response = await client.send_message(request, 3) - logger.info(response) - - -if __name__ == "__main__": - asyncio.run(test()) diff --git a/python/rocketmq/send_receipt.py b/python/rocketmq/send_receipt.py deleted file mode 100644 index 8e742da2d..000000000 --- 
a/python/rocketmq/send_receipt.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# from rocketmq.status_checker import StatusChecker -from rocketmq.log import logger -from rocketmq.message_id import MessageId -from rocketmq.protocol.definition_pb2 import Code as ProtoCode - - -class SendReceipt: - def __init__(self, message_id: MessageId, transaction_id, message_queue): - self.message_id = message_id - self.transaction_id = transaction_id - self.message_queue = message_queue - - @property - def endpoints(self): - return self.message_queue.broker.endpoints - - def __str__(self): - return f'MessageId: {self.message_id}' - - @staticmethod - def process_send_message_response(mq, invocation): - status = invocation.status - for entry in invocation.entries: - if entry.status.code == ProtoCode.OK: - status = entry.status - logger.debug(status) - # May throw exception. 
- # StatusChecker.check(status, invocation.request, invocation.request_id) - return [SendReceipt(entry.message_id, entry.transaction_id, mq) for entry in invocation.entries] diff --git a/python/rocketmq/session.py b/python/rocketmq/session.py deleted file mode 100644 index c3ea9d32e..000000000 --- a/python/rocketmq/session.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import asyncio - -from rocketmq.log import logger -from rocketmq.protocol.service_pb2 import \ - TelemetryCommand as ProtoTelemetryCommand - - -class Session: - def __init__(self, endpoints, streaming_call, client): - self._endpoints = endpoints - self._semaphore = asyncio.Semaphore(1) - self._streaming_call = streaming_call - self._client = client - asyncio.create_task(self.loop()) - - async def loop(self): - try: - while True: - await self._streaming_call.read() - except asyncio.exceptions.InvalidStateError as e: - logger.error('Error:', e) - - async def write_async(self, telemetry_command: ProtoTelemetryCommand): - await asyncio.sleep(1) - try: - await self._streaming_call.write(telemetry_command) - # TODO handle read operation exceed the time limit - # await asyncio.wait_for(self._streaming_call.read(), timeout=5) - except asyncio.exceptions.InvalidStateError as e: - self.on_error(e) - except asyncio.TimeoutError: - logger.error('Timeout: The read operation exceeded the time limit') - - async def sync_settings(self, await_resp): - await self._semaphore.acquire() - try: - settings = self._client.get_settings() - telemetry_command = ProtoTelemetryCommand() - telemetry_command.settings.CopyFrom(settings.to_protobuf()) - await self.write_async(telemetry_command) - finally: - self._semaphore.release() - - def rebuild_telemetry(self): - logger.info("Try to rebuild telemetry") - stream = self._client.client_manager.telemetry(self._endpoints, 10) - self._streaming_call = stream - - def on_error(self, exception): - client_id = self._client.get_client_id() - logger.error("Caught InvalidStateError: RPC already finished.") - logger.error(f"Exception raised from stream, clientId={client_id}, endpoints={self._endpoints}", exception) - max_retry = 3 - for i in range(max_retry): - try: - self.rebuild_telemetry() - break - except Exception as e: - logger.error(f"An error occurred during rebuilding telemetry: {e}, attempt {i + 1} of {max_retry}") diff --git 
a/python/rocketmq/session_credentials.py b/python/rocketmq/session_credentials.py deleted file mode 100644 index 828e293c5..000000000 --- a/python/rocketmq/session_credentials.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class SessionCredentials: - def __init__(self, access_key=None, access_secret=None, security_token=None): - if access_key is None: - raise ValueError("accessKey should not be None") - if access_secret is None: - raise ValueError("accessSecret should not be None") - - self.access_key = access_key - self.access_secret = access_secret - self.security_token = security_token - - -class SessionCredentialsProvider: - def __init__(self, credentials): - if not isinstance(credentials, SessionCredentials): - raise ValueError("credentials should be an instance of SessionCredentials") - self.credentials = credentials - - def get_credentials(self): - return self.credentials diff --git a/python/rocketmq/settings.py b/python/rocketmq/settings.py deleted file mode 100644 index a270dd151..000000000 --- a/python/rocketmq/settings.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -from abc import ABC, abstractmethod -from dataclasses import dataclass -from datetime import timedelta -from enum import Enum -from typing import Optional - -from rocketmq.protocol.definition_pb2 import ClientType as ProtoClientType -from rocketmq.protocol.definition_pb2 import RetryPolicy as ProtoRetryPolicy -from rocketmq.protocol.definition_pb2 import Settings as ProtoSettings -from rocketmq.rpc_client import Endpoints - - -class IRetryPolicy(ABC): - @abstractmethod - def GetMaxAttempts(self) -> int: - pass - - @abstractmethod - def GetNextAttemptDelay(self, attempt: int) -> datetime.timedelta: - pass - - @abstractmethod - def ToProtobuf(self) -> ProtoRetryPolicy: - pass - - @abstractmethod - def InheritBackoff(self, retryPolicy: ProtoRetryPolicy): - pass - - -class ClientType(Enum): - Producer = 1 - SimpleConsumer = 2 - PushConsumer = 3 - - -class ClientTypeHelper: - @staticmethod - def to_protobuf(clientType): - return { - ClientType.Producer: ProtoClientType.PRODUCER, - ClientType.SimpleConsumer: ProtoClientType.SIMPLE_CONSUMER, - ClientType.PushConsumer: ProtoClientType.PUSH_CONSUMER, - }.get(clientType, ProtoClientType.CLIENT_TYPE_UNSPECIFIED) - - -@dataclass -class Settings: - ClientId: str - ClientType: ClientType - Endpoints: Endpoints - RetryPolicy: 
Optional[IRetryPolicy] - RequestTimeout: timedelta - - def to_protobuf(self): - settings = ProtoSettings() - return settings - - def Sync(self, settings): - # Sync the settings properties from the Protobuf message - pass - - def GetRetryPolicy(self): - return self.RetryPolicy diff --git a/python/rocketmq/signature.py b/python/rocketmq/signature.py deleted file mode 100644 index ed39f9b70..000000000 --- a/python/rocketmq/signature.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import importlib.metadata -import uuid - -from rocketmq.client_config import ClientConfig -from rocketmq.utils import sign - - -class Signature: - __AUTHORIZATION_KEY = "authorization" - - __DATE_TIME_KEY = "x-mq-date-time" - __DATE_TIME_FORMAT = "%Y%m%dT%H%M%SZ" - - __SESSION_TOKEN_KEY = "x-mq-session-token" - __CLIENT_ID_KEY = "x-mq-client-id" - __REQUEST_ID_KEY = "x-mq-request-id" - __LANGUAGE_KEY = "x-mq-language" - __CLIENT_VERSION_KEY = "x-mq-client-version" - __PROTOCOL_VERSION = "x-mq-protocol" - - __ALGORITHM = "MQv2-HMAC-SHA1" - __CREDENTIAL = "Credential" - __SIGNED_HEADERS = "SignedHeaders" - __SIGNATURE = "Signature" - try: - __CLIENT_VERSION = importlib.metadata.version("rocketmq") - except Exception: - __CLIENT_VERSION = "0.1.0" - - @staticmethod - def sign(client_config: ClientConfig, client_id: str): - date_time = datetime.datetime.now().strftime(Signature.__DATE_TIME_FORMAT) - metadata = [ - (Signature.__LANGUAGE_KEY, "PYTHON"), - (Signature.__PROTOCOL_VERSION, "v2"), - (Signature.__CLIENT_VERSION_KEY, Signature.__CLIENT_VERSION), - ( - Signature.__DATE_TIME_KEY, - date_time, - ), - (Signature.__REQUEST_ID_KEY, str(uuid.uuid4())), - (Signature.__CLIENT_ID_KEY, client_id), - ] - if not client_config.session_credentials_provider: - return metadata - session_credentials = ( - client_config.session_credentials_provider.get_credentials() - ) - if not session_credentials: - return metadata - if session_credentials.security_token: - metadata.append( - (Signature.__SESSION_TOKEN_KEY, session_credentials.security_token) - ) - if (not session_credentials.access_key) or ( - not session_credentials.access_secret - ): - return metadata - signature = sign(session_credentials.access_secret, date_time) - authorization = ( - Signature.__ALGORITHM - + " " - + Signature.__CREDENTIAL - + "=" - + session_credentials.access_key - + ", " - + Signature.__SIGNED_HEADERS - + "=" - + Signature.__DATE_TIME_KEY - + ", " - + Signature.__SIGNATURE - + "=" - 
+ signature - ) - metadata.append((Signature.__AUTHORIZATION_KEY, authorization)) - return metadata diff --git a/python/rocketmq/simple_consumer.py b/python/rocketmq/simple_consumer.py deleted file mode 100644 index a85eb8092..000000000 --- a/python/rocketmq/simple_consumer.py +++ /dev/null @@ -1,423 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import asyncio -import random -import re -import threading -from datetime import timedelta -from threading import Lock -from typing import Dict - -import rocketmq -from google.protobuf.duration_pb2 import Duration -from rocketmq.client_config import ClientConfig -from rocketmq.consumer import Consumer -from rocketmq.definition import PermissionHelper -from rocketmq.filter_expression import FilterExpression -from rocketmq.log import logger -from rocketmq.message import MessageView -from rocketmq.protocol.definition_pb2 import Resource -from rocketmq.protocol.definition_pb2 import Resource as ProtoResource -from rocketmq.protocol.service_pb2 import \ - AckMessageEntry as ProtoAckMessageEntry -from rocketmq.protocol.service_pb2 import \ - AckMessageRequest as ProtoAckMessageRequest -from rocketmq.protocol.service_pb2 import \ - ChangeInvisibleDurationRequest as ProtoChangeInvisibleDurationRequest -from rocketmq.rpc_client import Endpoints -from rocketmq.session_credentials import (SessionCredentials, - SessionCredentialsProvider) -from rocketmq.simple_subscription_settings import SimpleSubscriptionSettings -from rocketmq.state import State -from utils import get_positive_mod - - -class SubscriptionLoadBalancer: - """This class serves as a load balancer for message subscription. - It keeps track of a rotating index to help distribute the load evenly. 
- """ - - def __init__(self, topic_route_data): - #: current index for message queue selection - self._index = random.randint(0, 10000) # assuming a range of 0-10000 - #: thread lock to ensure atomic update to the index - self._index_lock = threading.Lock() - - #: filter the message queues which are readable and from the master broker - self._message_queues = [ - mq for mq in topic_route_data.message_queues - if PermissionHelper().is_readable(mq.permission) - and mq.broker.id == rocketmq.utils.master_broker_id - ] - - def update(self, topic_route_data): - """Updates the message queues based on the new topic route data.""" - self._index += 1 - self._message_queues = [ - mq for mq in topic_route_data.message_queues - if PermissionHelper().is_readable(mq.permission) - and mq.broker.id == rocketmq.utils.master_broker_id - ] - return self - - def take_message_queue(self): - """Fetches the next message queue based on the current index.""" - with self._index_lock: - index = get_positive_mod(self._index, len(self._message_queues)) - self._index += 1 - return self._message_queues[index] - - -class SimpleConsumer(Consumer): - """The SimpleConsumer class extends the Client class and is used to consume - messages from specific topics in RocketMQ. - """ - - def __init__(self, client_config: ClientConfig, consumer_group: str, await_duration: int, subscription_expressions: Dict[str, FilterExpression]): - """Create a new SimpleConsumer. - - :param client_config: The configuration for the client. - :param consumer_group: The consumer group. - :param await_duration: The await duration. - :param subscription_expressions: The subscription expressions. 
- """ - super().__init__(client_config, consumer_group) - - self._consumer_group = consumer_group - self._await_duration = await_duration - self._subscription_expressions = subscription_expressions - - self._simple_subscription_settings = SimpleSubscriptionSettings(self.client_id, self.endpoints, self._consumer_group, timedelta(seconds=10), 10, self._subscription_expressions) - self._subscription_route_data_cache = {} - self._topic_round_robin_index = 0 - self._state_lock = Lock() - self._state = State.New - self._subscription_load_balancer = {} # A dictionary to keep subscription load balancers - - def get_topics(self): - return set(self._subscription_expressions.keys()) - - def get_settings(self): - return self._simple_subscription_settings - - async def subscribe(self, topic: str, filter_expression: FilterExpression): - if self._state != State.Running: - raise Exception("Simple consumer is not running") - - await self.get_subscription_load_balancer(topic) - self._subscription_expressions[topic] = filter_expression - - def unsubscribe(self, topic: str): - if self._state != State.Running: - raise Exception("Simple consumer is not running") - try: - self._subscription_expressions.pop(topic) - except KeyError: - pass - - async def start(self): - """Start the RocketMQ consumer and log the operation.""" - logger.info(f"Begin to start the rocketmq consumer, client_id={self.client_id}") - with self._state_lock: - if self._state != State.New: - raise Exception("Consumer already started") - await super().start() - # Start all necessary operations - self._state = State.Running - logger.info(f"The rocketmq consumer starts successfully, client_id={self.client_id}") - - async def shutdown(self): - """Shutdown the RocketMQ consumer and log the operation.""" - logger.info(f"Begin to shutdown the rocketmq consumer, client_id={self.client_id}") - with self._state_lock: - if self._state != State.Running: - raise Exception("Consumer is not running") - # Shutdown all necessary 
operations - self._state = State.Terminated - await super().shutdown() - logger.info(f"Shutdown the rocketmq consumer successfully, client_id={self.client_id}") - - def update_subscription_load_balancer(self, topic, topic_route_data): - # if a load balancer for this topic already exists in the subscription routing data cache, update it - subscription_load_balancer = self._subscription_route_data_cache.get(topic) - if subscription_load_balancer: - subscription_load_balancer.update(topic_route_data) - # otherwise, create a new subscription load balancer - else: - subscription_load_balancer = SubscriptionLoadBalancer(topic_route_data) - - # store new or updated subscription load balancers in the subscription routing data cache - self._subscription_route_data_cache[topic] = subscription_load_balancer - return subscription_load_balancer - - async def get_subscription_load_balancer(self, topic): - # if a load balancer for this topic already exists in the subscription routing data cache, return it - subscription_load_balancer = self._subscription_route_data_cache.get(topic) - if subscription_load_balancer: - return subscription_load_balancer - - # otherwise, obtain the routing data for the topic - topic_route_data = await self.get_route_data(topic) - # update subscription load balancer - return self.update_subscription_load_balancer(topic, topic_route_data) - - async def receive(self, max_message_num, invisible_duration): - if self._state != State.Running: - raise Exception("Simple consumer is not running") - if max_message_num <= 0: - raise Exception("maxMessageNum must be greater than 0") - copy = dict(self._subscription_expressions) - topics = list(copy.keys()) - if len(topics) == 0: - raise ValueError("There is no topic to receive message") - - index = (self._topic_round_robin_index + 1) % len(topics) - self._topic_round_robin_index = index - topic = topics[index] - filter_expression = self._subscription_expressions[topic] - subscription_load_balancer = await 
self.get_subscription_load_balancer(topic) - mq = subscription_load_balancer.take_message_queue() - request = self.wrap_receive_message_request(max_message_num, mq, filter_expression, self._await_duration, invisible_duration) - result = await self.receive_message(request, mq, self._await_duration) - return result.messages - - def wrap_change_invisible_duration(self, message_view: MessageView, invisible_duration): - topic_resource = ProtoResource() - topic_resource.name = message_view.topic - - request = ProtoChangeInvisibleDurationRequest() - request.topic.CopyFrom(topic_resource) - group = ProtoResource() - group.name = message_view.message_group - logger.debug(message_view.message_group) - request.group.CopyFrom(group) - request.receipt_handle = message_view.receipt_handle - request.invisible_duration.CopyFrom(Duration(seconds=invisible_duration)) - request.message_id = message_view.message_id - - return request - - async def change_invisible_duration(self, message_view: MessageView, invisible_duration): - if self._state != State.Running: - raise Exception("Simple consumer is not running") - - request = self.wrap_change_invisible_duration(message_view, invisible_duration) - result = await self.client_manager.change_invisible_duration( - message_view.message_queue.broker.endpoints, - request, - self.client_config.request_timeout - ) - logger.debug(result) - - async def ack(self, message_view: MessageView): - if self._state != State.Running: - raise Exception("Simple consumer is not running") - request = self.wrap_ack_message_request(message_view) - result = await self.client_manager.ack_message(message_view.message_queue.broker.endpoints, request=request, timeout_seconds=self.client_config.request_timeout) - logger.info(result) - - def get_protobuf_group(self): - return ProtoResource(name=self.consumer_group) - - def wrap_ack_message_request(self, message_view: MessageView): - topic_resource = ProtoResource() - topic_resource.name = message_view.topic - entry = 
ProtoAckMessageEntry() - entry.message_id = message_view.message_id - entry.receipt_handle = message_view.receipt_handle - - request = ProtoAckMessageRequest(group=self.get_protobuf_group(), topic=topic_resource, entries=[entry]) - return request - - class Builder: - def __init__(self): - self._consumer_group_regex = re.compile(r"^[%a-zA-Z0-9_-]+$") - self._clientConfig = None - self._consumerGroup = None - self._awaitDuration = None - self._subscriptionExpressions = {} - - def set_client_config(self, client_config: ClientConfig): - if client_config is None: - raise ValueError("clientConfig should not be null") - self._clientConfig = client_config - return self - - def set_consumer_group(self, consumer_group: str): - if consumer_group is None: - raise ValueError("consumerGroup should not be null") - # Assuming CONSUMER_GROUP_REGEX is defined in the outer scope - if not re.match(self._consumer_group_regex, consumer_group): - raise ValueError(f"topic does not match the regex {self._consumer_group_regex}") - self._consumerGroup = consumer_group - return self - - def set_await_duration(self, await_duration: int): - self._awaitDuration = await_duration - return self - - def set_subscription_expression(self, subscription_expressions: Dict[str, FilterExpression]): - if subscription_expressions is None: - raise ValueError("subscriptionExpressions should not be null") - if len(subscription_expressions) == 0: - raise ValueError("subscriptionExpressions should not be empty") - self._subscriptionExpressions = subscription_expressions - return self - - async def build(self): - if self._clientConfig is None: - raise ValueError("clientConfig has not been set yet") - if self._consumerGroup is None: - raise ValueError("consumerGroup has not been set yet") - if len(self._subscriptionExpressions) == 0: - raise ValueError("subscriptionExpressions has not been set yet") - - simple_consumer = SimpleConsumer(self._clientConfig, self._consumerGroup, self._awaitDuration, 
self._subscriptionExpressions) - await simple_consumer.start() - return simple_consumer - - -async def test(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "normal_topic" - - consumer_group = "yourConsumerGroup" - subscription = {topic.name: FilterExpression("*")} - simple_consumer = (await SimpleConsumer.Builder() - .set_client_config(client_config) - .set_consumer_group(consumer_group) - .set_await_duration(15) - .set_subscription_expression(subscription) - .build()) - logger.info(simple_consumer) - # while True: - message_views = await simple_consumer.receive(16, 15) - logger.info(message_views) - for message in message_views: - logger.info(message.body) - logger.info(f"Received a message, topic={message.topic}, message-id={message.message_id}, body-size={len(message.body)}") - await simple_consumer.ack(message) - logger.info(f"Message is acknowledged successfully, message-id={message.message_id}") - - -async def test_fifo_message(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "fifo_topic" - - consumer_group = "yourConsumerGroup" - subscription = {topic.name: FilterExpression("*")} - simple_consumer = (await SimpleConsumer.Builder() - .set_client_config(client_config) - .set_consumer_group(consumer_group) - .set_await_duration(15) - .set_subscription_expression(subscription) - .build()) - logger.info(simple_consumer) - # while True: - message_views = await simple_consumer.receive(16, 15) - # logger.info(message_views) - for message in message_views: - 
logger.info(message.body) - logger.info(f"Received a message, topic={message.topic}, message-id={message.message_id}, body-size={len(message.body)}") - await simple_consumer.ack(message) - logger.info(f"Message is acknowledged successfully, message-id={message.message_id}") - - -async def test_change_invisible_duration(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "fifo_topic" - - consumer_group = "yourConsumerGroup" - subscription = {topic.name: FilterExpression("*")} - simple_consumer = (await SimpleConsumer.Builder() - .set_client_config(client_config) - .set_consumer_group(consumer_group) - .set_await_duration(15) - .set_subscription_expression(subscription) - .build()) - logger.info(simple_consumer) - # while True: - message_views = await simple_consumer.receive(16, 15) - # logger.info(message_views) - for message in message_views: - await simple_consumer.change_invisible_duration(message_view=message, invisible_duration=3) - logger.info(message.body) - logger.info(f"Received a message, topic={message.topic}, message-id={message.message_id}, body-size={len(message.body)}") - await simple_consumer.ack(message) - logger.info(f"Message is acknowledged successfully, message-id={message.message_id}") - - -async def test_subscribe_unsubscribe(): - credentials = SessionCredentials("username", "password") - credentials_provider = SessionCredentialsProvider(credentials) - client_config = ClientConfig( - endpoints=Endpoints("endpoint"), - session_credentials_provider=credentials_provider, - ssl_enabled=True, - ) - topic = Resource() - topic.name = "normal_topic" - - consumer_group = "yourConsumerGroup" - subscription = {topic.name: FilterExpression("*")} - simple_consumer = (await SimpleConsumer.Builder() - 
.set_client_config(client_config) - .set_consumer_group(consumer_group) - .set_await_duration(15) - .set_subscription_expression(subscription) - .build()) - logger.info(simple_consumer) - # while True: - message_views = await simple_consumer.receive(16, 15) - logger.info(message_views) - for message in message_views: - logger.info(message.body) - logger.info(f"Received a message, topic={message.topic}, message-id={message.message_id}, body-size={len(message.body)}") - await simple_consumer.ack(message) - logger.info(f"Message is acknowledged successfully, message-id={message.message_id}") - simple_consumer.unsubscribe('normal_topic') - await simple_consumer.subscribe('fifo_topic', FilterExpression("*")) - message_views = await simple_consumer.receive(16, 15) - logger.info(message_views) - for message in message_views: - logger.info(message.body) - logger.info(f"Received a message, topic={message.topic}, message-id={message.message_id}, body-size={len(message.body)}") - await simple_consumer.ack(message) - logger.info(f"Message is acknowledged successfully, message-id={message.message_id}") - -if __name__ == "__main__": - asyncio.run(test_subscribe_unsubscribe()) diff --git a/python/rocketmq/simple_subscription_settings.py b/python/rocketmq/simple_subscription_settings.py deleted file mode 100644 index 2ee60089f..000000000 --- a/python/rocketmq/simple_subscription_settings.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Dict - -from google.protobuf.duration_pb2 import Duration -from rocketmq.filter_expression import ExpressionType, FilterExpression -from rocketmq.log import logger -from rocketmq.protocol.definition_pb2 import \ - FilterExpression as ProtoFilterExpression -from rocketmq.protocol.definition_pb2 import FilterType as ProtoFilterType -from rocketmq.protocol.definition_pb2 import Resource as ProtoResource -from rocketmq.protocol.definition_pb2 import Settings as ProtoSettings -from rocketmq.protocol.definition_pb2 import Subscription as ProtoSubscription -from rocketmq.protocol.definition_pb2 import \ - SubscriptionEntry as ProtoSubscriptionEntry - -from .settings import ClientType, ClientTypeHelper, Settings - - -class SimpleSubscriptionSettings(Settings): - - def __init__(self, clientId, endpoints, consumerGroup, requestTimeout, longPollingTimeout, - subscriptionExpressions: Dict[str, FilterExpression]): - super().__init__(clientId, ClientType.SimpleConsumer, endpoints, None, requestTimeout) - self._group = consumerGroup # Simplified as string for now - self._longPollingTimeout = longPollingTimeout - self._subscriptionExpressions = subscriptionExpressions - - def Sync(self, settings: ProtoSettings): - if not isinstance(settings, ProtoSettings): - logger.error(f"[Bug] Issued settings doesn't match with the client type, clientId={self.ClientId}, clientType={self.ClientType}") - - def to_protobuf(self): - subscriptionEntries = [] - - for key, value in self._subscriptionExpressions.items(): - topic = ProtoResource() - topic.name = key - - 
subscriptionEntry = ProtoSubscriptionEntry() - filterExpression = ProtoFilterExpression() - - if value.type.value == ExpressionType.Tag.value: - filterExpression.type = ProtoFilterType.TAG - elif value.type.value == ExpressionType.Sql92.value: - filterExpression.type = ProtoFilterType.SQL - else: - logger.warn(f"[Bug] Unrecognized filter type={value.type} for simple consumer") - - filterExpression.expression = value.expression - subscriptionEntry.topic.CopyFrom(topic) - subscriptionEntries.append(subscriptionEntry) - - subscription = ProtoSubscription() - group = ProtoResource() - group.name = self._group - subscription.group.CopyFrom(group) - subscription.subscriptions.extend(subscriptionEntries) - duration_longPollingTimeout = Duration(seconds=self._longPollingTimeout) - subscription.long_polling_timeout.CopyFrom(duration_longPollingTimeout) - - settings = super().to_protobuf() - settings.access_point.CopyFrom(self.Endpoints.to_protobuf()) # Assuming Endpoints has a to_protobuf method - settings.client_type = ClientTypeHelper.to_protobuf(self.ClientType) - - settings.request_timeout.CopyFrom(Duration(seconds=int(self.RequestTimeout.total_seconds()))) - settings.subscription.CopyFrom(subscription) - - return settings diff --git a/python/rocketmq/status_checker.py b/python/rocketmq/status_checker.py deleted file mode 100644 index ae2d19130..000000000 --- a/python/rocketmq/status_checker.py +++ /dev/null @@ -1,212 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from rocketmq.log import logger -from rocketmq.message import Message -from rocketmq.protocol.definition_pb2 import Code as ProtoCode -from rocketmq.protocol.definition_pb2 import Message as ProtoMessage -from rocketmq.protocol.definition_pb2 import Status as ProtoStatus -from rocketmq.protocol.service_pb2 import \ - ReceiveMessageRequest as ProtoReceiveMessageRequest - - -class RocketMQException(Exception): - def __init__(self, status_code, request_id, status_message): - self.status_code = status_code - self.request_id = request_id - self.status_message = status_message - - def __str__(self): - return f"{self.__class__.__name__}: code={self.status_code}, requestId={self.request_id}, message={self.status_message}" - - -class BadRequestException(RocketMQException): - pass - - -class UnauthorizedException(RocketMQException): - pass - - -class PaymentRequiredException(RocketMQException): - pass - - -class ForbiddenException(RocketMQException): - pass - - -class NotFoundException(RocketMQException): - pass - - -class PayloadTooLargeException(RocketMQException): - pass - - -class TooManyRequestsException(RocketMQException): - pass - - -class RequestHeaderFieldsTooLargeException(RocketMQException): - pass - - -class InternalErrorException(RocketMQException): - pass - - -class ProxyTimeoutException(RocketMQException): - pass - - -class UnsupportedException(RocketMQException): - pass - - -class StatusChecker: - @staticmethod - def check(status: ProtoStatus, request: Message, request_id: str): - """Check the status of a request and raise an exception if necessary. 
- - :param status: A ProtoStatus object that contains the status code and message. - :param request: The request message object. - :param request_id: The ID of the request. - :raise BadRequestException: If the status code indicates a bad request. - :raise UnauthorizedException: If the status code indicates an unauthorized request. - :raise PaymentRequiredException: If the status code indicates payment is required. - :raise ForbiddenException: If the status code indicates a forbidden request. - :raise NotFoundException: If the status code indicates a resource is not found. - :raise PayloadTooLargeException: If the status code indicates the request payload is too large. - :raise TooManyRequestsException: If the status code indicates too many requests. - :raise RequestHeaderFieldsTooLargeException: If the status code indicates the request headers are too large. - :raise InternalErrorException: If the status code indicates an internal error. - :raise ProxyTimeoutException: If the status code indicates a proxy timeout. - :raise UnsupportedException: If the status code indicates an unsupported operation. 
- """ - status_code = status.code - status_message = status.message - - if status_code in [ProtoCode.OK, ProtoCode.MULTIPLE_RESULTS]: - return - elif status_code in [ - ProtoCode.BAD_REQUEST, - ProtoCode.ILLEGAL_ACCESS_POINT, - ProtoCode.ILLEGAL_TOPIC, - ProtoCode.ILLEGAL_CONSUMER_GROUP, - ProtoCode.ILLEGAL_MESSAGE_TAG, - ProtoCode.ILLEGAL_MESSAGE_KEY, - ProtoCode.ILLEGAL_MESSAGE_GROUP, - ProtoCode.ILLEGAL_MESSAGE_PROPERTY_KEY, - ProtoCode.INVALID_TRANSACTION_ID, - ProtoCode.ILLEGAL_MESSAGE_ID, - ProtoCode.ILLEGAL_FILTER_EXPRESSION, - ProtoCode.ILLEGAL_INVISIBLE_TIME, - ProtoCode.ILLEGAL_DELIVERY_TIME, - ProtoCode.INVALID_RECEIPT_HANDLE, - ProtoCode.MESSAGE_PROPERTY_CONFLICT_WITH_TYPE, - ProtoCode.UNRECOGNIZED_CLIENT_TYPE, - ProtoCode.MESSAGE_CORRUPTED, - ProtoCode.CLIENT_ID_REQUIRED, - ProtoCode.ILLEGAL_POLLING_TIME, - ]: - raise BadRequestException(status_code, request_id, status_message) - elif status_code == ProtoCode.UNAUTHORIZED: - raise UnauthorizedException(status_code, request_id, status_message) - elif status_code == ProtoCode.PAYMENT_REQUIRED: - raise PaymentRequiredException(status_code, request_id, status_message) - elif status_code == ProtoCode.FORBIDDEN: - raise ForbiddenException(status_code, request_id, status_message) - elif status_code == ProtoCode.MESSAGE_NOT_FOUND: - if isinstance(request, ProtoReceiveMessageRequest): - return - else: - # Fall through on purpose. 
- status_code = ProtoCode.NOT_FOUND - if status_code in [ - ProtoCode.NOT_FOUND, - ProtoCode.TOPIC_NOT_FOUND, - ProtoCode.CONSUMER_GROUP_NOT_FOUND, - ]: - raise NotFoundException(status_code, request_id, status_message) - elif status_code in [ - ProtoCode.PAYLOAD_TOO_LARGE, - ProtoCode.MESSAGE_BODY_TOO_LARGE, - ]: - raise PayloadTooLargeException(status_code, request_id, status_message) - elif status_code == ProtoCode.TOO_MANY_REQUESTS: - raise TooManyRequestsException(status_code, request_id, status_message) - elif status_code in [ - ProtoCode.REQUEST_HEADER_FIELDS_TOO_LARGE, - ProtoCode.MESSAGE_PROPERTIES_TOO_LARGE, - ]: - raise RequestHeaderFieldsTooLargeException(status_code, request_id, status_message) - elif status_code in [ - ProtoCode.INTERNAL_ERROR, - ProtoCode.INTERNAL_SERVER_ERROR, - ProtoCode.HA_NOT_AVAILABLE, - ]: - raise InternalErrorException(status_code, request_id, status_message) - elif status_code in [ - ProtoCode.PROXY_TIMEOUT, - ProtoCode.MASTER_PERSISTENCE_TIMEOUT, - ProtoCode.SLAVE_PERSISTENCE_TIMEOUT, - ]: - raise ProxyTimeoutException(status_code, request_id, status_message) - elif status_code in [ - ProtoCode.UNSUPPORTED, - ProtoCode.VERSION_UNSUPPORTED, - ProtoCode.VERIFY_FIFO_MESSAGE_UNSUPPORTED, - ]: - raise UnsupportedException(status_code, request_id, status_message) - else: - logger.warning(f"Unrecognized status code={status_code}, requestId={request_id}, statusMessage={status_message}") - raise UnsupportedException(status_code, request_id, status_message) - - -def main(): - # 创建一个表示'OK'状态的ProtoStatus - status_ok = ProtoStatus() - status_ok.code = ProtoCode.OK - status_ok.message = "Everything is OK" - - # 创建一个表示'BadRequest'状态的ProtoStatus - status_bad_request = ProtoStatus() - status_bad_request.code = ProtoCode.BAD_REQUEST - status_bad_request.message = "Bad request" - - # 创建一个表示'Unauthorized'状态的ProtoStatus - status_unauthorized = ProtoStatus() - status_unauthorized.code = ProtoCode.UNAUTHORIZED - status_unauthorized.message = 
"Unauthorized" - - request = ProtoMessage() - - # 进行一些测试 - StatusChecker.check(status_ok, request, "request1") # 不应抛出异常 - - try: - StatusChecker.check(status_bad_request, request, "request2") - except BadRequestException as e: - logger.error(f"Caught expected exception: {e}") - - try: - StatusChecker.check(status_unauthorized, request, "request3") - except UnauthorizedException as e: - logger.error(f"Caught expected exception: {e}") - - -if __name__ == "__main__": - main() diff --git a/python/rocketmq/utils.py b/python/rocketmq/utils.py deleted file mode 100644 index e5cf3b278..000000000 --- a/python/rocketmq/utils.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import hashlib -import hmac - -master_broker_id = 0 - - -def number_to_base(number, base): - alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - if number == 0: - return alphabet[0] - - result = [] - while number: - number, remainder = divmod(number, base) - result.append(alphabet[remainder]) - - return "".join(reversed(result)) - - -def sign(access_secret: str, datetime: str) -> str: - digester = hmac.new( - bytes(access_secret, encoding="UTF-8"), - bytes(datetime, encoding="UTF-8"), - hashlib.sha1, - ) - return digester.hexdigest().upper() - - -def get_positive_mod(k: int, n: int): - result = k % n - return result + n if result < 0 else result diff --git a/python/rocketmq/protocol/__init__.py b/python/rocketmq/v5/__init__.py similarity index 100% rename from python/rocketmq/protocol/__init__.py rename to python/rocketmq/v5/__init__.py diff --git a/python/rocketmq/v5/client/__init__.py b/python/rocketmq/v5/client/__init__.py new file mode 100644 index 000000000..c3deeeea4 --- /dev/null +++ b/python/rocketmq/v5/client/__init__.py @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .client import Client +from .client_configuration import ClientConfiguration, Credentials + +__all__ = [ + "Client", + "ClientConfiguration", + "Credentials", +] diff --git a/python/rocketmq/message_id.py b/python/rocketmq/v5/client/balancer/__init__.py similarity index 85% rename from python/rocketmq/message_id.py rename to python/rocketmq/v5/client/balancer/__init__.py index 9bb9176b2..cc2bafdaf 100644 --- a/python/rocketmq/message_id.py +++ b/python/rocketmq/v5/client/balancer/__init__.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from .queue_selector import QueueSelector -class MessageId: - def __init__(self, version: str, suffix: str): - self.__version = version - self.__suffix = suffix +__all__ = [ + "QueueSelector", +] diff --git a/python/rocketmq/v5/client/balancer/queue_selector.py b/python/rocketmq/v5/client/balancer/queue_selector.py new file mode 100644 index 000000000..add420891 --- /dev/null +++ b/python/rocketmq/v5/client/balancer/queue_selector.py @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random + +from rocketmq.v5.exception import IllegalArgumentException +from rocketmq.v5.model import TopicRouteData +from rocketmq.v5.util import AtomicInteger + + +class QueueSelector: + + NONE_TYPE_SELECTOR = 0 + PRODUCER_QUEUE_SELECTOR = 1 + SIMPLE_CONSUMER_QUEUE_SELECTOR = 2 + + def __init__(self, queues, selector_type=NONE_TYPE_SELECTOR): + self.__index = AtomicInteger(random.randint(1, 1000)) + self.__message_queues = queues + self.__selector_type = selector_type + + @classmethod + def producer_queue_selector(cls, topic_route: TopicRouteData): + return cls(list(filter(lambda queue: queue.is_writable() and queue.is_master_broker(), topic_route.message_queues)), + QueueSelector.PRODUCER_QUEUE_SELECTOR) + + @classmethod + def simple_consumer_queue_selector(cls, topic_route: TopicRouteData): + return cls(list(filter(lambda queue: queue.is_readable() and queue.is_master_broker(), topic_route.message_queues)), + QueueSelector.SIMPLE_CONSUMER_QUEUE_SELECTOR) + + def select_next_queue(self): + if self.__selector_type == QueueSelector.NONE_TYPE_SELECTOR: + raise IllegalArgumentException("error type for queue selector, type is NONE_TYPE_SELECTOR.") + return self.__message_queues[self.__index.get_and_increment() % len(self.__message_queues)] + + def all_queues(self): + index = self.__index.get_and_increment() % len(self.__message_queues) + return self.__message_queues[index:] + self.__message_queues[:index] + + def update(self, topic_route: TopicRouteData): + if topic_route.message_queues == self.__message_queues: + return + if self.__selector_type == QueueSelector.PRODUCER_QUEUE_SELECTOR: + self.__message_queues = list(filter(lambda queue: queue.is_writable() and queue.is_master_broker(), topic_route.message_queues)) + elif self.__selector_type == QueueSelector.SIMPLE_CONSUMER_QUEUE_SELECTOR: + self.__message_queues = list(filter(lambda queue: queue.is_readable() and queue.is_master_broker(), topic_route.message_queues)) diff --git 
import asyncio
import functools
import threading
from asyncio import InvalidStateError
from queue import Queue

from grpc.aio import AioRpcError
from rocketmq.grpc_protocol import ClientType, Code, QueryRouteRequest
from rocketmq.v5.client.connection import RpcClient
from rocketmq.v5.client.metrics import ClientMetrics
from rocketmq.v5.exception import (IllegalArgumentException,
                                   IllegalStateException)
from rocketmq.v5.log import logger
from rocketmq.v5.model import (CallbackResult, CallbackResultType,
                               TopicRouteData)
from rocketmq.v5.util import (ClientId, ConcurrentMap, MessagingResultChecker,
                              Misc, Signature)


class Client:
    """Common base for producer/consumer clients.

    Owns the gRPC RpcClient, the topic-route cache, the client metrics, and
    four background scheduler threads (route refresh, heartbeat, settings
    sync, idle-channel cleanup) that all share the RpcClient's asyncio event
    loop, plus one thread that dispatches async-call results to user futures.
    Subclasses fill in the protocol-specific requests via the abstract hooks.
    """

    def __init__(self, client_configuration, topics, client_type: ClientType, tls_enable=False):
        if client_configuration is None:
            raise IllegalArgumentException("clientConfiguration should not be null.")
        self.__client_configuration = client_configuration
        self.__client_type = client_type
        self.__client_id = ClientId().client_id
        # {topic, topicRouteData}
        self.__topic_route_cache = ConcurrentMap()
        self.__rpc_client = RpcClient(tls_enable)
        self.__client_metrics = ClientMetrics(self.__client_id, client_configuration)
        # scheduler threads and their wake-up events are created lazily in
        # __start_scheduler(); None means "not started"
        self.__topic_route_scheduler = None
        self.__heartbeat_scheduler = None
        self.__sync_setting_scheduler = None
        self.__clear_idle_rpc_channels_scheduler = None
        self.__topic_route_scheduler_threading_event = None
        self.__heartbeat_scheduler_threading_event = None
        self.__sync_setting_scheduler_threading_event = None
        self.__clear_idle_rpc_channels_threading_event = None
        # keep only syntactically valid topic names; invalid ones are dropped
        if topics is not None:
            self.__topics = set(filter(lambda topic: Misc.is_valid_topic(topic), topics))
        else:
            self.__topics = set()
        # queue feeding __handle_callback(); carries CallbackResult objects
        self.__callback_result_queue = Queue()
        self.__callback_result_thread = None
        self.__is_running = False
        self.__client_thread_task_enabled = False
        self.__had_shutdown = False

    def startup(self):
        """Start the client: warm the route cache, spawn schedulers and the
        callback-dispatch thread. Raises if the client was already shut down
        or any thread fails to start; route warm-up failures are tolerated."""
        try:
            if self.__had_shutdown is True:
                raise Exception(f"client:{self.__client_id} had shutdown, can't startup again.")

            try:
                # pre update topic route for producer or consumer
                for topic in self.__topics:
                    self.__update_topic_route(topic)
            except Exception as e:
                # ignore this exception and retrieve again when calling send or receive
                logger.warn(
                    f"update topic exception when client startup, ignore it, try it again in scheduler. exception: {e}")
            self.__start_scheduler()
            self.__start_callback_handler()
            self.__is_running = True
            self._start_success()
        except Exception as e:
            # roll back to a stopped state before propagating
            self.__is_running = False
            self.__stop_client_threads()
            self._start_failure()
            logger.error(f"client:{self.__client_id} startup exception: {e}")
            raise e

    def shutdown(self):
        """Stop all client threads, notify brokers of termination, close the
        RpcClient and clear caches. A client cannot be restarted afterwards."""
        if self.is_running is False:
            raise IllegalStateException(f"client:{self.__client_id} is not running.")

        if self.__had_shutdown is True:
            raise IllegalStateException(f"client:{self.__client_id} had shutdown.")

        try:
            self.__stop_client_threads()
            self.__notify_client_termination()
            self.__rpc_client.stop()
            self.__topic_route_cache.clear()
            self.__topics.clear()
            self.__had_shutdown = True
            self.__is_running = False
        except Exception as e:
            logger.error(f"{self.__str__()} shutdown exception: {e}")
            raise e

    """ abstract """

    def _start_success(self):
        """ each subclass implements its own actions after a successful startup """
        pass

    def _start_failure(self):
        """ each subclass implements its own actions after a startup failure """
        pass

    def _sync_setting_req(self, endpoints):
        """ each subclass implements its own telemetry settings scheme """
        pass

    def _heartbeat_req(self):
        """ each subclass implements its own heartbeat request """
        pass

    def _notify_client_termination_req(self):
        """ each subclass implements its own client termination request """
        pass

    def _update_queue_selector(self, topic, topic_route):
        """ each subclass implements its own queue selector """
        pass

    """ scheduler """

    def __start_scheduler(self):
        # start 4 schedulers in different threads, each thread use the same asyncio event loop.
        try:
            # update topic route every 30 seconds
            self.__client_thread_task_enabled = True
            self.__topic_route_scheduler = threading.Thread(target=self.__schedule_update_topic_route_cache,
                                                            name="update_topic_route_schedule_thread")
            self.__topic_route_scheduler_threading_event = threading.Event()
            self.__topic_route_scheduler.start()
            logger.info("start topic route scheduler success.")

            # send heartbeat to all endpoints every 10 seconds
            self.__heartbeat_scheduler = threading.Thread(target=self.__schedule_heartbeat,
                                                          name="heartbeat_schedule_thread")
            self.__heartbeat_scheduler_threading_event = threading.Event()
            self.__heartbeat_scheduler.start()
            logger.info("start heartbeat scheduler success.")

            # send client setting to all endpoints every 5 seconds
            self.__sync_setting_scheduler = threading.Thread(target=self.__schedule_update_setting,
                                                             name="sync_setting_schedule_thread")
            self.__sync_setting_scheduler_threading_event = threading.Event()
            self.__sync_setting_scheduler.start()
            logger.info("start sync setting scheduler success.")

            # clear unused grpc channel(>30 minutes) every 60 seconds
            self.__clear_idle_rpc_channels_scheduler = threading.Thread(target=self.__schedule_clear_idle_rpc_channels,
                                                                        name="clear_idle_rpc_channel_schedule_thread")
            self.__clear_idle_rpc_channels_threading_event = threading.Event()
            self.__clear_idle_rpc_channels_scheduler.start()
            logger.info("start clear idle rpc channels scheduler success.")
        except Exception as e:
            logger.info(f"start scheduler exception: {e}")
            self.__stop_client_threads()
            raise e

    def __schedule_update_topic_route_cache(self):
        # scheduler loop: refresh every cached topic route; Event.wait(30)
        # doubles as sleep and as a shutdown wake-up (set() in stop)
        asyncio.set_event_loop(self._rpc_channel_io_loop())
        while True:
            if self.__client_thread_task_enabled is True:
                self.__topic_route_scheduler_threading_event.wait(30)
                logger.debug(f"{self.__str__()} run scheduler for update topic route cache.")
                # update topic route for each topic in cache
                topics = self.__topic_route_cache.keys()
                for topic in topics:
                    try:
                        if self.__client_thread_task_enabled is True:
                            self.__update_topic_route_async(topic)
                    except Exception as e:
                        logger.error(
                            f"{self.__str__()} run scheduler for update topic:{topic} route cache exception: {e}")
            else:
                break
        logger.info(f"{self.__str__()} stop scheduler for update topic route cache success.")

    def __schedule_heartbeat(self):
        # scheduler loop: heartbeat to every known endpoint every 10 seconds
        asyncio.set_event_loop(self._rpc_channel_io_loop())
        while True:
            if self.__client_thread_task_enabled is True:
                self.__heartbeat_scheduler_threading_event.wait(10)
                logger.debug(f"{self.__str__()} run scheduler for heartbeat.")
                all_endpoints = self.__get_all_endpoints().values()
                try:
                    for endpoints in all_endpoints:
                        if self.__client_thread_task_enabled is True:
                            self.__heartbeat_async(endpoints)
                except Exception as e:
                    logger.error(f"{self.__str__()} run scheduler for heartbeat exception: {e}")
            else:
                break
        logger.info(f"{self.__str__()} stop scheduler for heartbeat success.")

    def __schedule_update_setting(self):
        # scheduler loop: push client settings over the telemetry stream
        # every 5 seconds, (re)creating the stream-stream call when needed
        asyncio.set_event_loop(self._rpc_channel_io_loop())
        while True:
            if self.__client_thread_task_enabled is True:
                self.__sync_setting_scheduler_threading_event.wait(5)
                logger.debug(f"{self.__str__()} run scheduler for update setting.")
                try:
                    all_endpoints = self.__get_all_endpoints().values()
                    for endpoints in all_endpoints:
                        if self.__client_thread_task_enabled is True:
                            # if stream_stream_call for grpc channel is none, create a new one, otherwise use the existing one
                            self.__retrieve_telemetry_stream_stream_call(endpoints)
                            self.__setting_write(endpoints)
                except Exception as e:
                    logger.error(f"{self.__str__()} run scheduler for update setting exception: {e}")
            else:
                break
        logger.info(f"{self.__str__()} stop scheduler for update setting success.")

    def __schedule_clear_idle_rpc_channels(self):
        # scheduler loop: drop gRPC channels idle for >30 minutes, checked
        # every 60 seconds; runs without touching the asyncio loop directly
        while True:
            if self.__client_thread_task_enabled is True:
                self.__clear_idle_rpc_channels_threading_event.wait(60)
                logger.debug(f"{self.__str__()} run scheduler for clear idle rpc channels.")
                try:
                    if self.__client_thread_task_enabled is True:
                        self.__rpc_client.clear_idle_rpc_channels()
                except Exception as e:
                    logger.error(f"{self.__str__()} run scheduler for clear idle rpc channels: {e}")
            else:
                break
        logger.info(f"{self.__str__()} stop scheduler for clear idle rpc channels success.")

    """ callback handler for async method """

    def __start_callback_handler(self):
        # a thread to handle callback when using async method such as send_async(), receive_async().
        # this handler switches user's callback thread from RpcClient's _io_loop_thread to client's callback_handler_thread
        self.__callback_result_thread = threading.Thread(name="callback_handler_thread", target=self.__handle_callback)
        self.__callback_result_thread.daemon = True
        self.__callback_result_thread.start()

    def __handle_callback(self):
        # drain the callback queue, completing user futures with either the
        # result or the exception; a sentinel END_CALLBACK_THREAD_RESULT
        # message terminates the loop on shutdown
        while True:
            if self.__client_thread_task_enabled is True:
                callback_result = self.__callback_result_queue.get()
                if callback_result.result_type == CallbackResultType.END_CALLBACK_THREAD_RESULT:
                    # end infinite loop when client shutdown
                    self.__callback_result_queue.task_done()
                    break
                else:
                    if callback_result.is_success:
                        callback_result.future.set_result(callback_result.result)
                    else:
                        callback_result.future.set_exception(callback_result.result)
                    self.__callback_result_queue.task_done()
            else:
                break
        logger.info(f"{self.__str__()} stop client callback result handler thread success.")

    """ protect """

    def _retrieve_topic_route_data(self, topic):
        """Return the cached route for *topic*, fetching and caching it on a miss."""
        route = self.__topic_route_cache.get(topic)
        if route is not None:
            return route
        else:
            route = self.__update_topic_route(topic)
            logger.info(f"{self.__str__()} update topic:{topic} route success.")
            if route is not None:
                self.__topics.add(topic)
            return route

    def _remove_unused_topic_route_data(self, topic):
        # NOTE(review): set.remove raises KeyError if topic is absent —
        # callers presumably only pass known topics; confirm.
        self.__topic_route_cache.remove(topic)
        self.__topics.remove(topic)

    def _sign(self):
        """Build the signed gRPC metadata for a request."""
        return Signature.metadata(self.__client_configuration, self.__client_id)

    def _set_future_callback_result(self, callback_result):
        # hand a result to the callback_handler_thread for future completion
        if self.__callback_result_queue is not None:
            self.__callback_result_queue.put_nowait(callback_result)

    def _rpc_channel_io_loop(self):
        # the shared asyncio loop owned by RpcClient's io-loop thread
        return self.__rpc_client.get_channel_io_loop()

    """ private """

    # topic route #

    def __update_topic_route(self, topic):
        # synchronous route query: blocks on the async future's result
        try:
            future = self.__rpc_client.query_topic_route_async(self.__client_configuration.rpc_endpoints,
                                                               self.__topic_route_req(topic), metadata=self._sign(),
                                                               timeout=self.__client_configuration.request_timeout)
            res = future.result()
            route = self.__handle_topic_route_res(res, topic)
            return route
        except Exception as e:
            logger.error(f"update topic route error, topic:{topic}, {e}")
            raise e

    def __update_topic_route_async(self, topic):
        # fire-and-forget route refresh; result handled in the done-callback
        callback = functools.partial(self.__query_topic_route_async_callback, topic=topic)
        future = self.__rpc_client.query_topic_route_async(self.__client_configuration.rpc_endpoints,
                                                           self.__topic_route_req(topic),
                                                           metadata=self._sign(),
                                                           timeout=self.__client_configuration.request_timeout)
        future.add_done_callback(callback)

    def __query_topic_route_async_callback(self, future, topic):
        try:
            res = future.result()
            self.__handle_topic_route_res(res, topic)
        except Exception as e:
            raise e

    def __topic_route_req(self, topic):
        # build a QueryRouteRequest for *topic* in the configured namespace
        req = QueryRouteRequest()
        req.topic.name = topic
        req.topic.resource_namespace = self.__client_configuration.namespace
        req.endpoints.CopyFrom(self.__client_configuration.rpc_endpoints.endpoints)
        return req

    def __handle_topic_route_res(self, res, topic):
        # validate the response, cache the new route, connect to any new
        # endpoints, and let the subclass refresh its queue selector
        if res is not None:
            MessagingResultChecker.check(res.status)
            if res.status.code == Code.OK:
                topic_route = TopicRouteData(res.message_queues)
                logger.debug(f"{self.__str__()} update topic:{topic} route, route info: {topic_route.__str__()}")
                # if topic route has new endpoint, connect
                self.__check_topic_route_endpoints_changed(topic, topic_route)
                self.__topic_route_cache.put(topic, topic_route)
                # producer or consumer update its queue selector
                self._update_queue_selector(topic, topic_route)
                return topic_route
        else:
            raise Exception(f"query topic route exception, topic:{topic}")

    # heartbeat #

    def __heartbeat_async(self, endpoints):
        req = self._heartbeat_req()
        callback = functools.partial(self.__heartbeat_callback, endpoints=endpoints)
        future = self.__rpc_client.heartbeat_async(endpoints, req, metadata=self._sign(),
                                                   timeout=self.__client_configuration.request_timeout)
        future.add_done_callback(callback)

    def __heartbeat_callback(self, future, endpoints):
        try:
            res = future.result()
            if res is not None and res.status.code == Code.OK:
                logger.info(f"{self.__str__()} send heartbeat to {endpoints.__str__()} success.")
            else:
                if res is not None:
                    logger.error(
                        f"{self.__str__()} send heartbeat to {endpoints.__str__()} error, code:{res.status.code}, message:{res.status.message}.")
                else:
                    logger.error(f"{self.__str__()} send heartbeat to {endpoints.__str__()} error, response is none.")
        except Exception as e:
            logger.error(f"{self.__str__()} send heartbeat to {endpoints.__str__()} exception, e: {e}")
            raise e

    # sync settings #

    def __setting_write(self, endpoints):
        # push this client's settings over the telemetry stream to *endpoints*
        req = self._sync_setting_req(endpoints)
        callback = functools.partial(self.__setting_write_callback, endpoints=endpoints)
        future = self.__rpc_client.telemetry_write_async(endpoints, req)
        future.add_done_callback(callback)

    def __retrieve_telemetry_stream_stream_call(self, endpoints, rebuild=False):
        # ensure a telemetry stream-stream call exists for *endpoints*;
        # timeout is effectively "one year", i.e. long-lived
        # NOTE(review): "stream_steam_call" in the log text below looks like a
        # typo for "stream_stream_call"; left unchanged (runtime string).
        try:
            self.__rpc_client.telemetry_stream(endpoints, self, metadata=self._sign(), timeout=60 * 60 * 24 * 365,
                                               rebuild=rebuild)
        except Exception as e:
            logger.error(
                f"{self.__str__()} rebuild stream_steam_call to {endpoints.__str__()} exception: {e}" if rebuild else f"{self.__str__()} create stream_steam_call to {endpoints.__str__()} exception: {e}")

    def __setting_write_callback(self, future, endpoints):
        # on any stream error, rebuild the stream-stream call for next time
        try:
            future.result()
            logger.debug(f"{self.__str__()} send setting to {endpoints.__str__()} success.")
        except InvalidStateError as e:
            logger.warn(f"{self.__str__()} send setting to {endpoints.__str__()} occurred InvalidStateError: {e}")
            self.__retrieve_telemetry_stream_stream_call(endpoints, rebuild=True)
        except AioRpcError as e:
            logger.warn(f"{self.__str__()} send setting to {endpoints.__str__()} occurred AioRpcError: {e}")
            self.__retrieve_telemetry_stream_stream_call(endpoints, rebuild=True)
        except Exception as e:
            logger.error(f"{self.__str__()} send setting to {endpoints.__str__()} exception: {e}")
            self.__retrieve_telemetry_stream_stream_call(endpoints, rebuild=True)

    # metrics #

    def reset_metric(self, metric):
        """Reset client metrics from a server-pushed settings response."""
        self.__client_metrics.reset_metrics(metric)

    # client termination #

    def __client_termination(self, endpoints):
        # synchronous NotifyClientTermination to one endpoint
        req = self._notify_client_termination_req()
        future = self.__rpc_client.notify_client_termination(endpoints, req, metadata=self._sign(),
                                                             timeout=self.__client_configuration.request_timeout)
        future.result()

    # others ##

    def __get_all_endpoints(self):
        # merge the endpoint maps of every cached topic route
        endpoints_map = {}
        all_route = self.__topic_route_cache.values()
        for topic_route in all_route:
            endpoints_map.update(topic_route.all_endpoints())
        return endpoints_map

    def __check_topic_route_endpoints_changed(self, topic, route):
        # connect (channel + telemetry stream + settings) to endpoints that
        # appear in the new route but are not known yet
        old_route = self.__topic_route_cache.get(topic)
        if old_route is None or old_route != route:
            logger.info(
                f"topic:{topic} route changed for {self.__str__()}. old route is {old_route}, new route is {route}")
            all_endpoints = self.__get_all_endpoints()  # the existing endpoints
            topic_route_endpoints = route.all_endpoints()  # the latest endpoints for topic route
            diff = set(topic_route_endpoints.keys()).difference(
                set(all_endpoints.keys()))  # the diff between existing and latest
            # create grpc channel, stream_stream_call for new endpoints, send setting to new endpoints
            for address in diff:
                endpoints = topic_route_endpoints[address]
                self.__retrieve_telemetry_stream_stream_call(endpoints)
                self.__setting_write(endpoints)

    def __notify_client_termination(self):
        # best-effort: termination failures are logged, not raised
        all_endpoints = self.__get_all_endpoints()
        for endpoints in all_endpoints.values():
            try:
                self.__client_termination(endpoints)
            except Exception as e:
                logger.error(f"notify client termination to {endpoints} exception: {e}")

    def __stop_client_threads(self):
        # signal every scheduler loop to exit (flag + event wake-up), join
        # each thread, then reset all thread/event fields to None
        self.__client_thread_task_enabled = False
        if self.__topic_route_scheduler is not None:
            if self.__topic_route_scheduler_threading_event is not None:
                self.__topic_route_scheduler_threading_event.set()
            self.__topic_route_scheduler.join()

        if self.__heartbeat_scheduler is not None:
            if self.__heartbeat_scheduler_threading_event is not None:
                self.__heartbeat_scheduler_threading_event.set()
            self.__heartbeat_scheduler.join()

        if self.__sync_setting_scheduler is not None:
            if self.__sync_setting_scheduler_threading_event is not None:
                self.__sync_setting_scheduler_threading_event.set()
            self.__sync_setting_scheduler.join()

        if self.__clear_idle_rpc_channels_scheduler is not None:
            if self.__clear_idle_rpc_channels_threading_event is not None:
                self.__clear_idle_rpc_channels_threading_event.set()
            self.__clear_idle_rpc_channels_scheduler.join()

        if self.__callback_result_thread is not None:
            # sentinel unblocks the queue.get() in __handle_callback
            self._set_future_callback_result(CallbackResult.end_callback_thread_result())
            self.__callback_result_thread.join()

        self.__topic_route_scheduler = None
        self.__topic_route_scheduler_threading_event = None
        self.__heartbeat_scheduler = None
        self.__heartbeat_scheduler_threading_event = None
        self.__sync_setting_scheduler = None
        self.__sync_setting_scheduler_threading_event = None
        self.__clear_idle_rpc_channels_scheduler = None
        self.__clear_idle_rpc_channels_threading_event = None
        self.__callback_result_thread = None

    """ property """

    @property
    def is_running(self):
        return self.__is_running

    @property
    def client_id(self):
        return self.__client_id

    @property
    def topics(self):
        return self.__topics

    @property
    def client_configuration(self):
        return self.__client_configuration

    @property
    def client_type(self):
        return self.__client_type

    @property
    def rpc_client(self):
        return self.__rpc_client

    @property
    def client_metrics(self):
        return self.__client_metrics
import socket

from rocketmq.grpc_protocol import AddressScheme, Endpoints
from rocketmq.v5.client.connection import RpcEndpoints
from rocketmq.v5.log import logger


class Credentials:
    """Access key / secret key pair used to sign client requests."""

    def __init__(self, ak, sk):
        self.__ak = ak
        self.__sk = sk

    @property
    def ak(self):
        return self.__ak

    @property
    def sk(self):
        return self.__sk


class ClientConfiguration:
    """Client-side settings: server endpoints, credentials, namespace, timeout.

    The endpoints string is 'host:port' entries separated by ';', each entry
    optionally prefixed with 'http://' or 'https://'.
    """

    def __init__(self, endpoints: str, credentials: Credentials, namespace="", request_timeout=3):
        self.__rpc_endpoints = RpcEndpoints(ClientConfiguration.__parse_endpoints(endpoints))
        self.__credentials = credentials
        self.__request_timeout = request_timeout  # seconds
        self.__namespace = namespace

    @staticmethod
    def __parse_endpoints(endpoints_str):
        """Parse the endpoints string into an Endpoints proto, or None on failure."""
        if not endpoints_str:
            return None
        try:
            result = Endpoints()
            pieces = endpoints_str.split(";")
            # scheme is inferred from the host part of the first entry only
            first_host = ClientConfiguration.__parse_endpoints_prefix(pieces[0].split(":")[0])
            result.scheme = ClientConfiguration.__parse_endpoints_scheme_type(first_host)
            for piece in pieces:
                if not piece:
                    continue
                entry = result.addresses.add()
                stripped = ClientConfiguration.__parse_endpoints_prefix(piece)
                parts = stripped.split(":")
                entry.host = parts[0]
                entry.port = int(parts[1])
            return result
        except Exception as e:
            logger.error(f"client configuration parse {endpoints_str} exception: {e}")
            return None

    @staticmethod
    def __parse_endpoints_scheme_type(host):
        """Classify *host* as IPv4, IPv6, or a domain name via inet_pton probes."""
        for family, scheme in ((socket.AF_INET, AddressScheme.IPv4),
                               (socket.AF_INET6, AddressScheme.IPv6)):
            try:
                socket.inet_pton(family, host)
                return scheme
            except socket.error:
                continue
        return AddressScheme.DOMAIN_NAME

    @staticmethod
    def __parse_endpoints_prefix(endpoints_str):
        """Strip a leading 'http://' or 'https://' if present."""
        for prefix in ("http://", "https://"):
            if endpoints_str.startswith(prefix):
                return endpoints_str[len(prefix):]
        return endpoints_str

    """ property """

    @property
    def rpc_endpoints(self) -> RpcEndpoints:
        return self.__rpc_endpoints

    @property
    def namespace(self):
        return self.__namespace

    @property
    def credentials(self):
        return self.__credentials

    @property
    def request_timeout(self):
        return self.__request_timeout
import asyncio
import time

import grpc
from grpc import ChannelConnectivity, aio
from grpc.aio import AioRpcError
from rocketmq.grpc_protocol import (Address, AddressScheme, Code, Endpoints,
                                    MessagingServiceStub)
from rocketmq.v5.exception import (IllegalArgumentException,
                                   UnsupportedException)
from rocketmq.v5.log import logger


class RpcAddress:
    """Hashable, orderable wrapper around one proto Address (host:port)."""

    def __init__(self, address: Address):
        self.__host = address.host
        self.__port = address.port

    def __hash__(self) -> int:
        # identity is the "host:port" string
        return hash(self.__str__())

    def __str__(self) -> str:
        return self.__host + ":" + str(self.__port)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, RpcAddress):
            return False
        return self.__str__() == (other.__str__())

    def __lt__(self, other):
        # ordering by string form; used to sort addresses into a stable facade
        if not isinstance(other, RpcAddress):
            return False
        return self.__str__() < (other.__str__())

    def address0(self):
        """Rebuild a proto Address from this wrapper."""
        address = Address()
        address.host = self.__host
        address.port = self.__port
        return address


class RpcEndpoints:
    """A set of broker addresses plus a gRPC target string ("facade")."""

    def __init__(self, endpoints: Endpoints):
        self.__endpoints = endpoints
        self.__scheme = endpoints.scheme
        self.__addresses = set(map(lambda address: RpcAddress(address), endpoints.addresses))
        if self.__scheme == AddressScheme.DOMAIN_NAME and len(self.__addresses) > 1:
            raise UnsupportedException("Multiple addresses not allowed in domain schema")
        # calls the __facade() method, then rebinds the same mangled name as
        # an instance attribute — the attribute shadows the method afterwards
        # NOTE(review): __facade() returns a bare "" (one value, not a pair)
        # in its empty/unspecified branch, which this 2-tuple unpacking would
        # reject with ValueError — confirm that branch is unreachable here.
        self.__facade, self.__endpoint_desc = self.__facade()

    def __hash__(self) -> int:
        return hash(str(self.__scheme) + ":" + self.__facade)

    def __eq__(self, other):
        if not isinstance(other, RpcEndpoints):
            return False
        return self.__facade == other.__facade and self.__scheme == other.__scheme

    def __str__(self):
        return self.__endpoint_desc

    """ private """

    def __facade(self):
        # build (grpc_target, human_desc): target is "<prefix><addr>,<addr>…"
        # with the prefix chosen from the address scheme
        if self.__scheme is None or len(
                self.__addresses) == 0 or self.__scheme == AddressScheme.ADDRESS_SCHEME_UNSPECIFIED:
            return ""

        prefix = "dns:"
        if self.__scheme == AddressScheme.IPv4:
            prefix = "ipv4:"
        elif self.__scheme == AddressScheme.IPv6:
            prefix = "ipv6:"

        # formatted as: ip:port, ip:port, ip:port
        sorted_list = sorted(self.__addresses)
        ret = ""
        for address in sorted_list:
            ret = ret + address.__str__() + ","
        # strip the trailing comma from both forms
        return prefix + ret[0: len(ret) - 1], ret[0: len(ret) - 1]

    """ property """

    @property
    def endpoints(self):
        return self.__endpoints

    @property
    def facade(self):
        # instance attribute set in __init__ (shadows the method of the same name)
        return self.__facade


class RpcStreamStreamCall:
    """Wrapper around one bidirectional telemetry stream to an endpoint."""

    def __init__(self, endpoints: RpcEndpoints, stream_stream_call, handler):
        self.__endpoints = endpoints
        self.__stream_stream_call = stream_stream_call  # grpc stream_stream_call
        self.__handler = handler  # handler responsible for handling data from the server side stream.

    async def start_stream_read(self):
        # start reading from a stream, including send setting result, sever check for transaction message
        if self.__stream_stream_call is not None:
            try:
                while True:
                    res = await self.__stream_stream_call.read()
                    if res.HasField("settings"):
                        # read a response for send setting result
                        if res is not None and res.status.code == Code.OK:
                            logger.debug(f"async setting success. response status code: {res.status.code}")
                            if res.settings is not None and res.settings.metric is not None:
                                # reset metrics if needed
                                self.__handler.reset_metric(res.settings.metric)
                    elif res.HasField("recover_orphaned_transaction_command"):
                        # sever check for a transaction message
                        if self.__handler is not None:
                            transaction_id = res.recover_orphaned_transaction_command.transaction_id
                            message = res.recover_orphaned_transaction_command.message
                            await self.__handler.on_recover_orphaned_transaction_command(self.__endpoints, message,
                                                                                         transaction_id)
            except AioRpcError as e:
                # stream broken by the peer or network; caller rebuilds on demand
                logger.warn(
                    f"stream read from endpoints {self.__endpoints.__str__()} occurred AioRpcError. code: {e.code()}, message: {e.details()}")
            except Exception as e:
                logger.error(f"stream read from endpoints {self.__endpoints.__str__()} exception, {e}")

    async def stream_write(self, req):
        """Write one request onto the stream; propagates any write error."""
        if self.__stream_stream_call is not None:
            try:
                await self.__stream_stream_call.write(req)
            except Exception as e:
                raise e

    def close(self):
        # cancel the underlying grpc call; safe to call more than once
        if self.__stream_stream_call is not None:
            self.__stream_stream_call.cancel()


class RpcChannel:
    """One grpc.aio channel (+ stub and optional telemetry stream) per endpoint set."""

    def __init__(self, endpoints: RpcEndpoints, tls_enabled=False):
        self.__async_channel = None
        self.__async_stub = None
        self.__telemetry_stream_stream_call = None
        self.__tls_enabled = tls_enabled
        self.__endpoints = endpoints
        # last-used timestamp (epoch seconds); drives idle-channel cleanup
        self.__update_time = int(time.time())

    def create_channel(self, loop):
        # create grpc channel with the given loop
        asyncio.set_event_loop(loop)
        self.__create_aio_channel()

    def close_channel(self, loop):
        """Close the telemetry stream and the grpc channel, then null all fields."""
        if self.__async_channel is not None:
            # close stream_stream_call
            if self.__telemetry_stream_stream_call is not None:
                self.__telemetry_stream_stream_call.close()
                self.__telemetry_stream_stream_call = None
                logger.info(f"channel[{self.__endpoints.__str__()}] close stream_stream_call success.")
            if self.channel_state() is not ChannelConnectivity.SHUTDOWN:
                # close grpc channel
                asyncio.run_coroutine_threadsafe(self.__async_channel.close(), loop)
                self.__async_channel = None
                logger.info(f"channel[{self.__endpoints.__str__()}] close success.")
        self.__async_stub = None
        self.__endpoints = None
        self.__update_time = None

    def channel_state(self, wait_for_ready=True):
        return self.__async_channel.get_state(wait_for_ready)

    def register_telemetry_stream_stream_call(self, stream_stream_call, handler):
        # replace any existing stream with a fresh wrapper
        if self.__telemetry_stream_stream_call is not None:
            self.__telemetry_stream_stream_call.close()
        self.__telemetry_stream_stream_call = RpcStreamStreamCall(self.__endpoints, stream_stream_call, handler)

    """ private """

    def __create_aio_channel(self):
        # builds the secure/insecure aio channel and the MessagingService stub
        try:
            if self.__endpoints is None:
                raise IllegalArgumentException("create_aio_channel exception, endpoints is None")
            else:
                # no client-side retries; unlimited message sizes in both directions
                options = [('grpc.enable_retries', 0), ("grpc.max_send_message_length", -1),
                           ("grpc.max_receive_message_length", -1)]
                if self.__tls_enabled:
                    self.__async_channel = aio.secure_channel(self.__endpoints.facade, grpc.ssl_channel_credentials(),
                                                              options)
                else:
                    self.__async_channel = aio.insecure_channel(self.__endpoints.facade, options)
                self.__async_stub = MessagingServiceStub(self.__async_channel)
                logger.debug(
                    f"create_aio_channel to [{self.__endpoints.__str__()}] success. channel state:{self.__async_channel.get_state()}")
        except Exception as e:
            logger.error(f"create_aio_channel to [{self.__endpoints.__str__()}] exception: {e}")
            raise e

    """ property """

    @property
    def async_stub(self):
        return self.__async_stub

    @property
    def telemetry_stream_stream_call(self):
        return self.__telemetry_stream_stream_call

    @property
    def update_time(self):
        return self.__update_time

    @update_time.setter
    def update_time(self, update_time):
        self.__update_time = update_time

import asyncio
import threading
import time
from concurrent.futures import Future

from grpc import ChannelConnectivity
from rocketmq.grpc_protocol import (AckMessageRequest,
                                    ChangeInvisibleDurationRequest,
                                    EndTransactionRequest, HeartbeatRequest,
                                    NotifyClientTerminationRequest,
                                    QueryRouteRequest, ReceiveMessageRequest,
                                    SendMessageRequest, TelemetryCommand)
from rocketmq.v5.client.connection import RpcChannel, RpcEndpoints
from rocketmq.v5.log import logger
from rocketmq.v5.util import ConcurrentMap


class RpcClient:
    """Owns the grpc aio channels (one per RpcEndpoints) and dispatches every
    MessageService call onto a single shared asyncio event loop thread.

    All grpc coroutines are scheduled with asyncio.run_coroutine_threadsafe on
    RpcClient._io_loop, so callers get a concurrent.futures.Future back.
    """

    _instance_lock = threading.Lock()  # guards one-time io-loop bootstrap
    _channel_lock = threading.Lock()  # guards the endpoints -> channel map
    _io_loop = None  # event loop for all async io, shared by all clients
    _io_loop_thread = None  # daemon thread running _io_loop forever
    RPC_CLIENT_MAX_IDLE_SECONDS = 60 * 30  # channels idle longer than this are reclaimed

    def __init__(self, tls_enable=False):
        with RpcClient._instance_lock:
            # lazily start the singleton io-loop thread; only the first
            # RpcClient instance in the process pays this cost
            if RpcClient._io_loop is None:
                initialized_event = threading.Event()
                RpcClient._io_loop_thread = threading.Thread(target=RpcClient.__init_io_loop,
                                                             args=(initialized_event,),
                                                             name="channel_io_loop_thread")
                RpcClient._io_loop_thread.daemon = True
                RpcClient._io_loop_thread.start()
                # block until the loop exists and is published to RpcClient._io_loop
                initialized_event.wait()
        self.channels = ConcurrentMap()  # RpcEndpoints -> RpcChannel
        self.__clean_idle_channel_scheduler = None
        self.__clean_idle_channel_scheduler_threading_event = None
        self.__enable_retrieve_channel = True  # flipped to False by stop()
        self.__tls_enable = tls_enable

    def retrieve_or_create_channel(self, endpoints: RpcEndpoints):
        """Return the cached channel for endpoints, creating and caching one if absent.

        Refreshes the channel's last-used timestamp so idle reclamation keeps
        hot channels alive. Raises when the client has been stopped.
        """
        if not self.__enable_retrieve_channel:
            raise Exception("RpcClient is not running.")

        try:
            # get or create a new grpc channel
            with RpcClient._channel_lock:
                channel = self.__get_channel(endpoints)
                if channel is not None:
                    channel.update_time = int(time.time())
                else:
                    channel = RpcChannel(endpoints, self.__tls_enable)
                    channel.create_channel(RpcClient.get_channel_io_loop())
                    self.__put_channel(endpoints, channel)
                return channel
        except Exception as e:
            logger.error(f"retrieve or create channel exception: {e}")
            raise  # bare raise preserves the original traceback

    def clear_idle_rpc_channels(self):
        """Close and drop every channel unused for more than RPC_CLIENT_MAX_IDLE_SECONDS."""
        now = int(time.time())
        idle_endpoints = [endpoints for endpoints, channel in self.channels.items()
                          if now - channel.update_time > RpcClient.RPC_CLIENT_MAX_IDLE_SECONDS]
        with RpcClient._channel_lock:
            for endpoints in idle_endpoints:
                logger.info(f"remove idle channel {endpoints.__str__()}")
                self.__close_rpc_channel(endpoints)
                # __close_rpc_channel only removes channels that were not already
                # SHUTDOWN; remove again so shutdown entries leave the map too
                self.channels.remove(endpoints)

    def stop(self):
        """Disable further channel retrieval and close every cached channel."""
        with RpcClient._channel_lock:
            self.__enable_retrieve_channel = False
            # NOTE(review): __close_rpc_channel mutates self.channels while we
            # iterate; assumes ConcurrentMap.keys() returns a snapshot — confirm
            all_endpoints = self.channels.keys()
            for endpoints in all_endpoints:
                self.__close_rpc_channel(endpoints)

    @staticmethod
    def get_channel_io_loop():
        return RpcClient._io_loop

    """ grpc MessageService """

    def query_topic_route_async(self, endpoints: RpcEndpoints, req: QueryRouteRequest, metadata, timeout=3):
        return RpcClient.__run_message_service_async(
            self.__query_route_async_0(endpoints, req, metadata=metadata, timeout=timeout))

    def send_message_async(self, endpoints: RpcEndpoints, req: SendMessageRequest, metadata, timeout=3):
        return RpcClient.__run_message_service_async(
            self.__send_message_0(endpoints, req, metadata=metadata, timeout=timeout))

    def receive_message_async(self, endpoints: RpcEndpoints, req: ReceiveMessageRequest, metadata, timeout=3):
        return RpcClient.__run_message_service_async(
            self.__receive_message_0(endpoints, req, metadata=metadata, timeout=timeout))

    def ack_message_async(self, endpoints: RpcEndpoints, req: AckMessageRequest, metadata, timeout=3):
        return RpcClient.__run_message_service_async(
            self.__ack_message_0(endpoints, req, metadata=metadata, timeout=timeout))

    def change_invisible_duration_async(self, endpoints: RpcEndpoints, req: ChangeInvisibleDurationRequest, metadata,
                                        timeout=3):
        return RpcClient.__run_message_service_async(
            self.__change_invisible_duration_0(endpoints, req, metadata=metadata, timeout=timeout))

    def heartbeat_async(self, endpoints: RpcEndpoints, req: HeartbeatRequest, metadata, timeout=3):
        return RpcClient.__run_message_service_async(
            self.__heartbeat_async_0(endpoints, req, metadata=metadata, timeout=timeout))

    def telemetry_write_async(self, endpoints: RpcEndpoints, req: TelemetryCommand):
        return RpcClient.__run_message_service_async(
            self.retrieve_or_create_channel(endpoints).telemetry_stream_stream_call.stream_write(req))

    def end_transaction_async(self, endpoints: RpcEndpoints, req: EndTransactionRequest, metadata, timeout=3):
        return RpcClient.__run_message_service_async(
            self.__end_transaction_0(endpoints, req, metadata=metadata, timeout=timeout))

    def notify_client_termination(self, endpoints: RpcEndpoints, req: NotifyClientTerminationRequest, metadata,
                                  timeout=3):
        return RpcClient.__run_message_service_async(
            self.__notify_client_termination_0(endpoints, req, metadata=metadata, timeout=timeout))

    def telemetry_stream(self, endpoints: RpcEndpoints, client, metadata, timeout=3000, rebuild=False):
        """Create (or rebuild) the bidirectional telemetry stream for endpoints
        and start its background read loop on the shared io loop."""
        channel = self.retrieve_or_create_channel(endpoints)
        if channel.telemetry_stream_stream_call is None or rebuild is True:
            stream = channel.async_stub.Telemetry(metadata=metadata, timeout=timeout)
            channel.register_telemetry_stream_stream_call(stream, client)
            asyncio.run_coroutine_threadsafe(channel.telemetry_stream_stream_call.start_stream_read(),
                                             RpcClient.get_channel_io_loop())
            # fixed log typo: "stream_steam_call" -> "stream_stream_call"
            logger.info(
                f"{client.__str__()} rebuild stream_stream_call to {endpoints.__str__()} success."
                if rebuild else f"{client.__str__()} create stream_stream_call to {endpoints.__str__()} success.")

    def end_transaction_for_server_check(self, endpoints: RpcEndpoints, req: EndTransactionRequest, metadata,
                                         timeout=3):
        """Build the EndTransaction coroutine used when the server checks a transaction.

        NOTE(review): the coroutine is returned un-awaited; the caller is
        expected to schedule it on the io loop — confirm against call sites.
        """
        try:
            return self.__end_transaction_0(endpoints, req, metadata=metadata, timeout=timeout)
        except Exception as e:
            logger.error(
                f"end transaction exception, topic:{req.topic.name}, message_id:{req.message_id}, transaction_id:{req.transaction_id}: {e}")
            raise

    """ MessageService.stub impl """

    async def __query_route_async_0(self, endpoints: RpcEndpoints, req: QueryRouteRequest, metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.QueryRoute(req, metadata=metadata,
                                                                                      timeout=timeout)

    async def __send_message_0(self, endpoints: RpcEndpoints, req: SendMessageRequest, metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.SendMessage(req, metadata=metadata,
                                                                                       timeout=timeout)

    async def __receive_message_0(self, endpoints: RpcEndpoints, req: ReceiveMessageRequest, metadata, timeout=3):
        # intentionally NOT awaited: ReceiveMessage is unary-stream, so the stub
        # returns the streaming call object which the consumer iterates itself
        return self.retrieve_or_create_channel(endpoints).async_stub.ReceiveMessage(req, metadata=metadata,
                                                                                    timeout=timeout)

    async def __ack_message_0(self, endpoints: RpcEndpoints, req: AckMessageRequest, metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.AckMessage(req, metadata=metadata,
                                                                                      timeout=timeout)

    async def __heartbeat_async_0(self, endpoints: RpcEndpoints, req: HeartbeatRequest, metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.Heartbeat(req, metadata=metadata,
                                                                                     timeout=timeout)

    async def __change_invisible_duration_0(self, endpoints: RpcEndpoints, req: ChangeInvisibleDurationRequest,
                                            metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.ChangeInvisibleDuration(req,
                                                                                                   metadata=metadata,
                                                                                                   timeout=timeout)

    async def __end_transaction_0(self, endpoints: RpcEndpoints, req: EndTransactionRequest, metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.EndTransaction(req, metadata=metadata,
                                                                                          timeout=timeout)

    async def __notify_client_termination_0(self, endpoints: RpcEndpoints, req: NotifyClientTerminationRequest,
                                            metadata, timeout=3):
        return await self.retrieve_or_create_channel(endpoints).async_stub.NotifyClientTermination(req,
                                                                                                   metadata=metadata,
                                                                                                   timeout=timeout)

    """ private """

    def __get_channel(self, endpoints: RpcEndpoints) -> RpcChannel:
        return self.channels.get(endpoints)

    def __put_channel(self, endpoints: RpcEndpoints, channel):
        self.channels.put(endpoints, channel)

    def __close_rpc_channel(self, endpoints: RpcEndpoints):
        """Close the channel for endpoints (if any, and not already SHUTDOWN)
        and drop it from the map. Caller must hold _channel_lock."""
        channel = self.__get_channel(endpoints)
        if channel is not None and channel.channel_state() is not ChannelConnectivity.SHUTDOWN:
            try:
                channel.close_channel(RpcClient.get_channel_io_loop())
                self.channels.remove(endpoints)
            except Exception as e:
                logger.error(f"close channel {endpoints.__str__()} error: {e}")
                raise

    @staticmethod
    def __init_io_loop(initialized_event):
        # start a thread, set an event loop to the thread. all clients use the
        # same event loop for io operation; the loop is init once and runs
        # forever until the process ends. RpcClient uses RpcClient._io_loop to
        # execute every grpc call.
        try:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            RpcClient._io_loop = loop
            initialized_event.set()
            logger.info("start io loop thread success.")
            loop.run_forever()
        except Exception as e:
            logger.error(f"start io loop thread exception: {e}")

    @staticmethod
    def __run_message_service_async(func):
        """Schedule coroutine `func` on the shared io loop; on scheduling
        failure return an already-failed Future instead of raising."""
        try:
            # execute grpc call in RpcClient._io_loop
            return asyncio.run_coroutine_threadsafe(func, RpcClient.get_channel_io_loop())
        except Exception as e:
            future = Future()
            future.set_exception(e)
            return future
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import threading
import time

from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import \
    OTLPMetricExporter
from opentelemetry.metrics import Histogram
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.metrics.view import (ExplicitBucketHistogramAggregation,
                                            View)
from opentelemetry.sdk.resources import Resource
from rocketmq.grpc_protocol import Metric
from rocketmq.v5.client.connection import RpcEndpoints
from rocketmq.v5.log import logger
from rocketmq.v5.model import HistogramEnum, MessageMetricType, MetricContext


class ClientMetrics:
    """Exports client-side send metrics (send cost-time histogram) to the
    server-assigned collector endpoints via OpenTelemetry OTLP/grpc."""

    METRIC_EXPORTER_RPC_TIMEOUT = 5  # seconds, per OTLP export rpc
    METRIC_READER_INTERVAL = 60000  # export interval in ms, 1 minute
    METRIC_INSTRUMENTATION_NAME = "org.apache.rocketmq.message"

    def __init__(self, client_id, configuration):
        self.__enabled = False  # whether metrics export is currently on
        self.__endpoints = None  # RpcEndpoints of the metrics collector
        self.__client_id = client_id
        self.__client_configuration = configuration
        self.__send_success_cost_time_instrument = None  # Histogram for send RT
        self.__meter_provider = None
        self.__metric_lock = threading.Lock()  # serializes reset_metrics

    def reset_metrics(self, metric: Metric):
        """Apply a server-pushed metric setting: start, stop or re-point the exporter."""
        # if metrics endpoints changed or metric.on from False to True, start a new client metrics
        with self.__metric_lock:
            if self.__satisfy(metric):
                return  # nothing changed, keep current provider

            # metric.on from True to False, shutdown client metrics
            if not metric.on:
                self.__meter_provider_shutdown()
                self.__enabled = False
                self.__endpoints = None
                self.__send_success_cost_time_instrument = None
                return

            # endpoints changed (or metrics newly enabled): shut the previous
            # provider down first so its periodic reader thread stops exporting
            # to the old endpoints instead of leaking
            self.__meter_provider_shutdown()
            self.__enabled = metric.on
            self.__endpoints = RpcEndpoints(metric.endpoints)
            self.__meter_provider_start()
            logger.info(f"client:{self.__client_id} start metric provider success.")

    """ send metric """

    def send_before(self, topic):
        """Begin timing one send; returns the MetricContext to hand to send_after."""
        send_context = MetricContext(MessageMetricType.SEND)
        # record send message time, in ms with 2-decimal precision
        start_timestamp = round(time.time() * 1000, 2)
        send_context.put_attr("send_stopwatch", start_timestamp)
        send_context.put_attr("topic", topic)
        send_context.put_attr("client_id", self.__client_id)
        return send_context

    def send_after(self, send_context: MetricContext, success: bool):
        """Finish timing one send and record its cost with the success/failure tag.

        Invalid contexts are logged and ignored rather than raised, so metrics
        can never break the send path.
        """
        if send_context is None:
            logger.warn("metrics do send after exception. send_context must not be none.")
            return

        if send_context.metric_type != MessageMetricType.SEND:
            logger.warn(
                f"metric type must be MessageMetricType.SEND. current send_context type is {send_context.metric_type}")
            return

        if send_context.get_attr("send_stopwatch") is None:
            logger.warn("metrics do send after exception. send_stopwatch must not be none.")
            return

        if send_context.get_attr("topic") is None:
            logger.warn("metrics do send after exception. topic must not be none.")
            return

        if send_context.get_attr("client_id") is None:
            send_context.put_attr("client_id", self.__client_id)

        # record send RT and result
        start_timestamp = send_context.get_attr("send_stopwatch")
        cost = round(time.time() * 1000, 2) - start_timestamp
        send_context.put_attr("invocation_status", "success" if success else "failure")
        send_context.remove_attr("send_stopwatch")  # not a metric attribute
        self.__record_send_success_cost_time(send_context, cost)

    """ private """

    def __satisfy(self, metric: Metric):
        """Return True when the current state already satisfies `metric`
        (i.e. no provider restart/shutdown is needed)."""
        if metric.endpoints is None:
            return True
        # if metrics endpoints changed, return False
        if self.__enabled and metric.on and self.__endpoints == RpcEndpoints(metric.endpoints):
            return True
        return not self.__enabled and not metric.on

    def __meter_provider_shutdown(self):
        if self.__meter_provider is not None:
            try:
                self.__meter_provider.shutdown()
                self.__meter_provider = None
            except Exception as e:
                logger.error(f"meter provider shutdown exception:{e}")

    def __meter_provider_start(self):
        """Build the OTLP exporter, periodic reader, histogram view and meter
        provider, and create the send cost-time instrument."""
        if self.__endpoints is None:
            logger.warn(f"client:{self.__client_id} can't create meter provider, because endpoints is none.")
            return

        try:
            # setup OTLP exporter
            exporter = OTLPMetricExporter(endpoint=self.__endpoints.__str__(), insecure=True,
                                          timeout=ClientMetrics.METRIC_EXPORTER_RPC_TIMEOUT)
            # create a metric reader and set the export interval
            reader = PeriodicExportingMetricReader(exporter,
                                                   export_interval_millis=ClientMetrics.METRIC_READER_INTERVAL)
            # create an empty resource
            resource = Resource.get_empty()
            # create view with the predefined bucket boundaries
            send_cost_time_view = View(instrument_type=Histogram,
                                       instrument_name=HistogramEnum.SEND_COST_TIME.histogram_name,
                                       aggregation=ExplicitBucketHistogramAggregation(
                                           HistogramEnum.SEND_COST_TIME.buckets))
            # create MeterProvider
            self.__meter_provider = MeterProvider(metric_readers=[reader], resource=resource,
                                                  views=[send_cost_time_view])
            # define the histogram instruments
            self.__send_success_cost_time_instrument = self.__meter_provider.get_meter(
                ClientMetrics.METRIC_INSTRUMENTATION_NAME).create_histogram(HistogramEnum.SEND_COST_TIME.histogram_name)
        except Exception as e:
            logger.error(f"client:{self.__client_id} start meter provider exception: {e}")

    def __record_send_success_cost_time(self, context, amount):
        if self.__enabled:
            try:
                # record send message cost time and result
                self.__send_success_cost_time_instrument.record(amount, context.attributes)
            except Exception as e:
                logger.error(f"record send message cost time exception, e:{e}")
+ +from .simple_consumer import SimpleConsumer + +__all__ = [ + "SimpleConsumer", +] diff --git a/python/rocketmq/v5/consumer/simple_consumer.py b/python/rocketmq/v5/consumer/simple_consumer.py new file mode 100644 index 000000000..33e936c41 --- /dev/null +++ b/python/rocketmq/v5/consumer/simple_consumer.py @@ -0,0 +1,411 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

import asyncio
import functools
import threading
from concurrent.futures import Future

from rocketmq.grpc_protocol import (AckMessageEntry, AckMessageRequest,
                                    ChangeInvisibleDurationRequest, ClientType,
                                    HeartbeatRequest,
                                    NotifyClientTerminationRequest,
                                    ReceiveMessageRequest, Settings,
                                    Subscription, TelemetryCommand)
from rocketmq.v5.client import Client, ClientConfiguration
from rocketmq.v5.client.balancer import QueueSelector
from rocketmq.v5.exception import (IllegalArgumentException,
                                   IllegalStateException)
from rocketmq.v5.log import logger
from rocketmq.v5.model import CallbackResult, FilterExpression, Message
from rocketmq.v5.util import (AtomicInteger, ConcurrentMap,
                              MessagingResultChecker, Misc)


class SimpleConsumer(Client):
    """Consumer that explicitly receives, acks, and changes the invisible
    duration of messages (pull-style; no push/listener model).

    Topics are consumed round-robin; each receive long-polls one queue of the
    selected topic for up to `await_duration` seconds.
    """

    def __init__(self, client_configuration: ClientConfiguration, consumer_group, subscription: dict = None,
                 await_duration=20):
        """:param subscription: optional initial {topic: FilterExpression} map.
        :param await_duration: long-polling timeout in seconds for receive.
        :raises IllegalArgumentException: on empty/invalid consumer_group or None await_duration.
        """
        if consumer_group is None or consumer_group.strip() == '':
            raise IllegalArgumentException("consumerGroup should not be null")
        if Misc.is_valid_consumer_group(consumer_group) is False:
            raise IllegalArgumentException(
                f"consumerGroup does not match the regex [regex={Misc.CONSUMER_GROUP_PATTERN}]")
        if await_duration is None:
            raise IllegalArgumentException("awaitDuration should not be null")

        super().__init__(client_configuration, None if subscription is None else subscription.keys(),
                         ClientType.SIMPLE_CONSUMER)
        self.__consumer_group = consumer_group
        self.__await_duration = await_duration  # long polling timeout, seconds
        self.__subscriptions = ConcurrentMap()  # topic -> FilterExpression
        if subscription is not None:
            self.__subscriptions.update(subscription)
        self.__receive_queue_selectors = ConcurrentMap()  # topic -> QueueSelector
        self.__topic_index = AtomicInteger(0)  # round-robin cursor over subscribed topics
        self.__queue_index_lock = threading.Lock()  # NOTE(review): currently unused in this class

    def __str__(self):
        return f"{ClientType.Name(self.client_type)}:{self.consumer_group}, client_id:{self.client_id}"

    def subscribe(self, topic, filter_expression: FilterExpression = None):
        """Add a subscription for `topic` (default FilterExpression when none given).

        NOTE(review): failures are logged and swallowed, so callers cannot
        observe a failed subscribe — confirm this best-effort contract is intended.
        """
        if self.is_running is False:
            raise IllegalStateException("unable to add subscription because simple consumer is not running")

        try:
            if not self.__subscriptions.contains(topic):
                # warm up the route cache before exposing the subscription
                self._retrieve_topic_route_data(topic)
                self.__subscriptions.put(topic, filter_expression if filter_expression is not None else FilterExpression())
        except Exception as e:
            logger.error(f"subscribe exception: {e}")

    def unsubscribe(self, topic):
        """Remove the subscription for `topic` and release its cached route data."""
        if self.is_running is False:
            raise IllegalStateException("unable to remove subscription because simple consumer is not running")

        # use ConcurrentMap.contains for membership, consistent with subscribe()
        # (plain `in` relies on the map implementing __contains__)
        if self.__subscriptions.contains(topic):
            self.__subscriptions.remove(topic)
            self._remove_unused_topic_route_data(topic)

    def receive(self, max_message_num, invisible_duration):
        """Synchronously receive up to max_message_num messages; each received
        message stays invisible to others for invisible_duration seconds."""
        if self.is_running is False:
            raise IllegalStateException("unable to receive messages because simple consumer is not running")

        return self.__receive(max_message_num, invisible_duration)

    def receive_async(self, max_message_num, invisible_duration):
        """Async variant of receive(); returns a Future resolving to a list of Message."""
        if self.is_running is False:
            raise IllegalStateException("unable to receive messages because simple consumer is not running")

        return self.__receive_async(max_message_num, invisible_duration)

    def ack(self, message: Message):
        """Synchronously ack a received message so it is not redelivered."""
        if self.is_running is False:
            raise IllegalStateException("unable to ack message because simple consumer is not running")

        queue = self.__select_topic_queue(message.topic)
        return self.__ack(message, queue)

    def ack_async(self, message: Message):
        """Async variant of ack(); returns a Future."""
        if self.is_running is False:
            raise IllegalStateException("unable to ack message because simple consumer is not running")

        queue = self.__select_topic_queue(message.topic)
        return self.__ack_async(message, queue)

    def change_invisible_duration(self, message: Message, invisible_duration):
        """Synchronously extend/shrink a message's invisible time to invisible_duration seconds."""
        if self.is_running is False:
            raise IllegalStateException("unable to change invisible duration because simple consumer is not running")

        queue = self.__select_topic_queue(message.topic)
        return self.__change_invisible_duration(message, queue, invisible_duration)

    def change_invisible_duration_async(self, message: Message, invisible_duration):
        """Async variant of change_invisible_duration(); returns a Future."""
        if self.is_running is False:
            raise IllegalStateException("unable to change invisible duration because simple consumer is not running")

        queue = self.__select_topic_queue(message.topic)
        return self.__change_invisible_duration_async(message, queue, invisible_duration)

    """ override """

    def _start_success(self):
        logger.info(f"{self.__str__()} start success.")

    def _start_failure(self):
        logger.info(f"{self.__str__()} start failed.")

    def _sync_setting_req(self, endpoints):
        """Build the TelemetryCommand carrying this consumer's settings
        (group, subscriptions, long-polling timeout) for `endpoints`."""
        subscription = Subscription()
        subscription.group.name = self.__consumer_group
        subscription.group.resource_namespace = self.client_configuration.namespace
        subscription.fifo = False
        subscription.long_polling_timeout.seconds = self.__await_duration
        items = self.__subscriptions.items()
        for topic, expression in items:
            sub_entry = subscription.subscriptions.add()
            sub_entry.topic.name = topic
            sub_entry.topic.resource_namespace = self.client_configuration.namespace
            sub_entry.expression.type = expression.filter_type

        settings = Settings()
        settings.client_type = self.client_type
        settings.access_point.CopyFrom(endpoints.endpoints)
        settings.request_timeout.seconds = self.client_configuration.request_timeout
        settings.subscription.CopyFrom(subscription)
        settings.user_agent.language = 6  # NOTE(review): presumably Language.PYTHON in the proto enum — confirm
        settings.user_agent.version = "5.0.1.1"
        settings.user_agent.platform = Misc.get_os_description()
        settings.user_agent.hostname = Misc.get_local_ip()
        settings.metric.on = False

        cmd = TelemetryCommand()
        cmd.settings.CopyFrom(settings)
        return cmd

    def _heartbeat_req(self):
        req = HeartbeatRequest()
        req.client_type = self.client_type
        req.group.name = self.__consumer_group
        req.group.resource_namespace = self.client_configuration.namespace
        return req

    def _notify_client_termination_req(self):
        req = NotifyClientTerminationRequest()
        req.group.resource_namespace = self.client_configuration.namespace
        req.group.name = self.__consumer_group
        return req

    def _update_queue_selector(self, topic, topic_route):
        queue_selector = self.__receive_queue_selectors.get(topic)
        if queue_selector is None:
            return
        queue_selector.update(topic_route)

    def shutdown(self):
        # fixed log typo: "begin to to shutdown" -> "begin to shutdown"
        logger.info(f"begin to shutdown {self.__str__()}.")
        super().shutdown()
        logger.info(f"shutdown {self.__str__()} success.")

    """ private """

    # receive message

    def __select_topic_for_receive(self):
        """Round-robin over subscribed topics using the shared atomic cursor."""
        try:
            # select the next topic for receive
            mod_index = self.__topic_index.get_and_increment() % len(self.__subscriptions.keys())
            return list(self.__subscriptions.keys())[mod_index]
        except Exception as e:
            logger.error(f"simple consumer select topic for receive message exception: {e}")
            raise

    def __select_topic_queue(self, topic):
        """Return the next queue of `topic`, lazily creating its QueueSelector."""
        try:
            route = self._retrieve_topic_route_data(topic)
            queue_selector = self.__receive_queue_selectors.put_if_absent(topic,
                                                                          QueueSelector.simple_consumer_queue_selector(
                                                                              route))
            return queue_selector.select_next_queue()
        except Exception as e:
            logger.error(f"simple consumer select topic queue for receive message exception: {e}")
            raise

    def __receive(self, max_message_num, invisible_duration):
        """Blocking receive: issue the rpc, then drain the response stream on the io loop."""
        self.__receive_pre_check(max_message_num)
        topic = self.__select_topic_for_receive()
        queue = self.__select_topic_queue(topic)
        req = self.__receive_req(topic, queue, max_message_num, invisible_duration)
        # grant the server the full long-polling window on top of the rpc timeout
        timeout = self.client_configuration.request_timeout + self.__await_duration
        future = self.rpc_client.receive_message_async(queue.endpoints, req, metadata=self._sign(), timeout=timeout)
        read_future = asyncio.run_coroutine_threadsafe(self.__receive_message_response(future.result()),
                                                       self._rpc_channel_io_loop())
        return self.__handle_receive_message_response(read_future.result())

    def __receive_async(self, max_message_num, invisible_duration):
        """Non-blocking receive: resolve the returned Future with a list of Message."""
        self.__receive_pre_check(max_message_num)
        topic = self.__select_topic_for_receive()
        queue = self.__select_topic_queue(topic)
        req = self.__receive_req(topic, queue, max_message_num, invisible_duration)
        timeout = self.client_configuration.request_timeout + self.__await_duration
        future = self.rpc_client.receive_message_async(queue.endpoints, req, metadata=self._sign(), timeout=timeout)
        read_future = asyncio.run_coroutine_threadsafe(self.__receive_message_response(future.result()),
                                                       self._rpc_channel_io_loop())
        ret_future = Future()
        handle_send_receipt_callback = functools.partial(self.__receive_message_callback, ret_future=ret_future)
        read_future.add_done_callback(handle_send_receipt_callback)
        return ret_future

    def __receive_pre_check(self, max_message_num):
        """Validate running state, non-empty subscriptions and a positive batch size."""
        if self.is_running is False:
            raise IllegalStateException("consumer is not running now.")
        if len(self.__subscriptions.keys()) == 0:
            raise IllegalArgumentException("There is no topic to receive message")
        if max_message_num <= 0:
            raise IllegalArgumentException("max_message_num must be greater than 0")

    def __receive_req(self, topic, queue, max_message_num, invisible_duration):
        """Build a ReceiveMessageRequest for one queue of `topic`."""
        filter_expression = self.__subscriptions.get(topic)
        req = ReceiveMessageRequest()
        req.group.name = self.__consumer_group
        req.group.resource_namespace = self.client_configuration.namespace
        req.message_queue.CopyFrom(queue.message_queue0())
        req.filter_expression.type = filter_expression.filter_type
        req.filter_expression.expression = filter_expression.expression
        req.batch_size = max_message_num
        req.invisible_duration.seconds = invisible_duration
        req.long_polling_timeout.seconds = self.__await_duration
        req.auto_renew = False  # invisible time is managed explicitly by the caller
        return req

    def __receive_message_callback(self, future, ret_future):
        try:
            responses = future.result()
            messages = self.__handle_receive_message_response(responses)
            self._set_future_callback_result(CallbackResult.async_receive_callback_result(ret_future, messages))
        except Exception as e:
            self._set_future_callback_result(CallbackResult.async_receive_callback_result(ret_future, e, False))

    async def __receive_message_response(self, unary_stream_call):
        """Drain the server stream, keeping only status and message frames."""
        try:
            responses = list()
            async for res in unary_stream_call:
                if res.HasField("message") or res.HasField("status"):
                    logger.debug(f"consumer:{self.__consumer_group} receive response: {res}")
                    responses.append(res)
            return responses
        except Exception as e:
            logger.error(f"consumer:{self.__consumer_group} receive message exception: {e}")
            raise

    def __handle_receive_message_response(self, responses):
        """Split stream frames into messages + status; raise on a non-OK status."""
        messages = list()
        status = None

        for res in responses:
            if res.HasField("status"):
                logger.debug(
                    f"simple_consumer[{self.__consumer_group}] receive_message, code:{res.status.code}, message:{res.status.message}.")
                status = res.status
            elif res.HasField("message"):
                messages.append(Message().fromProtobuf(res.message))

        MessagingResultChecker.check(status)
        return messages

    # ack message

    def __ack(self, message: Message, queue):
        if self.is_running is False:
            raise IllegalStateException("consumer is not running now.")

        req = self.__ack_req(message)
        future = self.rpc_client.ack_message_async(queue.endpoints, req, metadata=self._sign())
        self.__handle_ack_result(future)

    def __ack_async(self, message: Message, queue):
        if self.is_running is False:
            raise IllegalStateException("consumer is not running now.")

        req = self.__ack_req(message)
        future = self.rpc_client.ack_message_async(queue.endpoints, req, metadata=self._sign())
        ret_future = Future()
        ack_callback = functools.partial(self.__handle_ack_result, ret_future=ret_future)
        future.add_done_callback(ack_callback)
        return ret_future

    def __ack_req(self, message: Message):
        """Build an AckMessageRequest for one message (single-entry batch)."""
        req = AckMessageRequest()
        req.group.name = self.__consumer_group
        req.group.resource_namespace = self.client_configuration.namespace
        req.topic.name = message.topic
        req.topic.resource_namespace = self.client_configuration.namespace

        msg_entry = AckMessageEntry()
        msg_entry.message_id = message.message_id
        msg_entry.receipt_handle = message.receipt_handle
        req.entries.append(msg_entry)
        return req

    def __handle_ack_result(self, future, ret_future=None):
        """Check the ack rpc status; sync path re-raises, async path resolves ret_future."""
        try:
            res = future.result()
            logger.debug(f"consumer[{self.__consumer_group}] ack response, {res.status}")
            MessagingResultChecker.check(res.status)
            if ret_future is not None:
                self._set_future_callback_result(CallbackResult.async_ack_callback_result(ret_future, None))
        except Exception as e:
            if ret_future is None:
                raise
            else:
                self._set_future_callback_result(CallbackResult.async_ack_callback_result(ret_future, e, False))

    # change_invisible

    def __change_invisible_duration(self, message: Message, queue, invisible_duration):
        if self.is_running is False:
            raise IllegalStateException("consumer is not running now.")

        req = self.__change_invisible_req(message, invisible_duration)
        future = self.rpc_client.change_invisible_duration_async(queue.endpoints, req, metadata=self._sign())
        self.__handle_change_invisible_result(future)

    def __change_invisible_duration_async(self, message: Message, queue, invisible_duration):
        # fixed: raise IllegalStateException (not IllegalArgumentException) for the
        # not-running guard, consistent with every sibling method
        if self.is_running is False:
            raise IllegalStateException("consumer is not running now.")

        req = self.__change_invisible_req(message, invisible_duration)
        future = self.rpc_client.change_invisible_duration_async(queue.endpoints, req, metadata=self._sign())
        ret_future = Future()
        change_invisible_callback = functools.partial(self.__handle_change_invisible_result, ret_future=ret_future)
        future.add_done_callback(change_invisible_callback)
        return ret_future

    def __change_invisible_req(self, message: Message, invisible_duration):
        """Build a ChangeInvisibleDurationRequest for one message."""
        req = ChangeInvisibleDurationRequest()
        req.topic.name = message.topic
        req.topic.resource_namespace = self.client_configuration.namespace
        # use the private field directly, consistent with the other request builders
        req.group.name = self.__consumer_group
        req.group.resource_namespace = self.client_configuration.namespace
        req.receipt_handle = message.receipt_handle
        req.invisible_duration.seconds = invisible_duration
        req.message_id = message.message_id
        return req

    def __handle_change_invisible_result(self, future, ret_future=None):
        """Check the rpc status; sync path re-raises, async path resolves ret_future."""
        try:
            res = future.result()
            logger.debug(f"consumer[{self.__consumer_group}] change invisible response, {res.status}")
            MessagingResultChecker.check(res.status)
            if ret_future is not None:
                self._set_future_callback_result(
                    CallbackResult.async_change_invisible_duration_callback_result(ret_future, None))
        except Exception as e:
            if ret_future is None:
                raise
            else:
                self._set_future_callback_result(
                    CallbackResult.async_change_invisible_duration_callback_result(ret_future, e, False))

    """ property """

    @property
    def consumer_group(self):
        return self.__consumer_group

    @property
    def await_duration(self):
        return self.__await_duration

    @await_duration.setter
    def await_duration(self, await_duration):
        self.__await_duration = await_duration
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .client_exception import (BadRequestException, ClientException, + ForbiddenException, IllegalArgumentException, + IllegalStateException, InternalErrorException, + NotFoundException, PayloadTooLargeException, + PaymentRequiredException, ProxyTimeoutException, + RequestHeaderFieldsTooLargeException, + TooManyRequestsException, UnauthorizedException, + UnsupportedException) + +__all__ = [ + "ClientException", + "BadRequestException", + "UnauthorizedException", + "PaymentRequiredException", + "ForbiddenException", + "NotFoundException", + "PayloadTooLargeException", + "TooManyRequestsException", + "RequestHeaderFieldsTooLargeException", + "InternalErrorException", + "ProxyTimeoutException", + "UnsupportedException", + "IllegalArgumentException", + "IllegalStateException", +] diff --git a/python/rocketmq/v5/exception/client_exception.py b/python/rocketmq/v5/exception/client_exception.py new file mode 100644 index 000000000..d42396450 --- /dev/null +++ b/python/rocketmq/v5/exception/client_exception.py @@ -0,0 +1,104 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +class ClientException(Exception): + + def __init__(self, message, code=None): + super().__init__(message) + self.__code = code + + def __str__(self): + if self.__code is not None: + return f"response code:{self.__code}, error message:{super().__str__()}" + else: + return f"error message:{super().__str__()}" + + +class BadRequestException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class UnauthorizedException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class PaymentRequiredException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class ForbiddenException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class NotFoundException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class PayloadTooLargeException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class TooManyRequestsException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class RequestHeaderFieldsTooLargeException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class InternalErrorException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class ProxyTimeoutException(ClientException): + + def __init__(self, message, code): + super().__init__(message, code) + + +class 
UnsupportedException(ClientException): + + def __init__(self, message, code=None): + super().__init__(message, code) + + +class IllegalArgumentException(ClientException): + + def __init__(self, message): + super().__init__(message) + + +class IllegalStateException(ClientException): + + def __init__(self, message): + super().__init__(message) diff --git a/python/rocketmq/foo.py b/python/rocketmq/v5/log/__init__.py similarity index 92% rename from python/rocketmq/foo.py rename to python/rocketmq/v5/log/__init__.py index a56f40308..862fa6cc8 100644 --- a/python/rocketmq/foo.py +++ b/python/rocketmq/v5/log/__init__.py @@ -13,4 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -bar = 1234 +from .log_config import logger + +__all__ = [ + "logger", +] diff --git a/python/rocketmq/v5/log/log_config.py b/python/rocketmq/v5/log/log_config.py new file mode 100644 index 000000000..1de3240ec --- /dev/null +++ b/python/rocketmq/v5/log/log_config.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging.config +import os + +__DIR = f'{os.path.expanduser("~/logs/rocketmq_python/")}' + +__LOG_CONFIG = { + 'version': 1.0, + 'disable_existing_loggers': False, + 'formatters': { + 'standard': { + 'format': '%(asctime)s [%(levelname)s] %(message)s' + }, + }, + 'handlers': { + # 'console': { + # 'level': 'DEBUG', + # 'class': 'logging.StreamHandler', + # 'formatter': 'standard' + # }, + 'file': { + 'class': 'logging.handlers.RotatingFileHandler', + 'level': 'INFO', + 'formatter': 'standard', + 'filename': f'{__DIR}/rocketmq_client.log', + 'maxBytes': 1024 * 1024 * 100, # 100MB + 'backupCount': 10, + }, + }, + 'loggers': { + 'rocketmq-python-client': { + 'handlers': ['file'], + 'level': 'INFO', + 'propagate': False + }, + } +} + +if not os.path.exists(__DIR): + os.makedirs(__DIR) + +logging.config.dictConfig(__LOG_CONFIG) +logger = logging.getLogger("rocketmq-python-client") diff --git a/python/rocketmq/v5/model/__init__.py b/python/rocketmq/v5/model/__init__.py new file mode 100644 index 000000000..5d2ddff95 --- /dev/null +++ b/python/rocketmq/v5/model/__init__.py @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .callback_result import CallbackResult, CallbackResultType +from .filter_expression import FilterExpression +from .message import Message +from .metrics import HistogramEnum, MessageMetricType, MetricContext +from .send_receipt import SendReceipt +from .topic_route import MessageQueue, TopicRouteData + +__all__ = [ + "MessageMetricType", + "MetricContext", + "HistogramEnum", + "Message", + "SendReceipt", + "MessageQueue", + "TopicRouteData", + "FilterExpression", + "CallbackResult", + "CallbackResultType", +] diff --git a/python/rocketmq/v5/model/callback_result.py b/python/rocketmq/v5/model/callback_result.py new file mode 100644 index 000000000..568d6c432 --- /dev/null +++ b/python/rocketmq/v5/model/callback_result.py @@ -0,0 +1,93 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from enum import Enum
+
+
+class CallbackResultType(Enum):
+    ASYNC_SEND_CALLBACK_RESULT = 1
+    ASYNC_RECEIVE_CALLBACK_RESULT = 2
+    ASYNC_ACK_CALLBACK_RESULT = 3
+    ASYNC_CHANGE_INVISIBLE_DURATION_RESULT = 4
+    ASYNC_SEND_CALLBACK_EXCEPTION = 5
+    ASYNC_RECEIVE_CALLBACK_EXCEPTION = 6
+    ASYNC_ACK_CALLBACK_EXCEPTION = 7
+    ASYNC_CHANGE_INVISIBLE_DURATION_EXCEPTION = 8
+    END_CALLBACK_THREAD_RESULT = 100
+
+
+class CallbackResult:
+
+    def __init__(self):
+        self.__future = None
+        self.__result = None
+        self.__result_type = None
+        self.__is_success = None
+
+    @staticmethod
+    def callback_result(future, result, success):
+        callback_result = CallbackResult()
+        callback_result.__future = future
+        callback_result.__result = result
+        callback_result.__is_success = success
+        return callback_result
+
+    @staticmethod
+    def async_send_callback_result(future, result, success=True):
+        callback_result = CallbackResult.callback_result(future, result, success)
+        callback_result.__result_type = CallbackResultType.ASYNC_SEND_CALLBACK_RESULT if success else CallbackResultType.ASYNC_SEND_CALLBACK_EXCEPTION
+        return callback_result
+
+    @staticmethod
+    def async_receive_callback_result(future, result, success=True):
+        callback_result = CallbackResult.callback_result(future, result, success)
+        callback_result.__result_type = CallbackResultType.ASYNC_RECEIVE_CALLBACK_RESULT if success else CallbackResultType.ASYNC_RECEIVE_CALLBACK_EXCEPTION
+        return callback_result
+
+    @staticmethod
+    def async_ack_callback_result(future, result, success=True):
+        callback_result = CallbackResult.callback_result(future, result, success)
+        callback_result.__result_type = CallbackResultType.ASYNC_ACK_CALLBACK_RESULT if success else CallbackResultType.ASYNC_ACK_CALLBACK_EXCEPTION
+        return callback_result
+
+    @staticmethod
+    def async_change_invisible_duration_callback_result(future, result, success=True):
+        callback_result = CallbackResult.callback_result(future, result, success)
+        callback_result.__result_type = 
CallbackResultType.ASYNC_CHANGE_INVISIBLE_DURATION_RESULT if success else CallbackResultType.ASYNC_CHANGE_INVISIBLE_DURATION_EXCEPTION + return callback_result + + @staticmethod + def end_callback_thread_result(): + callback_result = CallbackResult() + callback_result.__result_type = CallbackResultType.END_CALLBACK_THREAD_RESULT + return callback_result + + """ @property """ + + @property + def future(self): + return self.__future + + @property + def result(self): + return self.__result + + @property + def result_type(self): + return self.__result_type + + @property + def is_success(self): + return self.__is_success diff --git a/python/rocketmq/filter_expression.py b/python/rocketmq/v5/model/filter_expression.py similarity index 69% rename from python/rocketmq/filter_expression.py rename to python/rocketmq/v5/model/filter_expression.py index 9e3e5117c..98e23da4d 100644 --- a/python/rocketmq/filter_expression.py +++ b/python/rocketmq/v5/model/filter_expression.py @@ -13,23 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from enum import Enum +from rocketmq.grpc_protocol import FilterType -class ExpressionType(Enum): - Tag = 1 - Sql92 = 2 +class FilterExpression: + TAG_EXPRESSION_SUB_ALL = "*" + def __init__(self, expression=TAG_EXPRESSION_SUB_ALL, filter_type: FilterType = FilterType.TAG): + self.__expression = expression + self.__filter_type = filter_type -class FilterExpression: - def __init__(self, expression, expression_type=ExpressionType.Tag): - self._expression = expression - self._type = expression_type + """ property """ @property - def type(self): - return self._type + def expression(self): + return self.__expression @property - def expression(self): - return self._expression + def filter_type(self): + return self.__filter_type diff --git a/python/rocketmq/v5/model/message.py b/python/rocketmq/v5/model/message.py new file mode 100644 index 000000000..c333a9def --- /dev/null +++ b/python/rocketmq/v5/model/message.py @@ -0,0 +1,210 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from rocketmq.grpc_protocol import DigestType, Encoding, definition_pb2 +from rocketmq.v5.exception import IllegalArgumentException +from rocketmq.v5.util import MessageIdCodec, Misc + + +class Message: + + def __init__(self): + self.__body = None + self.__topic = None + self.__namespace = None + self.__message_id = None + self.__tag = None + self.__message_group = None + self.__delivery_timestamp = None + self.__keys = set() + self.__properties = dict() + self.__born_host = None + self.__born_timestamp = None + self.__delivery_attempt = None + self.__receipt_handle = None + self.__message_type = None + + def __str__(self) -> str: + return f"topic:{self.__topic}, tag:{self.__tag}, messageGroup:{self.__message_group}, " \ + f"deliveryTimestamp:{self.__delivery_timestamp}, keys:{self.__keys}, properties:{self.__properties}" + + def fromProtobuf(self, message: definition_pb2.Message): # noqa + try: + self.__message_body_check_sum(message) + self.__topic = message.topic.name + self.__namespace = message.topic.resource_namespace + self.__message_id = MessageIdCodec.decode(message.system_properties.message_id) + self.__body = self.__uncompress_body(message) + self.__tag = message.system_properties.tag + self.__message_group = message.system_properties.message_group + self.__born_host = message.system_properties.born_host + self.__born_timestamp = message.system_properties.born_timestamp.seconds + self.__delivery_attempt = message.system_properties.delivery_attempt + self.__delivery_timestamp = message.system_properties.delivery_timestamp + self.__receipt_handle = message.system_properties.receipt_handle + self.__message_type = message.system_properties.message_type + + if message.system_properties.keys is not None: + self.__keys.update(message.system_properties.keys) + if message.user_properties is not None: + self.__properties.update(message.user_properties) + return self + except Exception as e: + raise e + + """ private """ + + @staticmethod + def 
__message_body_check_sum(message):
+        if message.system_properties.body_digest.type == DigestType.CRC32:
+            crc32_sum = Misc.crc32_checksum(message.body)
+            if message.system_properties.body_digest.checksum != crc32_sum:
+                raise Exception(f"body_check_sum exception, {message.system_properties.body_digest.checksum} != crc32_sum {crc32_sum}")
+        elif message.system_properties.body_digest.type == DigestType.MD5:
+            md5_sum = Misc.md5_checksum(message.body)
+            if message.system_properties.body_digest.checksum != md5_sum:
+                raise Exception(f"body_check_sum exception, {message.system_properties.body_digest.checksum} != md5_sum {md5_sum}")
+        elif message.system_properties.body_digest.type == DigestType.SHA1:
+            sha1_sum = Misc.sha1_checksum(message.body)
+            if message.system_properties.body_digest.checksum != sha1_sum:
+                raise Exception(f"body_check_sum exception, {message.system_properties.body_digest.checksum} != sha1_sum {sha1_sum}")
+        else:
+            raise Exception(f"unsupported message body digest algorithm, {message.system_properties.body_digest.type},"
+                            f" {message.topic}, {message.system_properties.message_id}")
+
+    @staticmethod
+    def __uncompress_body(message):
+        if message.system_properties.body_encoding == Encoding.GZIP:
+            return Misc.uncompress_bytes_gzip(message.body)
+        elif message.system_properties.body_encoding == Encoding.IDENTITY:
+            return message.body
+        else:
+            raise Exception(
+                f"unsupported message encoding algorithm, {message.system_properties.body_encoding}, {message.topic}, {message.system_properties.message_id}")
+
+    """ property """
+
+    @property
+    def body(self):
+        return self.__body
+
+    @property
+    def topic(self):
+        return self.__topic
+
+    @property
+    def namespace(self):
+        return self.__namespace
+
+    @property
+    def message_id(self):
+        return self.__message_id
+
+    @property
+    def tag(self):
+        return self.__tag
+
+    @property
+    def message_group(self):
+        return self.__message_group
+
+    @property
+    def delivery_timestamp(self):
+        return self.__delivery_timestamp
+
+    @property
+    def keys(self):
+        return self.__keys
+
+    
@property + def properties(self): + return self.__properties + + @property + def born_host(self): + return self.__born_host + + @property + def born_timestamp(self): + return self.__born_timestamp + + @property + def delivery_attempt(self): + return self.__delivery_attempt + + @property + def receipt_handle(self): + return self.__receipt_handle + + @property + def message_type(self): + return self.__message_type + + @body.setter + def body(self, body): + if body is None or body.strip() == '': + raise IllegalArgumentException("body should not be blank") + self.__body = body + + @topic.setter + def topic(self, topic): + if Misc.is_valid_topic(topic): + self.__topic = topic + else: + raise IllegalArgumentException(f"topic does not match the regex [regex={Misc.TOPIC_PATTERN}]") + + @message_id.setter + def message_id(self, message_id): + self.__message_id = message_id + + @tag.setter + def tag(self, tag): + if tag is None or tag.strip() == '': + raise IllegalArgumentException("tag should not be blank") + if "|" in tag: + raise IllegalArgumentException("tag should not contain \"|\"") + self.__tag = tag + + @message_group.setter + def message_group(self, message_group): + if self.__delivery_timestamp is not None: + raise IllegalArgumentException("deliveryTimestamp and messageGroup should not be set at same time") + if message_group is None or len(message_group) == 0: + raise IllegalArgumentException("messageGroup should not be blank") + self.__message_group = message_group + + @delivery_timestamp.setter + def delivery_timestamp(self, delivery_timestamp): + if self.__message_group is not None: + raise IllegalArgumentException("deliveryTimestamp and messageGroup should not be set at same time") + self.__delivery_timestamp = delivery_timestamp + + @keys.setter + def keys(self, *keys): + for key in keys: + if not key or key.strip() == '': + raise IllegalArgumentException("key should not be blank") + self.__keys.update(set(keys)) + + @message_type.setter + def 
message_type(self, message_type): + self.__message_type = message_type + + def add_property(self, key, value): + if key is None or key.strip() == '': + raise IllegalArgumentException("key should not be blank") + if value is None or value.strip() == '': + raise IllegalArgumentException("value should not be blank") + self.__properties[key] = value diff --git a/python/rocketmq/v5/model/metrics.py b/python/rocketmq/v5/model/metrics.py new file mode 100644 index 000000000..1106ae4c8 --- /dev/null +++ b/python/rocketmq/v5/model/metrics.py @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + + +class MessageMetricType(Enum): + # type of message publishing. + SEND = 1 + + # type of message reception. 
+ RECEIVE = 2 + + +class MetricContext: + + def __init__(self, metric_type: MessageMetricType, attributes=None): + if attributes is None: + attributes = dict() + self.__metric_type = metric_type + self.__attributes = attributes + + def get_attr(self, key): + return self.__attributes.get(key) + + def put_attr(self, key, value): + self.__attributes[key] = value + + def remove_attr(self, key): + del self.__attributes[key] + + """ property """ + + @property + def metric_type(self): + return self.__metric_type + + @property + def attributes(self): + return self.__attributes + + +class HistogramEnum(Enum): + # a histogram that records the cost time of successful api calls of message publishing. + SEND_COST_TIME = ("rocketmq_send_cost_time", [1.0, 5.0, 10.0, 20.0, 50.0, 200.0, 500.0]) + # a histogram that records the latency of message delivery from remote. + DELIVERY_LATENCY = ("rocketmq_delivery_latency", [1.0, 5.0, 10.0, 20.0, 50.0, 200.0, 500.0]) + + def __init__(self, histogram_name, buckets): + self.__histogram_name = histogram_name + self.__buckets = buckets + + """ property """ + + @property + def histogram_name(self): + return self.__histogram_name + + @property + def buckets(self): + return self.__buckets diff --git a/python/rocketmq/log.py b/python/rocketmq/v5/model/send_receipt.py similarity index 54% rename from python/rocketmq/log.py rename to python/rocketmq/v5/model/send_receipt.py index f3e4eae34..90d576f5c 100644 --- a/python/rocketmq/log.py +++ b/python/rocketmq/v5/model/send_receipt.py @@ -13,26 +13,31 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging -import os +class SendReceipt: -logger = logging.getLogger("rocketmqlogger") -logger.setLevel(logging.DEBUG) + def __init__(self, message_id, transaction_id, message_queue, offset): + self.__message_id = message_id + self.__transaction_id = transaction_id + self.__message_queue = message_queue + self.__offset = offset -log_path = os.path.join( - os.path.expanduser("~"), "logs", "rocketmq", "rocketmq-client.log" -) -file_handler = logging.FileHandler(log_path) -file_handler.setLevel(logging.DEBUG) + def __str__(self): + return f"message_id:{self.__message_id}" -console_handler = logging.StreamHandler() -console_handler.setLevel(logging.DEBUG) + """ property """ -formatter = logging.Formatter( - "%(asctime)s [%(levelname)s] [%(process)d] [%(threadName)s] [%(filename)s#%(funcName)s:%(lineno)d] %(message)s" -) -file_handler.setFormatter(formatter) -console_handler.setFormatter(formatter) + @property + def message_id(self): + return self.__message_id -logger.addHandler(file_handler) -logger.addHandler(console_handler) + @property + def transaction_id(self): + return self.__transaction_id + + @property + def message_queue(self): + return self.__message_queue + + @property + def offset(self): + return self.__offset diff --git a/python/rocketmq/v5/model/topic_route.py b/python/rocketmq/v5/model/topic_route.py new file mode 100644 index 000000000..e62b6d6ff --- /dev/null +++ b/python/rocketmq/v5/model/topic_route.py @@ -0,0 +1,103 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rocketmq.grpc_protocol import Permission, definition_pb2 +from rocketmq.v5.client.connection import RpcEndpoints + + +class MessageQueue: + MASTER_BROKER_ID = 0 + + def __init__(self, queue): + self.__topic = queue.topic.name + self.__namespace = queue.topic.resource_namespace + self.__queue_id = queue.id + self.__permission = queue.permission + self.__broker_name = queue.broker.name + self.__broker_id = queue.broker.id + self.__broker_endpoints = RpcEndpoints(queue.broker.endpoints) + self.__accept_message_types = set(queue.accept_message_types) + + def is_readable(self): + return self.__permission == Permission.READ or self.__permission == Permission.READ_WRITE + + def is_writable(self): + return self.__permission == Permission.WRITE or self.__permission == Permission.READ_WRITE + + def is_master_broker(self): + return self.__broker_id == MessageQueue.MASTER_BROKER_ID + + def __eq__(self, other: object) -> bool: + if not isinstance(other, MessageQueue): + return False + ret = (self.__topic == other.__topic and self.__namespace == other.__namespace and self.__queue_id == other.__queue_id and self.__permission == other.__permission and self.__broker_name == other.__broker_name and self.__broker_id == other.__broker_id and self.__broker_endpoints == other.__broker_endpoints and sorted(self.__accept_message_types) == sorted(other.__accept_message_types)) + return ret + + def __str__(self): + return f"{self.__broker_name}.{self.__topic}.{self.__queue_id}" + + def message_queue0(self): + # to grpc MessageQueue + queue = definition_pb2.MessageQueue() # noqa 
+ queue.topic.name = self.__topic + queue.topic.resource_namespace = self.__namespace + queue.id = self.__queue_id + queue.permission = self.__permission + queue.broker.name = self.__broker_name + queue.broker.id = self.__broker_id + queue.broker.endpoints.CopyFrom(self.__broker_endpoints.endpoints) + queue.accept_message_types.extend(self.__accept_message_types) + return queue + + """ property """ + + @property + def endpoints(self) -> RpcEndpoints: + return self.__broker_endpoints + + @property + def accept_message_types(self): + return self.__accept_message_types + + +class TopicRouteData: + + def __init__(self, message_queues): + self.__message_queues = list(map(lambda queue: MessageQueue(queue), message_queues)) + + def __eq__(self, other): + if self is other: + return True + if other is None or not isinstance(other, TopicRouteData): + return False + return self.__message_queues == other.__message_queues + + def __hash__(self): + return hash(tuple(self.__message_queues)) + + def __str__(self): + return "message_queues:(" + ', '.join(str(queue) for queue in self.__message_queues) + ")" + + def all_endpoints(self): + endpoints_map = {} + for queue in self.__message_queues: + endpoints_map[queue.endpoints.facade] = queue.endpoints + return endpoints_map + + """ property """ + + @property + def message_queues(self): + return self.__message_queues diff --git a/python/rocketmq/v5/producer/__init__.py b/python/rocketmq/v5/producer/__init__.py new file mode 100644 index 000000000..0ca6eaa15 --- /dev/null +++ b/python/rocketmq/v5/producer/__init__.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .producer import Producer, Transaction, TransactionChecker + +__all__ = [ + "Producer", + "TransactionChecker", + "Transaction", +] diff --git a/python/rocketmq/v5/producer/producer.py b/python/rocketmq/v5/producer/producer.py new file mode 100644 index 000000000..9f4ae3569 --- /dev/null +++ b/python/rocketmq/v5/producer/producer.py @@ -0,0 +1,447 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc +import functools +import threading +import time +from concurrent.futures import Future + +from rocketmq.grpc_protocol import (ClientType, Code, Encoding, + EndTransactionRequest, HeartbeatRequest, + MessageType, + NotifyClientTerminationRequest, Publishing, + SendMessageRequest, Settings, + TelemetryCommand, TransactionResolution, + TransactionSource) +from rocketmq.v5.client import Client +from rocketmq.v5.client.balancer import QueueSelector +from rocketmq.v5.exception import (ClientException, IllegalArgumentException, + IllegalStateException, + TooManyRequestsException) +from rocketmq.v5.log import logger +from rocketmq.v5.model import CallbackResult, Message, SendReceipt +from rocketmq.v5.util import (ConcurrentMap, MessageIdCodec, + MessagingResultChecker, Misc) + + +class Transaction: + __transaction_lock = threading.Lock() + + def __init__(self, producer): + self.__message = None + self.__send_receipt = None + self.__producer = producer + + def add_half_message(self, message: Message): + with Transaction.__transaction_lock: + if message is None: + raise IllegalArgumentException("add half message error, message is none.") + + if self.__message is None: + self.__message = message + else: + raise IllegalArgumentException(f"message already existed in transaction, topic:{message.topic}") + + def add_send_receipt(self, send_receipt): + with Transaction.__transaction_lock: + if self.__message is None: + raise IllegalArgumentException("add send receipt error, no message in transaction.") + if send_receipt is None: + raise IllegalArgumentException("add send receipt error, send receipt in none.") + if self.__message.message_id != send_receipt.message_id: + raise IllegalArgumentException("can't add another send receipt to a half message.") + + self.__send_receipt = send_receipt + + def commit(self): + return self.__commit_or_rollback(TransactionResolution.COMMIT) + + def rollback(self): + return self.__commit_or_rollback(TransactionResolution.ROLLBACK) + 
+ def __commit_or_rollback(self, result): + if self.__message is None: + raise IllegalArgumentException("no message in transaction.") + if self.__send_receipt is None or self.__send_receipt.transaction_id is None: + raise IllegalArgumentException("no transaction_id in transaction, must send half message at first.") + + try: + res = self.__producer.end_transaction(self.__send_receipt.message_queue.endpoints, self.__message, + self.__send_receipt.transaction_id, result, + TransactionSource.SOURCE_CLIENT) + if res.status.code != Code.OK: + logger.error( + f"transaction commit or rollback error. topic:{self.__message.topic}, message_id:{self.__message.message_id}, transaction_id:{self.__send_receipt.transaction_id}, transactionResolution:{result}") + raise ClientException(res.status.message, res.status.code) + return res + except Exception as e: + logger.error( + f"end transaction error, topic:{self.__message.topic}, message_id:{self.__send_receipt.message_id}, transaction_id:{self.__send_receipt.transaction_id}, transactionResolution:{result}: {e}") + raise e + + """ property """ + + @property + def message_id(self): + return self.__message.message_id + + +class TransactionChecker(metaclass=abc.ABCMeta): + + @abc.abstractmethod + def check(self, message: Message) -> TransactionResolution: + pass + + +class Producer(Client): + MAX_SEND_ATTEMPTS = 3 # max retry times when send failed + + def __init__(self, client_configuration, topics=None, checker=None, tls_enable=False): + super().__init__(client_configuration, topics, ClientType.PRODUCER, tls_enable) + # {topic, QueueSelector} + self.__send_queue_selectors = ConcurrentMap() + self.__checker = checker # checker for transaction message, handle checking from server + + def __str__(self): + return f"{ClientType.Name(self.client_type)} client_id:{self.client_id}" + + # send message # + + def send(self, message: Message, transaction=None) -> SendReceipt: + if self.is_running is False: + raise 
IllegalStateException("producer is not running now.") + + self.__wrap_sending_message(message, False if transaction is None else True) + topic_queue = self.__select_send_queue(message.topic) + if message.message_type not in topic_queue.accept_message_types: + raise IllegalArgumentException( + f"current message type not match with queue accept message types, topic:{message.topic}, message_type:{message.message_type}, queue access type:{topic_queue.accept_message_types}") + + if transaction is None: + try: + return self.__send(message, topic_queue) + except Exception as e: + logger.error(f"send message exception, topic: {message.topic}, e: {e}") + raise e + else: + try: + transaction.add_half_message(message) + send_receipt = self.__send(message, topic_queue) + message.message_id = send_receipt.message_id + transaction.add_send_receipt(send_receipt) + return send_receipt + except IllegalArgumentException as e: + raise e + except Exception as e: + logger.error(f"send transaction message exception, topic: {message.topic}, e: {e}") + raise e + + def send_async(self, message: Message): + if self.is_running is False: + raise IllegalStateException("producer is not running now.") + + self.__wrap_sending_message(message, False) + topic_queue = self.__select_send_queue(message.topic) + if message.message_type not in topic_queue.accept_message_types: + raise IllegalArgumentException(f"current message type not match with queue accept message types, " + f"topic:{message.topic}, message_type:{message.message_type}, " + f"queue access type:{topic_queue.accept_message_types}") + + try: + return self.__send_async(message, topic_queue) + except Exception as e: + logger.error(f"send message exception, topic: {message.topic}, {e}") + raise e + + # transaction # + + def begin_transaction(self): + if self.is_running is False: + raise IllegalStateException("unable to begin transaction because producer is not running") + + if self.__checker is None: + raise 
IllegalArgumentException("Transaction checker should not be null.") + return Transaction(self) + + def end_transaction(self, endpoints, message, transaction_id, result, source): + if self.is_running is False: + raise IllegalStateException("unable to end transaction because producer is not running") + + if self.__checker is None: + raise IllegalArgumentException("Transaction checker should not be null.") + + req = self.__end_transaction_req(message, transaction_id, result, source) + future = self.rpc_client.end_transaction_async(endpoints, req, metadata=self._sign(), + timeout=self.client_configuration.request_timeout) + return future.result() + + async def on_recover_orphaned_transaction_command(self, endpoints, msg, transaction_id): + # call this function from server side stream, in RpcClient._io_loop + try: + if self.is_running is False: + raise IllegalStateException( + "unable to recover orphaned transaction command because producer is not running") + + if self.__checker is None: + raise IllegalArgumentException("No transaction checker registered.") + message = Message().fromProtobuf(msg) + result = self.__checker.check(message) + + if result == TransactionResolution.COMMIT: + res = await self.__commit_for_server_check(endpoints, message, transaction_id, + TransactionSource.SOURCE_SERVER_CHECK) + logger.debug( + f"commit message. message_id: {message.message_id}, transaction_id: {transaction_id}, res: {res}") + elif result == TransactionResolution.ROLLBACK: + res = await self.__rollback_for_server_check(endpoints, message, transaction_id, + TransactionSource.SOURCE_SERVER_CHECK) + logger.debug( + f"rollback message. 
message_id: {message.message_id}, transaction_id: {transaction_id}, res: {res}") + except Exception as e: + logger.error(f"on_recover_orphaned_transaction_command exception: {e}") + + """ override """ + + def _start_success(self): + logger.info(f"{self.__str__()} start success.") + + def _start_failure(self): + logger.error(f"{self.__str__()} start failed.") + + def _sync_setting_req(self, endpoints): + # publishing + pub = Publishing() + topics = self.topics + for topic in topics: + resource = pub.topics.add() + resource.name = topic + resource.resource_namespace = self.client_configuration.namespace + pub.max_body_size = 1024 * 1024 * 128 + pub.validate_message_type = True + + # setting + settings = Settings() + settings.client_type = self.client_type + settings.access_point.CopyFrom(endpoints.endpoints) + settings.request_timeout.seconds = self.client_configuration.request_timeout + settings.publishing.CopyFrom(pub) + + settings.user_agent.language = Misc.sdk_language() + settings.user_agent.version = Misc.sdk_version() + settings.user_agent.platform = Misc.get_os_description() + settings.user_agent.hostname = Misc.get_local_ip() + settings.metric.on = False + + cmd = TelemetryCommand() + cmd.settings.CopyFrom(settings) + return cmd + + def _heartbeat_req(self): + req = HeartbeatRequest() + req.client_type = self.client_type + return req + + def _notify_client_termination_req(self): + return NotifyClientTerminationRequest() + + def _update_queue_selector(self, topic, topic_route): + queue_selector = self.__send_queue_selectors.get(topic) + if queue_selector is None: + return + queue_selector.update(topic_route) + + def shutdown(self): + logger.info(f"begin to shutdown {self.__str__()}") + super().shutdown() + logger.info(f"shutdown {self.__str__()} success.") + + """ private """ + + def __send(self, message: Message, topic_queue, attempt=1) -> SendReceipt: + req = self.__send_req(message) + send_context = self.client_metrics.send_before(message.topic) + 
send_message_future = self.rpc_client.send_message_async(topic_queue.endpoints, req, self._sign()) + return self.__handle_sync_send_receipt(send_message_future, message, topic_queue, attempt, send_context) + + def __handle_sync_send_receipt(self, send_message_future, message, topic_queue, attempt, send_metric_context=None): + try: + send_receipt = self.__process_send_message_response(send_message_future, topic_queue) + self.client_metrics.send_after(send_metric_context, True) + return send_receipt + except Exception as e: + attempt += 1 + retry_exception_future = self.__check_send_retry_condition(message, topic_queue, attempt, e) + if retry_exception_future is not None: + # end retry with exception + self.client_metrics.send_after(send_metric_context, False) + raise retry_exception_future.exception() + + # resend message + topic_queue = self.__select_send_queue(message.topic) + return self.__send(message, topic_queue, attempt) + + def __send_async(self, message: Message, topic_queue, attempt=1, ret_future=None): + req = self.__send_req(message) + send_context = self.client_metrics.send_before(message.topic) + send_message_future = self.rpc_client.send_message_async(topic_queue.endpoints, req, self._sign()) + if ret_future is None: + ret_future = Future() + handle_send_receipt_callback = functools.partial(self.__handle_async_send_receipt, message=message, + topic_queue=topic_queue, attempt=attempt, + ret_future=ret_future, send_metric_context=send_context) + send_message_future.add_done_callback(handle_send_receipt_callback) + return ret_future + + def __handle_async_send_receipt(self, send_message_future, message, topic_queue, attempt, ret_future, + send_metric_context=None): + try: + send_receipt = self.__process_send_message_response(send_message_future, topic_queue) + self.client_metrics.send_after(send_metric_context, True) + self._set_future_callback_result(CallbackResult.async_send_callback_result(ret_future, send_receipt)) + except Exception as e: + attempt 
+= 1 + retry_exception_future = self.__check_send_retry_condition(message, topic_queue, attempt, e) + if retry_exception_future is not None: + # end retry with exception + self.client_metrics.send_after(send_metric_context, False) + self._set_future_callback_result( + CallbackResult.async_send_callback_result(ret_future, retry_exception_future.exception(), False)) + return + # resend message + topic_queue = self.__select_send_queue(message.topic) + self.__send_async(message, topic_queue, attempt, ret_future) + + def __process_send_message_response(self, send_message_future, topic_queue): + res = send_message_future.result() + MessagingResultChecker.check(res.status) + entries = res.entries + assert len( + entries) == 1, f"entries size error, the send response entries size is {len(entries)}, {self.__str__()}" + entry = entries[0] + return SendReceipt(entry.message_id, entry.transaction_id, topic_queue, entry.offset) + + def __check_send_retry_condition(self, message, topic_queue, attempt, e): + end_retry = False + if attempt > Producer.MAX_SEND_ATTEMPTS: + logger.error( + f"{self.__str__()} failed to send message to {topic_queue.endpoints.__str__()}, because of run out of attempt times, topic:{message.topic}, message_id:{message.message_id}, message_type:{message.message_type}, attempt:{attempt}") + end_retry = True + + # no need more attempts for transactional message + if message.message_type == MessageType.TRANSACTION: + logger.error( + f"{self.__str__()} failed to send message to {topic_queue.endpoints.__str__()}, topic:{message.topic}, message_id:{message.message_id}, message_type:{message.message_type} ,attempt:{attempt}") + end_retry = True + + # end retry if system busy + if isinstance(e, TooManyRequestsException): + logger.error( + f"{self.__str__()} failed to send message to {topic_queue.endpoints.__str__()}, because of to too many requests, topic:{message.topic}, message_type:{message.message_type}, message_id:{message.message_id}, attempt:{attempt}") + 
end_retry = True + + if end_retry: + send_exception_future = Future() + send_exception_future.set_exception(e) + return send_exception_future + else: + return None + + def __wrap_sending_message(self, message, is_transaction): + message.message_id = MessageIdCodec().next_message_id() + message.message_type = self.__send_message_type(message, is_transaction) + + def __send_req(self, message: Message): + try: + req = SendMessageRequest() + msg = req.messages.add() + msg.topic.name = message.topic + msg.topic.resource_namespace = self.client_configuration.namespace + if message.body is None or len(message.body) == 0: + raise IllegalArgumentException("message body is none.") + max_body_size = 4 * 1024 * 1024 # max body size is 4m + if len(message.body) > max_body_size: + raise IllegalArgumentException( + f"Message body size exceeds the threshold, max size={max_body_size} bytes") + + msg.body = message.body + if message.tag is not None: + msg.system_properties.tag = message.tag + if message.keys is not None: + msg.system_properties.keys.extend(message.keys) + if message.properties is not None: + msg.user_properties.update(message.properties) + msg.system_properties.message_id = message.message_id + msg.system_properties.message_type = message.message_type + msg.system_properties.born_timestamp.seconds = int(time.time()) + msg.system_properties.born_host = Misc.get_local_ip() + msg.system_properties.body_encoding = Encoding.IDENTITY + if message.message_group is not None: + msg.system_properties.message_group = message.message_group + if message.delivery_timestamp is not None: + msg.system_properties.delivery_timestamp.seconds = message.delivery_timestamp + return req + except Exception as e: + raise e + + def __send_message_type(self, message: Message, is_transaction=False): + if message.message_group is None and message.delivery_timestamp is None and is_transaction is False: + return MessageType.NORMAL + + if message.message_group is not None and is_transaction is 
False: + return MessageType.FIFO + + if message.delivery_timestamp is not None and is_transaction is False: + return MessageType.DELAY + + if message.message_group is None and message.delivery_timestamp is None and is_transaction is True: + return MessageType.TRANSACTION + + # transaction semantics is conflicted with fifo/delay. + logger.error(f"{self.__str__()} set send message type exception, message: {str(message)}") + raise IllegalArgumentException("transactional message should not set messageGroup or deliveryTimestamp") + + def __select_send_queue(self, topic): + try: + route = self._retrieve_topic_route_data(topic) + queue_selector = self.__send_queue_selectors.put_if_absent(topic, + QueueSelector.producer_queue_selector(route)) + return queue_selector.select_next_queue() + except Exception as e: + logger.error(f"producer select topic:{topic} queue index exception, {e}") + raise e + + def __end_transaction_req(self, message: Message, transaction_id, result, source): + req = EndTransactionRequest() + req.topic.name = message.topic + req.topic.resource_namespace = self.client_configuration.namespace + req.message_id = message.message_id + req.transaction_id = transaction_id + req.resolution = result + req.source = source + return req + + def __commit_for_server_check(self, endpoints, message: Message, transaction_id, source): + return self.__end_transaction_for_server_check(endpoints, message, transaction_id, TransactionResolution.COMMIT, + source) + + def __rollback_for_server_check(self, endpoints, message: Message, transaction_id, source): + return self.__end_transaction_for_server_check(endpoints, message, transaction_id, + TransactionResolution.ROLLBACK, source) + + def __end_transaction_for_server_check(self, endpoints, message: Message, transaction_id, result, source): + req = self.__end_transaction_req(message, transaction_id, result, source) + return self.rpc_client.end_transaction_for_server_check(endpoints, req, metadata=self._sign(), + 
timeout=self.client_configuration.request_timeout) diff --git a/python/tests/__init__.py b/python/rocketmq/v5/test/__init__.py similarity index 92% rename from python/tests/__init__.py rename to python/rocketmq/v5/test/__init__.py index ae1e83eeb..73f766db6 100644 --- a/python/tests/__init__.py +++ b/python/rocketmq/v5/test/__init__.py @@ -12,3 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from .test_base import TestBase + +__all__ = [ + "TestBase", +] diff --git a/python/rocketmq/v5/test/test_base.py b/python/rocketmq/v5/test/test_base.py new file mode 100644 index 000000000..aa49fafc9 --- /dev/null +++ b/python/rocketmq/v5/test/test_base.py @@ -0,0 +1,147 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import time

from rocketmq.grpc_protocol import (AddressScheme, Broker, Code, Endpoints,
                                    MessageType, Permission, Resource, Status,
                                    definition_pb2)
from rocketmq.grpc_protocol.service_pb2 import ReceiveMessageResponse  # noqa
from rocketmq.grpc_protocol.service_pb2 import SendMessageResponse  # noqa
from rocketmq.v5.client import ClientConfiguration, Credentials
from rocketmq.v5.model import Message, MessageQueue, SendReceipt
from rocketmq.v5.util import ClientId, MessageIdCodec


class TestBase:
    """Shared fake constants and protobuf-object builders for unit tests.

    Every ``fake_*`` helper constructs a fully populated object so tests can
    mock RPC layers and still exercise the client's parsing/validation paths.
    """

    FAKE_TRANSACTION_ID = "foo-bar-transaction-id"
    FAKE_NAMESPACE = "foo-bar-namespace"
    FAKE_AK = "foo-bar-ak"
    FAKE_SK = "foo-bar-sk"
    FAKE_CLIENT_ID = ClientId()
    FAKE_TOPIC_0 = "foo-bar-topic-0"
    FAKE_TOPIC_1 = "foo-bar-topic-1"
    FAKE_MESSAGE_BODY = "foobar".encode('utf-8')
    FAKE_TAG_0 = "foo-bar-tag-0"
    FAKE_BROKER_NAME_0 = "foo-bar-broker-name-0"
    FAKE_BROKER_NAME_1 = "foo-bar-broker-name-1"
    FAKE_RECEIPT_HANDLE_0 = "foo-bar-handle-0"
    FAKE_RECEIPT_HANDLE_1 = "foo-bar-handle-1"
    FAKE_ENDPOINTS = "127.0.0.1:9876"
    FAKE_HOST_0 = "127.0.0.1"
    FAKE_PORT_0 = 8080
    FAKE_HOST_1 = "127.0.0.2"
    FAKE_PORT_1 = 8081
    FAKE_CONSUMER_GROUP_0 = "foo-bar-group-0"

    @staticmethod
    def fake_client_config():
        """Return a ClientConfiguration built from the fake AK/SK/namespace."""
        credentials = Credentials(TestBase.FAKE_AK, TestBase.FAKE_SK)
        config = ClientConfiguration(TestBase.FAKE_ENDPOINTS, credentials, TestBase.FAKE_NAMESPACE)
        return config

    @staticmethod
    def fake_topic_resource(topic):
        """Return a protobuf Resource for *topic* in the fake namespace."""
        fake_resource = Resource()
        fake_resource.name = topic
        fake_resource.resource_namespace = TestBase.FAKE_NAMESPACE
        return fake_resource

    @staticmethod
    def fake_send_message(topic):
        """Return a client-side Message ready to be sent to *topic*."""
        msg = Message()
        msg.topic = topic
        msg.body = TestBase.FAKE_MESSAGE_BODY
        return msg

    @staticmethod
    def fake_Receive_message(topic):
        """Return a protobuf Message as the broker would deliver it.

        Fix: the *topic* parameter was previously ignored and FAKE_TOPIC_0 was
        hard-coded; existing callers all pass FAKE_TOPIC_0, so honoring the
        parameter is backward-compatible.
        """
        msg = definition_pb2.Message()  # noqa
        msg.topic.name = topic
        msg.topic.resource_namespace = TestBase.FAKE_NAMESPACE
        msg.system_properties.message_id = MessageIdCodec().next_message_id()
        msg.body = TestBase.FAKE_MESSAGE_BODY
        msg.system_properties.born_host = TestBase.FAKE_HOST_0
        # NOTE(review): time.time() * 1000 yields milliseconds but is stored in
        # a `.seconds` field; harmless for these tests — confirm intent.
        msg.system_properties.born_timestamp.seconds = int(time.time() * 1000)
        msg.system_properties.delivery_timestamp.seconds = msg.system_properties.born_timestamp.seconds - 10
        msg.system_properties.message_type = 1
        msg.system_properties.body_encoding = 1
        return msg

    @staticmethod
    def fake_broker():
        """Return a protobuf Broker with a single IPv4 endpoint."""
        fake_broker = Broker()
        endpoints = Endpoints()
        endpoints.scheme = AddressScheme.IPv4
        address = endpoints.addresses.add()
        address.host = TestBase.FAKE_HOST_0
        address.port = TestBase.FAKE_PORT_0
        fake_broker.name = TestBase.FAKE_BROKER_NAME_0
        fake_broker.id = 0
        fake_broker.endpoints.CopyFrom(endpoints)
        return fake_broker

    @staticmethod
    def fake_queue(topic):
        """Return a model MessageQueue wrapping a read/write protobuf queue."""
        fake_resource = TestBase.fake_topic_resource(topic)
        fake_broker = TestBase.fake_broker()
        fake_queue = definition_pb2.MessageQueue()  # noqa
        fake_queue.topic.CopyFrom(fake_resource)
        fake_queue.id = 0
        fake_queue.broker.CopyFrom(fake_broker)
        fake_queue.permission = Permission.READ_WRITE
        # Accept every message type so any producer/consumer test can reuse it.
        fake_queue.accept_message_types.extend(
            (MessageType.NORMAL, MessageType.FIFO, MessageType.DELAY, MessageType.TRANSACTION))
        return MessageQueue(fake_queue)

    @staticmethod
    def fake_message_queue(topic):
        """Return a model MessageQueue for *topic*.

        Fix: fake_queue() already returns a MessageQueue; the previous
        MessageQueue(TestBase.fake_queue(topic)) double-wrap handed a
        non-protobuf object to the MessageQueue constructor.
        """
        return TestBase.fake_queue(topic)

    @staticmethod
    def fake_ok_status():
        """Return a protobuf Status with Code.OK."""
        status = Status()
        status.code = Code.OK
        status.message = "OK"
        return status

    @staticmethod
    def fake_send_success_response():
        """Return a SendMessageResponse with one successful entry."""
        fake_response = SendMessageResponse()
        status = TestBase.fake_ok_status()
        fake_response.status.CopyFrom(status)
        entry = fake_response.entries.add()
        entry.status.CopyFrom(status)
        entry.message_id = MessageIdCodec().next_message_id()
        # Reuse the message id as the transaction id for simplicity.
        entry.transaction_id = entry.message_id
        entry.offset = 999
        return fake_response

    @staticmethod
    def fake_send_receipt(topic):
        """Return a SendReceipt derived from a fake success response."""
        fake_response = TestBase.fake_send_success_response()
        fake_message_queue = TestBase.fake_queue(topic)
        fake_entry = fake_response.entries[0]
        return SendReceipt(fake_entry.message_id, fake_entry.transaction_id, fake_message_queue, fake_entry.offset)

    @staticmethod
    def fake_receive_receipt():
        """Return the two-part receive stream: a status frame then a message frame."""
        res_status = ReceiveMessageResponse()
        res_status.status.CopyFrom(TestBase.fake_ok_status())
        res_msg = ReceiveMessageResponse()
        msg = TestBase.fake_Receive_message(TestBase.FAKE_TOPIC_0)
        res_msg.message.CopyFrom(msg)
        return [res_status, res_msg]
import unittest
from concurrent.futures import Future
from unittest.mock import patch

from rocketmq.v5.client import Client
from rocketmq.v5.client.connection import RpcClient
from rocketmq.v5.consumer import SimpleConsumer
from rocketmq.v5.model import FilterExpression, Message
from rocketmq.v5.test import TestBase


class TestNormalConsumer(unittest.TestCase):
    """Receive-path test for SimpleConsumer with routing, RPC and checksum mocked."""

    @patch.object(Message, '_Message__message_body_check_sum')
    @patch.object(SimpleConsumer, '_SimpleConsumer__receive_message_response')
    @patch.object(RpcClient, 'receive_message_async')
    @patch.object(SimpleConsumer, '_SimpleConsumer__select_topic_queue',
                  return_value=TestBase.fake_queue(TestBase.FAKE_TOPIC_0))
    @patch.object(SimpleConsumer, '_SimpleConsumer__select_topic_for_receive', return_value=TestBase.FAKE_TOPIC_0)
    @patch.object(Client, '_Client__start_scheduler', return_value=None)
    @patch.object(Client, '_Client__update_topic_route', return_value=None)
    def test_receive(self, mock_update_topic_route, mock_start_scheduler, mock_select_topic_for_receive,
                     mock_select_topic_queue, mock_receive_message_async, mock_receive_message_response,
                     mock_message_body_check_sum):
        # The async RPC resolves immediately with an empty payload; the parsed
        # receipt itself comes from the mocked __receive_message_response.
        resolved = Future()
        resolved.set_result([])
        mock_receive_message_async.return_value = resolved
        mock_receive_message_response.return_value = TestBase.fake_receive_receipt()

        subscriptions = {TestBase.FAKE_TOPIC_0: FilterExpression()}
        consumer = SimpleConsumer(TestBase.fake_client_config(), TestBase.FAKE_CONSUMER_GROUP_0, subscriptions)
        consumer.startup()
        received = consumer.receive(32, 10)
        self.assertIsInstance(received[0], Message)
        consumer.shutdown()

        # Verify every mocked collaborator was exercised exactly as expected.
        mock_update_topic_route.assert_called()
        mock_start_scheduler.assert_called_once()
        mock_select_topic_queue.assert_called_once()
        mock_select_topic_for_receive.assert_called_once()
        mock_message_body_check_sum.assert_called_once()
        mock_receive_message_response.assert_called_once()
        mock_receive_message_async.assert_called_once()
a/python/rocketmq/v5/test/test_producer.py b/python/rocketmq/v5/test/test_producer.py new file mode 100644 index 000000000..4dc73d292 --- /dev/null +++ b/python/rocketmq/v5/test/test_producer.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from concurrent.futures import Future +from unittest.mock import patch + +from rocketmq.v5.client import Client +from rocketmq.v5.client.connection import RpcClient +from rocketmq.v5.model import SendReceipt +from rocketmq.v5.producer import Producer +from rocketmq.v5.test import TestBase + + +class TestNormalProducer(unittest.TestCase): + + @patch.object(Producer, '_Producer__select_send_queue', return_value=TestBase.fake_queue(TestBase.FAKE_TOPIC_0)) + @patch.object(RpcClient, 'send_message_async') + @patch.object(Client, '_Client__start_scheduler', return_value=None) + def test_send(self, mock_start_scheduler, mock_send_message_async, mock_select_send_queue): + # mock send_message_async return future + future = Future() + future.set_result(TestBase.fake_send_success_response()) + mock_send_message_async.return_value = future + producer = Producer(TestBase.fake_client_config()) + producer.startup() + message = TestBase.fake_send_message(TestBase.FAKE_TOPIC_0) + 
result = producer.send(message) + self.assertIsInstance(result, SendReceipt) + producer.shutdown() + mock_start_scheduler.assert_called_once() + mock_select_send_queue.assert_called_once() + mock_send_message_async.assert_called_once() diff --git a/python/rocketmq/v5/util/__init__.py b/python/rocketmq/v5/util/__init__.py new file mode 100644 index 000000000..e61d1eb37 --- /dev/null +++ b/python/rocketmq/v5/util/__init__.py @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .atomic import AtomicInteger +from .client_id import ClientId +from .concurrent_map import ConcurrentMap +from .message_id_codec import MessageIdCodec +from .messaging_result_checker import MessagingResultChecker +from .misc import Misc +from .signature import Signature + +__all__ = [ + "AtomicInteger", + "ClientId", + "ConcurrentMap", + "Misc", + "MessageIdCodec", + "MessagingResultChecker", + "Signature", +] diff --git a/python/rocketmq/v5/util/atomic.py b/python/rocketmq/v5/util/atomic.py new file mode 100644 index 000000000..a3449f3d7 --- /dev/null +++ b/python/rocketmq/v5/util/atomic.py @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import threading + + +class AtomicInteger: + + def __init__(self, initial=0): + # init AtomicInteger,default is 0 + self._value = initial + self._lock = threading.Lock() + + def get(self): + with self._lock: + return self._value + + def set(self, value): + with self._lock: + self._value = value + + def get_and_set(self, value): + with self._lock: + old_value = self._value + self._value = value + return old_value + + def increment_and_get(self): + with self._lock: + self._value += 1 + return self._value + + def get_and_increment(self): + with self._lock: + old_value = self._value + self._value += 1 + return old_value + + def decrement_and_get(self): + with self._lock: + self._value -= 1 + return self._value + + def get_and_decrement(self): + with self._lock: + old_value = self._value + self._value -= 1 + return old_value + + def compare_and_set(self, expect, update): + with self._lock: + if self._value == expect: + self._value = update + return True + return False diff --git a/python/rocketmq/v5/util/client_id.py b/python/rocketmq/v5/util/client_id.py new file mode 100644 index 000000000..44ef02f0f --- /dev/null +++ b/python/rocketmq/v5/util/client_id.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
class ClientId:
    """Generates the per-instance client identifier reported to the proxy.

    Format: "<hostname>@<pid>@<index>@<base36 creation time>", where index
    comes from a process-wide counter so every ClientId created in this
    process is distinct.
    """

    CLIENT_ID_SEPARATOR = "@"
    # process-wide counter shared by all ClientId instances
    __client_index = AtomicInteger()

    def __init__(self):
        self.__client_index = ClientId.__client_index.get_and_increment()
        fields = (
            gethostname(),
            str(getpid()),
            str(self.__client_index),
            Misc.to_base36(time_ns()),  # creation time in ns, base36-encoded
        )
        self.__client_id = ClientId.CLIENT_ID_SEPARATOR.join(fields)

    @property
    def client_id(self):
        """The rendered "host@pid@index@time" identifier string."""
        return self.__client_id

    @property
    def client_index(self):
        """This instance's slot in the process-wide counter."""
        return self.__client_index
class ConcurrentMap:
    """A dict wrapper whose individual operations are guarded by a lock.

    Mirrors a small subset of Java's ConcurrentHashMap API. Bulk readers
    (keys/values/items) hand back snapshot copies, so callers may iterate
    them while other threads keep mutating the map.
    """

    def __init__(self):
        self.__guard = threading.Lock()
        self.__entries = {}

    def get(self, key, default=None):
        """Return the value for key, or default when absent."""
        with self.__guard:
            return self.__entries.get(key, default)

    def put(self, key, value):
        """Insert or overwrite the mapping for key."""
        with self.__guard:
            self.__entries[key] = value

    def remove(self, key):
        """Drop the mapping for key; a missing key is silently ignored."""
        with self.__guard:
            self.__entries.pop(key, None)

    def update(self, m):
        """Merge all mappings from m into this map."""
        with self.__guard:
            self.__entries.update(m)

    def contains(self, key):
        """Return True when key is present."""
        with self.__guard:
            return key in self.__entries

    def keys(self):
        """Return a snapshot list of the keys."""
        with self.__guard:
            return list(self.__entries)

    def values(self):
        """Return a snapshot list of the values."""
        with self.__guard:
            return list(self.__entries.values())

    def items(self):
        """Return a snapshot list of (key, value) pairs."""
        with self.__guard:
            return list(self.__entries.items())

    def put_if_absent(self, key, value):
        """Insert value only when key is absent; return the stored value."""
        with self.__guard:
            return self.__entries.setdefault(key, value)

    def clear(self):
        """Remove every mapping."""
        with self.__guard:
            self.__entries.clear()
class MessageIdCodec:
    """Codec for RocketMQ message-ids: decode any version, generate v1.

    A v1 message-id is 17 bytes, rendered as 34 hex characters:

      version "01" (1 byte)
      + lower 6 bytes of the MAC address
      + lower 2 bytes of the process id
      + lower 4 bytes of the seconds elapsed since 2021-01-01 00:00:00 UTC
      + lower 4 bytes of a big-endian, process-wide sequence number

    Implemented as a process-wide singleton so every producer shares one
    sequence counter and one fixed host/process prefix.
    """

    MESSAGE_ID_LENGTH_FOR_V1_OR_LATER = 34
    MESSAGE_ID_VERSION_V0 = "00"
    MESSAGE_ID_VERSION_V1 = "01"

    _instance_lock = threading.Lock()
    # process-wide sequence counter feeding the last 4 bytes of each id
    __index = AtomicInteger()

    def __new__(cls, *args, **kwargs):
        # singleton: every construction funnels through the class lock
        with MessageIdCodec._instance_lock:
            if not hasattr(MessageIdCodec, "_instance"):
                MessageIdCodec._instance = object.__new__(cls)
            return MessageIdCodec._instance

    def __init__(self):
        # __init__ runs on every MessageIdCodec() call; the 'initialized'
        # flag restricts the one-time state setup to the first call.
        with MessageIdCodec._instance_lock:
            if not hasattr(self, 'initialized'):
                buffer = bytearray(8)
                # lower 6 bytes of the MAC address
                buffer[0:6] = getnode().to_bytes(6, byteorder='big')
                # lower 2 bytes of the process id
                buffer[6:8] = getpid().to_bytes(4, byteorder='big')[2:4]
                self.process_fixed_string_v1 = buffer.hex().upper()
                custom_epoch = datetime(2021, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
                self.seconds_since_custom_epoch = int(
                    (datetime.now(timezone.utc) - custom_epoch).total_seconds())
                self.seconds_start_timestamp = int(time())
                self.seconds = self.__delta_time()
                self.sequence = None
                self.initialized = True

    def next_message_id(self):
        """Return a fresh v1 message-id string (34 hex characters)."""
        delta_seconds = self.__delta_time()
        if self.seconds != delta_seconds:
            self.seconds = delta_seconds
        buffer = bytearray(8)
        # Mask both fields to their lower 4 bytes before int.to_bytes().
        # The original sequence_id.to_bytes(4, 'big') raises OverflowError
        # once the counter passes 2**32 - 1; the id format only keeps the
        # low 32 bits of each field.
        buffer[0:4] = (self.seconds & 0xFFFFFFFF).to_bytes(4, byteorder='big')
        buffer[4:8] = (self.__sequence_id() & 0xFFFFFFFF).to_bytes(4, byteorder='big')
        return MessageIdCodec.MESSAGE_ID_VERSION_V1 + self.process_fixed_string_v1 + buffer.hex().upper()

    @staticmethod
    def decode(message_id):
        """Strip the 2-char version prefix from a v1-or-later message-id.

        Ids of any other length (e.g. v0) are returned unchanged.
        """
        if len(message_id) == MessageIdCodec.MESSAGE_ID_LENGTH_FOR_V1_OR_LATER:
            return message_id[2:]
        return message_id

    """ private """

    def __delta_time(self):
        # seconds since the 2021-01-01 custom epoch, tracked as a wall-clock
        # delta from construction time
        return int(time()) - self.seconds_start_timestamp + self.seconds_since_custom_epoch

    def __sequence_id(self):
        # NOTE(review): self.sequence is retained for compatibility with the
        # original implementation; the counter itself is the source of truth
        self.sequence = MessageIdCodec.__index.get_and_increment()
        return self.sequence
class MessagingResultChecker:
    """Maps a grpc_protocol Status returned by the proxy to client exceptions.

    OK / MULTIPLE_RESULTS return normally, as does MESSAGE_NOT_FOUND (an
    empty receive is a normal outcome, not an error); every other known
    code raises the matching exception with the server-provided message.
    """

    @staticmethod
    def check(status: Status):
        """Raise the exception mapped to ``status.code``; return on success.

        :raises BadRequestException: malformed or illegal request fields
        :raises UnauthorizedException: authentication failed
        :raises PaymentRequiredException: payment required by the server
        :raises ForbiddenException: operation not allowed
        :raises NotFoundException: topic or consumer group missing
        :raises PayloadTooLargeException: message body over the size limit
        :raises TooManyRequestsException: request throttled
        :raises RequestHeaderFieldsTooLargeException: properties too large
        :raises InternalErrorException: server-side failure
        :raises ProxyTimeoutException: proxy/broker persistence timeout
        :raises UnsupportedException: unsupported operation or unknown code
        """
        code = status.code
        message = status.message

        bad_request_codes = (
            Code.BAD_REQUEST, Code.ILLEGAL_ACCESS_POINT, Code.ILLEGAL_TOPIC,
            Code.ILLEGAL_CONSUMER_GROUP, Code.ILLEGAL_MESSAGE_TAG, Code.ILLEGAL_MESSAGE_KEY,
            Code.ILLEGAL_MESSAGE_GROUP, Code.ILLEGAL_MESSAGE_PROPERTY_KEY, Code.INVALID_TRANSACTION_ID,
            Code.ILLEGAL_MESSAGE_ID, Code.ILLEGAL_FILTER_EXPRESSION, Code.ILLEGAL_INVISIBLE_TIME,
            Code.ILLEGAL_DELIVERY_TIME, Code.INVALID_RECEIPT_HANDLE, Code.MESSAGE_PROPERTY_CONFLICT_WITH_TYPE,
            Code.UNRECOGNIZED_CLIENT_TYPE, Code.MESSAGE_CORRUPTED, Code.CLIENT_ID_REQUIRED,
            Code.ILLEGAL_POLLING_TIME,
        )

        if code in (Code.OK, Code.MULTIPLE_RESULTS):
            return
        if code in bad_request_codes:
            raise BadRequestException(message, code)
        if code == Code.UNAUTHORIZED:
            raise UnauthorizedException(message, code)
        if code == Code.PAYMENT_REQUIRED:
            raise PaymentRequiredException(message, code)
        if code == Code.FORBIDDEN:
            raise ForbiddenException(message, code)
        if code == Code.MESSAGE_NOT_FOUND:
            # an empty receive is a normal outcome, not an error
            return
        if code in (Code.NOT_FOUND, Code.TOPIC_NOT_FOUND, Code.CONSUMER_GROUP_NOT_FOUND):
            raise NotFoundException(message, code)
        if code in (Code.PAYLOAD_TOO_LARGE, Code.MESSAGE_BODY_TOO_LARGE):
            raise PayloadTooLargeException(message, code)
        if code == Code.TOO_MANY_REQUESTS:
            raise TooManyRequestsException(message, code)
        if code in (Code.REQUEST_HEADER_FIELDS_TOO_LARGE, Code.MESSAGE_PROPERTIES_TOO_LARGE):
            raise RequestHeaderFieldsTooLargeException(message, code)
        if code in (Code.INTERNAL_ERROR, Code.INTERNAL_SERVER_ERROR, Code.HA_NOT_AVAILABLE):
            raise InternalErrorException(message, code)
        if code in (Code.PROXY_TIMEOUT, Code.MASTER_PERSISTENCE_TIMEOUT, Code.SLAVE_PERSISTENCE_TIMEOUT):
            raise ProxyTimeoutException(message, code)
        if code in (Code.UNSUPPORTED, Code.VERSION_UNSUPPORTED, Code.VERIFY_FIFO_MESSAGE_UNSUPPORTED):
            raise UnsupportedException(message, code)

        # Logger.warn is a deprecated alias of Logger.warning since Python 3.3
        logger.warning(f"unrecognized status code:{code}, message:{message}")
        raise UnsupportedException(message, code)
class Misc:
    """Shared helpers: SDK identity, host introspection, checksums and
    name validation for topics/consumer groups."""

    # cached lazily by get_local_ip() / get_os_description()
    __LOCAL_IP = None
    __OS_NAME = None
    TOPIC_PATTERN = compile(r'^[%a-zA-Z0-9_-]+$')
    CONSUMER_GROUP_PATTERN = compile(r'^[%a-zA-Z0-9_-]+$')
    SDK_VERSION = "V5_0_1_SNAPSHOT"

    @staticmethod
    def sdk_language():
        """The language tag reported to the server."""
        return Language.PYTHON

    @staticmethod
    def sdk_version():
        """The client version string reported to the server."""
        return Misc.SDK_VERSION

    @staticmethod
    def to_base36(n):
        """Render a non-negative integer in lowercase base-36."""
        if n == 0:
            return '0'
        digits = '0123456789abcdefghijklmnopqrstuvwxyz'
        encoded = ''
        while n > 0:
            n, remainder = divmod(n, 36)
            encoded = digits[remainder] + encoded
        return encoded

    @staticmethod
    def get_local_ip():
        """Best-effort local IP, cached after the first successful probe.

        Falls back to 127.0.0.1 (uncached, so later calls retry) on error.
        """
        if Misc.__LOCAL_IP is None:
            # UDP connect() sends no packet; it only selects the outbound
            # interface whose address we then read back
            probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                probe.connect(('8.8.8.8', 80))
                Misc.__LOCAL_IP = probe.getsockname()[0]
            except Exception as e:
                logger.error(f"get local ip exception: {e}")
                return '127.0.0.1'
            finally:
                probe.close()
        return Misc.__LOCAL_IP

    @staticmethod
    def crc32_checksum(array):
        """Uppercase 8-hex-digit CRC32 of the given bytes."""
        return f"{zlib.crc32(array) & 0xffffffff:08X}"

    @staticmethod
    def md5_checksum(array):
        """Uppercase hex MD5 digest of the given bytes."""
        return md5(array).hexdigest().upper()

    @staticmethod
    def sha1_checksum(array):
        """Uppercase hex SHA-1 digest of the given bytes."""
        return sha1(array).hexdigest().upper()

    @staticmethod
    def uncompress_bytes_gzip(body):
        """Decompress a gzip or raw zlib/deflate payload.

        gzip streams are recognized by the 0x1f 0x8b magic bytes; anything
        else is treated as a zlib stream.
        """
        if body and body[:2] == b'\x1f\x8b':
            return gzip.decompress(body)
        return zlib.decompress(body)

    @staticmethod
    def get_os_description():
        """Cached "name version" description of the host OS."""
        if Misc.__OS_NAME is None:
            os_name = system()
            if os_name is None:
                return None
            os_version = version()
            Misc.__OS_NAME = f"{os_name} {os_version}" if os_version else os_name
        return Misc.__OS_NAME

    @staticmethod
    def is_valid_topic(topic):
        """True when topic contains only %, letters, digits, _ and -."""
        return Misc.TOPIC_PATTERN.match(topic) is not None

    @staticmethod
    def is_valid_consumer_group(topic):
        """True when the group name contains only %, letters, digits, _ and -."""
        return Misc.CONSUMER_GROUP_PATTERN.match(topic) is not None
class Signature:
    """Builds the per-request gRPC metadata, including the HMAC-SHA1
    signature derived from the configured access credentials."""

    @staticmethod
    def metadata(config, client_id: "ClientId"):
        """Return the list of (key, value) gRPC metadata pairs for a request.

        :param config: client configuration exposing credentials.ak,
            credentials.sk and namespace
        :param client_id: a ClientId instance or an already-rendered
            client-id string
        """
        # NOTE(review): datetime.now() is local time but the header is
        # formatted with a 'Z' (UTC) suffix — confirm the server only
        # verifies the signature of the literal string and does not parse
        # this value as UTC.
        now = datetime.now()
        formatted_date_time = now.strftime("%Y%m%dT%H%M%SZ")
        request_id = str(uuid4())
        sign = Signature.sign(config.credentials.sk, formatted_date_time)
        authorization = (
            "MQv2-HMAC-SHA1"
            f" Credential={config.credentials.ak}"
            ", SignedHeaders=x-mq-date-time"
            f", Signature={sign}"
        )
        # gRPC metadata values must be str/bytes; the original passed the
        # ClientId object through unrendered. Accept either the object
        # (render via its client_id property) or a plain string.
        client_id_value = client_id.client_id if hasattr(client_id, "client_id") else client_id
        metadata = [
            ("x-mq-language", "PYTHON"),
            ("x-mq-protocol", "GRPC_V2"),
            ("x-mq-client-version", "5.0.1.1"),
            ("x-mq-date-time", formatted_date_time),
            ("x-mq-request-id", request_id),
            ("x-mq-client-id", client_id_value),
            ("x-mq-namespace", config.namespace),
            ("authorization", authorization)
        ]
        return metadata

    @staticmethod
    def sign(access_secret, date_time):
        """Return the lowercase-hex HMAC-SHA1 of date_time keyed by access_secret."""
        signing_key = access_secret.encode('utf-8')
        mac = new(signing_key, date_time.encode('utf-8'), sha1)
        return hexlify(mac.digest()).decode('utf-8')
# Packaging definition for the RocketMQ Python client.
# Install from the python/ directory with e.g. `pip install .`.
setup(
    name='rocketmq-python-client',
    version='5.0.1',
    # pick up every package under this directory (rocketmq, rocketmq.v5, ...)
    packages=find_packages(),
    install_requires=[
        # gRPC runtime + codegen tooling for the proxy protocol
        "grpcio>=1.5.0",
        "grpcio-tools>=1.5.0",
        'protobuf',
        # OpenTelemetry stack used for client metrics export
        "opentelemetry-api>=1.2.0",
        "opentelemetry-sdk>=1.2.0",
        "opentelemetry-exporter-otlp>=1.2.0"
    ],
    python_requires='>=3.7',
)