From 46b1726a8a27c4bddbffe0b814179a1ecbbda90c Mon Sep 17 00:00:00 2001
From: kxz2002
Date: Tue, 14 Oct 2025 17:15:36 +0800
Subject: [PATCH 1/6] llm add tools param initial commit

---
 fastdeploy/engine/engine.py            |  2 +
 fastdeploy/entrypoints/llm.py          | 67 ++++++++++++++++++++++++++
 fastdeploy/input/ernie4_5_processor.py |  3 +-
 fastdeploy/input/text_processor.py     |  3 +-
 4 files changed, 73 insertions(+), 2 deletions(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index ca754566702..0c4aa2fe6c7 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -247,6 +247,8 @@ def add_requests(self, task, sampling_params=None, **kwargs):
         request.preprocess_start_time = time.time()
         chat_template_kwargs = kwargs.get("chat_template_kwargs") or {}
         chat_template_kwargs["chat_template"] = kwargs.get("chat_template")
+        chat_template_kwargs["tools"] = kwargs.get("tools")
+        print(f"DEBUG: LLMEngine.add_request: chat_template_kwargs:{chat_template_kwargs}")
         kwargs["chat_template_kwargs"] = chat_template_kwargs
         request = self.data_processor.process_request(request, self.cfg.model_config.max_model_len, **kwargs)
         request.prompt_token_ids_len = len(request.prompt_token_ids)
diff --git a/fastdeploy/entrypoints/llm.py b/fastdeploy/entrypoints/llm.py
index 302b65d2531..925cdf78e1d 100644
--- a/fastdeploy/entrypoints/llm.py
+++ b/fastdeploy/entrypoints/llm.py
@@ -23,12 +23,14 @@
 import uuid
 from typing import Any, Optional, Union
 
+from pydantic import ValidationError
 from tqdm import tqdm
 
 from fastdeploy.engine.args_utils import EngineArgs
 from fastdeploy.engine.engine import LLMEngine
 from fastdeploy.engine.sampling_params import SamplingParams
 from fastdeploy.entrypoints.chat_utils import load_chat_template
+from fastdeploy.entrypoints.openai.protocol import ChatCompletionToolsParam
 from fastdeploy.entrypoints.openai.tool_parsers import ToolParserManager
 from fastdeploy.utils import (
     deprecated_kwargs_warning,
@@ -204,6 +206,7 @@ def chat(
         use_tqdm: bool = True,
         chat_template_kwargs: Optional[dict[str, Any]] = None,
         chat_template: Optional[str] = None,
+        tools: Optional[Union[ChatCompletionToolsParam, list[ChatCompletionToolsParam]]] = None,
         stream: bool = False,
     ):
         """
@@ -243,6 +246,12 @@ def chat(
         if chat_template is None:
             chat_template = self.chat_template
 
+        if tools is not None:
+            try:
+                validated_tools = _validate_tools(tools)
+            except ValueError as e:
+                raise RuntimeError(f"Failed to validate 'tools' parameter in chat method: {e}") from e
+        validated_tools = None
         messages_len = len(messages)
         for i in range(messages_len):
             messages[i] = {"messages": messages[i]}
@@ -251,6 +260,7 @@ def chat(
             sampling_params=sampling_params,
             chat_template_kwargs=chat_template_kwargs,
             chat_template=chat_template,
+            tools=validated_tools,
         )
 
         topk_logprobs = sampling_params[0].logprobs if sampling_params_len > 1 else sampling_params.logprobs
@@ -559,6 +569,63 @@ def _create_incremental_result(self, current_result, previous_count, pos, prompt
         return incremental_result
 
 
+def _validate_tools(raw_tools: Any) -> Optional[list[dict]]:
+    """
+    Normalize and validate the format of the tools parameter. Legal inputs are:
+    1. None (no tools passed in; returns None)
+    2. A single tool dict (e.g. {"type": "function", "function": {...}}; automatically wrapped in a list)
+    3. A list of tool dicts (e.g. [{"type": "function", "function": {...}}, ...])
+
+    Illegal inputs raise ValueError with a specific message:
+    - A type other than None/dict/list (e.g. string, number, boolean)
+    - An empty dict/list (allowed, but treated as "no valid tools")
+    - A dict that does not match the ChatCompletionToolsParam schema (e.g. missing function.name, wrong type)
+
+    Args:
+        raw_tools: The raw tools parameter taken from kwargs (may be of any type)
+
+    Returns:
+        Optional[List[Dict[str, Any]]]: The validated list of standard tool dicts, or None (when raw_tools is None)
+
+    Raises:
+        ValueError: Raised when the input type is illegal or the format does not conform
+    """
+    if raw_tools is None:
+        return None
+    if isinstance(raw_tools, ChatCompletionToolsParam):
+        return [raw_tools]
+    if isinstance(raw_tools, list) and all(isinstance(t, ChatCompletionToolsParam) for t in raw_tools):
+        return raw_tools
+
+    if not isinstance(raw_tools, dict) and not isinstance(raw_tools, list):
+        raise ValueError(
+            f"Invalid tools top-level type! Expected None, dict (single tool) or list (multiple tools), "
+            f"but got type '{type(raw_tools).__name__}' (value: {raw_tools})."
+        )
+    tools_list: list[dict[str, Any]] = [raw_tools] if isinstance(raw_tools, dict) else raw_tools
+
+    if not tools_list:
+        return None
+
+    validated_tools = []
+    for idx, tool in enumerate(tools_list):
+        if not isinstance(tool, dict):
+            raise ValueError(
+                f"Invalid element type in tools list! At index {idx}, "
+                f"expected dict (tool definition), but got type '{type(tool).__name__}' (value: {tool})."
+            )
+
+        try:
+            validated_tool_obj = ChatCompletionToolsParam.model_validate(tool)
+            validated_tools.append(validated_tool_obj.model_dump())
+        except ValidationError as e:
+            raise ValueError(
+                f"Invalid tool format at index {idx} in tools list! " f"Tool content: {tool}\nError details: {e}"
+            ) from e
+
+    return validated_tools
+
+
 if __name__ == "__main__":
     # llm = LLM(model="llama_model")
     # output = llm.generate(prompts="who are you?", use_tqdm=True)
diff --git a/fastdeploy/input/ernie4_5_processor.py b/fastdeploy/input/ernie4_5_processor.py
index 8d2463a088d..33a12dc6dc5 100644
--- a/fastdeploy/input/ernie4_5_processor.py
+++ b/fastdeploy/input/ernie4_5_processor.py
@@ -130,8 +130,9 @@ def process_request(self, request, max_model_len=None, **kwargs):
         if chat_template_kwargs:
             if isinstance(chat_template_kwargs, dict):
                 for k, v in chat_template_kwargs.items():
-                    if k not in task:
+                    if k not in task or task[k] is None:
                         task[k] = v
+                        print(f"DEBUG: ernie4_5 task[{k}]: {task[k]}")
             else:
                 raise ValueError("Invalid input: chat_template_kwargs must be a dict")
         request.prompt_token_ids = self.messages2ids(task, **chat_template_kwargs)
diff --git a/fastdeploy/input/text_processor.py b/fastdeploy/input/text_processor.py
index a29e1b2605a..85a213e1917 100644
--- a/fastdeploy/input/text_processor.py
+++ b/fastdeploy/input/text_processor.py
@@ -245,8 +245,9 @@ def process_request(self, request, max_model_len=None, **kwargs):
         if chat_template_kwargs:
             if isinstance(chat_template_kwargs, dict):
                 for k, v in chat_template_kwargs.items():
-                    if k not in task:
+                    if k not in task or task[k] is None:
                         task[k] = v
+                        print(f"DEBUG: text_processor task[{k}]: {task[k]}")
             else:
                 raise ValueError("Invalid input: chat_template_kwargs must be a dict")
         task.setdefault("enable_thinking", True)
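The contract `_validate_tools` enforces in PATCH 1 is easiest to see by example. Below is a minimal sketch of the accepted input shapes; it is illustrative only and not part of the patch, assuming the function above is importable and using an OpenAI-style tool dict:

# Illustrative sketch, not part of the patch: input shapes accepted by
# _validate_tools from fastdeploy/entrypoints/llm.py above.
tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Determine weather in my location",
        "parameters": {"type": "object", "properties": {"location": {"type": "string"}}},
    },
}

print(_validate_tools(None))          # None: no tools supplied
print(_validate_tools(tool))          # one-element list: a single dict is wrapped
print(_validate_tools([tool, tool]))  # each element validated and model_dump()-ed
try:
    _validate_tools("get_weather")    # bare strings are rejected
except ValueError as err:
    print(err)                        # "Invalid tools top-level type! ..."

Note that as committed here, `chat()` unconditionally resets `validated_tools` to None after the validation block; PATCH 2 below moves that assignment above the `if`, which is the "bugfix" in its subject line.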
From e052fd6d45eb41d8a9e13e411186af2e116f85e6 Mon Sep 17 00:00:00 2001
From: kxz2002
Date: Tue, 14 Oct 2025 20:50:21 +0800
Subject: [PATCH 2/6] llm add tools param bugfix

---
 fastdeploy/entrypoints/llm.py          | 2 +-
 fastdeploy/input/ernie4_5_processor.py | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/fastdeploy/entrypoints/llm.py b/fastdeploy/entrypoints/llm.py
index 925cdf78e1d..3a2d2d7a8e1 100644
--- a/fastdeploy/entrypoints/llm.py
+++ b/fastdeploy/entrypoints/llm.py
@@ -246,12 +246,12 @@ def chat(
         if chat_template is None:
             chat_template = self.chat_template
 
+        validated_tools = None
         if tools is not None:
             try:
                 validated_tools = _validate_tools(tools)
             except ValueError as e:
                 raise RuntimeError(f"Failed to validate 'tools' parameter in chat method: {e}") from e
-        validated_tools = None
         messages_len = len(messages)
         for i in range(messages_len):
             messages[i] = {"messages": messages[i]}
diff --git a/fastdeploy/input/ernie4_5_processor.py b/fastdeploy/input/ernie4_5_processor.py
index 33a12dc6dc5..b2a3c58f076 100644
--- a/fastdeploy/input/ernie4_5_processor.py
+++ b/fastdeploy/input/ernie4_5_processor.py
@@ -132,7 +132,6 @@ def process_request(self, request, max_model_len=None, **kwargs):
                 for k, v in chat_template_kwargs.items():
                     if k not in task or task[k] is None:
                         task[k] = v
-                        print(f"DEBUG: ernie4_5 task[{k}]: {task[k]}")
             else:
                 raise ValueError("Invalid input: chat_template_kwargs must be a dict")
         request.prompt_token_ids = self.messages2ids(task, **chat_template_kwargs)

From 84a8b98667d2ed1f7dccc4f50c95af7231c3a30b Mon Sep 17 00:00:00 2001
From: kxz2002
Date: Thu, 16 Oct 2025 11:29:10 +0800
Subject: [PATCH 3/6] offline add tools add unittests

---
 fastdeploy/engine/engine.py    |   1 -
 fastdeploy/entrypoints/llm.py  |  95 +++++++++++++++----------------
 tests/entrypoints/test_chat.py | 100 +++++++++++++++++++++++++++++++++
 3 files changed, 146 insertions(+), 50 deletions(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index 0c4aa2fe6c7..c877ccfe997 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -248,7 +248,6 @@ def add_requests(self, task, sampling_params=None, **kwargs):
         chat_template_kwargs = kwargs.get("chat_template_kwargs") or {}
         chat_template_kwargs["chat_template"] = kwargs.get("chat_template")
         chat_template_kwargs["tools"] = kwargs.get("tools")
-        print(f"DEBUG: LLMEngine.add_request: chat_template_kwargs:{chat_template_kwargs}")
         kwargs["chat_template_kwargs"] = chat_template_kwargs
         request = self.data_processor.process_request(request, self.cfg.model_config.max_model_len, **kwargs)
         request.prompt_token_ids_len = len(request.prompt_token_ids)
diff --git a/fastdeploy/entrypoints/llm.py b/fastdeploy/entrypoints/llm.py
index 3a2d2d7a8e1..3b08bc4ebb4 100644
--- a/fastdeploy/entrypoints/llm.py
+++ b/fastdeploy/entrypoints/llm.py
@@ -249,7 +249,7 @@ def chat(
         validated_tools = None
         if tools is not None:
             try:
-                validated_tools = _validate_tools(tools)
+                validated_tools = self._validate_tools(tools)
             except ValueError as e:
                 raise RuntimeError(f"Failed to validate 'tools' parameter in chat method: {e}") from e
         messages_len = len(messages)
@@ -568,62 +568,59 @@ def _create_incremental_result(self, current_result, previous_count, pos, prompt
 
         return incremental_result
 
+    def _validate_tools(self, raw_tools: Any) -> Optional[list[dict]]:
+        """
+        Validate the format of the `tools` parameter for chat requests.
+        Valid inputs are accepted and standardized, while invalid inputs raise ValueError.
+        An empty dict/list is treated as None.
 
-def _validate_tools(raw_tools: Any) -> Optional[list[dict]]:
-    """
-    Normalize and validate the format of the tools parameter. Legal inputs are:
-    1. None (no tools passed in; returns None)
-    2. A single tool dict (e.g. {"type": "function", "function": {...}}; automatically wrapped in a list)
-    3. A list of tool dicts (e.g. [{"type": "function", "function": {...}}, ...])
-
-    Illegal inputs raise ValueError with a specific message:
-    - A type other than None/dict/list (e.g. string, number, boolean)
-    - An empty dict/list (allowed, but treated as "no valid tools")
-    - A dict that does not match the ChatCompletionToolsParam schema (e.g. missing function.name, wrong type)
+
+        Args:
+            raw_tools: Raw `tools` parameter obtained from kwargs (can be any type)
+
+        Returns:
+            Optional[List[Dict[str, Any]]]: Standardized list of valid tool dictionaries if validation passes;
+            None if `raw_tools` is None or empty (empty dict/list).
+
+        Raises:
+            ValueError: Raised when input type is invalid or format does not meet standards.
+        """
+        if raw_tools is None:
+            return None
+        if isinstance(raw_tools, ChatCompletionToolsParam):
+            return [raw_tools]
+        if isinstance(raw_tools, list) and all(isinstance(t, ChatCompletionToolsParam) for t in raw_tools):
+            if not raw_tools:
+                return None
+            else:
+                return raw_tools
 
+        if not isinstance(raw_tools, dict) and not isinstance(raw_tools, list):
+            raise ValueError(
+                f"Invalid tools top-level type! Expected None, dict (single tool) or list (multiple tools), "
+                f"but got type '{type(raw_tools).__name__}' (value: {raw_tools})."
+            )
+        tools_list: list[dict[str, Any]] = [raw_tools] if isinstance(raw_tools, dict) else raw_tools
 
+        if not tools_list:
+            return None
 
+        validated_tools = []
+        for idx, tool in enumerate(tools_list):
+            if not isinstance(tool, dict):
+                raise ValueError(
+                    f"Invalid element type in tools list! At index {idx}, "
+                    f"expected dict (tool definition), but got type '{type(tool).__name__}' (value: {tool})."
+                )
 
+            try:
+                validated_tool_obj = ChatCompletionToolsParam.model_validate(tool)
+                validated_tools.append(validated_tool_obj.model_dump())
+            except ValidationError as e:
+                raise ValueError(
+                    f"Invalid tool format at index {idx} in tools list! " f"Tool content: {tool}\nError details: {e}"
+                ) from e
 
+        return validated_tools
 
 
 if __name__ == "__main__":
diff --git a/tests/entrypoints/test_chat.py b/tests/entrypoints/test_chat.py
index 0078cd8a18e..c643d5a7654 100644
--- a/tests/entrypoints/test_chat.py
+++ b/tests/entrypoints/test_chat.py
@@ -19,6 +19,7 @@
 import weakref
 
 from fastdeploy.entrypoints.llm import LLM
+from fastdeploy.entrypoints.openai.protocol import ChatCompletionToolsParam
 
 MODEL_NAME = os.getenv("MODEL_PATH") + "/ERNIE-4.5-0.3B-Paddle"
 
@@ -58,6 +59,105 @@ def test_chat(self):
         outputs = self.llm.chat(messages=self.PROMPTS, sampling_params=None)
         self.assertEqual(len(self.PROMPTS), len(outputs))
 
+    def test_chat_with_tools(self):
+        prompts = [{"role": "user", "content": "北京海淀区今天天气怎么样?用摄氏度表示温度。"}]
+        tools = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "get_weather",
+                    "description": "Determine weather in my location",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
+                            "unit": {"type": "string", "enum": ["c", "f"]},
+                        },
+                        "additionalProperties": False,
+                        "required": ["location", "unit"],
+                    },
+                    "strict": True,
+                },
+            }
+        ]
+        chat_template = "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}"
+        outputs = self.llm.chat(
+            messages=prompts,
+            tools=tools,
+            chat_template=chat_template,
+            chat_template_kwargs={"enable_thinking": False},
+            stream=False,
+        )
+        output = outputs[0]
+        self.assertEqual(len(prompts), len(outputs))
+        self.assertTrue(hasattr(output, "outputs"))
+        self.assertTrue(hasattr(output.outputs, "text"))
+        self.assertIn("tool_call", output.outputs.text.lower())
+
+    def test_validate_tools(self):
+        """Test both valid and invalid scenarios for _validate_tools method"""
+        # Prepare valid test data
+        valid_tool_dict = {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get real-time weather of a city",
+                "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
+            },
+        }
+        valid_tool_model = ChatCompletionToolsParam(**valid_tool_dict)
+        valid_model_list = [valid_tool_model, valid_tool_model]
+        valid_dict_list = [valid_tool_dict, valid_tool_dict]
+
+        # Test valid scenarios
+        # 1. Input is None
+        self.assertIsNone(self.llm._validate_tools(None))
+
+        # 2. Input is single ChatCompletionToolsParam instance
+        result = self.llm._validate_tools(valid_tool_model)
+        self.assertEqual(len(result), 1)
+        self.assertIsInstance(result[0], ChatCompletionToolsParam)
+
+        # 3. Input is list of ChatCompletionToolsParam instances
+        self.assertEqual(self.llm._validate_tools(valid_model_list), valid_model_list)
+
+        # 4. Input is single valid dict
+        result = self.llm._validate_tools(valid_tool_dict)
+        self.assertEqual(len(result), 1)
+        self.assertIsInstance(result[0], dict)
+        self.assertEqual(result[0]["type"], "function")
+
+        # 5. Input is list of valid dicts
+        result = self.llm._validate_tools(valid_dict_list)
+        self.assertEqual(len(result), 2)
+        self.assertIsInstance(result[1], dict)
+
+        # 6. Input is empty list
+        self.assertIsNone(self.llm._validate_tools([]))
+
+        # Test invalid scenarios (should raise ValueError)
+        # 1. Input is string (invalid top-level type)
+        with self.assertRaises(ValueError):
+            self.llm._validate_tools("invalid_string")
+
+        # 2. Input list contains non-dict element
+        with self.assertRaises(ValueError):
+            self.llm._validate_tools([valid_tool_dict, 123])
+
+        # 3. Tool dict missing required field (function.name)
+        invalid_tool_missing_name = {"type": "function", "function": {"description": "Missing 'name' field"}}
+        with self.assertRaises(ValueError):
+            self.llm._validate_tools(invalid_tool_missing_name)
+
+        # 4. Tool dict with wrong 'type' value
+        invalid_tool_wrong_type = {"type": "invalid_type", "function": {"name": "test", "description": "Wrong type"}}
+        with self.assertRaises(ValueError):
+            self.llm._validate_tools(invalid_tool_wrong_type)
+
+        # 5. Input is boolean
+        with self.assertRaises(ValueError):
+            self.llm._validate_tools(True)
+
 
 if __name__ == "__main__":
     unittest.main()
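PATCH 3's tests lean on pydantic for the structural checking. Below is a standalone sketch of that round trip; it is illustrative and assumes only the `ChatCompletionToolsParam` model the test already imports:

from pydantic import ValidationError

from fastdeploy.entrypoints.openai.protocol import ChatCompletionToolsParam

tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get real-time weather of a city",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
    },
}

# model_validate() enforces the schema; model_dump() yields the plain dict
# that _validate_tools stores for dict inputs.
validated = ChatCompletionToolsParam.model_validate(tool)
print(validated.model_dump()["function"]["name"])  # "get_weather"

try:
    ChatCompletionToolsParam.model_validate({"type": "function", "function": {"description": "Missing 'name' field"}})
except ValidationError as err:
    print(err)  # function.name is required, mirroring invalid case 3 in the test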
From 6802fd7bdd96431f110af17918d1dffb760302b9 Mon Sep 17 00:00:00 2001
From: kxz2002
Date: Thu, 16 Oct 2025 15:00:56 +0800
Subject: [PATCH 4/6] fix preprocessor

---
 fastdeploy/input/ernie4_5_processor.py | 2 +-
 fastdeploy/input/text_processor.py     | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/fastdeploy/input/ernie4_5_processor.py b/fastdeploy/input/ernie4_5_processor.py
index b2a3c58f076..8d2463a088d 100644
--- a/fastdeploy/input/ernie4_5_processor.py
+++ b/fastdeploy/input/ernie4_5_processor.py
@@ -130,7 +130,7 @@ def process_request(self, request, max_model_len=None, **kwargs):
         if chat_template_kwargs:
             if isinstance(chat_template_kwargs, dict):
                 for k, v in chat_template_kwargs.items():
-                    if k not in task or task[k] is None:
+                    if k not in task:
                         task[k] = v
             else:
                 raise ValueError("Invalid input: chat_template_kwargs must be a dict")
         request.prompt_token_ids = self.messages2ids(task, **chat_template_kwargs)
diff --git a/fastdeploy/input/text_processor.py b/fastdeploy/input/text_processor.py
index 85a213e1917..a29e1b2605a 100644
--- a/fastdeploy/input/text_processor.py
+++ b/fastdeploy/input/text_processor.py
@@ -245,9 +245,8 @@ def process_request(self, request, max_model_len=None, **kwargs):
         if chat_template_kwargs:
             if isinstance(chat_template_kwargs, dict):
                 for k, v in chat_template_kwargs.items():
-                    if k not in task or task[k] is None:
+                    if k not in task:
                         task[k] = v
-                        print(f"DEBUG: text_processor task[{k}]: {task[k]}")
             else:
                 raise ValueError("Invalid input: chat_template_kwargs must be a dict")
         task.setdefault("enable_thinking", True)

From ac615f84805d8ed0b3022f86e4e5669b40a60561 Mon Sep 17 00:00:00 2001
From: kxz2002
Date: Thu, 16 Oct 2025 19:38:00 +0800
Subject: [PATCH 5/6] move tools parameter into tasks

---
 fastdeploy/engine/engine.py    |  1 -
 fastdeploy/entrypoints/llm.py  |  2 ++
 tests/entrypoints/test_chat.py | 54 ++++++++++++++++++++++++++--------
 3 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index c877ccfe997..ca754566702 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -247,7 +247,6 @@ def add_requests(self, task, sampling_params=None, **kwargs):
         request.preprocess_start_time = time.time()
         chat_template_kwargs = kwargs.get("chat_template_kwargs") or {}
         chat_template_kwargs["chat_template"] = kwargs.get("chat_template")
-        chat_template_kwargs["tools"] = kwargs.get("tools")
         kwargs["chat_template_kwargs"] = chat_template_kwargs
         request = self.data_processor.process_request(request, self.cfg.model_config.max_model_len, **kwargs)
         request.prompt_token_ids_len = len(request.prompt_token_ids)
diff --git a/fastdeploy/entrypoints/llm.py b/fastdeploy/entrypoints/llm.py
index bf30f95bbc8..cd380761730 100644
--- a/fastdeploy/entrypoints/llm.py
+++ b/fastdeploy/entrypoints/llm.py
@@ -320,6 +320,8 @@ def _add_request(
             if current_sampling_params.guided_decoding is not None:
                 guided_decoding_dict = current_sampling_params.guided_decoding.to_dict()
                 tasks.update(guided_decoding_dict)
+            if kwargs.get("tools") is not None:
+                tasks["tools"] = kwargs.get("tools")
         self.llm_engine.add_requests(tasks, current_sampling_params, **kwargs)
         return req_ids
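With the `_add_request` hunk above, `tools` travels inside the task dict, so the input processors hand it to the chat template like any other render variable; the test changes below then assert on the rendered prompt rather than only on model output. A much-reduced sketch of that rendering step follows (plain jinja2, illustrative only; FastDeploy's actual path goes through the tokenizer's template machinery):

from jinja2 import Template

# Cut-down version of the test's chat_template: advertise each tool as JSON
# inside a <tools> block, then lay out the conversation turns.
template = Template(
    "{%- if tools %}<|im_start|>system\n# Tools\n<tools>"
    "{%- for tool in tools %}\n{{ tool | tojson }}{%- endfor %}"
    "\n</tools><|im_end|>\n{%- endif %}"
    "{%- for m in messages %}<|im_start|>{{ m.role }}\n{{ m.content }}<|im_end|>\n{%- endfor %}"
)

prompt = template.render(
    messages=[{"role": "user", "content": "What is the weather like in Beijing?"}],
    tools=[{"type": "function", "function": {"name": "get_weather"}}],
)
print(prompt)  # the <tools> block carries get_weather, followed by the user turn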
diff --git a/tests/entrypoints/test_chat.py b/tests/entrypoints/test_chat.py
index c643d5a7654..da6414b46fe 100644
--- a/tests/entrypoints/test_chat.py
+++ b/tests/entrypoints/test_chat.py
@@ -60,6 +60,10 @@ def test_chat(self):
         self.assertEqual(len(self.PROMPTS), len(outputs))
 
     def test_chat_with_tools(self):
+        """Test chat with tools:
+        1. spliced_message (after chat_template) contains tool-related content
+        2. Model output contains tool_call
+        """
         prompts = [{"role": "user", "content": "北京海淀区今天天气怎么样?用摄氏度表示温度。"}]
         tools = [
             {
                 "type": "function",
                 "function": {
                     "name": "get_weather",
                     "description": "Determine weather in my location",
                     "parameters": {
                         "type": "object",
                         "properties": {
                             "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                             "unit": {"type": "string", "enum": ["c", "f"]},
                         },
                         "additionalProperties": False,
                         "required": ["location", "unit"],
                     },
                     "strict": True,
                 },
             }
         ]
-        chat_template = "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}"
-        outputs = self.llm.chat(
-            messages=prompts,
-            tools=tools,
-            chat_template=chat_template,
-            chat_template_kwargs={"enable_thinking": False},
-            stream=False,
-        )
-        output = outputs[0]
-        self.assertEqual(len(prompts), len(outputs))
-        self.assertTrue(hasattr(output, "outputs"))
-        self.assertTrue(hasattr(output.outputs, "text"))
-        self.assertIn("tool_call", output.outputs.text.lower())
+        chat_template = "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}"
+
+        data_processor = self.llm.llm_engine.data_processor
+        captured_spliced_message = None
+
+        def capture_spliced_message(request_or_messages, **kwargs):
+            """Wrap original messages2ids to capture spliced_message"""
+            token_ids = data_processor.original_messages2ids(request_or_messages, **kwargs)
+            nonlocal captured_spliced_message
+            captured_spliced_message = request_or_messages.get("text_after_process")
+            return token_ids
+
+        data_processor.original_messages2ids = data_processor.messages2ids
+        data_processor.messages2ids = capture_spliced_message
+
+        try:
+            outputs = self.llm.chat(
+                messages=prompts,
+                tools=tools,
+                chat_template=chat_template,
+                chat_template_kwargs={"enable_thinking": False},
+                stream=False,
+            )
+
+            self.assertIsNotNone(captured_spliced_message, "Failed to capture spliced_message from messages2ids")
+            self.assertIn(
+                "<tools>",
+                captured_spliced_message,
+                f"spliced_message '{captured_spliced_message}' missing <tools> tag (chat_template not applied)",
+            )
+
+            output = outputs[0]
+            self.assertEqual(len(prompts), len(outputs))
+            self.assertTrue(hasattr(output, "outputs"))
+            self.assertTrue(hasattr(output.outputs, "text"))
+        finally:
+            data_processor.messages2ids = data_processor.original_messages2ids
 
     def test_validate_tools(self):
         """Test both valid and invalid scenarios for _validate_tools method"""

From ea4f09f142209ddd8051bd9de808f2caf38dcce6 Mon Sep 17 00:00:00 2001
From: kxz2002
Date: Thu, 16 Oct 2025 21:46:41 +0800
Subject: [PATCH 6/6] change variable name

---
 tests/entrypoints/test_chat.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/entrypoints/test_chat.py b/tests/entrypoints/test_chat.py
index da6414b46fe..7167ce19aa0 100644
--- a/tests/entrypoints/test_chat.py
+++ b/tests/entrypoints/test_chat.py
@@ -93,7 +93,7 @@ def capture_spliced_message(request_or_messages, **kwargs):
             """Wrap original messages2ids to capture spliced_message"""
             token_ids = data_processor.original_messages2ids(request_or_messages, **kwargs)
             nonlocal captured_spliced_message
-            captured_spliced_message = request_or_messages.get("text_after_process")
+            captured_spliced_message = request_or_messages.get("prompt_tokens")
             return token_ids
 
         data_processor.original_messages2ids = data_processor.messages2ids
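End to end, the series gives offline `LLM.chat` the same OpenAI-style `tools` entry point the HTTP API exposes. Below is a usage sketch mirroring the final tests; it is illustrative, the checkpoint path is an assumption (the tests resolve theirs from $MODEL_PATH), and a tools-aware chat template like the one in the tests must be in effect for the tools to surface in the prompt:

from fastdeploy.entrypoints.llm import LLM

# Hypothetical local checkpoint; the tests use os.getenv("MODEL_PATH") + "/ERNIE-4.5-0.3B-Paddle".
llm = LLM(model="./ERNIE-4.5-0.3B-Paddle")

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Determine weather in my location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                    "unit": {"type": "string", "enum": ["c", "f"]},
                },
                "required": ["location", "unit"],
            },
        },
    }
]

outputs = llm.chat(
    messages=[{"role": "user", "content": "What is the weather in Beijing? Use Celsius."}],
    tools=tools,  # normalized by _validate_tools before reaching the engine
    chat_template_kwargs={"enable_thinking": False},
)
print(outputs[0].outputs.text)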