diff --git a/funclip/launch.py b/funclip/launch.py
index fa771dc..2ea2ba1 100644
--- a/funclip/launch.py
+++ b/funclip/launch.py
@@ -117,10 +117,10 @@ def video_clip_addsub(dest_text, video_spk_input, start_ost, end_ost, state, out
         )
 
 def llm_inference(system_content, user_content, srt_text, model, apikey):
-    SUPPORT_LLM_PREFIX = ['qwen', 'gpt', 'g4f', 'moonshot']
+    SUPPORT_LLM_PREFIX = ['qwen', 'gpt', 'g4f', 'moonshot', 'deepseek']
     if model.startswith('qwen'):
         return call_qwen_model(apikey, model, user_content+'\n'+srt_text, system_content)
-    if model.startswith('gpt') or model.startswith('moonshot'):
+    if model.startswith('gpt') or model.startswith('moonshot') or model.startswith('deepseek'):
         return openai_call(apikey, model, system_content, user_content+'\n'+srt_text)
     elif model.startswith('g4f'):
         model = "-".join(model.split('-')[1:])
@@ -210,12 +210,14 @@ def AI_clip_subti(LLM_res, dest_text, video_spk_input, start_ost, end_ost, video
         with gr.Column():
             with gr.Row():
                 llm_model = gr.Dropdown(
-                    choices=["qwen-plus",
+                    choices=[
+                             "deepseek-chat",
+                             "qwen-plus",
                              "gpt-3.5-turbo",
                              "gpt-3.5-turbo-0125",
                              "gpt-4-turbo",
                              "g4f-gpt-3.5-turbo"],
-                    value="qwen-plus",
+                    value="deepseek-chat",
                     label="LLM Model Name",
                     allow_custom_value=True)
                 apikey_input = gr.Textbox(label="APIKEY")
diff --git a/funclip/llm/openai_api.py b/funclip/llm/openai_api.py
index 3a9fd39..fe8150a 100644
--- a/funclip/llm/openai_api.py
+++ b/funclip/llm/openai_api.py
@@ -26,9 +26,15 @@ def openai_call(apikey,
                 model="gpt-3.5-turbo",
                 user_content="如何做西红柿炖牛腩?",
                 system_content=None):
+    # Route OpenAI-compatible third-party models to their own endpoints;
+    # genuine gpt-* models keep base_url=None (the SDK's default OpenAI URL).
+    base_url = None
+    if model.startswith("deepseek"):
+        base_url = "https://api.deepseek.com"
+    elif model.startswith("moonshot"):
+        base_url = "https://api.moonshot.cn/v1"
     client = OpenAI(
         # This is the default and can be omitted
         api_key=apikey,
+        base_url=base_url
     )
     if system_content is not None and len(system_content.strip()):
         messages = [