diff --git a/crates/mofa-foundation/src/llm/agent.rs b/crates/mofa-foundation/src/llm/agent.rs
index 65f78a976..0c674ec09 100644
--- a/crates/mofa-foundation/src/llm/agent.rs
+++ b/crates/mofa-foundation/src/llm/agent.rs
@@ -29,9 +29,7 @@ use super::client::{ChatSession, LLMClient};
 use super::provider::{ChatStream, LLMProvider};
 use super::tool_executor::ToolExecutor;
 use super::types::{ChatMessage, LLMError, LLMResult, Tool};
-use crate::llm::{
-    AnthropicConfig, AnthropicProvider, GeminiConfig, GeminiProvider, OllamaConfig, OllamaProvider,
-};
+use crate::llm::{AnthropicConfig, AnthropicProvider, GeminiConfig, GeminiProvider};
 use crate::prompt;
 use futures::{Stream, StreamExt};
 use mofa_kernel::agent::AgentMetadata;
@@ -3663,16 +3661,16 @@ impl LLMAgentBuilder {
                 Arc::new(GeminiProvider::with_config(cfg))
             }
             "ollama" => {
-                let mut ollama_config = OllamaConfig::new();
-                ollama_config = ollama_config.with_base_url(&provider.api_base);
-                ollama_config = ollama_config.with_model(&agent.model_name);
+                let mut openai_config = OpenAIConfig::new("ollama")
+                    .with_base_url(&provider.api_base)
+                    .with_model(&agent.model_name);
                 if let Some(temp) = agent.temperature {
-                    ollama_config = ollama_config.with_temperature(temp);
+                    openai_config = openai_config.with_temperature(temp);
                 }
                 if let Some(max_tokens) = agent.max_completion_tokens {
-                    ollama_config = ollama_config.with_max_tokens(max_tokens as u32);
+                    openai_config = openai_config.with_max_tokens(max_tokens as u32);
                 }
-                Arc::new(OllamaProvider::with_config(ollama_config))
+                Arc::new(OpenAIProvider::with_config(openai_config))
             }
             other => {
                 return Err(LLMError::Other(format!(
diff --git a/crates/mofa-foundation/src/llm/mod.rs b/crates/mofa-foundation/src/llm/mod.rs
index 2308bdf8d..a80f52026 100644
--- a/crates/mofa-foundation/src/llm/mod.rs
+++ b/crates/mofa-foundation/src/llm/mod.rs
@@ -368,7 +368,7 @@
 pub use anthropic::{AnthropicConfig, AnthropicProvider};
 // Re-export Google Gemini Provider
 pub use google::{GeminiConfig, GeminiProvider};
 // Re-export Ollama Provider
-pub use ollama::{OllamaConfig, OllamaProvider};
+pub use ollama::OllamaProvider;
 // Re-export 高级 API
 // Re-export Advanced API
diff --git a/crates/mofa-sdk/src/lib.rs b/crates/mofa-sdk/src/lib.rs
index d46cc2441..578f552a5 100644
--- a/crates/mofa-sdk/src/lib.rs
+++ b/crates/mofa-sdk/src/lib.rs
@@ -504,7 +504,7 @@ pub mod llm {
     pub use crate::llm_tools::ToolPluginExecutor;
     pub use mofa_foundation::llm::anthropic::{AnthropicConfig, AnthropicProvider};
     pub use mofa_foundation::llm::google::{GeminiConfig, GeminiProvider};
-    pub use mofa_foundation::llm::ollama::{OllamaConfig, OllamaProvider};
+    pub use mofa_foundation::llm::ollama::OllamaProvider;
     pub use mofa_foundation::llm::openai::{OpenAIConfig, OpenAIProvider};
     pub use mofa_foundation::llm::*;
 
@@ -542,13 +542,39 @@
         Ok(OpenAIProvider::with_config(config))
     }
 
-    /// Create an Ollama provider from environment variables (no API key required).
+    /// Create an Ollama-backed [`OpenAIProvider`] from environment variables (no API key required).
     ///
     /// Reads:
-    /// - `OLLAMA_BASE_URL`: base URL without `/v1` suffix, e.g. `http://localhost:11434` (optional)
-    /// - `OLLAMA_MODEL`: model name, e.g. `llama3` (optional)
-    pub fn ollama_from_env() -> Result {
-        Ok(crate::llm::OllamaProvider::from_env())
+    /// - `OLLAMA_HOST`: optional host (default `localhost:11434`); may be `host:port` or a full `http://` URL
+    /// - `OLLAMA_BASE_URL`: optional base URL (e.g. `http://localhost:11434`); used if `OLLAMA_HOST` is unset
+    /// - `OLLAMA_MODEL`: model name, e.g. `llama3` (optional, default `llama3`)
+    pub fn ollama_from_env() -> Result {
+        let model = std::env::var("OLLAMA_MODEL").unwrap_or_else(|_| "llama3".to_string());
+        let mut cfg = OpenAIConfig::new("ollama");
+        let base_url = if let Ok(host) = std::env::var("OLLAMA_HOST") {
+            let h = host.trim();
+            let base = if h.starts_with("http://") || h.starts_with("https://") {
+                h.trim_end_matches('/').to_string()
+            } else {
+                format!("http://{}", h.trim_end_matches('/'))
+            };
+            if base.ends_with("/v1") {
+                base
+            } else {
+                format!("{}/v1", base)
+            }
+        } else if let Ok(base_url) = std::env::var("OLLAMA_BASE_URL") {
+            let base = base_url.trim().trim_end_matches('/');
+            if base.ends_with("/v1") {
+                base.to_string()
+            } else {
+                format!("{}/v1", base)
+            }
+        } else {
+            "http://localhost:11434/v1".to_string()
+        };
+        cfg = cfg.with_base_url(base_url).with_model(model);
+        Ok(OpenAIProvider::with_config(cfg))
     }
 
 }