16 changes: 7 additions & 9 deletions crates/mofa-foundation/src/llm/agent.rs
@@ -29,9 +29,7 @@ use super::client::{ChatSession, LLMClient};
 use super::provider::{ChatStream, LLMProvider};
 use super::tool_executor::ToolExecutor;
 use super::types::{ChatMessage, LLMError, LLMResult, Tool};
-use crate::llm::{
-    AnthropicConfig, AnthropicProvider, GeminiConfig, GeminiProvider, OllamaConfig, OllamaProvider,
-};
+use crate::llm::{AnthropicConfig, AnthropicProvider, GeminiConfig, GeminiProvider};
 use crate::prompt;
 use futures::{Stream, StreamExt};
 use mofa_kernel::agent::AgentMetadata;
@@ -3663,19 +3661,19 @@ impl LLMAgentBuilder {
                 Arc::new(GeminiProvider::with_config(cfg))
             }
             "ollama" => {
-                let mut ollama_config = OllamaConfig::new();
-                ollama_config = ollama_config.with_base_url(&provider.api_base);
-                ollama_config = ollama_config.with_model(&agent.model_name);
+                let mut openai_config = OpenAIConfig::new("ollama")
+                    .with_base_url(&provider.api_base)
+                    .with_model(&agent.model_name);

                 if let Some(temp) = agent.temperature {
-                    ollama_config = ollama_config.with_temperature(temp);
+                    openai_config = openai_config.with_temperature(temp);
                 }

                 if let Some(max_tokens) = agent.max_completion_tokens {
-                    ollama_config = ollama_config.with_max_tokens(max_tokens as u32);
+                    openai_config = openai_config.with_max_tokens(max_tokens as u32);
                 }

-                Arc::new(OllamaProvider::with_config(ollama_config))
+                Arc::new(OpenAIProvider::with_config(openai_config))
             }
             other => {
                 return Err(LLMError::Other(format!(
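Net effect of this hunk: the "ollama" match arm now drives Ollama through its OpenAI-compatible API instead of the removed dedicated provider. A minimal standalone sketch of the same construction, using only builder calls that appear in this diff (the "ollama" string is a placeholder API key, since Ollama requires none; the base URL and model name are illustrative):

    use std::sync::Arc;
    use mofa_foundation::llm::openai::{OpenAIConfig, OpenAIProvider};

    fn main() {
        let config = OpenAIConfig::new("ollama")            // placeholder key; Ollama ignores it
            .with_base_url("http://localhost:11434/v1")     // Ollama's OpenAI-compatible endpoint
            .with_model("llama3")
            .with_temperature(0.7)                          // optional, mirrors the branch above
            .with_max_tokens(1024);                         // optional
        let _provider = Arc::new(OpenAIProvider::with_config(config));
    }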
2 changes: 1 addition & 1 deletion crates/mofa-foundation/src/llm/mod.rs
@@ -368,7 +368,7 @@ pub use anthropic::{AnthropicConfig, AnthropicProvider};
 // Re-export Google Gemini Provider
 pub use google::{GeminiConfig, GeminiProvider};
 // Re-export Ollama Provider
-pub use ollama::{OllamaConfig, OllamaProvider};
+pub use ollama::OllamaProvider;

 // Re-export 高级 API
 // Re-export Advanced API
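With `OllamaConfig` no longer re-exported, downstream imports need a small migration. A hedged before/after sketch (paths taken from the re-exports visible in this PR):

    // Before this PR:
    // use mofa_foundation::llm::{OllamaConfig, OllamaProvider};

    // After: OllamaProvider is still re-exported, but configuration
    // goes through the OpenAI-compatible config type instead.
    use mofa_foundation::llm::OllamaProvider;
    use mofa_foundation::llm::openai::OpenAIConfig;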
38 changes: 32 additions & 6 deletions crates/mofa-sdk/src/lib.rs
@@ -504,7 +504,7 @@ pub mod llm {
     pub use crate::llm_tools::ToolPluginExecutor;
     pub use mofa_foundation::llm::anthropic::{AnthropicConfig, AnthropicProvider};
     pub use mofa_foundation::llm::google::{GeminiConfig, GeminiProvider};
-    pub use mofa_foundation::llm::ollama::{OllamaConfig, OllamaProvider};
+    pub use mofa_foundation::llm::ollama::OllamaProvider;
     pub use mofa_foundation::llm::openai::{OpenAIConfig, OpenAIProvider};
     pub use mofa_foundation::llm::*;

@@ -542,13 +542,39 @@ pub mod llm {
         Ok(OpenAIProvider::with_config(config))
     }

-    /// Create an Ollama provider from environment variables (no API key required).
+    /// Create an Ollama-backed [`OpenAIProvider`] from environment variables (no API key required).
     ///
     /// Reads:
-    /// - `OLLAMA_BASE_URL`: base URL without `/v1` suffix, e.g. `http://localhost:11434` (optional)
-    /// - `OLLAMA_MODEL`: model name, e.g. `llama3` (optional)
-    pub fn ollama_from_env() -> Result<OllamaProvider, crate::llm::LLMError> {
-        Ok(crate::llm::OllamaProvider::from_env())
+    /// - `OLLAMA_HOST`: optional host (default `localhost:11434`); may be `host:port` or a full `http://` URL
+    /// - `OLLAMA_BASE_URL`: optional base URL (e.g. `http://localhost:11434`); used if `OLLAMA_HOST` is unset
+    /// - `OLLAMA_MODEL`: model name, e.g. `llama3` (optional, default `llama3`)
+    pub fn ollama_from_env() -> Result<OpenAIProvider, crate::llm::LLMError> {
+        let model = std::env::var("OLLAMA_MODEL").unwrap_or_else(|_| "llama3".to_string());
+        let mut cfg = OpenAIConfig::new("ollama");
+        let base_url = if let Ok(host) = std::env::var("OLLAMA_HOST") {
+            let h = host.trim();
+            let base = if h.starts_with("http://") || h.starts_with("https://") {
+                h.trim_end_matches('/').to_string()
+            } else {
+                format!("http://{}", h.trim_end_matches('/'))
+            };
+            if base.ends_with("/v1") {
+                base
+            } else {
+                format!("{}/v1", base)
+            }
+        } else if let Ok(base_url) = std::env::var("OLLAMA_BASE_URL") {
+            let base = base_url.trim().trim_end_matches('/');
+            if base.ends_with("/v1") {
+                base.to_string()
+            } else {
+                format!("{}/v1", base)
+            }
+        } else {
+            "http://localhost:11434/v1".to_string()
+        };
+        cfg = cfg.with_base_url(base_url).with_model(model);
+        Ok(OpenAIProvider::with_config(cfg))
     }
 }

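For reference, a hedged usage sketch of the reworked helper; it assumes only the signature shown in this diff and illustrates the `OLLAMA_HOST` normalization (a bare `host:port` gains an `http://` scheme and a `/v1` suffix):

    // Assuming the environment contains, for example:
    //   OLLAMA_HOST=127.0.0.1:11434   (normalized to http://127.0.0.1:11434/v1)
    //   OLLAMA_MODEL=llama3
    use mofa_sdk::llm::{ollama_from_env, LLMError};

    fn main() -> Result<(), LLMError> {
        // Returns an OpenAIProvider pointed at Ollama's OpenAI-compatible endpoint
        let _provider = ollama_from_env()?;
        Ok(())
    }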