Commit b131c4b

Update max_token and formatting (mem0ai#2273)

Authored Feb 28, 2025
1 parent 6acb007 · commit b131c4b

25 files changed: +31 -32 lines
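Every documentation change below makes the same edit: the LLM config examples now use "max_tokens": 2000 across providers. A minimal sketch of the updated pattern, using the values from the OpenAI docs page (Memory.from_config is mem0's standard entry point; the add() call is illustrative):

    from mem0 import Memory

    config = {
        "llm": {
            "provider": "openai",
            "config": {
                "model": "gpt-4o",
                "temperature": 0.2,
                "max_tokens": 2000,  # unified default; pages previously used 1500, 1000, or 8000
            }
        }
    }

    m = Memory.from_config(config)
    m.add("I prefer dark roast coffee.", user_id="alice")  # illustrative usage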
 

docs/components/llms/models/aws_bedrock.mdx (+1 -1)

@@ -24,7 +24,7 @@ config = {
         "config": {
             "model": "arn:aws:bedrock:us-east-1:123456789012:model/your-model-name",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/deepseek.mdx (+1 -1)

@@ -19,7 +19,7 @@ config = {
         "config": {
             "model": "deepseek-chat", # default model
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
             "top_p": 1.0
         }
     }

docs/components/llms/models/gemini.mdx (+1 -1)

@@ -19,7 +19,7 @@ config = {
         "config": {
             "model": "gemini-1.5-flash-latest",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/google_AI.mdx (+1 -1)

@@ -19,7 +19,7 @@ config = {
         "config": {
             "model": "gemini/gemini-pro",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/groq.mdx (+1 -1)

@@ -17,7 +17,7 @@ config = {
         "config": {
             "model": "mixtral-8x7b-32768",
             "temperature": 0.1,
-            "max_tokens": 1000,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/litellm.mdx (+1 -1)

@@ -14,7 +14,7 @@ config = {
         "config": {
             "model": "gpt-4o-mini",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/openai.mdx (+1 -1)

@@ -18,7 +18,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/together.mdx (+1 -1)

@@ -15,7 +15,7 @@ config = {
         "config": {
             "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     }
 }

docs/components/llms/models/xAI.mdx (+1 -1)

@@ -21,7 +21,7 @@ config = {
         "config": {
             "model": "grok-2-latest",
             "temperature": 0.1,
-            "max_tokens": 1000,
+            "max_tokens": 2000,
         }
     }
 }

docs/examples/mem0-with-ollama.mdx (+1 -1)

@@ -37,7 +37,7 @@ config = {
         "config": {
             "model": "llama3.1:latest",
             "temperature": 0,
-            "max_tokens": 8000,
+            "max_tokens": 2000,
             "ollama_base_url": "http://localhost:11434", # Ensure this URL is correct
         },
     },

docs/features/custom-prompts.mdx (+1 -1)

@@ -53,7 +53,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     },
     "custom_prompt": custom_prompt,

docs/integrations/llama-index.mdx (+1 -1)

@@ -80,7 +80,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         },
     },
     "embedder": {

docs/open-source/graph_memory/overview.mdx (+1 -1)

@@ -81,7 +81,7 @@ config = {
         "config": {
             "model": "gpt-4o",
             "temperature": 0.2,
-            "max_tokens": 1500,
+            "max_tokens": 2000,
         }
     },
     "graph_store": {

mem0/configs/llms/base.py (+2 -2)

@@ -16,7 +16,7 @@ def __init__(
         model: Optional[str] = None,
         temperature: float = 0.1,
         api_key: Optional[str] = None,
-        max_tokens: int = 3000,
+        max_tokens: int = 2000,
         top_p: float = 0.1,
         top_k: int = 1,
         # Openrouter specific
@@ -48,7 +48,7 @@ def __init__(
         :type temperature: float, optional
         :param api_key: OpenAI API key to be use, defaults to None
         :type api_key: Optional[str], optional
-        :param max_tokens: Controls how many tokens are generated, defaults to 3000
+        :param max_tokens: Controls how many tokens are generated, defaults to 2000
         :type max_tokens: int, optional
         :param top_p: Controls the diversity of words. Higher values (closer to 1) make word selection more diverse,
             defaults to 1

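The net effect: any provider config that does not set max_tokens explicitly now gets 2000 instead of 3000. A quick sketch, assuming the __init__ above belongs to mem0's BaseLlmConfig class:

    # Sketch assuming the class defined in mem0/configs/llms/base.py is BaseLlmConfig.
    from mem0.configs.llms.base import BaseLlmConfig

    cfg = BaseLlmConfig()                 # no explicit max_tokens
    assert cfg.max_tokens == 2000         # new default (was 3000)

    cfg = BaseLlmConfig(max_tokens=4096)  # explicit values still override the default
    assert cfg.max_tokens == 4096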
mem0/embeddings/azure_openai.py (+1 -1)

@@ -26,7 +26,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):
             default_headers=default_headers,
         )

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using OpenAI.

mem0/embeddings/base.py (+1 -1)

@@ -18,7 +18,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):
         self.config = config

     @abstractmethod
-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]]):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]]):
         """
         Get the embedding for the given text.

mem0/embeddings/gemini.py (+1 -1)

@@ -18,7 +18,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):

         genai.configure(api_key=api_key)

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using Google Generative AI.
         Args:

mem0/embeddings/huggingface.py (+1 -1)

@@ -16,7 +16,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):

         self.config.embedding_dims = self.config.embedding_dims or self.model.get_sentence_embedding_dimension()

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using Hugging Face.

mem0/embeddings/ollama.py (+1 -1)

@@ -39,7 +39,7 @@ def _ensure_model_exists(self):
         if not any(model.get("name") == self.config.model for model in local_models):
             self.client.pull(self.config.model)

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using Ollama.

mem0/embeddings/openai.py (+1 -1)

@@ -18,7 +18,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):
         base_url = self.config.openai_base_url or os.getenv("OPENAI_API_BASE")
         self.client = OpenAI(api_key=api_key, base_url=base_url)

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using OpenAI.

mem0/embeddings/together.py (+1 -1)

@@ -17,7 +17,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):
         self.config.embedding_dims = self.config.embedding_dims or 768
         self.client = Together(api_key=api_key)

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using OpenAI.

mem0/embeddings/vertexai.py (+6 -6)

@@ -13,13 +13,13 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):

         self.config.model = self.config.model or "text-embedding-004"
         self.config.embedding_dims = self.config.embedding_dims or 256
-
+
         self.embedding_types = {
             "add": self.config.memory_add_embedding_type or "RETRIEVAL_DOCUMENT",
             "update": self.config.memory_update_embedding_type or "RETRIEVAL_DOCUMENT",
-            "search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY"
+            "search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY",
         }
-
+
         credentials_path = self.config.vertex_credentials_json

         if credentials_path:
@@ -31,7 +31,7 @@ def __init__(self, config: Optional[BaseEmbedderConfig] = None):

         self.model = TextEmbeddingModel.from_pretrained(self.config.model)

-    def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]] = None):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
         """
         Get the embedding for the given text using Vertex AI.

@@ -45,9 +45,9 @@ def embed(self, text, memory_action:Optional[Literal["add", "search", "update"]]
         if memory_action is not None:
             if memory_action not in self.embedding_types:
                 raise ValueError(f"Invalid memory action: {memory_action}")
-
+
             embedding_type = self.embedding_types[memory_action]
-
+
         text_input = TextEmbeddingInput(text=text, task_type=embedding_type)
         embeddings = self.model.get_embeddings(texts=[text_input], output_dimensionality=self.config.embedding_dims)

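For context, the embedding_types mapping reformatted above routes each memory operation to a Vertex AI embedding task type. A standalone sketch of that dispatch logic (hypothetical helper; the config lookups are replaced with their literal defaults):

    # Hypothetical standalone version of the task-type dispatch in
    # mem0/embeddings/vertexai.py; config fallbacks replaced by their defaults.
    EMBEDDING_TYPES = {
        "add": "RETRIEVAL_DOCUMENT",     # storing a new memory
        "update": "RETRIEVAL_DOCUMENT",  # rewriting an existing memory
        "search": "RETRIEVAL_QUERY",     # querying against stored memories
    }

    def task_type_for(memory_action=None):
        # Mirrors the validation in embed(): unknown actions raise, None passes through.
        if memory_action is not None:
            if memory_action not in EMBEDDING_TYPES:
                raise ValueError(f"Invalid memory action: {memory_action}")
            return EMBEDDING_TYPES[memory_action]
        return None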
mem0/llms/groq.py (+1 -1)

@@ -84,4 +84,4 @@ def generate_response(
         params["tool_choice"] = tool_choice

         response = self.client.chat.completions.create(**params)
-        return self._parse_response(response, tools)
+        return self._parse_response(response, tools)

mem0/llms/openai.py (+1 -2)

@@ -63,7 +63,6 @@ def generate_response(
         response_format=None,
         tools: Optional[List[Dict]] = None,
         tool_choice: str = "auto",
-        max_tokens: int = 100,
     ):
         """
         Generate a response based on the given messages using OpenAI.
@@ -81,7 +80,7 @@ def generate_response(
             "model": self.config.model,
             "messages": messages,
             "temperature": self.config.temperature,
-            "max_tokens": max_tokens,
+            "max_tokens": self.config.max_tokens,
             "top_p": self.config.top_p,
         }

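The substantive change here: generate_response previously took its own max_tokens keyword defaulting to 100, which capped completions regardless of the configured value; it now reads self.config.max_tokens. A hypothetical call-site sketch (assumes the class in mem0/llms/openai.py is OpenAILLM and that it takes a BaseLlmConfig):

    # Hypothetical call site; class names assumed from the files in this commit.
    from mem0.configs.llms.base import BaseLlmConfig
    from mem0.llms.openai import OpenAILLM

    llm = OpenAILLM(BaseLlmConfig(model="gpt-4o", max_tokens=2000))

    # Before this commit the request silently used max_tokens=100;
    # now it honors the configured 2000.
    reply = llm.generate_response(
        messages=[{"role": "user", "content": "Summarize my stored memories."}]
    )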
tests/llms/test_azure_openai.py (+1 -1)

@@ -128,4 +128,4 @@ def test_generate_with_http_proxies(default_headers):
         api_version=None,
         default_headers=default_headers,
     )
-    mock_http_client.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
+    mock_http_client.assert_called_once_with(proxies="http://testproxy.mem0.net:8000")
