Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions docs/getting-started/basic-usage.md
Original file line number Diff line number Diff line change
Expand Up @@ -200,3 +200,21 @@ for fact in facts:
2. Vector similarity search across fact embeddings
3. Results ranked by cosine similarity
4. Returns top N most relevant facts

## Debug Logging

Enable debug logging to see what Memori is doing internally:

```python
import logging

# Enable BEFORE importing Memori
logging.basicConfig(level=logging.DEBUG)

from memori import Memori

# Optional: show full content instead of truncated
memori = Memori(conn, debug_truncate=False)
```

See [Troubleshooting > Debug Logging](../troubleshooting.md#debug-logging) for more details.
68 changes: 68 additions & 0 deletions docs/troubleshooting.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ This guide covers the most common issues developers face when using Memori and h
6. [API and Network Issues](#api-and-network-issues)
7. [LLM Integration Problems](#llm-integration-problems)
8. [Performance Issues](#performance-issues)
9. [Debug Logging](#debug-logging)

---

Expand Down Expand Up @@ -677,6 +678,73 @@ except Exception as e:

---

## Debug Logging

Enable debug logging to see exactly what Memori is doing internally.

### Enable Debug Logging

```python
import logging

# Enable BEFORE importing Memori
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
)

from memori import Memori
```

### What You'll See

```
2025-01-15 10:23:45,101 | memori.memory.recall | DEBUG | Recall started - query: "What's my favorite..." (25 chars)
2025-01-15 10:23:45,113 | memori.llm._embeddings | DEBUG | Generating embedding using model: all-mpnet-base-v2
2025-01-15 10:23:45,142 | memori._search | DEBUG | Retrieved 156 embeddings from database
2025-01-15 10:23:45,160 | memori._search | DEBUG | FAISS similarity search complete - top 3 matches: [0.92, 0.87, 0.84]
2025-01-15 10:23:45,171 | memori.llm._invoke | DEBUG | Sending request to LLM - provider: openai, model: gpt-4o-mini
2025-01-15 10:23:46,401 | memori.llm._base | DEBUG | LLM response received - latency: 1.23s
2025-01-15 10:23:46,455 | memori.memory._writer | DEBUG | Transaction committed - conversation_id: 42
2025-01-15 10:23:46,470 | memori.memory.augmentation._manager | DEBUG | AA enqueued - scheduling augmentation
```

### Control Log Truncation

By default, long content is truncated in logs for readability:

```python
# Default: truncate long content (recommended)
memori = Memori(conn)

# Show full content (for deep debugging)
memori = Memori(conn, debug_truncate=False)
```

### Filter Logs by Module

```python
import logging

# Only show specific module logs
logging.getLogger("memori.memory.recall").setLevel(logging.DEBUG)
logging.getLogger("memori.llm").setLevel(logging.DEBUG)

# Silence other memori logs
logging.getLogger("memori").setLevel(logging.WARNING)
```

### Production Logging

```python
import logging

# Production: only show warnings and errors
logging.getLogger("memori").setLevel(logging.WARNING)
```

---

## Getting Help

If you are still having issues after trying these solutions:
Expand Down
10 changes: 9 additions & 1 deletion memori/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,11 +69,19 @@ def register(


class Memori:
def __init__(self, conn: Callable[[], Any] | Any | None = None):
def __init__(
self,
conn: Callable[[], Any] | Any | None = None,
debug_truncate: bool = True,
):
from memori._logging import set_truncate_enabled

self.config = Config()
self.config.api_key = os.environ.get("MEMORI_API_KEY", None)
self.config.enterprise = os.environ.get("MEMORI_ENTERPRISE", "0") == "1"
self.config.session_id = uuid4()
self.config.debug_truncate = debug_truncate
set_truncate_enabled(debug_truncate)

if conn is None:
conn = self._get_default_connection()
Expand Down
1 change: 1 addition & 0 deletions memori/_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ def __init__(self):
self.api_key = None
self.augmentation = None
self.cache = Cache()
self.debug_truncate = True # Truncate long content in debug logs
self.embeddings = Embeddings()
self.enterprise = False
self.llm = Llm()
Expand Down
67 changes: 67 additions & 0 deletions memori/_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
r"""
__ __ _
| \/ | ___ _ __ ___ ___ _ __(_)
| |\/| |/ _ \ '_ ` _ \ / _ \| '__| |
| | | | __/ | | | | | (_) | | | |
|_| |_|\___|_| |_| |_|\___/|_| |_|
perfectam memoriam
memorilabs.ai
"""

import copy
import logging

logger = logging.getLogger(__name__)

# Module-wide switch controlling whether debug-log content is shortened.
# Driven by Config.debug_truncate via set_truncate_enabled().
_truncate_enabled = True


def set_truncate_enabled(enabled: bool) -> None:
    """Set whether truncation is enabled for debug logs."""
    global _truncate_enabled
    _truncate_enabled = enabled
    logger.debug("Debug truncation %s", "enabled" if enabled else "disabled")


def truncate(text: str, max_len: int = 200) -> str:
    """Shorten *text* for debug logging.

    Honors the module-level ``_truncate_enabled`` flag: when the flag is
    off, the text is returned unchanged regardless of its length.

    Args:
        text: The text to truncate.
        max_len: Maximum length before truncation (default: 200).

    Returns:
        The original text when it is falsy, truncation is disabled, or it
        already fits within ``max_len``; otherwise the first ``max_len``
        characters followed by ``'...'``.
    """
    keep_as_is = not text or not _truncate_enabled or len(text) <= max_len
    return text if keep_as_is else text[:max_len] + "..."


def sanitize_payload(payload: dict) -> dict:
    """Return a deep copy of *payload* that is safe to log.

    Masks the API key stored at ``payload["meta"]["api"]["key"]`` (when
    present and truthy) with ``"***REDACTED***"``. The input dictionary is
    never mutated. Lookups are type-checked so payloads whose ``meta`` or
    ``api`` entries are not dictionaries pass through untouched instead of
    raising ``TypeError``.

    Args:
        payload: The payload dictionary to sanitize.

    Returns:
        A deep copy of the payload with the API key masked.
    """
    sanitized = copy.deepcopy(payload)
    meta = sanitized.get("meta")
    if isinstance(meta, dict):
        api = meta.get("api")
        # Only redact when the key is actually set; an empty/missing key
        # is left as-is so the log reflects that no credential was sent.
        if isinstance(api, dict) and api.get("key"):
            api["key"] = "***REDACTED***"
    return sanitized
66 changes: 65 additions & 1 deletion memori/_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
"""

import asyncio
import logging
import os
import ssl

Expand All @@ -27,6 +28,8 @@
QuotaExceededError,
)

logger = logging.getLogger(__name__)


class Api:
def __init__(self, config: Config):
Expand All @@ -53,6 +56,7 @@ async def augmentation_async(self, payload: dict) -> dict:
url = self.url("sdk/augmentation")
headers = self.headers()
ssl_context = ssl.create_default_context(cafile=certifi.where())
logger.debug("Sending augmentation request to %s", url)

def _default_client_error_message(status_code: int) -> str:
if status_code == 422:
Expand Down Expand Up @@ -88,7 +92,10 @@ async def _read_error_payload(response: aiohttp.ClientResponse):
json=payload,
timeout=aiohttp.ClientTimeout(total=30),
) as r:
logger.debug("Augmentation response - status: %d", r.status)

if r.status == 429:
logger.warning("Rate limit exceeded (429)")
if self._is_anonymous():
message, _data = await _read_error_payload(r)

Expand All @@ -100,6 +107,7 @@ async def _read_error_payload(response: aiohttp.ClientResponse):

if r.status == 422:
message, data = await _read_error_payload(r)
logger.error("Validation error (422): %s", message)
raise MemoriApiValidationError(
status_code=422,
message=message or _default_client_error_message(422),
Expand All @@ -108,6 +116,7 @@ async def _read_error_payload(response: aiohttp.ClientResponse):

if r.status == 433:
message, data = await _read_error_payload(r)
logger.error("Request rejected (433): %s", message)
raise MemoriApiRequestRejectedError(
status_code=433,
message=message or _default_client_error_message(433),
Expand All @@ -116,37 +125,45 @@ async def _read_error_payload(response: aiohttp.ClientResponse):

if 400 <= r.status <= 499:
message, data = await _read_error_payload(r)
logger.error("Client error (%d): %s", r.status, message)
raise MemoriApiClientError(
status_code=r.status,
message=message or _default_client_error_message(r.status),
details=data,
)

r.raise_for_status()
logger.debug("Augmentation request successful")
return await r.json()
except aiohttp.ClientResponseError:
raise
except (ssl.SSLError, aiohttp.ClientSSLError) as e:
logger.error("SSL/TLS error during augmentation request: %s", e)
raise MemoriApiError(
"Memori API request failed due to an SSL/TLS certificate error. "
"This is often caused by corporate proxies/SSL inspection. "
"Try updating your CA certificates and try again."
) from e
except (aiohttp.ClientError, asyncio.TimeoutError) as e:
logger.error("Network/timeout error during augmentation request: %s", e)
raise MemoriApiError(
"Memori API request failed (network/timeout). "
"Check your connection and try again."
) from e

def delete(self, route):
logger.debug("DELETE request to %s", route)
r = self.__session().delete(self.url(route), headers=self.headers())
logger.debug("DELETE response - status: %d", r.status_code)

r.raise_for_status()

return r.json()

def get(self, route):
logger.debug("GET request to %s", route)
r = self.__session().get(self.url(route), headers=self.headers())
logger.debug("GET response - status: %d", r.status_code)

r.raise_for_status()

Expand All @@ -156,7 +173,9 @@ async def get_async(self, route):
return await self.__request_async("GET", route)

def patch(self, route, json=None):
logger.debug("PATCH request to %s", route)
r = self.__session().patch(self.url(route), headers=self.headers(), json=json)
logger.debug("PATCH response - status: %d", r.status_code)

r.raise_for_status()

Expand All @@ -166,7 +185,9 @@ async def patch_async(self, route, json=None):
return await self.__request_async("PATCH", route, json=json)

def post(self, route, json=None):
logger.debug("POST request to %s", route)
r = self.__session().post(self.url(route), headers=self.headers(), json=json)
logger.debug("POST response - status: %d", r.status_code)

r.raise_for_status()

Expand Down Expand Up @@ -205,23 +226,66 @@ async def __request_async(self, method: str, route: str, json=None):
json=json,
timeout=aiohttp.ClientTimeout(total=30),
) as r:
logger.debug(
"Async %s response - status: %d, attempt: %d",
method.upper(),
r.status,
attempts + 1,
)
r.raise_for_status()
return await r.json()
except aiohttp.ClientResponseError as e:
if e.status < 500 or e.status > 599:
logger.error(
"Non-retryable error %d for %s %s",
e.status,
method.upper(),
url,
)
raise

if attempts >= max_retries:
logger.error(
"Max retries (%d) exceeded for %s %s",
max_retries,
method.upper(),
url,
)
raise

sleep = backoff_factor * (2**attempts)
logger.debug(
"Retrying %s %s in %.1fs (attempt %d/%d) after status %d",
method.upper(),
url,
sleep,
attempts + 2,
max_retries,
e.status,
)
await asyncio.sleep(sleep)
attempts += 1
except Exception:
except Exception as e:
if attempts >= max_retries:
logger.error(
"Max retries (%d) exceeded for %s %s: %s",
max_retries,
method.upper(),
url,
e,
)
raise

sleep = backoff_factor * (2**attempts)
logger.debug(
"Retrying %s %s in %.1fs (attempt %d/%d) after error: %s",
method.upper(),
url,
sleep,
attempts + 2,
max_retries,
e,
)
await asyncio.sleep(sleep)
attempts += 1

Expand Down
Loading
Loading