|
12 | 12 | from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME |
13 | 13 | from versionhq.tool.model import Tool, ToolSet |
14 | 14 | from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge |
| 15 | +from versionhq.memory.contextual_memory import ContextualMemory |
| 16 | +from versionhq.memory.model import ShortTermMemory, UserMemory |
15 | 17 | from versionhq._utils.logger import Logger |
16 | 18 | from versionhq.agent.rpm_controller import RPMController |
17 | 19 | from versionhq._utils.usage_metrics import UsageMetrics |
@@ -95,10 +97,20 @@ class Agent(BaseModel): |
    # role/behavior configuration for the agent
    backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
    skillsets: Optional[List[str]] = Field(default_factory=list)
    tools: Optional[List[Tool | ToolSet | Type[Tool]]] = Field(default_factory=list)

    # knowledge
    # sources are ingested into `_knowledge` by a model validator elsewhere in this class
    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(default=None)
    _knowledge: Optional[Knowledge] = PrivateAttr(default=None)

    # memory
    # when use_memory is True, short_term_memory is auto-created at validation time;
    # user_memory additionally requires memory_config to be set (see set_up_memory)
    use_memory: bool = Field(default=False, description="whether to store/use memory when executing the task")
    memory_config: Optional[Dict[str, Any]] = Field(default=None, description="configuration for the memory")
    short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(default=None)
    user_memory: Optional[InstanceOf[UserMemory]] = Field(default=None)
    # _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
    # _user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()

    # shared by knowledge and memory components — NOTE(review): presumably passed through
    # to the embedding backend; confirm expected schema against the knowledge/memory models
    embedder_config: Optional[Dict[str, Any]] = Field(default=None, description="embedder configuration for the agent's knowledge")
@@ -347,14 +359,30 @@ def set_up_knowledge(self) -> Self: |
347 | 359 | return self |
348 | 360 |
|
349 | 361 |
|
| 362 | + @model_validator(mode="after") |
| 363 | + def set_up_memory(self) -> Self: |
| 364 | + """ |
| 365 | + Set up memories: stm, um |
| 366 | + """ |
| 367 | + |
| 368 | + if self.use_memory == True: |
| 369 | + self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config) |
| 370 | + |
| 371 | + if hasattr(self, "memory_config") and self.memory_config is not None: |
| 372 | + self.user_memory = self.user_memory if self.user_memory else UserMemory(agent=self) |
| 373 | + else: |
| 374 | + self.user_memory = None |
| 375 | + |
| 376 | + return self |
| 377 | + |
| 378 | + |
350 | 379 | def _train(self) -> Self: |
351 | 380 | """ |
352 | 381 | Fine-tuned the base model using OpenAI train framework. |
353 | 382 | """ |
354 | 383 | if not isinstance(self.llm, LLM): |
355 | 384 | pass |
356 | 385 |
|
357 | | - |
358 | 386 | def invoke( |
359 | 387 | self, |
360 | 388 | prompts: str, |
@@ -440,6 +468,14 @@ def execute_task(self, task, context: Optional[str] = None, task_tools: Optional |
440 | 468 | if agent_knowledge_context: |
441 | 469 | task_prompt += agent_knowledge_context |
442 | 470 |
|
| 471 | + |
| 472 | + if self.use_memory == True: |
| 473 | + contextual_memory = ContextualMemory(memory_config=self.memory_config, stm=self.short_term_memory, um=self.user_memory) |
| 474 | + memory = contextual_memory.build_context_for_task(task, context) |
| 475 | + if memory.strip() != "": |
| 476 | + task_prompt += memory.strip() |
| 477 | + |
| 478 | + |
443 | 479 | # if self.team and self.team._train: |
444 | 480 | # task_prompt = self._training_handler(task_prompt=task_prompt) |
445 | 481 | # else: |
|
0 commit comments