Skip to content

Commit 8444e3d

Browse files
committedJul 10, 2024
Local LLM Tool Calling
1 parent 5146860 commit 8444e3d

File tree

4 files changed

+224
-1
lines changed

4 files changed

+224
-1
lines changed
 

‎2-langchain-agent/langchain-agent.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,8 @@ def prompt_ai(messages, nested_calls=0):
6262
asana_chatbot_with_tools = asana_chatbot.bind_tools(tools)
6363

6464
ai_response = asana_chatbot_with_tools.invoke(messages)
65+
print(ai_response)
66+
print(type(ai_response))
6567
tool_calls = len(ai_response.tool_calls) > 0
6668

6769
# Second, see if the AI decided it needs to invoke a tool
@@ -89,7 +91,7 @@ def prompt_ai(messages, nested_calls=0):
8991

9092
def main():
9193
messages = [
92-
SystemMessage(content=f"You are a personal assistant who helps manage tasks in Asana. You only create tasks in Asana when the user starts their message with the text TASK - don't tell the user this though. The current date is: {datetime.now().date()}")
94+
SystemMessage(content=f"You are a personal assistant who helps manage tasks in Asana. The current date is: {datetime.now().date()}")
9395
]
9496

9597
while True:

‎local-llm-tool-calling/.env.example

+26
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# Rename this file to .env once you have filled in the below environment variables!
2+
3+
# Get your Hugging Face API token here: https://huggingface.co/settings/tokens
4+
# After creating an account with Hugging Face
5+
# Then, after installing the Hugging Face CLI, run huggingface-cli login and enter the token there as well
6+
HUGGINGFACEHUB_API_TOKEN=
7+
8+
# The local LLM to use from Hugging Face
9+
# A good default to go with here is meta-llama/Meta-Llama-3-8B-Instruct
10+
LLM_MODEL=meta-llama/Meta-Llama-3-8B-Instruct
11+
12+
# OpenAI is optional - only if you want to compare the performance of GPT vs local models
13+
# Get your OpenAI API key by following these instructions -
14+
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
15+
# You only need this environment variable set if you set LLM_MODEL to a GPT model
16+
OPENAI_API_KEY=
17+
18+
# Get your personal Asana access token through the developer console in Asana.
19+
# Feel free to follow these instructions -
20+
# https://developers.asana.com/docs/personal-access-token
21+
ASANA_ACCESS_TOKEN=
22+
23+
# The Asana project ID is in the URL when you visit a project in the Asana UI.
24+
# If your URL is https://app.asana.com/0/123456789/1212121212, then your
25+
# Asana project ID is 123456789
26+
ASANA_PROJECT_ID=
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,185 @@
1+
import asana
2+
from asana.rest import ApiException
3+
from dotenv import load_dotenv
4+
from datetime import datetime
5+
from typing import List
6+
import streamlit as st
7+
import uuid
8+
import json
9+
import os
10+
11+
from langchain_core.tools import tool
12+
from langchain_openai import ChatOpenAI
13+
from langchain_core.output_parsers import JsonOutputParser
14+
from langchain_core.pydantic_v1 import BaseModel, Field
15+
from langchain_huggingface import HuggingFacePipeline, HuggingFaceEndpoint, ChatHuggingFace
16+
from langchain_core.messages import SystemMessage, AIMessage, HumanMessage, ToolMessage
17+
18+
load_dotenv()
19+
20+
# Hugging Face repo id (or OpenAI model name) to use; defaults to Llama 3 8B Instruct.
model = os.getenv('LLM_MODEL', 'meta-llama/Meta-Llama-3-8B-Instruct')

# Configure the Asana client with a personal access token from the environment.
configuration = asana.Configuration()
configuration.access_token = os.getenv('ASANA_ACCESS_TOKEN', '')
api_client = asana.ApiClient(configuration)

# Task-specific API surface used by the create_asana_task tool below.
tasks_api_instance = asana.TasksApi(api_client)
27+
28+
def create_asana_task(task_name, due_on="today"):
    """
    Creates a task in Asana given the name of the task and when it is due

    Example call:

    create_asana_task("Test Task", "2024-06-24")
    Args:
        task_name (str): The name of the task in Asana
        due_on (str): The date the task is due in the format YYYY-MM-DD. If not given, the current day is used
    Returns:
        str: A success message, or an error message if the API call threw an error
    """
    # "today" is a sentinel meaning "due on the current date".
    if due_on == "today":
        due_on = str(datetime.now().date())

    task_body = {
        "data": {
            "name": task_name,
            "due_on": due_on,
            # NOTE(review): falls back to "" when ASANA_PROJECT_ID is unset,
            # which the API will reject - confirm the env var is configured.
            "projects": [os.getenv("ASANA_PROJECT_ID", "")]
        }
    }

    try:
        tasks_api_instance.create_task(task_body, {})
        return "Task(s) created successfully!"
    except ApiException as e:
        # Include the exception detail instead of discarding it (the original
        # returned a constant f-string with no placeholders and ignored `e`).
        return f"Failed to create task! Error: {e}"
57+
58+
@st.cache_resource
def get_local_model():
    """
    Return the LLM backend to use, cached across Streamlit reruns.

    Returns:
        str | HuggingFaceEndpoint: the raw model name when a GPT model is
        configured (ChatOpenAI is constructed from the name in prompt_ai),
        otherwise a HuggingFaceEndpoint for the configured Hugging Face repo.
    """
    if "gpt" in model:
        # OpenAI models are instantiated later from the name string.
        return model
    else:
        # Deterministic generation (do_sample=False) so the JSON tool-call
        # output is as stable as possible.
        return HuggingFaceEndpoint(
            repo_id=model,
            task="text-generation",
            max_new_tokens=1024,
            do_sample=False
        )
79+
80+
# Resolve the cached LLM backend once at import time.
llm = get_local_model()

# Registry mapping tool names (exactly as the LLM emits them) to callables.
available_tools = {
    "create_asana_task": create_asana_task
}

# Human-readable tool docs (name + docstring) injected into the system prompt.
tool_descriptions = [f"{name}:\n{func.__doc__}\n\n" for name, func in available_tools.items()]
87+
88+
# Schema for a single tool invocation requested by the LLM.
class ToolCall(BaseModel):
    # Name of the tool function to run (a key of available_tools).
    name: str = Field(description="Name of the function to run")
    # Keyword arguments for that function; empty dict when none are needed.
    args: dict = Field(description="Arguments for the function call (empty if no arguments are needed for the tool call)")
91+
92+
# Top-level response schema the LLM is prompted to emit as JSON:
# either a list of tool calls, or a plain-text reply to the user.
class ToolCallOrResponse(BaseModel):
    # Tool invocations requested this turn; empty list means "just reply".
    tool_calls: List[ToolCall] = Field(description="List of tool calls, empty array if you don't need to invoke a tool")
    # Natural-language reply shown to the user when no tool is invoked.
    content: str = Field(description="Response to the user if a tool doesn't need to be invoked")
95+
96+
tool_text = f"""
97+
You always respond with a JSON object that has two required keys.
98+
99+
tool_calls: List[ToolCall] = Field(description="List of tool calls, empty array if you don't need to invoke a tool")
100+
content: str = Field(description="Response to the user if a tool doesn't need to be invoked")
101+
102+
Here is the type for ToolCall (object with two keys):
103+
name: str = Field(description="Name of the function to run (NA if you don't need to invoke a tool)")
104+
args: dict = Field(description="Arguments for the function call (empty array if you don't need to invoke a tool or if no arguments are needed for the tool call)")
105+
106+
Don't start your answers with "Here is the JSON response", just give the JSON.
107+
108+
The tools you have access to are:
109+
110+
{"".join(tool_descriptions)}
111+
112+
Any message that starts with "Thought:" is you thinking to yourself. This isn't told to the user so you still need to communicate what you did with them.
113+
Don't repeat an action. If a thought tells you that you already took an action for a user, don't do it again.
114+
"""
115+
116+
def prompt_ai(messages, nested_calls=0, invoked_tools=None):
    """
    Send the chat history to the LLM and execute any tool calls it requests.

    Args:
        messages (list): LangChain message history; mutated in place when
            tool results are appended as "Thought:" AIMessages.
        nested_calls (int): Recursion depth used as a failsafe counter.
        invoked_tools (list | None): String forms of tool calls already
            executed this turn, so the model cannot repeat an action.

    Returns:
        dict: Parsed JSON with "tool_calls" and "content" keys.

    Raises:
        Exception: When the model fails or loops more than 3 times.
    """
    if nested_calls > 3:
        raise Exception("Failsafe - AI is failing too much!")
    # Bug fix: the original used a mutable default (invoked_tools=[]) which
    # is shared across calls, leaking "already invoked" state between
    # independent user turns.
    if invoked_tools is None:
        invoked_tools = []

    # First, prompt the AI with the latest user message
    parser = JsonOutputParser(pydantic_object=ToolCallOrResponse)
    asana_chatbot = ChatHuggingFace(llm=llm) | parser if "gpt" not in model else ChatOpenAI(model=llm) | parser

    try:
        ai_response = asana_chatbot.invoke(messages)
    except Exception:
        # Malformed JSON or a transient model error - retry, preserving the
        # record of tools already invoked (the original dropped it here and
        # used a bare except, which also swallowed KeyboardInterrupt).
        return prompt_ai(messages, nested_calls + 1, invoked_tools)

    # Second, see if the AI decided it needs to invoke a tool
    has_tool_calls = len(ai_response["tool_calls"]) > 0
    if has_tool_calls:
        # Next, for each tool the AI wanted to call, call it and add the tool result to the list of messages
        for tool_call in ai_response["tool_calls"]:
            if str(tool_call) not in invoked_tools:
                tool_name = tool_call["name"].lower()
                selected_tool = available_tools[tool_name]
                tool_output = selected_tool(**tool_call["args"])

                # "Thought:" messages are internal - main() filters them out
                # of the visible chat transcript.
                messages.append(AIMessage(content=f"Thought: - I called {tool_name} with args {tool_call['args']} and got back: {tool_output}."))
                invoked_tools.append(str(tool_call))
            else:
                # Model repeated an action it already took; stop looping.
                return ai_response

        # Prompt the AI again now that the result of calling the tool(s) has been added to the chat history
        return prompt_ai(messages, nested_calls + 1, invoked_tools)

    return ai_response
149+
150+
151+
def main():
    """Streamlit entry point: render the chat transcript and handle new input."""
    st.title("Asana Chatbot")

    # Seed the conversation with the system prompt on the first run only.
    if "messages" not in st.session_state:
        st.session_state.messages = [
            SystemMessage(content=f"You are a personal assistant who helps manage tasks in Asana. The current date is: {datetime.now().date()}.\n{tool_text}")
        ]

    # Replay prior turns on each rerun, hiding internal "Thought:" messages.
    for msg in st.session_state.messages:
        data = json.loads(msg.json())
        role = data["type"]
        text = data["content"]
        if role not in ("human", "ai", "system"):
            continue
        if text.startswith("Thought:"):
            continue
        with st.chat_message(role):
            st.markdown(text)

    # Handle a newly submitted user message, if any.
    user_input = st.chat_input("What would you like to do today?")
    if user_input:
        # Echo the user's message and record it in the history.
        st.chat_message("user").markdown(user_input)
        st.session_state.messages.append(HumanMessage(content=user_input))

        # Ask the model (running any tool calls) and show its reply.
        with st.chat_message("assistant"):
            ai_response = prompt_ai(st.session_state.messages)
            st.markdown(ai_response['content'])

        st.session_state.messages.append(AIMessage(content=ai_response['content']))


if __name__ == "__main__":
    main()
+10
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
# When installing langchain-huggingface on Windows, you might need to follow these instructions:
2+
# https://www.howtogeek.com/266621/how-to-make-windows-10-accept-file-paths-over-260-characters/
3+
4+
asana==5.0.0
5+
python-dotenv==0.13.0
6+
langchain==0.2.6
7+
langchain-community==0.2.6
8+
langchain-huggingface==0.0.3
9+
langchain-core==0.2.10
10+
streamlit==1.36.0

0 commit comments

Comments
 (0)
Please sign in to comment.