When using
var GetWeatherTool = agents.NewFunctionTool("get_weather", "gets the current weather in a city", GetWeather)
with llama-server I get the panic below (the JSON from the copy-paste dump is formatted underneath it):
panic: POST "http://localhost:9090/chat/completions": 500 Internal Server Error {"code":500,"message":"Failed to parse tools: [json.exception.out_of_range.403] key 'description' not found; tools = [\n {\n \"function\": {\n \"name\": \"get_weather\",\n \"parameters\": {\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"additionalProperties\": false,\n \"description\": \"gets the current weather in a city\",\n \"properties\": {\n \"city\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\n \"city\"\n ],\n \"type\": \"object\"\n }\n },\n \"type\": \"function\"\n }\n]","type":"server_error"}
goroutine 1 [running]:
main.main()
	/home/wuhei/dev/llmagent/main.go:93 +0x307
exit status 2

The tools JSON from the error message, formatted:

[
  {
    "function": {
      "name": "get_weather",
      "parameters": {
        "$schema": "https://json-schema.org/draft/2020-12/schema",
        "additionalProperties": false,
        "description": "gets the current weather in a city",
        "properties": {
          "city": {
            "type": "string"
          }
        },
        "required": [
          "city"
        ],
        "type": "object"
      }
    },
    "type": "function"
  }
]
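So the description ends up inside the JSON-schema "parameters" object, while the "function" object itself carries no "description" key, which is presumably what llama-server's parser is complaining about. For comparison, my understanding of the standard OpenAI chat-completions tool shape (an assumption on my part, pieced together from the error, not from the library docs) has it one level up:

[
  {
    "type": "function",
    "function": {
      "name": "get_weather",
      "description": "gets the current weather in a city",
      "parameters": {
        "type": "object",
        "properties": {
          "city": { "type": "string" }
        },
        "required": ["city"]
      }
    }
  }
]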
Here's my code, if it helps (basically a mashup of your custom LLM example):
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/nlpodyssey/openai-agents-go/agents"
	"github.com/nlpodyssey/openai-agents-go/tracing"
	"github.com/openai/openai-go/v2/packages/param"
)

var (
	BaseURL   = "http://localhost:9090" // os.Getenv("LLM_URL")
	APIKey    = "funfunfun"             // os.Getenv("LLM_API_KEY")
	ModelName = "ziocaro"               // os.Getenv("LLM_MODEL_NAME")
)

func init() {
	if BaseURL == "" || APIKey == "" || ModelName == "" {
		fmt.Println("set vars")
		os.Exit(1)
	}
}

var Client = agents.NewOpenaiClient(param.NewOpt(BaseURL), param.NewOpt(APIKey))

func init() {
	tracing.SetTracingDisabled(true)
}

type GetWeatherArgs struct {
	City string `json:"city"`
}

// GetWeather fetches the current weather for a city from wttr.in.
func GetWeather(_ context.Context, args GetWeatherArgs) (string, error) {
	fmt.Printf("[debug] getting weather for %s\n", args.City)
	resp, err := http.Get("https://wttr.in/" + args.City)
	if err != nil {
		return "HTTP request to wttr.in error", err
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "failed to read wttr.in body response", err
	}
	return string(body), nil
}

var GetWeatherTool = agents.NewFunctionTool("get_weather", "gets the current weather in a city", GetWeather)

func main() {
	fmt.Println("starting agent with custom LLM provider")
	// This agent will use the custom LLM provider
	agent := agents.New("Assistant").
		WithInstructions("You only respond in haikus.").
		WithModelInstance(agents.NewOpenAIChatCompletionsModel(ModelName, Client)).
		WithTools(GetWeatherTool)
	result, err := agents.Run(context.Background(), agent, "What's the weather in Tokyo?")
	if err != nil {
		panic(err)
	}
	fmt.Println(result.FinalOutput)
}
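In case it helps triage, here's a minimal stdlib-only sketch (separate from the program above, untested; endpoint and model name copied from my config) that posts the same request by hand with "description" moved up to the function level, to check whether the 500 disappears:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hand-built chat/completions request: same as what the agent sends,
	// except "description" sits next to "name" inside "function".
	payload := []byte(`{
	  "model": "ziocaro",
	  "messages": [{"role": "user", "content": "What's the weather in Tokyo?"}],
	  "tools": [{
	    "type": "function",
	    "function": {
	      "name": "get_weather",
	      "description": "gets the current weather in a city",
	      "parameters": {
	        "type": "object",
	        "properties": {"city": {"type": "string"}},
	        "required": ["city"]
	      }
	    }
	  }]
	}`)

	resp, err := http.Post("http://localhost:9090/chat/completions", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status) // my guess: 200 if the schema placement is the culprit
	fmt.Println(string(body))
}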
llama-server version:
version: 6795 (ee09828cb)
built with cc (Debian 12.2.0-14) 12.2.0 for aarch64-linux-gnu
I'm probably missing something trivial due to my extreme Go inexperience, but I'm stuck. Any pointers when you have time would be great, thanks in advance!