diff --git a/lib/lib_llm.metta b/lib/lib_llm.metta
index ed78df5..430298e 100644
--- a/lib/lib_llm.metta
+++ b/lib/lib_llm.metta
@@ -1,3 +1,6 @@
+;Import Python helpers for OpenRouter
+!(import! &self (library lib_llm.py))
+
 ;Create string by appending the list entries
 (= (py-str-helper () $outp) $outp)
 (= (py-str-helper $L $outp)
@@ -17,11 +20,6 @@
 ;Add to builtins a function that can access by index:
 !(py-eval "setattr(__import__('builtins'),'index', (lambda o,i: o[i])) or 0")
 
-;Add to builtins a function that POST JSON to OpenRouter and return the assistant reply text
-!(py-eval "setattr(__import__('builtins'),'or_post',(lambda model,prompt,key: __import__('json').loads(__import__('urllib.request',fromlist=['urlopen','Request']).urlopen(__import__('urllib.request',fromlist=['Request']).Request('https://openrouter.ai/api/v1/chat/completions',__import__('json').dumps({'model':model,'messages':[{'role':'user','content':prompt}]}).encode('utf-8'),{'Authorization':'Bearer '+key,'Content-Type':'application/json'})).read())['choices'][0]['message']['content'])) or 0")
-;Add to builtins a function that POST to OpenRouter embeddings endpoint and return the embedding vector
-!(py-eval "setattr(__import__('builtins'),'or_embed',(lambda model,text,key: __import__('json').loads(__import__('urllib.request',fromlist=['urlopen','Request']).urlopen(__import__('urllib.request',fromlist=['Request']).Request('https://openrouter.ai/api/v1/embeddings',__import__('json').dumps({'model':model,'input':text}).encode('utf-8'),{'Authorization':'Bearer '+key,'Content-Type':'application/json'})).read())['data'][0]['embedding'])) or 0")
-
 ;Function to prompt GPT via OpenAI API:
 (= (useGPT $prompt)
    (useGPT gpt-5.2 1000000 medium $prompt))
@@ -39,9 +37,9 @@
 
 ;Function to prompt a model via OpenRouter API:
 (= (useOpenRouter $model $prompt)
-   (let* (($apikey (once (py-eval "__import__('os').environ.get('OPENROUTER_API_KEY','')")))
-          ($text (once (py-call (builtins.or_post $model $prompt $apikey)))))
-     $text))
+   (useOpenRouter $model 100000 medium $prompt))
+(= (useOpenRouter $model $max_tokens $effort $prompt)
+   (py-call (lib_llm.openrouter_chat $model $prompt $max_tokens $effort)))
 
 (= (sread-safe $w)
    (case (catch (sread $w))
@@ -62,6 +60,4 @@
    $vector))
 
 (= (useOpenRouterEmbedding $model $text)
-   (let* (($apikey (once (py-eval "__import__('os').environ.get('OPENROUTER_API_KEY','')")))
-          ($vector (once (py-call (builtins.or_embed $model $text $apikey)))))
-     $vector))
+   (py-call (lib_llm.openrouter_embed $model $text)))
diff --git a/lib/lib_llm.py b/lib/lib_llm.py
new file mode 100644
index 0000000..8fa76a7
--- /dev/null
+++ b/lib/lib_llm.py
@@ -0,0 +1,33 @@
+import json
+import os
+import urllib.request
+
+# Bound every request so a stalled network call cannot hang the interpreter.
+_TIMEOUT = 120
+_API_BASE = 'https://openrouter.ai/api/v1'
+
+def _post_json(path, payload):
+    """POST payload as JSON to the OpenRouter API and return the decoded reply."""
+    key = os.environ.get('OPENROUTER_API_KEY', '')
+    req = urllib.request.Request(
+        _API_BASE + path,
+        json.dumps(payload).encode('utf-8'),
+        {'Authorization': 'Bearer ' + key, 'Content-Type': 'application/json'}
+    )
+    with urllib.request.urlopen(req, timeout=_TIMEOUT) as response:
+        return json.loads(response.read())
+
+def openrouter_chat(model, prompt, max_tokens, effort):
+    """Return the assistant reply text from model for a single user prompt."""
+    reply = _post_json('/chat/completions', {
+        'model': model,
+        'messages': [{'role': 'user', 'content': prompt}],
+        'max_tokens': max_tokens,
+        'reasoning': {'effort': effort}
+    })
+    return reply['choices'][0]['message']['content']
+
+def openrouter_embed(model, text):
+    """Return the embedding vector for text from the embeddings endpoint."""
+    reply = _post_json('/embeddings', {'model': model, 'input': text})
+    return reply['data'][0]['embedding']