From 14833ac114f4cd37a75833c0670296a642bdb8cc Mon Sep 17 00:00:00 2001
From: leungmanhin
Date: Mon, 2 Mar 2026 22:38:10 +0800
Subject: [PATCH 1/3] Move the OpenRouter calls to lib_llm.py

---
 lib/lib_llm.metta | 16 +++++-----------
 lib/lib_llm.py    | 31 +++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 11 deletions(-)
 create mode 100644 lib/lib_llm.py

diff --git a/lib/lib_llm.metta b/lib/lib_llm.metta
index ed78df5..6dc029b 100644
--- a/lib/lib_llm.metta
+++ b/lib/lib_llm.metta
@@ -1,3 +1,6 @@
+;Import Python helpers for OpenRouter
+!(import! &self "lib/lib_llm.py")
+
 ;Create string by appending the list entries
 (= (py-str-helper () $outp) $outp)
 (= (py-str-helper $L $outp)
@@ -17,11 +20,6 @@
 ;Add to builtins a function that can access by index:
 !(py-eval "setattr(__import__('builtins'),'index', (lambda o,i: o[i])) or 0")
 
-;Add to builtins a function that POST JSON to OpenRouter and return the assistant reply text
-!(py-eval "setattr(__import__('builtins'),'or_post',(lambda model,prompt,key: __import__('json').loads(__import__('urllib.request',fromlist=['urlopen','Request']).urlopen(__import__('urllib.request',fromlist=['Request']).Request('https://openrouter.ai/api/v1/chat/completions',__import__('json').dumps({'model':model,'messages':[{'role':'user','content':prompt}]}).encode('utf-8'),{'Authorization':'Bearer '+key,'Content-Type':'application/json'})).read())['choices'][0]['message']['content'])) or 0")
-;Add to builtins a function that POST to OpenRouter embeddings endpoint and return the embedding vector
-!(py-eval "setattr(__import__('builtins'),'or_embed',(lambda model,text,key: __import__('json').loads(__import__('urllib.request',fromlist=['urlopen','Request']).urlopen(__import__('urllib.request',fromlist=['Request']).Request('https://openrouter.ai/api/v1/embeddings',__import__('json').dumps({'model':model,'input':text}).encode('utf-8'),{'Authorization':'Bearer '+key,'Content-Type':'application/json'})).read())['data'][0]['embedding'])) or 0")
-
 ;Function to prompt GPT via OpenAI API:
 (= (useGPT $prompt)
    (useGPT gpt-5.2 1000000 medium $prompt))
@@ -39,9 +37,7 @@
 
 ;Function to prompt a model via OpenRouter API:
 (= (useOpenRouter $model $prompt)
-   (let* (($apikey (once (py-eval "__import__('os').environ.get('OPENROUTER_API_KEY','')")))
-          ($text (once (py-call (builtins.or_post $model $prompt $apikey)))))
-         $text))
+   (py-call (lib_llm.openrouter_chat $model $prompt)))
 
 (= (sread-safe $w)
    (case (catch (sread $w))
@@ -62,6 +58,4 @@
          $vector))
 
 (= (useOpenRouterEmbedding $model $text)
-   (let* (($apikey (once (py-eval "__import__('os').environ.get('OPENROUTER_API_KEY','')")))
-          ($vector (once (py-call (builtins.or_embed $model $text $apikey)))))
-         $vector))
+   (py-call (lib_llm.openrouter_embed $model $text)))
diff --git a/lib/lib_llm.py b/lib/lib_llm.py
new file mode 100644
index 0000000..ba69270
--- /dev/null
+++ b/lib/lib_llm.py
@@ -0,0 +1,31 @@
+import json
+import os
+import urllib.request
+
+def openrouter_chat(model, prompt):
+    key = os.environ.get('OPENROUTER_API_KEY', '')
+    data = json.dumps({
+        'model': model,
+        'messages': [{'role': 'user', 'content': prompt}]
+    }).encode('utf-8')
+    req = urllib.request.Request(
+        'https://openrouter.ai/api/v1/chat/completions',
+        data,
+        {'Authorization': 'Bearer ' + key, 'Content-Type': 'application/json'}
+    )
+    response = urllib.request.urlopen(req).read()
+    return json.loads(response)['choices'][0]['message']['content']
+
+def openrouter_embed(model, text):
+    key = os.environ.get('OPENROUTER_API_KEY', '')
+    data = json.dumps({
+        'model': model,
+        'input': text
+    }).encode('utf-8')
+    req = urllib.request.Request(
+        'https://openrouter.ai/api/v1/embeddings',
+        data,
+        {'Authorization': 'Bearer ' + key, 'Content-Type': 'application/json'}
+    )
+    response = urllib.request.urlopen(req).read()
+    return json.loads(response)['data'][0]['embedding']

From 00c603e352c265d700c9004829c09ec676280eed Mon Sep 17 00:00:00 2001
From: leungmanhin
Date: Mon, 2 Mar 2026 23:35:23 +0800
Subject: [PATCH 2/3] Make reasoning effort and max_tokens more easily
 configurable

---
 lib/lib_llm.metta | 4 +++-
 lib/lib_llm.py    | 6 ++++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/lib/lib_llm.metta b/lib/lib_llm.metta
index 6dc029b..30e7b01 100644
--- a/lib/lib_llm.metta
+++ b/lib/lib_llm.metta
@@ -37,7 +37,9 @@
 
 ;Function to prompt a model via OpenRouter API:
 (= (useOpenRouter $model $prompt)
-   (py-call (lib_llm.openrouter_chat $model $prompt)))
+   (useOpenRouter $model 100000 medium $prompt))
+(= (useOpenRouter $model $max_tokens $effort $prompt)
+   (py-call (lib_llm.openrouter_chat $model $prompt $max_tokens $effort)))
 
 (= (sread-safe $w)
    (case (catch (sread $w))
diff --git a/lib/lib_llm.py b/lib/lib_llm.py
index ba69270..8fa76a7 100644
--- a/lib/lib_llm.py
+++ b/lib/lib_llm.py
@@ -2,11 +2,13 @@
 import os
 import urllib.request
 
-def openrouter_chat(model, prompt):
+def openrouter_chat(model, prompt, max_tokens, effort):
     key = os.environ.get('OPENROUTER_API_KEY', '')
     data = json.dumps({
         'model': model,
-        'messages': [{'role': 'user', 'content': prompt}]
+        'messages': [{'role': 'user', 'content': prompt}],
+        'max_tokens': max_tokens,
+        'reasoning': {'effort': effort}
     }).encode('utf-8')
     req = urllib.request.Request(
         'https://openrouter.ai/api/v1/chat/completions',

From fa8acd9353122bede4d8a87b272c4739ce19a5af Mon Sep 17 00:00:00 2001
From: leungmanhin
Date: Tue, 3 Mar 2026 20:30:54 +0800
Subject: [PATCH 3/3] Fix library path

---
 lib/lib_llm.metta | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/lib_llm.metta b/lib/lib_llm.metta
index 30e7b01..430298e 100644
--- a/lib/lib_llm.metta
+++ b/lib/lib_llm.metta
@@ -1,5 +1,5 @@
 ;Import Python helpers for OpenRouter
-!(import! &self "lib/lib_llm.py")
+!(import! &self (library lib_llm.py))
 
 ;Create string by appending the list entries
 (= (py-str-helper () $outp) $outp)