
Commit 68559af

fix import errors
1 parent c8434d3 commit 68559af

File tree

4 files changed: +19 −17 lines changed


oneping/curl.py

Lines changed: 4 additions & 4 deletions

@@ -222,7 +222,7 @@ async def stream_async(query, provider=DEFAULT_PROVIDER, history=None, prefill=N
 ## embeddings
 ##

-def embed(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=None, model=None, **kwargs):
+def embed(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=None, model=None, timeout=None, **kwargs):
     # get provider details
     prov = get_provider(provider)
     url = prepare_url(prov, f'embed_path', base_url=base_url, path=path)
@@ -240,7 +240,7 @@ def embed(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=Non
     payload = {**payload_model, **payload_message, **kwargs}

     # make the request
-    response = requests.post(url, headers=headers, data=json.dumps(payload))
+    response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=timeout)
     response.raise_for_status()

     # extract result
@@ -250,7 +250,7 @@ def embed(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=Non
     # return result
     return result

-def tokenize(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=None, model=None, **kwargs):
+def tokenize(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=None, model=None, timeout=None, **kwargs):
     # get provider details
     prov = get_provider(provider)
     url = prepare_url(prov, 'tokenize_path', base_url=base_url, path=path)
@@ -268,7 +268,7 @@ def tokenize(text, provider=DEFAULT_PROVIDER, base_url=None, path=None, api_key=
     payload = {**payload_model, **payload_message, **kwargs}

     # make the request
-    response = requests.post(url, headers=headers, data=json.dumps(payload))
+    response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=timeout)
     response.raise_for_status()

     # extract result
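
With timeout now threaded through to requests.post, callers can bound how long an embedding or tokenize request waits. A minimal usage sketch, assuming the import path oneping.curl and a provider name such as 'openai' (both are illustrative, not confirmed by this diff):

    import requests
    from oneping.curl import embed  # assumed import path for this module

    try:
        # timeout is forwarded to requests.post as a limit in seconds
        vec = embed('hello world', provider='openai', timeout=10)
    except requests.exceptions.Timeout:
        # the call now fails after 10 seconds instead of hanging indefinitely
        vec = None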

oneping/native/azure.py

Lines changed: 3 additions & 3 deletions

@@ -7,7 +7,7 @@
     DEFAULT_SYSTEM, OPENAI_MODEL, OPENAI_EMBED, OPENAI_TRANSCRIBE, AZURE_API_VERSION, AZURE_KEYENV,
     content_openai, convert_history, payload_openai,
     response_openai_native, stream_openai_native,
-    embed_openai, transcribe_openai
+    embed_response_openai, transcribe_response_openai
 )

 ##
@@ -71,11 +71,11 @@ def embed(
 ):
     client = make_client(azure_endpoint, azure_deployment=azure_deployment, api_version=api_version, api_key=api_key)
     response = client.embeddings.create(model=model, **kwargs)
-    return embed_openai(response)
+    return embed_response_openai(response)

 def transcribe(
     audio, model=OPENAI_TRANSCRIBE, azure_endpoint=None, azure_deployment=None, api_version=AZURE_API_VERSION, api_key=None, **kwargs
 ):
     client = make_client(azure_endpoint, azure_deployment=azure_deployment, api_version=api_version, api_key=api_key)
     response = client.audio.transcriptions.create(model=model, file=audio, **kwargs)
-    return transcribe_openai(response)
+    return transcribe_response_openai(response)

oneping/native/openai.py

Lines changed: 5 additions & 5 deletions

@@ -7,7 +7,7 @@
     DEFAULT_SYSTEM, OPENAI_MODEL, OPENAI_EMBED, OPENAI_TRANSCRIBE, OPENAI_KEYENV,
     content_openai, convert_history, payload_openai,
     response_openai_native, stream_openai_native,
-    embed_openai, transcribe_openai
+    embed_response_openai, transcribe_response_openai
 )

 ##
@@ -23,10 +23,10 @@ def make_payload(query, image=None, prediction=None, system=None, history=None):
 ## common interface
 ##

-def make_client(api_key=None, async_client=False):
+def make_client(api_key=None, async_client=False, base_url=None):
     api_key = api_key if api_key is not None else os.environ.get(OPENAI_KEYENV)
     client_class = openai.AsyncOpenAI if async_client else openai.OpenAI
-    return client_class(api_key=api_key)
+    return client_class(api_key=api_key, base_url=base_url)

 def reply(query, image=None, history=None, prefill=None, prediction=None, system=DEFAULT_SYSTEM, api_key=None, model=OPENAI_MODEL, max_tokens=None, base_url=None, **kwargs):
     client = make_client(api_key=api_key, base_url=base_url)
@@ -57,9 +57,9 @@ async def stream_async(query, image=None, history=None, prefill=None, prediction
 def embed(query, model=OPENAI_EMBED, api_key=None, base_url=None, **kwargs):
     client = make_client(api_key=api_key, base_url=base_url)
     response = client.embeddings.create(query, model=model, **kwargs)
-    return embed_openai(response)
+    return embed_response_openai(response)

 def transcribe(audio, model=OPENAI_TRANSCRIBE, api_key=None, base_url=None, **kwargs):
     client = make_client(api_key=api_key, base_url=base_url)
     response = client.audio.transcriptions.create(audio, model=model, **kwargs)
-    return transcribe_openai(response)
+    return transcribe_response_openai(response)

oneping/providers.py

Lines changed: 7 additions & 5 deletions

@@ -15,10 +15,10 @@
 ## models
 ##

-OPENAI_MODEL = 'gpt-4o'
+OPENAI_MODEL = 'gpt-5'
 OPENAI_EMBED = 'text-embedding-3-large'
-OPENAI_TRANSCRIBE = 'gpt-4o-transcribe'
-ANTHROPIC_MODEL = 'claude-3-7-sonnet-latest'
+OPENAI_TRANSCRIBE = 'gpt-5-transcribe'
+ANTHROPIC_MODEL = 'claude-opus-4-1-20250805'
 FIREWORKS_MODEL = 'accounts/fireworks/models/llama-v3p3-70b-instruct'
 GROQ_MODEL = 'llama-3.3-70b-versatile'
 DEEPSEEK_MODEL = 'deepseek-chat'
@@ -212,7 +212,9 @@ def embed_payload_openai(text):
     return {'input': text}

 def embed_response_openai(reply):
-    return reply['data'][0]['embedding']
+    return [
+        item['embedding'] for item in reply['data']
+    ]

 def embed_payload_tei(text):
     return {'inputs': text}
@@ -248,7 +250,7 @@ def tokenize_response_vllm(reply):
 ## transcribe handlers
 ##

-def transcribe_openai(audio):
+def transcribe_response_openai(audio):
     return audio.text

 ##
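
Note the behavioral change in embed_response_openai: it now returns one embedding per item in the response rather than only the first. A small sketch of the new return shape, using a hand-built reply dict in the OpenAI embeddings format with made-up values:

    # fabricated example data, shaped like an OpenAI embeddings response body
    reply = {
        'data': [
            {'embedding': [0.1, 0.2, 0.3]},
            {'embedding': [0.4, 0.5, 0.6]},
        ]
    }

    def embed_response_openai(reply):
        # copied from the patched handler: collect every embedding, not just the first
        return [
            item['embedding'] for item in reply['data']
        ]

    vecs = embed_response_openai(reply)
    # vecs == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], a list of embeddings, one per input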
