4 changes: 2 additions & 2 deletions examples/audio/create_transcription/src/main.rs
@@ -1,5 +1,5 @@
use openai_dive::v1::api::Client;
-use openai_dive::v1::models::WhisperModel;
+use openai_dive::v1::models::TranscriptionModel;
use openai_dive::v1::resources::audio::{AudioOutputFormat, AudioTranscriptionParametersBuilder};
use openai_dive::v1::resources::shared::FileUpload;

@@ -9,7 +9,7 @@ async fn main() {

    let parameters = AudioTranscriptionParametersBuilder::default()
        .file(FileUpload::File("./audio/micro-machines.mp3".to_string()))
-        .model(WhisperModel::Whisper1.to_string())
+        .model(TranscriptionModel::Whisper1.to_string())
        .response_format(AudioOutputFormat::VerboseJson)
        .build()
        .unwrap();
4 changes: 2 additions & 2 deletions examples/audio/create_translation/src/main.rs
@@ -1,5 +1,5 @@
use openai_dive::v1::api::Client;
-use openai_dive::v1::models::WhisperModel;
+use openai_dive::v1::models::TranscriptionModel;
use openai_dive::v1::resources::audio::{AudioOutputFormat, AudioTranslationParametersBuilder};
use openai_dive::v1::resources::shared::FileUpload;

@@ -9,7 +9,7 @@ async fn main() {

    let parameters = AudioTranslationParametersBuilder::default()
        .file(FileUpload::File("./audio/multilingual.mp3".to_string()))
-        .model(WhisperModel::Whisper1.to_string())
+        .model(TranscriptionModel::Whisper1.to_string())
        .response_format(AudioOutputFormat::Srt)
        .build()
        .unwrap();
14 changes: 10 additions & 4 deletions examples/chat/create_chat_completion/src/main.rs
@@ -14,15 +14,15 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new_from_env();

    let parameters = ChatCompletionParametersBuilder::default()
-        .model(ReasoningModel::O1Mini.to_string())
+        .model(ReasoningModel::O3Mini.to_string())
        .messages(vec![
            ChatMessage::User {
                content: ChatMessageContent::Text("Hello!".to_string()),
-                name: None,
+                name: Some("Judy".to_string()),
            },
            ChatMessage::User {
-                content: ChatMessageContent::Text("What is the capital of Vietnam?".to_string()),
-                name: None,
+                content: ChatMessageContent::Text("What is the capital of Singapore?".to_string()),
+                name: Some("Judy".to_string()),
            },
        ])
        .response_format(ChatCompletionResponseFormat::Text)
@@ -32,5 +32,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {

    println!("{:#?}", result);

+    for choice in &result.choices {
+        if let Some(text) = choice.message.text() {
+            println!("{}", text);
+        }
+    }

    Ok(())
}
50 changes: 26 additions & 24 deletions examples/chat/structured_outputs/src/main.rs
@@ -26,31 +26,33 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                name: None,
            },
        ])
-        .response_format(ChatCompletionResponseFormat::JsonSchema(JsonSchemaBuilder::default()
-            .name("math_reasoning")
-            .schema(serde_json::json!({
-                "type": "object",
-                "properties": {
-                    "steps": {
-                        "type": "array",
-                        "items": {
-                            "type": "object",
-                            "properties": {
-                                "explanation": { "type": "string" },
-                                "output": { "type": "string" }
-                            },
-                            "required": ["explanation", "output"],
-                            "additionalProperties": false
-                        }
-                    },
-                    "final_answer": { "type": "string" }
-                },
-                "required": ["steps", "final_answer"],
-                "additionalProperties": false
-            }))
-            .strict(true)
-            .build()?
-        ))
+        .response_format(ChatCompletionResponseFormat::JsonSchema {
+            json_schema: JsonSchemaBuilder::default()
+                .name("math_reasoning")
+                .schema(serde_json::json!({
+                    "type": "object",
+                    "properties": {
+                        "steps": {
+                            "type": "array",
+                            "items": {
+                                "type": "object",
+                                "properties": {
+                                    "explanation": { "type": "string" },
+                                    "output": { "type": "string" }
+                                },
+                                "required": ["explanation", "output"],
+                                "additionalProperties": false
+                            }
+                        },
+                        "final_answer": { "type": "string" }
+                    },
+                    "required": ["steps", "final_answer"],
+                    "additionalProperties": false
+                }))
+                .strict(true)
+                .build()?
+        })
        .build()?;

    let result = client.chat().create(parameters).await?;
4 changes: 2 additions & 2 deletions examples/images/create_image/src/main.rs
@@ -1,5 +1,5 @@
use openai_dive::v1::api::Client;
-use openai_dive::v1::models::DallEModel;
+use openai_dive::v1::models::ImageModel;
use openai_dive::v1::resources::image::{
    CreateImageParametersBuilder, ImageQuality, ImageSize, ImageStyle, ResponseFormat,
};
@@ -10,7 +10,7 @@ async fn main() {

    let parameters = CreateImageParametersBuilder::default()
        .prompt("A cute dog in the park")
-        .model(DallEModel::DallE3.to_string())
+        .model(ImageModel::DallE3.to_string())
        .n(1u32)
        .quality(ImageQuality::Standard)
        .response_format(ResponseFormat::Url)
3 changes: 2 additions & 1 deletion examples/images/create_image_edit/src/main.rs
@@ -1,5 +1,5 @@
use openai_dive::v1::api::Client;
-use openai_dive::v1::resources::image::{EditImageParametersBuilder, ImageSize};
+use openai_dive::v1::resources::image::{EditImageParametersBuilder, ImageSize, MimeType};
use openai_dive::v1::resources::shared::FileUpload;

#[tokio::main]
@@ -13,6 +13,7 @@ async fn main() {
        .prompt("A cute baby sea otter")
        .mask(FileUpload::File("./images/image_edit_mask.png".to_string()))
        .n(1u32)
+        .mime_type(MimeType::Png)
        .size(ImageSize::Size512X512)
        .build()
        .unwrap();
8 changes: 8 additions & 0 deletions examples/images/create_multiple_images/Cargo.toml
@@ -0,0 +1,8 @@
[package]
name = "create_multiple_images"
version = "0.1.0"
edition = "2021"

[dependencies]
openai_dive = { path = "./../../../openai_dive" }
tokio = { version = "1.0", features = ["full"] }
44 changes: 44 additions & 0 deletions examples/images/create_multiple_images/src/main.rs
@@ -0,0 +1,44 @@
use openai_dive::v1::api::Client;
use openai_dive::v1::models::ImageModel;
use openai_dive::v1::resources::image::{
    EditImageParametersBuilder, ImageQuality, ImageSize, MimeType,
};
use openai_dive::v1::resources::shared::FileUpload;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new_from_env();

    let parameters = EditImageParametersBuilder::default()
        .prompt("Make this person smile with full teeth")
        .image(FileUpload::File("./images/person.png".to_string()))
        .model(ImageModel::GptImage1.to_string())
        .quality(ImageQuality::Low)
        .mime_type(MimeType::Png)
        .n(1u32)
        .size(ImageSize::Size1024X1024)
        .build()?;

    let result = client.images().edit(parameters).await?;

    println!("{:#?}", result);

    let parameters = EditImageParametersBuilder::default()
        .prompt("Combine the person into the original image")
        .image(FileUpload::FileArray(vec![
            "./images/image_edit_original.png".to_string(),
            "./images/person.png".to_string(),
        ]))
        .model("gpt-image-1")
        .quality(ImageQuality::Low)
        .mime_type(MimeType::Png)
        .n(1u32)
        .size(ImageSize::Size1024X1024)
        .build()?;

    let result = client.images().edit(parameters).await?;

    println!("{:#?}", result);

    Ok(())
}
28 changes: 27 additions & 1 deletion examples/responses/functions/src/main.rs
@@ -1,7 +1,11 @@
use ftail::Ftail;
use openai_dive::v1::api::Client;
use openai_dive::v1::models::CostOptimizedModel;
-use openai_dive::v1::resources::response::request::{ResponseInput, ResponseParametersBuilder};
+use openai_dive::v1::resources::response::items::{FunctionToolCallOutput, InputItemStatus};
+use openai_dive::v1::resources::response::request::{
+    InputItem, ResponseInput, ResponseInputItem, ResponseParametersBuilder,
+};
+use openai_dive::v1::resources::response::response::ResponseOutput;
use openai_dive::v1::resources::response::shared::{ResponseTool, ResponseToolChoice};

#[tokio::main]
@@ -44,5 +48,27 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {

    println!("{:#?}", result);

+    let call = match &result.output[0] {
+        ResponseOutput::FunctionToolCall(call) => call,
+        _ => panic!("unexpected output"),
+    };

+    let parameters = ResponseParametersBuilder::default()
+        .model(CostOptimizedModel::Gpt4OMini.to_string())
+        .input(ResponseInput::List(vec![ResponseInputItem::Item(
+            InputItem::FunctionToolCallOutput(FunctionToolCallOutput {
+                id: None,
+                call_id: call.call_id.clone(),
+                output: "{\"temperature_2m\":30,\"wind_speed_10m\":5}".to_string(),
+                status: InputItemStatus::Completed,
+            }),
+        )]))
+        .previous_response_id(result.id)
+        .build()?;

+    let result = client.responses().create(parameters).await?;

+    println!("{:#?}", result);

    Ok(())
}
2 changes: 1 addition & 1 deletion openai_dive/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "openai_dive"
-version = "1.0.1"
+version = "1.2.2"
edition = "2021"
license = "MIT"
description = "OpenAI Dive is an unofficial async Rust library that allows you to interact with the OpenAI API."
70 changes: 38 additions & 32 deletions openai_dive/README.md
@@ -9,7 +9,7 @@ OpenAI Dive is an unofficial async Rust library that allows you to interact with

```ini
[dependencies]
-openai_dive = "1.0"
+openai_dive = "1.2"
```

## Get started
@@ -264,31 +264,33 @@ let parameters = ChatCompletionParametersBuilder::default()
            name: None,
        },
    ])
-    .response_format(ChatCompletionResponseFormat::JsonSchema(JsonSchemaBuilder::default()
-        .name("math_reasoning")
-        .schema(serde_json::json!({
-            "type": "object",
-            "properties": {
-                "steps": {
-                    "type": "array",
-                    "items": {
-                        "type": "object",
-                        "properties": {
-                            "explanation": { "type": "string" },
-                            "output": { "type": "string" }
-                        },
-                        "required": ["explanation", "output"],
-                        "additionalProperties": false
-                    }
-                },
-                "final_answer": { "type": "string" }
-            },
-            "required": ["steps", "final_answer"],
-            "additionalProperties": false
-        }))
-        .strict(true)
-        .build()?
-    ))
+    .response_format(ChatCompletionResponseFormat::JsonSchema {
+        json_schema: JsonSchemaBuilder::default()
+            .name("math_reasoning")
+            .schema(serde_json::json!({
+                "type": "object",
+                "properties": {
+                    "steps": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "explanation": { "type": "string" },
+                                "output": { "type": "string" }
+                            },
+                            "required": ["explanation", "output"],
+                            "additionalProperties": false
+                        }
+                    },
+                    "final_answer": { "type": "string" }
+                },
+                "required": ["steps", "final_answer"],
+                "additionalProperties": false
+            }))
+            .strict(true)
+            .build()?
+    })
    .build()?;

let result = client.chat().create(parameters).await?;
@@ -534,6 +536,8 @@ let mut client = Client::new(deepseek_api_key);
client.set_base_url("https://api.deepseek.com");
```

+Use `extra_body` in `ChatCompletionParametersBuilder` to pass non-standard parameters supported by OpenAI-compatible APIs.
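
For illustration, a minimal sketch of combining a custom base URL with a provider-specific parameter. The `extra_body` setter is assumed here to accept a `serde_json::Value`, and the model name and `top_k` field are placeholders; check the crate docs for the exact signature.

```rust
use openai_dive::v1::api::Client;
use openai_dive::v1::resources::chat::{
    ChatCompletionParametersBuilder, ChatMessage, ChatMessageContent,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Point the client at an OpenAI-compatible endpoint (DeepSeek in this sketch).
    let mut client = Client::new(std::env::var("DEEPSEEK_API_KEY")?);
    client.set_base_url("https://api.deepseek.com");

    let parameters = ChatCompletionParametersBuilder::default()
        .model("deepseek-chat".to_string())
        .messages(vec![ChatMessage::User {
            content: ChatMessageContent::Text("Hello!".to_string()),
            name: None,
        }])
        // Hypothetical non-standard field, forwarded verbatim in the request body.
        .extra_body(serde_json::json!({ "top_k": 20 }))
        .build()?;

    let result = client.chat().create(parameters).await?;
    println!("{:#?}", result);

    Ok(())
}
```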

### Set organization/project ID

You can create multiple organizations and projects in the OpenAI platform. This allows you to group files, fine-tuned models and other resources.
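
A minimal sketch of what this configuration might look like; the `set_organization` and `set_project` setter names are assumptions and the IDs are placeholders, so verify them against the crate documentation.

```rust
use openai_dive::v1::api::Client;

fn main() {
    let mut client = Client::new_from_env();

    // Scope subsequent requests to a specific organization and project (assumed setters).
    client.set_organization("org-XXXXXXXX");
    client.set_project("proj_XXXXXXXX");
}
```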
@@ -587,20 +591,20 @@ You can use these predefined constants to set the model in the parameters or use

#### Flagship Models

-- Gpt45Preview (`gpt-4.5-preview`)
+- Gpt41 (`gpt-4.1`)
- Gpt4O (`gpt-4o`)
- Gpt4OAudioPreview (`gpt-4o-audio-preview`)

#### Cost-Optimized Models

+- O4Mini (`o4-mini`)
+- Gpt41Nano (`gpt-4.1-nano`)
- Gpt4OMini (`gpt-4o-mini`)
- Gpt4OMiniAudioPreview (`gpt-4o-mini-audio-preview`)

#### Reasoning Models

+- O4Mini (`o4-mini`)
- O3Mini (`o3-mini`)
- O1 (`o1`)
- O1Mini (`o1-mini`)

#### Tool Models

@@ -611,24 +615,26 @@ You can use these predefined constants to set the model in the parameters or use
#### Moderation Models

- OmniModerationLatest (`omni-moderation-latest`)
-- TextModerationLatest (`text-moderation-latest`)

#### Embedding Models

- TextEmbedding3Small (`text-embedding-3-small`)
- TextEmbedding3Large (`text-embedding-3-large`)

-#### Whisper Models
+#### Transcription Models

+- Gpt4OTranscribe (`gpt-4o-transcribe`)
- Whisper1 (`whisper-1`)

#### TTS Models

+- Gpt4OMiniTts (`gpt-4o-mini-tts`)
- Tts1 (`tts-1`)
- Tts1HD (`tts-1-hd`)

-#### DALL·E Models
+#### Image Models

+- GptImage1 (`gpt-image-1`)
- DallE3 (`dall-e-3`)
- DallE2 (`dall-e-2`)
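
As a small illustration, the predefined constants above resolve to the corresponding API model identifiers via `to_string()` (enum names and string values taken from the lists and examples in this PR); a plain string also works wherever a model name is expected.

```rust
use openai_dive::v1::models::{ImageModel, ReasoningModel, TranscriptionModel};

fn main() {
    // Each constant's `to_string()` yields the model identifier expected by the API.
    assert_eq!(ReasoningModel::O3Mini.to_string(), "o3-mini");
    assert_eq!(TranscriptionModel::Whisper1.to_string(), "whisper-1");
    assert_eq!(ImageModel::DallE3.to_string(), "dall-e-3");

    // Passing a plain string is also possible, e.g. `.model("gpt-image-1")`.
    let custom_model = "gpt-image-1".to_string();
    println!("{custom_model}");
}
```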
