diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b4e9013b..6db19b95 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.16.0" + ".": "0.17.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index e567ce69..65629665 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 109 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml -openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312 -config_hash: 9606bb315a193bfd8da0459040143242 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6a1bfd4738fff02ef5becc3fdb2bf0cd6c026f2c924d4147a2a515474477dd9a.yml +openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba +config_hash: a67c5e195a59855fe8a5db0dc61a3e7f diff --git a/CHANGELOG.md b/CHANGELOG.md index 29ba3df1..7d404b32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 0.17.0 (2025-08-08) + +Full Changelog: [v0.16.0...v0.17.0](https://github.com/openai/openai-ruby/compare/v0.16.0...v0.17.0) + +### Features + +* **api:** adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5 ([068a381](https://github.com/openai/openai-ruby/commit/068a381a17dd2d60865e67fcd17fa84d919f3f5c)) +* **api:** manual updates ([1d79621](https://github.com/openai/openai-ruby/commit/1d79621120fbccc8dd41f5af6df5a9b1a9018e73)) + + +### Bug Fixes + +* **client:** dont try to parse if content is missing ([#770](https://github.com/openai/openai-ruby/issues/770)) ([7f8f2d3](https://github.com/openai/openai-ruby/commit/7f8f2d32863fafc39ee4a884937673a2ad9be358)) +* **client:** fix verbosity parameter location in Responses ([a6b7ae8](https://github.com/openai/openai-ruby/commit/a6b7ae8b568c2214d4883fad44c9cf2e8a7d53e2)) +* **internal:** fix rbi error 
([803f20b](https://github.com/openai/openai-ruby/commit/803f20ba0c3751d28175dca99853783f0d851645)) +* **responses:** undo accidentally deleted fields ([#177](https://github.com/openai/openai-ruby/issues/177)) ([90a7c3a](https://github.com/openai/openai-ruby/commit/90a7c3ac8d22cc90b8ecaa3b091598ea3bc73029)) +* **responses:** remove incorrect verbosity param ([127e2d1](https://github.com/openai/openai-ruby/commit/127e2d1b96b72307178446f0aa8acc1d3ad31367)) + + +### Chores + +* **internal:** increase visibility of internal helper method ([eddbcda](https://github.com/openai/openai-ruby/commit/eddbcda189ac0a864fc3dadc5dd3578d730c491f)) +* update @stainless-api/prism-cli to v5.15.0 ([aaa7d89](https://github.com/openai/openai-ruby/commit/aaa7d895a3dba31f32cf5f4373a49d1571667fc6)) + ## 0.16.0 (2025-07-30) Full Changelog: [v0.15.0...v0.16.0](https://github.com/openai/openai-ruby/compare/v0.15.0...v0.16.0) diff --git a/Gemfile.lock b/Gemfile.lock index 10efc131..0448918d 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -11,7 +11,7 @@ GIT PATH remote: . specs: - openai (0.16.0) + openai (0.17.0) connection_pool GEM diff --git a/README.md b/README.md index a6b09746..8db9d78f 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application ```ruby -gem "openai", "~> 0.16.0" +gem "openai", "~> 0.17.0" ``` @@ -30,10 +30,7 @@ openai = OpenAI::Client.new( api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted ) -chat_completion = openai.chat.completions.create( - messages: [{role: "user", content: "Say this is a test"}], - model: :"gpt-4.1" -) +chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5") puts(chat_completion) ``` @@ -45,7 +42,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
```ruby stream = openai.responses.stream( input: "Write a haiku about OpenAI.", - model: :"gpt-4.1" + model: :"gpt-5" ) stream.each do |event| @@ -343,7 +340,7 @@ openai = OpenAI::Client.new( # Or, configure per-request: openai.chat.completions.create( messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {max_retries: 5} ) ``` @@ -361,7 +358,7 @@ openai = OpenAI::Client.new( # Or, configure per-request: openai.chat.completions.create( messages: [{role: "user", content: "How can I list all files in a directory using Python?"}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {timeout: 5} ) ``` @@ -396,7 +393,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete chat_completion = openai.chat.completions.create( messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: { extra_query: {my_query_parameter: value}, extra_body: {my_body_parameter: value}, @@ -444,7 +441,7 @@ You can provide typesafe request parameters like so: ```ruby openai.chat.completions.create( messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")], - model: :"gpt-4.1" + model: :"gpt-5" ) ``` @@ -452,15 +449,12 @@ Or, equivalently: ```ruby # Hashes work, but are not typesafe: -openai.chat.completions.create( - messages: [{role: "user", content: "Say this is a test"}], - model: :"gpt-4.1" -) +openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5") # You can also splat a full Params class: params = OpenAI::Chat::CompletionCreateParams.new( messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")], - model: :"gpt-4.1" + model: :"gpt-5" ) openai.chat.completions.create(**params) ``` @@ -470,11 +464,11 @@ openai.chat.completions.create(**params) Since 
this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols" instead, which is always a primitive at runtime: ```ruby -# :low -puts(OpenAI::ReasoningEffort::LOW) +# :minimal +puts(OpenAI::ReasoningEffort::MINIMAL) # Revealed type: `T.all(OpenAI::ReasoningEffort, Symbol)` -T.reveal_type(OpenAI::ReasoningEffort::LOW) +T.reveal_type(OpenAI::ReasoningEffort::MINIMAL) ``` Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal value: @@ -482,13 +476,13 @@ Enum parameters have a "relaxed" type, so you can either pass in enum constants ```ruby # Using the enum constants preserves the tagged type information: openai.chat.completions.create( - reasoning_effort: OpenAI::ReasoningEffort::LOW, + reasoning_effort: OpenAI::ReasoningEffort::MINIMAL, # … ) # Literal values are also permissible: openai.chat.completions.create( - reasoning_effort: :low, + reasoning_effort: :minimal, # … ) ``` diff --git a/examples/structured_outputs_chat_completions_function_calling.rb b/examples/structured_outputs_chat_completions_function_calling.rb index 456c22a2..f30d4dd1 100755 --- a/examples/structured_outputs_chat_completions_function_calling.rb +++ b/examples/structured_outputs_chat_completions_function_calling.rb @@ -27,6 +27,11 @@ class GetWeather < OpenAI::BaseModel .reject { _1.message.refusal } .flat_map { _1.message.tool_calls.to_a } .each do |tool_call| - # parsed is an instance of `GetWeather` - pp(tool_call.function.parsed) + case tool_call + when OpenAI::Chat::ChatCompletionMessageFunctionToolCall + # parsed is an instance of `GetWeather` + pp(tool_call.function.parsed) + else + puts("Unexpected tool call type: #{tool_call.type}") + end end diff --git a/lib/openai.rb b/lib/openai.rb index b062b182..b7bfe419 100644 --- a/lib/openai.rb +++ b/lib/openai.rb @@ -183,6 +183,8 @@ require_relative "openai/models/beta/thread_stream_event" 
require_relative "openai/models/beta/thread_update_params" require_relative "openai/models/chat/chat_completion" +require_relative "openai/models/chat/chat_completion_allowed_tool_choice" +require_relative "openai/models/chat/chat_completion_allowed_tools" require_relative "openai/models/chat/chat_completion_assistant_message_param" require_relative "openai/models/chat/chat_completion_audio" require_relative "openai/models/chat/chat_completion_audio_param" @@ -192,14 +194,19 @@ require_relative "openai/models/chat/chat_completion_content_part_input_audio" require_relative "openai/models/chat/chat_completion_content_part_refusal" require_relative "openai/models/chat/chat_completion_content_part_text" +require_relative "openai/models/chat/chat_completion_custom_tool" require_relative "openai/models/chat/chat_completion_deleted" require_relative "openai/models/chat/chat_completion_developer_message_param" require_relative "openai/models/chat/chat_completion_function_call_option" require_relative "openai/models/chat/chat_completion_function_message_param" +require_relative "openai/models/chat/chat_completion_function_tool" +require_relative "openai/models/chat/chat_completion_message_custom_tool_call" +require_relative "openai/models/chat/chat_completion_message_function_tool_call" require_relative "openai/models/chat/chat_completion_message_param" require_relative "openai/models/chat/chat_completion_message_tool_call" require_relative "openai/models/chat/chat_completion_modality" require_relative "openai/models/chat/chat_completion_named_tool_choice" +require_relative "openai/models/chat/chat_completion_named_tool_choice_custom" require_relative "openai/models/chat/chat_completion_prediction_content" require_relative "openai/models/chat/chat_completion_reasoning_effort" require_relative "openai/models/chat/chat_completion_role" @@ -240,6 +247,7 @@ require_relative "openai/models/containers/file_retrieve_response" require_relative 
"openai/models/containers/files/content_retrieve_params" require_relative "openai/models/create_embedding_response" +require_relative "openai/models/custom_tool_input_format" require_relative "openai/models/embedding" require_relative "openai/models/embedding_create_params" require_relative "openai/models/embedding_model" @@ -348,7 +356,10 @@ require_relative "openai/models/response_format_json_object" require_relative "openai/models/response_format_json_schema" require_relative "openai/models/response_format_text" +require_relative "openai/models/response_format_text_grammar" +require_relative "openai/models/response_format_text_python" require_relative "openai/models/responses/computer_tool" +require_relative "openai/models/responses/custom_tool" require_relative "openai/models/responses/easy_input_message" require_relative "openai/models/responses/file_search_tool" require_relative "openai/models/responses/function_tool" @@ -374,6 +385,10 @@ require_relative "openai/models/responses/response_content_part_done_event" require_relative "openai/models/responses/response_created_event" require_relative "openai/models/responses/response_create_params" +require_relative "openai/models/responses/response_custom_tool_call" +require_relative "openai/models/responses/response_custom_tool_call_input_delta_event" +require_relative "openai/models/responses/response_custom_tool_call_input_done_event" +require_relative "openai/models/responses/response_custom_tool_call_output" require_relative "openai/models/responses/response_delete_params" require_relative "openai/models/responses/response_error" require_relative "openai/models/responses/response_error_event" @@ -426,12 +441,12 @@ require_relative "openai/models/responses/response_prompt" require_relative "openai/models/responses/response_queued_event" require_relative "openai/models/responses/response_reasoning_item" -require_relative "openai/models/responses/response_reasoning_summary_delta_event" -require_relative 
"openai/models/responses/response_reasoning_summary_done_event" require_relative "openai/models/responses/response_reasoning_summary_part_added_event" require_relative "openai/models/responses/response_reasoning_summary_part_done_event" require_relative "openai/models/responses/response_reasoning_summary_text_delta_event" require_relative "openai/models/responses/response_reasoning_summary_text_done_event" +require_relative "openai/models/responses/response_reasoning_text_delta_event" +require_relative "openai/models/responses/response_reasoning_text_done_event" require_relative "openai/models/responses/response_refusal_delta_event" require_relative "openai/models/responses/response_refusal_done_event" require_relative "openai/models/responses/response_retrieve_params" @@ -445,6 +460,8 @@ require_relative "openai/models/responses/response_web_search_call_in_progress_event" require_relative "openai/models/responses/response_web_search_call_searching_event" require_relative "openai/models/responses/tool" +require_relative "openai/models/responses/tool_choice_allowed" +require_relative "openai/models/responses/tool_choice_custom" require_relative "openai/models/responses/tool_choice_function" require_relative "openai/models/responses/tool_choice_mcp" require_relative "openai/models/responses/tool_choice_options" diff --git a/lib/openai/internal/transport/base_client.rb b/lib/openai/internal/transport/base_client.rb index 6b8c2d3a..9ff4effb 100644 --- a/lib/openai/internal/transport/base_client.rb +++ b/lib/openai/internal/transport/base_client.rb @@ -365,7 +365,7 @@ def initialize( # # @raise [OpenAI::Errors::APIError] # @return [Array(Integer, Net::HTTPResponse, Enumerable)] - private def send_request(request, redirect_count:, retry_count:, send_retry_header:) + def send_request(request, redirect_count:, retry_count:, send_retry_header:) url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout) input = 
{**request.except(:timeout), deadline: OpenAI::Internal::Util.monotonic_secs + timeout} diff --git a/lib/openai/internal/type/enum.rb b/lib/openai/internal/type/enum.rb index 3ea232d9..70476264 100644 --- a/lib/openai/internal/type/enum.rb +++ b/lib/openai/internal/type/enum.rb @@ -19,11 +19,11 @@ module Type # @example # # `chat_model` is a `OpenAI::ChatModel` # case chat_model - # when OpenAI::ChatModel::GPT_4_1 + # when OpenAI::ChatModel::GPT_5 # # ... - # when OpenAI::ChatModel::GPT_4_1_MINI + # when OpenAI::ChatModel::GPT_5_MINI # # ... - # when OpenAI::ChatModel::GPT_4_1_NANO + # when OpenAI::ChatModel::GPT_5_NANO # # ... # else # puts(chat_model) @@ -31,11 +31,11 @@ module Type # # @example # case chat_model - # in :"gpt-4.1" + # in :"gpt-5" # # ... - # in :"gpt-4.1-mini" + # in :"gpt-5-mini" # # ... - # in :"gpt-4.1-nano" + # in :"gpt-5-nano" # # ... # else # puts(chat_model) diff --git a/lib/openai/internal/type/union.rb b/lib/openai/internal/type/union.rb index 5b00bc39..dda813a2 100644 --- a/lib/openai/internal/type/union.rb +++ b/lib/openai/internal/type/union.rb @@ -6,28 +6,24 @@ module Type # @api private # # @example - # # `chat_completion_content_part` is a `OpenAI::Chat::ChatCompletionContentPart` - # case chat_completion_content_part - # when OpenAI::Chat::ChatCompletionContentPartText - # puts(chat_completion_content_part.text) - # when OpenAI::Chat::ChatCompletionContentPartImage - # puts(chat_completion_content_part.image_url) - # when OpenAI::Chat::ChatCompletionContentPartInputAudio - # puts(chat_completion_content_part.input_audio) + # # `custom_tool_input_format` is a `OpenAI::CustomToolInputFormat` + # case custom_tool_input_format + # when OpenAI::CustomToolInputFormat::Text + # puts(custom_tool_input_format.type) + # when OpenAI::CustomToolInputFormat::Grammar + # puts(custom_tool_input_format.definition) # else - # puts(chat_completion_content_part) + # puts(custom_tool_input_format) # end # # @example - # case 
chat_completion_content_part - # in {type: :text, text: text} - # puts(text) - # in {type: :image_url, image_url: image_url} - # puts(image_url) - # in {type: :input_audio, input_audio: input_audio} - # puts(input_audio) + # case custom_tool_input_format + # in {type: :text} + # # ... + # in {type: :grammar, definition: definition, syntax: syntax} + # puts(definition) # else - # puts(chat_completion_content_part) + # puts(custom_tool_input_format) # end module Union include OpenAI::Internal::Type::Converter diff --git a/lib/openai/models.rb b/lib/openai/models.rb index 26243742..23993a69 100644 --- a/lib/openai/models.rb +++ b/lib/openai/models.rb @@ -93,6 +93,8 @@ module OpenAI CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse + CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat + Embedding = OpenAI::Models::Embedding EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams @@ -209,6 +211,10 @@ module OpenAI ResponseFormatText = OpenAI::Models::ResponseFormatText + ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar + + ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython + Responses = OpenAI::Models::Responses ResponsesModel = OpenAI::Models::ResponsesModel diff --git a/lib/openai/models/beta/assistant_create_params.rb b/lib/openai/models/beta/assistant_create_params.rb index 84e6d083..6aa28dd3 100644 --- a/lib/openai/models/beta/assistant_create_params.rb +++ b/lib/openai/models/beta/assistant_create_params.rb @@ -49,12 +49,11 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel optional :name, String, nil?: true # @!attribute reasoning_effort - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. 
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -133,7 +132,7 @@ class AssistantCreateParams < OpenAI::Internal::Type::BaseModel # # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # diff --git a/lib/openai/models/beta/assistant_update_params.rb b/lib/openai/models/beta/assistant_update_params.rb index 6f8f9b27..433bd650 100644 --- a/lib/openai/models/beta/assistant_update_params.rb +++ b/lib/openai/models/beta/assistant_update_params.rb @@ -49,12 +49,11 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel optional :name, String, nil?: true # @!attribute reasoning_effort - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. 
# # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -133,7 +132,7 @@ class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel # # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # @@ -157,6 +156,18 @@ module Model variant String + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5 } + + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI } + + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO } + + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_2025_08_07 } + + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_MINI_2025_08_07 } + + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_5_NANO_2025_08_07 } + variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1 } variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI } @@ -238,6 +249,12 @@ module Model # @!group + GPT_5 = :"gpt-5" + GPT_5_MINI = :"gpt-5-mini" + GPT_5_NANO = :"gpt-5-nano" + GPT_5_2025_08_07 = :"gpt-5-2025-08-07" + GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07" + GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07" GPT_4_1 = :"gpt-4.1" GPT_4_1_MINI = :"gpt-4.1-mini" GPT_4_1_NANO = :"gpt-4.1-nano" diff --git a/lib/openai/models/beta/threads/run_create_params.rb 
b/lib/openai/models/beta/threads/run_create_params.rb index 9efd3f28..3c910eb7 100644 --- a/lib/openai/models/beta/threads/run_create_params.rb +++ b/lib/openai/models/beta/threads/run_create_params.rb @@ -107,12 +107,11 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean # @!attribute reasoning_effort - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -216,7 +215,7 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel # # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. 
Compatible with [GPT-4o](https: # diff --git a/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb b/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb new file mode 100644 index 00000000..bc81ddfb --- /dev/null +++ b/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb @@ -0,0 +1,33 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel + # @!attribute allowed_tools + # Constrains the tools available to the model to a pre-defined set. + # + # @return [OpenAI::Models::Chat::ChatCompletionAllowedTools] + required :allowed_tools, -> { OpenAI::Chat::ChatCompletionAllowedTools } + + # @!attribute type + # Allowed tool configuration type. Always `allowed_tools`. + # + # @return [Symbol, :allowed_tools] + required :type, const: :allowed_tools + + # @!method initialize(allowed_tools:, type: :allowed_tools) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAllowedToolChoice} for more details. + # + # Constrains the tools available to the model to a pre-defined set. + # + # @param allowed_tools [OpenAI::Models::Chat::ChatCompletionAllowedTools] Constrains the tools available to the model to a pre-defined set. + # + # @param type [Symbol, :allowed_tools] Allowed tool configuration type. Always `allowed_tools`. 
+ end + end + + ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice + end +end diff --git a/lib/openai/models/chat/chat_completion_allowed_tools.rb b/lib/openai/models/chat/chat_completion_allowed_tools.rb new file mode 100644 index 00000000..59d2c94b --- /dev/null +++ b/lib/openai/models/chat/chat_completion_allowed_tools.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel + # @!attribute mode + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode] + required :mode, enum: -> { OpenAI::Chat::ChatCompletionAllowedTools::Mode } + + # @!attribute tools + # A list of tool definitions that the model should be allowed to call. + # + # For the Chat Completions API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "function": { "name": "get_weather" } }, + # { "type": "function", "function": { "name": "get_time" } } + # ] + # ``` + # + # @return [Array<Hash{Symbol=>Object}>] + required :tools, + OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]] + + # @!method initialize(mode:, tools:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionAllowedTools} for more details. + # + # Constrains the tools available to the model to a pre-defined set. + # + # @param mode [Symbol, OpenAI::Models::Chat::ChatCompletionAllowedTools::Mode] Constrains the tools available to the model to a pre-defined set. + # + # @param tools [Array<Hash{Symbol=>Object}>] A list of tool definitions that the model should be allowed to call.
+ + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @see OpenAI::Models::Chat::ChatCompletionAllowedTools#mode + module Mode + extend OpenAI::Internal::Type::Enum + + AUTO = :auto + REQUIRED = :required + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/chat/chat_completion_assistant_message_param.rb b/lib/openai/models/chat/chat_completion_assistant_message_param.rb index 338351a8..ddff94b5 100644 --- a/lib/openai/models/chat/chat_completion_assistant_message_param.rb +++ b/lib/openai/models/chat/chat_completion_assistant_message_param.rb @@ -55,11 +55,9 @@ class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel # @!attribute tool_calls # The tool calls generated by the model, such as function calls. # - # @return [Array, nil] + # @return [Array, nil] optional :tool_calls, - -> { - OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessageToolCall] - } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageToolCall] } # @!method initialize(audio: nil, content: nil, function_call: nil, name: nil, refusal: nil, tool_calls: nil, role: :assistant) # Some parameter documentations has been truncated, see @@ -77,7 +75,7 @@ class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel # # @param refusal [String, nil] The refusal message by the assistant. # - # @param tool_calls [Array] The tool calls generated by the model, such as function calls. + # @param tool_calls [Array] The tool calls generated by the model, such as function calls. # # @param role [Symbol, :assistant] The role of the messages author, in this case `assistant`. 
diff --git a/lib/openai/models/chat/chat_completion_custom_tool.rb b/lib/openai/models/chat/chat_completion_custom_tool.rb new file mode 100644 index 00000000..fed4564e --- /dev/null +++ b/lib/openai/models/chat/chat_completion_custom_tool.rb @@ -0,0 +1,163 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionCustomTool < OpenAI::Internal::Type::BaseModel + # @!attribute custom + # Properties of the custom tool. + # + # @return [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom] + required :custom, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom } + + # @!attribute type + # The type of the custom tool. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(custom:, type: :custom) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionCustomTool} for more details. + # + # A custom tool that processes input using a specified format. + # + # @param custom [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom] Properties of the custom tool. + # + # @param type [Symbol, :custom] The type of the custom tool. Always `custom`. + + # @see OpenAI::Models::Chat::ChatCompletionCustomTool#custom + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool, used to identify it in tool calls. + # + # @return [String] + required :name, String + + # @!attribute description + # Optional description of the custom tool, used to provide more context. + # + # @return [String, nil] + optional :description, String + + # @!attribute format_ + # The input format for the custom tool. Default is unconstrained text. 
+ # + # @return [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Text, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar, nil] + optional :format_, + union: -> { + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format + }, + api_name: :format + + # @!method initialize(name:, description: nil, format_: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionCustomTool::Custom} for more details. + # + # Properties of the custom tool. + # + # @param name [String] The name of the custom tool, used to identify it in tool calls. + # + # @param description [String] Optional description of the custom tool, used to provide more context. + # + # @param format_ [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Text, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar] The input format for the custom tool. Default is unconstrained text. + + # The input format for the custom tool. Default is unconstrained text. + # + # @see OpenAI::Models::Chat::ChatCompletionCustomTool::Custom#format_ + module Format + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Unconstrained free-form text. + variant :text, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text } + + # A grammar defined by the user. + variant :grammar, -> { OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar } + + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Unconstrained text format. Always `text`. + # + # @return [Symbol, :text] + required :type, const: :text + + # @!method initialize(type: :text) + # Unconstrained free-form text. + # + # @param type [Symbol, :text] Unconstrained text format. Always `text`. + end + + class Grammar < OpenAI::Internal::Type::BaseModel + # @!attribute grammar + # Your chosen grammar. 
+ # + # @return [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar] + required :grammar, + -> { + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar + } + + # @!attribute type + # Grammar format. Always `grammar`. + # + # @return [Symbol, :grammar] + required :type, const: :grammar + + # @!method initialize(grammar:, type: :grammar) + # A grammar defined by the user. + # + # @param grammar [OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar] Your chosen grammar. + # + # @param type [Symbol, :grammar] Grammar format. Always `grammar`. + + # @see OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar#grammar + class Grammar < OpenAI::Internal::Type::BaseModel + # @!attribute definition + # The grammar definition. + # + # @return [String] + required :definition, String + + # @!attribute syntax + # The syntax of the grammar definition. One of `lark` or `regex`. + # + # @return [Symbol, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax] + required :syntax, + enum: -> { + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax + } + + # @!method initialize(definition:, syntax:) + # Your chosen grammar. + # + # @param definition [String] The grammar definition. + # + # @param syntax [Symbol, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`. + + # The syntax of the grammar definition. One of `lark` or `regex`. 
+ # + # @see OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar#syntax + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK = :lark + REGEX = :regex + + # @!method self.values + # @return [Array] + end + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Text, OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar)] + end + end + end + end + + ChatCompletionCustomTool = Chat::ChatCompletionCustomTool + end +end diff --git a/lib/openai/models/chat/chat_completion_function_tool.rb b/lib/openai/models/chat/chat_completion_function_tool.rb new file mode 100644 index 00000000..dbedf8e5 --- /dev/null +++ b/lib/openai/models/chat/chat_completion_function_tool.rb @@ -0,0 +1,29 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionFunctionTool < OpenAI::Internal::Type::BaseModel + # @!attribute function + # + # @return [OpenAI::Models::FunctionDefinition] + required :function, -> { OpenAI::FunctionDefinition } + + # @!attribute type + # The type of the tool. Currently, only `function` is supported. + # + # @return [Symbol, :function] + required :type, const: :function + + # @!method initialize(function:, type: :function) + # A function tool that can be used to generate a response. + # + # @param function [OpenAI::Models::FunctionDefinition] + # + # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. 
+ end + end + + ChatCompletionFunctionTool = Chat::ChatCompletionFunctionTool + end +end diff --git a/lib/openai/models/chat/chat_completion_message.rb b/lib/openai/models/chat/chat_completion_message.rb index 97bd8c64..7aa0fb19 100644 --- a/lib/openai/models/chat/chat_completion_message.rb +++ b/lib/openai/models/chat/chat_completion_message.rb @@ -56,11 +56,9 @@ class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel # @!attribute tool_calls # The tool calls generated by the model, such as function calls. # - # @return [Array, nil] + # @return [Array, nil] optional :tool_calls, - -> { - OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessageToolCall] - } + -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageToolCall] } # @!method initialize(content:, refusal:, annotations: nil, audio: nil, function_call: nil, tool_calls: nil, role: :assistant) # Some parameter documentations has been truncated, see @@ -78,7 +76,7 @@ class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel # # @param function_call [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall] Deprecated and replaced by `tool_calls`. The name and arguments of a function th # - # @param tool_calls [Array] The tool calls generated by the model, such as function calls. + # @param tool_calls [Array] The tool calls generated by the model, such as function calls. # # @param role [Symbol, :assistant] The role of the author of this message. diff --git a/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb b/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb new file mode 100644 index 00000000..2d83c132 --- /dev/null +++ b/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb @@ -0,0 +1,60 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionMessageCustomToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The ID of the tool call. 
+ # + # @return [String] + required :id, String + + # @!attribute custom + # The custom tool that the model called. + # + # @return [OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall::Custom] + required :custom, -> { OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom } + + # @!attribute type + # The type of the tool. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(id:, custom:, type: :custom) + # A call to a custom tool created by the model. + # + # @param id [String] The ID of the tool call. + # + # @param custom [OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall::Custom] The custom tool that the model called. + # + # @param type [Symbol, :custom] The type of the tool. Always `custom`. + + # @see OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall#custom + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute input + # The input for the custom tool call generated by the model. + # + # @return [String] + required :input, String + + # @!attribute name + # The name of the custom tool to call. + # + # @return [String] + required :name, String + + # @!method initialize(input:, name:) + # The custom tool that the model called. + # + # @param input [String] The input for the custom tool call generated by the model. + # + # @param name [String] The name of the custom tool to call. 
+ end + end + end + + ChatCompletionMessageCustomToolCall = Chat::ChatCompletionMessageCustomToolCall + end +end diff --git a/lib/openai/models/chat/chat_completion_message_function_tool_call.rb b/lib/openai/models/chat/chat_completion_message_function_tool_call.rb new file mode 100644 index 00000000..3cd8808a --- /dev/null +++ b/lib/openai/models/chat/chat_completion_message_function_tool_call.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionMessageFunctionToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute id + # The ID of the tool call. + # + # @return [String] + required :id, String + + # @!attribute function + # The function that the model called. + # + # @return [OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall::Function] + required :function, -> { OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function } + + # @!attribute type + # The type of the tool. Currently, only `function` is supported. + # + # @return [Symbol, :function] + required :type, const: :function + + # @!method initialize(id:, function:, type: :function) + # A call to a function tool created by the model. + # + # @param id [String] The ID of the tool call. + # + # @param function [OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall::Function] The function that the model called. + # + # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. + + # @see OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall#function + class Function < OpenAI::Internal::Type::BaseModel + # @!attribute arguments + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. 
+ # + # @return [String] + required :arguments, String + + # @!attribute parsed + # The parsed contents of the arguments. + # + # @return [Object, nil] + required :parsed, OpenAI::StructuredOutput::ParsedJson + + # @!attribute name + # The name of the function to call. + # + # @return [String] + required :name, String + + # @!method initialize(arguments:, name:) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall::Function} for more + # details. + # + # The function that the model called. + # + # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma + # + # @param name [String] The name of the function to call. + end + end + end + + ChatCompletionMessageFunctionToolCall = Chat::ChatCompletionMessageFunctionToolCall + end +end diff --git a/lib/openai/models/chat/chat_completion_message_tool_call.rb b/lib/openai/models/chat/chat_completion_message_tool_call.rb index a9cc5e74..15d9bd07 100644 --- a/lib/openai/models/chat/chat_completion_message_tool_call.rb +++ b/lib/openai/models/chat/chat_completion_message_tool_call.rb @@ -3,66 +3,20 @@ module OpenAI module Models module Chat - class ChatCompletionMessageToolCall < OpenAI::Internal::Type::BaseModel - # @!attribute id - # The ID of the tool call. - # - # @return [String] - required :id, String + # A call to a function tool created by the model. + module ChatCompletionMessageToolCall + extend OpenAI::Internal::Type::Union - # @!attribute function - # The function that the model called. - # - # @return [OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function] - required :function, -> { OpenAI::Chat::ChatCompletionMessageToolCall::Function } + discriminator :type - # @!attribute type - # The type of the tool. Currently, only `function` is supported. - # - # @return [Symbol, :function] - required :type, const: :function + # A call to a function tool created by the model. 
+ variant :function, -> { OpenAI::Chat::ChatCompletionMessageFunctionToolCall } - # @!method initialize(id:, function:, type: :function) - # @param id [String] The ID of the tool call. - # - # @param function [OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function] The function that the model called. - # - # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. + # A call to a custom tool created by the model. + variant :custom, -> { OpenAI::Chat::ChatCompletionMessageCustomToolCall } - # @see OpenAI::Models::Chat::ChatCompletionMessageToolCall#function - class Function < OpenAI::Internal::Type::BaseModel - # @!attribute arguments - # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. - # - # @return [String] - required :arguments, String - - # @!attribute parsed - # The parsed contents of the arguments. - # - # @return [Object, nil] - required :parsed, OpenAI::StructuredOutput::ParsedJson - - # @!attribute name - # The name of the function to call. - # - # @return [String] - required :name, String - - # @!method initialize(arguments:, name:) - # Some parameter documentations has been truncated, see - # {OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function} for more - # details. - # - # The function that the model called. - # - # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma - # - # @param name [String] The name of the function to call. 
- end + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionMessageFunctionToolCall, OpenAI::Models::Chat::ChatCompletionMessageCustomToolCall)] end end diff --git a/lib/openai/models/chat/chat_completion_named_tool_choice.rb b/lib/openai/models/chat/chat_completion_named_tool_choice.rb index 493e6c0c..a7cea41d 100644 --- a/lib/openai/models/chat/chat_completion_named_tool_choice.rb +++ b/lib/openai/models/chat/chat_completion_named_tool_choice.rb @@ -10,7 +10,7 @@ class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel required :function, -> { OpenAI::Chat::ChatCompletionNamedToolChoice::Function } # @!attribute type - # The type of the tool. Currently, only `function` is supported. + # For function calling, the type is always `function`. # # @return [Symbol, :function] required :type, const: :function @@ -21,7 +21,7 @@ class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel # # @param function [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function] # - # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. + # @param type [Symbol, :function] For function calling, the type is always `function`. 
# @see OpenAI::Models::Chat::ChatCompletionNamedToolChoice#function class Function < OpenAI::Internal::Type::BaseModel diff --git a/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb b/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb new file mode 100644 index 00000000..cf2d854e --- /dev/null +++ b/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Chat + class ChatCompletionNamedToolChoiceCustom < OpenAI::Internal::Type::BaseModel + # @!attribute custom + # + # @return [OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom::Custom] + required :custom, -> { OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom } + + # @!attribute type + # For custom tool calling, the type is always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(custom:, type: :custom) + # Specifies a tool the model should use. Use to force the model to call a specific + # custom tool. + # + # @param custom [OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom::Custom] + # + # @param type [Symbol, :custom] For custom tool calling, the type is always `custom`. + + # @see OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom#custom + class Custom < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool to call. + # + # @return [String] + required :name, String + + # @!method initialize(name:) + # @param name [String] The name of the custom tool to call. 
+ end + end + end + + ChatCompletionNamedToolChoiceCustom = Chat::ChatCompletionNamedToolChoiceCustom + end +end diff --git a/lib/openai/models/chat/chat_completion_stream_options.rb b/lib/openai/models/chat/chat_completion_stream_options.rb index acb72bd9..8ce0d03e 100644 --- a/lib/openai/models/chat/chat_completion_stream_options.rb +++ b/lib/openai/models/chat/chat_completion_stream_options.rb @@ -4,6 +4,17 @@ module OpenAI module Models module Chat class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel + # @!attribute include_obfuscation + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + # + # @return [Boolean, nil] + optional :include_obfuscation, OpenAI::Internal::Type::Boolean + # @!attribute include_usage # If set, an additional chunk will be streamed before the `data: [DONE]` message. # The `usage` field on this chunk shows the token usage statistics for the entire @@ -16,12 +27,14 @@ class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel # @return [Boolean, nil] optional :include_usage, OpenAI::Internal::Type::Boolean - # @!method initialize(include_usage: nil) + # @!method initialize(include_obfuscation: nil, include_usage: nil) # Some parameter documentations has been truncated, see # {OpenAI::Models::Chat::ChatCompletionStreamOptions} for more details. # # Options for streaming response. Only set this when you set `stream: true`. # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. 
Stream obfuscation adds + # # @param include_usage [Boolean] If set, an additional chunk will be streamed before the `data: [DONE]` end end diff --git a/lib/openai/models/chat/chat_completion_tool.rb b/lib/openai/models/chat/chat_completion_tool.rb index d6ee8c94..a9a2facf 100644 --- a/lib/openai/models/chat/chat_completion_tool.rb +++ b/lib/openai/models/chat/chat_completion_tool.rb @@ -3,22 +3,20 @@ module OpenAI module Models module Chat - class ChatCompletionTool < OpenAI::Internal::Type::BaseModel - # @!attribute function - # - # @return [OpenAI::Models::FunctionDefinition] - required :function, -> { OpenAI::FunctionDefinition } + # A function tool that can be used to generate a response. + module ChatCompletionTool + extend OpenAI::Internal::Type::Union - # @!attribute type - # The type of the tool. Currently, only `function` is supported. - # - # @return [Symbol, :function] - required :type, const: :function + discriminator :type - # @!method initialize(function:, type: :function) - # @param function [OpenAI::Models::FunctionDefinition] - # - # @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported. + # A function tool that can be used to generate a response. + variant :function, -> { OpenAI::Chat::ChatCompletionFunctionTool } + + # A custom tool that processes input using a specified format. 
+ variant :custom, -> { OpenAI::Chat::ChatCompletionCustomTool } + + # @!method self.variants + # @return [Array(OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool)] end end diff --git a/lib/openai/models/chat/chat_completion_tool_choice_option.rb b/lib/openai/models/chat/chat_completion_tool_choice_option.rb index 73874236..cbb70889 100644 --- a/lib/openai/models/chat/chat_completion_tool_choice_option.rb +++ b/lib/openai/models/chat/chat_completion_tool_choice_option.rb @@ -18,9 +18,15 @@ module ChatCompletionToolChoiceOption # `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. variant enum: -> { OpenAI::Chat::ChatCompletionToolChoiceOption::Auto } + # Constrains the tools available to the model to a pre-defined set. + variant -> { OpenAI::Chat::ChatCompletionAllowedToolChoice } + # Specifies a tool the model should use. Use to force the model to call a specific function. variant -> { OpenAI::Chat::ChatCompletionNamedToolChoice } + # Specifies a tool the model should use. Use to force the model to call a specific custom tool. + variant -> { OpenAI::Chat::ChatCompletionNamedToolChoiceCustom } + # `none` means the model will not call any tool and instead generates a message. # `auto` means the model can pick between generating a message or calling one or # more tools. `required` means the model must call one or more tools. 
@@ -36,7 +42,7 @@ module Auto end # @!method self.variants - # @return [Array(Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice)] + # @return [Array(Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom)] end end diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index 11545213..e95c14eb 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -191,12 +191,11 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel optional :prompt_cache_key, String # @!attribute reasoning_effort - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -303,13 +302,13 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # `none` is the default when no tools are present. `auto` is the default if tools # are present. 
# - # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, nil] + # @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom, nil] optional :tool_choice, union: -> { OpenAI::Chat::ChatCompletionToolChoiceOption } # @!attribute tools - # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). # # @return [Array, nil] optional :tools, @@ -349,6 +348,14 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :user, String + # @!attribute verbosity + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + # + # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] + optional :verbosity, enum: -> { OpenAI::Chat::CompletionCreateParams::Verbosity }, nil?: true + # @!attribute web_search_options # This tool searches the web for relevant results to use in a response. 
Learn more # about the @@ -357,7 +364,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil] optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions } - # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {}) + # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::Chat::CompletionCreateParams} for more details. 
# @@ -395,7 +402,7 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. # @@ -413,9 +420,9 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model. + # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. # - # @param tools [Array] A list of tools the model may call. Currently, only functions are supported as a + # @param tools [Array] A list of tools the model may call. You can provide either # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -423,6 +430,8 @@ class CompletionCreateParams < OpenAI::Internal::Type::BaseModel # # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. 
Use # + # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in + # # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response. # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] @@ -622,6 +631,20 @@ module Stop StringArray = OpenAI::Internal::Type::ArrayOf[String] end + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + module Verbosity + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + + # @!method self.values + # @return [Array] + end + class WebSearchOptions < OpenAI::Internal::Type::BaseModel # @!attribute search_context_size # High level guidance for the amount of context window space to use for the diff --git a/lib/openai/models/chat_model.rb b/lib/openai/models/chat_model.rb index e62d4d46..adeb1665 100644 --- a/lib/openai/models/chat_model.rb +++ b/lib/openai/models/chat_model.rb @@ -5,6 +5,13 @@ module Models module ChatModel extend OpenAI::Internal::Type::Enum + GPT_5 = :"gpt-5" + GPT_5_MINI = :"gpt-5-mini" + GPT_5_NANO = :"gpt-5-nano" + GPT_5_2025_08_07 = :"gpt-5-2025-08-07" + GPT_5_MINI_2025_08_07 = :"gpt-5-mini-2025-08-07" + GPT_5_NANO_2025_08_07 = :"gpt-5-nano-2025-08-07" + GPT_5_CHAT_LATEST = :"gpt-5-chat-latest" GPT_4_1 = :"gpt-4.1" GPT_4_1_MINI = :"gpt-4.1-mini" GPT_4_1_NANO = :"gpt-4.1-nano" diff --git a/lib/openai/models/custom_tool_input_format.rb b/lib/openai/models/custom_tool_input_format.rb new file mode 100644 index 00000000..e716a67d --- /dev/null +++ b/lib/openai/models/custom_tool_input_format.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +module OpenAI + module Models + # The 
input format for the custom tool. Default is unconstrained text. + module CustomToolInputFormat + extend OpenAI::Internal::Type::Union + + discriminator :type + + # Unconstrained free-form text. + variant :text, -> { OpenAI::CustomToolInputFormat::Text } + + # A grammar defined by the user. + variant :grammar, -> { OpenAI::CustomToolInputFormat::Grammar } + + class Text < OpenAI::Internal::Type::BaseModel + # @!attribute type + # Unconstrained text format. Always `text`. + # + # @return [Symbol, :text] + required :type, const: :text + + # @!method initialize(type: :text) + # Unconstrained free-form text. + # + # @param type [Symbol, :text] Unconstrained text format. Always `text`. + end + + class Grammar < OpenAI::Internal::Type::BaseModel + # @!attribute definition + # The grammar definition. + # + # @return [String] + required :definition, String + + # @!attribute syntax + # The syntax of the grammar definition. One of `lark` or `regex`. + # + # @return [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] + required :syntax, enum: -> { OpenAI::CustomToolInputFormat::Grammar::Syntax } + + # @!attribute type + # Grammar format. Always `grammar`. + # + # @return [Symbol, :grammar] + required :type, const: :grammar + + # @!method initialize(definition:, syntax:, type: :grammar) + # A grammar defined by the user. + # + # @param definition [String] The grammar definition. + # + # @param syntax [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`. + # + # @param type [Symbol, :grammar] Grammar format. Always `grammar`. + + # The syntax of the grammar definition. One of `lark` or `regex`. 
+ # + # @see OpenAI::Models::CustomToolInputFormat::Grammar#syntax + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK = :lark + REGEX = :regex + + # @!method self.values + # @return [Array] + end + end + + # @!method self.variants + # @return [Array(OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar)] + end + end +end diff --git a/lib/openai/models/evals/create_eval_completions_run_data_source.rb b/lib/openai/models/evals/create_eval_completions_run_data_source.rb index 28b05c47..0fd34948 100644 --- a/lib/openai/models/evals/create_eval_completions_run_data_source.rb +++ b/lib/openai/models/evals/create_eval_completions_run_data_source.rb @@ -512,8 +512,8 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # tool. Use this to provide a list of functions the model may generate JSON inputs # for. A max of 128 functions are supported. # - # @return [Array, nil] - optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTool] } + # @return [Array, nil] + optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionFunctionTool] } # @!attribute top_p # An alternative to temperature for nucleus sampling; 1.0 includes all tokens. @@ -534,7 +534,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param temperature [Float] A higher temperature increases randomness in the outputs. # - # @param tools [Array] A list of tools the model may call. Currently, only functions are supported as a + # @param tools [Array] A list of tools the model may call. Currently, only functions are supported as a # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. 
diff --git a/lib/openai/models/evals/run_cancel_response.rb b/lib/openai/models/evals/run_cancel_response.rb index 0c9bb2d2..49fda248 100644 --- a/lib/openai/models/evals/run_cancel_response.rb +++ b/lib/openai/models/evals/run_cancel_response.rb @@ -686,7 +686,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -708,7 +708,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/evals/run_create_params.rb b/lib/openai/models/evals/run_create_params.rb index 0237a2ab..494bce0c 100644 --- a/lib/openai/models/evals/run_create_params.rb +++ b/lib/openai/models/evals/run_create_params.rb @@ -650,7 +650,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). 
# - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -672,7 +672,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/evals/run_create_response.rb b/lib/openai/models/evals/run_create_response.rb index 6c0cdf67..175718a2 100644 --- a/lib/openai/models/evals/run_create_response.rb +++ b/lib/openai/models/evals/run_create_response.rb @@ -686,7 +686,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -708,7 +708,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. 
diff --git a/lib/openai/models/evals/run_list_response.rb b/lib/openai/models/evals/run_list_response.rb index 66bfa2f1..86690fce 100644 --- a/lib/openai/models/evals/run_list_response.rb +++ b/lib/openai/models/evals/run_list_response.rb @@ -685,7 +685,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). # - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -707,7 +707,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/evals/run_retrieve_response.rb b/lib/openai/models/evals/run_retrieve_response.rb index 1c0a6f8b..942f613c 100644 --- a/lib/openai/models/evals/run_retrieve_response.rb +++ b/lib/openai/models/evals/run_retrieve_response.rb @@ -690,7 +690,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # the model to call your own code. Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). 
# - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -712,7 +712,7 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel # # @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens. diff --git a/lib/openai/models/reasoning.rb b/lib/openai/models/reasoning.rb index 2cdebe06..d39b2671 100644 --- a/lib/openai/models/reasoning.rb +++ b/lib/openai/models/reasoning.rb @@ -4,12 +4,11 @@ module OpenAI module Models class Reasoning < OpenAI::Internal::Type::BaseModel # @!attribute effort - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. # # @return [Symbol, OpenAI::Models::ReasoningEffort, nil] optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true @@ -43,7 +42,7 @@ class Reasoning < OpenAI::Internal::Type::BaseModel # Configuration options for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). 
# - # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param generate_summary [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] **Deprecated:** use `summary` instead. # diff --git a/lib/openai/models/reasoning_effort.rb b/lib/openai/models/reasoning_effort.rb index f9990508..486b6d31 100644 --- a/lib/openai/models/reasoning_effort.rb +++ b/lib/openai/models/reasoning_effort.rb @@ -2,15 +2,15 @@ module OpenAI module Models - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. module ReasoningEffort extend OpenAI::Internal::Type::Enum + MINIMAL = :minimal LOW = :low MEDIUM = :medium HIGH = :high diff --git a/lib/openai/models/response_format_text_grammar.rb b/lib/openai/models/response_format_text_grammar.rb new file mode 100644 index 00000000..8d43e38f --- /dev/null +++ b/lib/openai/models/response_format_text_grammar.rb @@ -0,0 +1,27 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel + # @!attribute grammar + # The custom grammar for the model to follow. + # + # @return [String] + required :grammar, String + + # @!attribute type + # The type of response format being defined. Always `grammar`. + # + # @return [Symbol, :grammar] + required :type, const: :grammar + + # @!method initialize(grammar:, type: :grammar) + # A custom grammar for the model to follow when generating text. 
Learn more in the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars). + # + # @param grammar [String] The custom grammar for the model to follow. + # + # @param type [Symbol, :grammar] The type of response format being defined. Always `grammar`. + end + end +end diff --git a/lib/openai/models/response_format_text_python.rb b/lib/openai/models/response_format_text_python.rb new file mode 100644 index 00000000..9e12a904 --- /dev/null +++ b/lib/openai/models/response_format_text_python.rb @@ -0,0 +1,20 @@ +# frozen_string_literal: true + +module OpenAI + module Models + class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel + # @!attribute type + # The type of response format being defined. Always `python`. + # + # @return [Symbol, :python] + required :type, const: :python + + # @!method initialize(type: :python) + # Configure the model to generate valid Python code. See the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) + # for more details. + # + # @param type [Symbol, :python] The type of response format being defined. Always `python`. + end + end +end diff --git a/lib/openai/models/responses/custom_tool.rb b/lib/openai/models/responses/custom_tool.rb new file mode 100644 index 00000000..05c3665f --- /dev/null +++ b/lib/openai/models/responses/custom_tool.rb @@ -0,0 +1,48 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class CustomTool < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool, used to identify it in tool calls. + # + # @return [String] + required :name, String + + # @!attribute type + # The type of the custom tool. Always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!attribute description + # Optional description of the custom tool, used to provide more context. 
+ # + # @return [String, nil] + optional :description, String + + # @!attribute format_ + # The input format for the custom tool. Default is unconstrained text. + # + # @return [OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar, nil] + optional :format_, union: -> { OpenAI::CustomToolInputFormat }, api_name: :format + + # @!method initialize(name:, description: nil, format_: nil, type: :custom) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::CustomTool} for more details. + # + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools). + # + # @param name [String] The name of the custom tool, used to identify it in tool calls. + # + # @param description [String] Optional description of the custom tool, used to provide more context. + # + # @param format_ [OpenAI::Models::CustomToolInputFormat::Text, OpenAI::Models::CustomToolInputFormat::Grammar] The input format for the custom tool. Default is unconstrained text. + # + # @param type [Symbol, :custom] The type of the custom tool. Always `custom`. + end + end + end +end diff --git a/lib/openai/models/responses/response.rb b/lib/openai/models/responses/response.rb index 9e0fb745..f1f341d7 100644 --- a/lib/openai/models/responses/response.rb +++ b/lib/openai/models/responses/response.rb @@ -38,7 +38,7 @@ class Response < OpenAI::Internal::Type::BaseModel # response will not be carried over to the next response. This makes it simple to # swap out system (or developer) messages in new responses. 
# - # @return [String, Array, nil] + # @return [String, Array, nil] required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true # @!attribute metadata @@ -77,7 +77,7 @@ class Response < OpenAI::Internal::Type::BaseModel # an `assistant` message with the content generated by the model, you might # consider using the `output_text` property where supported in SDKs. # - # @return [Array] + # @return [Array] required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] } # @!attribute parallel_tool_calls @@ -100,7 +100,7 @@ class Response < OpenAI::Internal::Type::BaseModel # response. See the `tools` parameter to see how to specify which tools the model # can call. # - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice } # @!attribute tools @@ -116,10 +116,12 @@ class Response < OpenAI::Internal::Type::BaseModel # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. 
# - # @return [Array] + # @return [Array] required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_p @@ -295,7 +297,7 @@ def output_text texts.join end - # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response) + # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::Response} for more details. # @@ -307,21 +309,21 @@ def output_text # # @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete. # - # @param instructions [String, Array, nil] A system (or developer) message inserted into the model's context. + # @param instructions [String, Array, nil] A system (or developer) message inserted into the model's context. # # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be # # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI # - # @param output [Array] An array of content items generated by the model. 
+ # @param output [Array] An array of content items generated by the model. # # @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel. # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, # @@ -403,7 +405,7 @@ module Instructions variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray } # @!method self.variants - # @return [Array(String, Array)] + # @return [Array(String, Array)] # @type [OpenAI::Internal::Type::Converter] ResponseInputItemArray = @@ -428,6 +430,9 @@ module ToolChoice # `required` means the model must call one or more tools. variant enum: -> { OpenAI::Responses::ToolChoiceOptions } + # Constrains the tools available to the model to a pre-defined set. + variant -> { OpenAI::Responses::ToolChoiceAllowed } + # Indicates that the model should use a built-in tool to generate a response. # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). 
variant -> { OpenAI::Responses::ToolChoiceTypes } @@ -438,8 +443,11 @@ module ToolChoice # Use this option to force the model to call a specific tool on a remote MCP server. variant -> { OpenAI::Responses::ToolChoiceMcp } + # Use this option to force the model to call a specific custom tool. + variant -> { OpenAI::Responses::ToolChoiceCustom } + # @!method self.variants - # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)] + # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)] end # Specifies the processing type used for serving the request. diff --git a/lib/openai/models/responses/response_create_params.rb b/lib/openai/models/responses/response_create_params.rb index 8efb9f7a..89906f08 100644 --- a/lib/openai/models/responses/response_create_params.rb +++ b/lib/openai/models/responses/response_create_params.rb @@ -51,7 +51,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) # - [Function calling](https://platform.openai.com/docs/guides/function-calling) # - # @return [String, Array, nil] + # @return [String, Array, nil] optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input } # @!attribute instructions @@ -178,6 +178,12 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @return [Boolean, nil] optional :store, OpenAI::Internal::Type::Boolean, nil?: true + # @!attribute stream_options + # Options for streaming responses. Only set this when you set `stream: true`. 
+ # + # @return [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] + optional :stream_options, -> { OpenAI::Responses::ResponseCreateParams::StreamOptions }, nil?: true + # @!attribute temperature # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will # make the output more random, while lower values like 0.2 will make it more @@ -208,7 +214,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # response. See the `tools` parameter to see how to specify which tools the model # can call. # - # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, nil] + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice } # @!attribute tools @@ -224,10 +230,12 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. 
# - # @return [Array, nil] + # @return [Array, nil] optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] } # @!attribute top_logprobs @@ -271,7 +279,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # @return [String, nil] optional :user, String - # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) + # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseCreateParams} for more details. # @@ -279,7 +287,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # # @param include [Array, nil] Specify additional output data to include in the model response. Currently # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. 
# @@ -307,13 +315,15 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel # # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via # + # @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`. + # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -346,7 +356,7 @@ module Input variant -> { OpenAI::Responses::ResponseInput } # @!method self.variants - # @return [Array(String, Array)] + # @return [Array(String, Array)] end # Specifies the processing type used for serving the request. 
@@ -379,6 +389,28 @@ module ServiceTier # @return [Array] end + class StreamOptions < OpenAI::Internal::Type::BaseModel + # @!attribute include_obfuscation + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + # + # @return [Boolean, nil] + optional :include_obfuscation, OpenAI::Internal::Type::Boolean + + # @!method initialize(include_obfuscation: nil) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCreateParams::StreamOptions} for more + # details. + # + # Options for streaming responses. Only set this when you set `stream: true`. + # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds + end + # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model # can call. @@ -395,6 +427,9 @@ module ToolChoice # `required` means the model must call one or more tools. variant enum: -> { OpenAI::Responses::ToolChoiceOptions } + # Constrains the tools available to the model to a pre-defined set. + variant -> { OpenAI::Responses::ToolChoiceAllowed } + # Indicates that the model should use a built-in tool to generate a response. # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). variant -> { OpenAI::Responses::ToolChoiceTypes } @@ -405,8 +440,11 @@ module ToolChoice # Use this option to force the model to call a specific tool on a remote MCP server. 
variant -> { OpenAI::Responses::ToolChoiceMcp } + # Use this option to force the model to call a specific custom tool. + variant -> { OpenAI::Responses::ToolChoiceCustom } + # @!method self.variants - # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)] + # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)] end # The truncation strategy to use for the model response. diff --git a/lib/openai/models/responses/response_custom_tool_call.rb b/lib/openai/models/responses/response_custom_tool_call.rb new file mode 100644 index 00000000..48dae1e0 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call.rb @@ -0,0 +1,55 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # An identifier used to map this custom tool call to a tool call output. + # + # @return [String] + required :call_id, String + + # @!attribute input + # The input for the custom tool call generated by the model. + # + # @return [String] + required :input, String + + # @!attribute name + # The name of the custom tool being called. + # + # @return [String] + required :name, String + + # @!attribute type + # The type of the custom tool call. Always `custom_tool_call`. + # + # @return [Symbol, :custom_tool_call] + required :type, const: :custom_tool_call + + # @!attribute id + # The unique ID of the custom tool call in the OpenAI platform. 
+ # + # @return [String, nil] + optional :id, String + + # @!method initialize(call_id:, input:, name:, id: nil, type: :custom_tool_call) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCustomToolCall} for more details. + # + # A call to a custom tool created by the model. + # + # @param call_id [String] An identifier used to map this custom tool call to a tool call output. + # + # @param input [String] The input for the custom tool call generated by the model. + # + # @param name [String] The name of the custom tool being called. + # + # @param id [String] The unique ID of the custom tool call in the OpenAI platform. + # + # @param type [Symbol, :custom_tool_call] The type of the custom tool call. Always `custom_tool_call`. + end + end + end +end diff --git a/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb b/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb new file mode 100644 index 00000000..5fa83189 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute delta + # The incremental input data (delta) for the custom tool call. + # + # @return [String] + required :delta, String + + # @!attribute item_id + # Unique identifier for the API item associated with this event. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output this delta applies to. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The event type identifier. 
+ # + # @return [Symbol, :"response.custom_tool_call_input.delta"] + required :type, const: :"response.custom_tool_call_input.delta" + + # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.custom_tool_call_input.delta") + # Event representing a delta (partial update) to the input of a custom tool call. + # + # @param delta [String] The incremental input data (delta) for the custom tool call. + # + # @param item_id [String] Unique identifier for the API item associated with this event. + # + # @param output_index [Integer] The index of the output this delta applies to. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.custom_tool_call_input.delta"] The event type identifier. + end + end + end +end diff --git a/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb b/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb new file mode 100644 index 00000000..e45a41e0 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute input + # The complete input data for the custom tool call. + # + # @return [String] + required :input, String + + # @!attribute item_id + # Unique identifier for the API item associated with this event. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output this event applies to. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The event type identifier. 
+ # + # @return [Symbol, :"response.custom_tool_call_input.done"] + required :type, const: :"response.custom_tool_call_input.done" + + # @!method initialize(input:, item_id:, output_index:, sequence_number:, type: :"response.custom_tool_call_input.done") + # Event indicating that input for a custom tool call is complete. + # + # @param input [String] The complete input data for the custom tool call. + # + # @param item_id [String] Unique identifier for the API item associated with this event. + # + # @param output_index [Integer] The index of the output this event applies to. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.custom_tool_call_input.done"] The event type identifier. + end + end + end +end diff --git a/lib/openai/models/responses/response_custom_tool_call_output.rb b/lib/openai/models/responses/response_custom_tool_call_output.rb new file mode 100644 index 00000000..644997e7 --- /dev/null +++ b/lib/openai/models/responses/response_custom_tool_call_output.rb @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel + # @!attribute call_id + # The call ID, used to map this custom tool call output to a custom tool call. + # + # @return [String] + required :call_id, String + + # @!attribute output + # The output from the custom tool call generated by your code. + # + # @return [String] + required :output, String + + # @!attribute type + # The type of the custom tool call output. Always `custom_tool_call_output`. + # + # @return [Symbol, :custom_tool_call_output] + required :type, const: :custom_tool_call_output + + # @!attribute id + # The unique ID of the custom tool call output in the OpenAI platform. 
+ # + # @return [String, nil] + optional :id, String + + # @!method initialize(call_id:, output:, id: nil, type: :custom_tool_call_output) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseCustomToolCallOutput} for more details. + # + # The output of a custom tool call from your code, being sent back to the model. + # + # @param call_id [String] The call ID, used to map this custom tool call output to a custom tool call. + # + # @param output [String] The output from the custom tool call generated by your code. + # + # @param id [String] The unique ID of the custom tool call output in the OpenAI platform. + # + # @param type [Symbol, :custom_tool_call_output] The type of the custom tool call output. Always `custom_tool_call_output`. + end + end + end +end diff --git a/lib/openai/models/responses/response_input_item.rb b/lib/openai/models/responses/response_input_item.rb index a8f469fa..330e1e7c 100644 --- a/lib/openai/models/responses/response_input_item.rb +++ b/lib/openai/models/responses/response_input_item.rb @@ -80,6 +80,12 @@ module ResponseInputItem # An invocation of a tool on an MCP server. variant :mcp_call, -> { OpenAI::Responses::ResponseInputItem::McpCall } + # The output of a custom tool call from your code, being sent back to the model. + variant :custom_tool_call_output, -> { OpenAI::Responses::ResponseCustomToolCallOutput } + + # A call to a custom tool created by the model. + variant :custom_tool_call, -> { OpenAI::Responses::ResponseCustomToolCall } + # An internal identifier for an item to reference. 
variant :item_reference, -> { OpenAI::Responses::ResponseInputItem::ItemReference } @@ -869,7 +875,7 @@ module Type end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference)] + # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, 
OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference)] end end end diff --git a/lib/openai/models/responses/response_output_item.rb b/lib/openai/models/responses/response_output_item.rb index b6fb663b..9d9b204f 100644 --- a/lib/openai/models/responses/response_output_item.rb +++ b/lib/openai/models/responses/response_output_item.rb @@ -52,6 +52,9 @@ module ResponseOutputItem # A request for human approval of a tool invocation. variant :mcp_approval_request, -> { OpenAI::Responses::ResponseOutputItem::McpApprovalRequest } + # A call to a custom tool created by the model. + variant :custom_tool_call, -> { OpenAI::Responses::ResponseCustomToolCall } + class ImageGenerationCall < OpenAI::Internal::Type::BaseModel # @!attribute id # The unique ID of the image generation call. 
@@ -435,7 +438,7 @@ class McpApprovalRequest < OpenAI::Internal::Type::BaseModel end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest)] + # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall)] end end end diff --git a/lib/openai/models/responses/response_output_item_added_event.rb b/lib/openai/models/responses/response_output_item_added_event.rb index 119bea71..1ca221ee 100644 --- a/lib/openai/models/responses/response_output_item_added_event.rb +++ b/lib/openai/models/responses/response_output_item_added_event.rb @@ -7,7 +7,7 @@ class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel # 
@!attribute item # The output item that was added. # - # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest] + # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] required :item, union: -> { OpenAI::Responses::ResponseOutputItem } # @!attribute output_index @@ -34,7 +34,7 @@ class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel # # Emitted when a new output item is added. 
# - # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest] The output item that was added. + # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was added. # # @param output_index [Integer] The index of the output item that was added. 
# diff --git a/lib/openai/models/responses/response_output_item_done_event.rb b/lib/openai/models/responses/response_output_item_done_event.rb index 45b061b7..f96435eb 100644 --- a/lib/openai/models/responses/response_output_item_done_event.rb +++ b/lib/openai/models/responses/response_output_item_done_event.rb @@ -7,7 +7,7 @@ class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel # @!attribute item # The output item that was marked done. # - # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest] + # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] required :item, union: -> { OpenAI::Responses::ResponseOutputItem } # @!attribute 
output_index @@ -34,7 +34,7 @@ class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel # # Emitted when an output item is marked done. # - # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest] The output item that was marked done. + # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was marked done. # # @param output_index [Integer] The index of the output item that was marked done. 
# diff --git a/lib/openai/models/responses/response_reasoning_item.rb b/lib/openai/models/responses/response_reasoning_item.rb index fa878448..3c4068fb 100644 --- a/lib/openai/models/responses/response_reasoning_item.rb +++ b/lib/openai/models/responses/response_reasoning_item.rb @@ -11,7 +11,7 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel required :id, String # @!attribute summary - # Reasoning text contents. + # Reasoning summary content. # # @return [Array] required :summary, @@ -23,6 +23,13 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel # @return [Symbol, :reasoning] required :type, const: :reasoning + # @!attribute content + # Reasoning text content. + # + # @return [Array, nil] + optional :content, + -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Content] } + # @!attribute encrypted_content # The encrypted content of the reasoning item - populated when a response is # generated with `reasoning.encrypted_content` in the `include` parameter. @@ -37,7 +44,7 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel # @return [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status, nil] optional :status, enum: -> { OpenAI::Responses::ResponseReasoningItem::Status } - # @!method initialize(id:, summary:, encrypted_content: nil, status: nil, type: :reasoning) + # @!method initialize(id:, summary:, content: nil, encrypted_content: nil, status: nil, type: :reasoning) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseReasoningItem} for more details. # @@ -48,7 +55,9 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel # # @param id [String] The unique identifier of the reasoning content. # - # @param summary [Array] Reasoning text contents. + # @param summary [Array] Reasoning summary content. + # + # @param content [Array] Reasoning text content. 
# # @param encrypted_content [String, nil] The encrypted content of the reasoning item - populated when a response is # @@ -58,7 +67,7 @@ class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel class Summary < OpenAI::Internal::Type::BaseModel # @!attribute text - # A short summary of the reasoning used by the model when generating the response. + # A summary of the reasoning output from the model so far. # # @return [String] required :text, String @@ -73,11 +82,33 @@ class Summary < OpenAI::Internal::Type::BaseModel # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseReasoningItem::Summary} for more details. # - # @param text [String] A short summary of the reasoning used by the model when generating + # @param text [String] A summary of the reasoning output from the model so far. # # @param type [Symbol, :summary_text] The type of the object. Always `summary_text`. end + class Content < OpenAI::Internal::Type::BaseModel + # @!attribute text + # Reasoning text output from the model. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the object. Always `reasoning_text`. + # + # @return [Symbol, :reasoning_text] + required :type, const: :reasoning_text + + # @!method initialize(text:, type: :reasoning_text) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningItem::Content} for more details. + # + # @param text [String] Reasoning text output from the model. + # + # @param type [Symbol, :reasoning_text] The type of the object. Always `reasoning_text`. + end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. # Populated when items are returned via API. 
# diff --git a/lib/openai/models/responses/response_reasoning_summary_delta_event.rb b/lib/openai/models/responses/response_reasoning_summary_delta_event.rb deleted file mode 100644 index f3299e9f..00000000 --- a/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +++ /dev/null @@ -1,65 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - module Models - module Responses - class ResponseReasoningSummaryDeltaEvent < OpenAI::Internal::Type::BaseModel - # @!attribute delta - # The partial update to the reasoning summary content. - # - # @return [Object] - required :delta, OpenAI::Internal::Type::Unknown - - # @!attribute item_id - # The unique identifier of the item for which the reasoning summary is being - # updated. - # - # @return [String] - required :item_id, String - - # @!attribute output_index - # The index of the output item in the response's output array. - # - # @return [Integer] - required :output_index, Integer - - # @!attribute sequence_number - # The sequence number of this event. - # - # @return [Integer] - required :sequence_number, Integer - - # @!attribute summary_index - # The index of the summary part within the output item. - # - # @return [Integer] - required :summary_index, Integer - - # @!attribute type - # The type of the event. Always 'response.reasoning_summary.delta'. - # - # @return [Symbol, :"response.reasoning_summary.delta"] - required :type, const: :"response.reasoning_summary.delta" - - # @!method initialize(delta:, item_id:, output_index:, sequence_number:, summary_index:, type: :"response.reasoning_summary.delta") - # Some parameter documentations has been truncated, see - # {OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent} for more - # details. - # - # Emitted when there is a delta (partial update) to the reasoning summary content. - # - # @param delta [Object] The partial update to the reasoning summary content. 
- # - # @param item_id [String] The unique identifier of the item for which the reasoning summary is being updat - # - # @param output_index [Integer] The index of the output item in the response's output array. - # - # @param sequence_number [Integer] The sequence number of this event. - # - # @param summary_index [Integer] The index of the summary part within the output item. - # - # @param type [Symbol, :"response.reasoning_summary.delta"] The type of the event. Always 'response.reasoning_summary.delta'. - end - end - end -end diff --git a/lib/openai/models/responses/response_reasoning_summary_done_event.rb b/lib/openai/models/responses/response_reasoning_summary_done_event.rb deleted file mode 100644 index 42716381..00000000 --- a/lib/openai/models/responses/response_reasoning_summary_done_event.rb +++ /dev/null @@ -1,60 +0,0 @@ -# frozen_string_literal: true - -module OpenAI - module Models - module Responses - class ResponseReasoningSummaryDoneEvent < OpenAI::Internal::Type::BaseModel - # @!attribute item_id - # The unique identifier of the item for which the reasoning summary is finalized. - # - # @return [String] - required :item_id, String - - # @!attribute output_index - # The index of the output item in the response's output array. - # - # @return [Integer] - required :output_index, Integer - - # @!attribute sequence_number - # The sequence number of this event. - # - # @return [Integer] - required :sequence_number, Integer - - # @!attribute summary_index - # The index of the summary part within the output item. - # - # @return [Integer] - required :summary_index, Integer - - # @!attribute text - # The finalized reasoning summary text. - # - # @return [String] - required :text, String - - # @!attribute type - # The type of the event. Always 'response.reasoning_summary.done'. 
- # - # @return [Symbol, :"response.reasoning_summary.done"] - required :type, const: :"response.reasoning_summary.done" - - # @!method initialize(item_id:, output_index:, sequence_number:, summary_index:, text:, type: :"response.reasoning_summary.done") - # Emitted when the reasoning summary content is finalized for an item. - # - # @param item_id [String] The unique identifier of the item for which the reasoning summary is finalized. - # - # @param output_index [Integer] The index of the output item in the response's output array. - # - # @param sequence_number [Integer] The sequence number of this event. - # - # @param summary_index [Integer] The index of the summary part within the output item. - # - # @param text [String] The finalized reasoning summary text. - # - # @param type [Symbol, :"response.reasoning_summary.done"] The type of the event. Always 'response.reasoning_summary.done'. - end - end - end -end diff --git a/lib/openai/models/responses/response_reasoning_text_delta_event.rb b/lib/openai/models/responses/response_reasoning_text_delta_event.rb new file mode 100644 index 00000000..fc3380bb --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_text_delta_event.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel + # @!attribute content_index + # The index of the reasoning content part this delta is associated with. + # + # @return [Integer] + required :content_index, Integer + + # @!attribute delta + # The text delta that was added to the reasoning content. + # + # @return [String] + required :delta, String + + # @!attribute item_id + # The ID of the item this reasoning text delta is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this reasoning text delta is associated with. 
+ # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute type + # The type of the event. Always `response.reasoning_text.delta`. + # + # @return [Symbol, :"response.reasoning_text.delta"] + required :type, const: :"response.reasoning_text.delta" + + # @!method initialize(content_index:, delta:, item_id:, output_index:, sequence_number:, type: :"response.reasoning_text.delta") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent} for more details. + # + # Emitted when a delta is added to a reasoning text. + # + # @param content_index [Integer] The index of the reasoning content part this delta is associated with. + # + # @param delta [String] The text delta that was added to the reasoning content. + # + # @param item_id [String] The ID of the item this reasoning text delta is associated with. + # + # @param output_index [Integer] The index of the output item this reasoning text delta is associated with. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param type [Symbol, :"response.reasoning_text.delta"] The type of the event. Always `response.reasoning_text.delta`. + end + end + end +end diff --git a/lib/openai/models/responses/response_reasoning_text_done_event.rb b/lib/openai/models/responses/response_reasoning_text_done_event.rb new file mode 100644 index 00000000..e07630bc --- /dev/null +++ b/lib/openai/models/responses/response_reasoning_text_done_event.rb @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDoneEvent < OpenAI::Internal::Type::BaseModel + # @!attribute content_index + # The index of the reasoning content part. 
+ # + # @return [Integer] + required :content_index, Integer + + # @!attribute item_id + # The ID of the item this reasoning text is associated with. + # + # @return [String] + required :item_id, String + + # @!attribute output_index + # The index of the output item this reasoning text is associated with. + # + # @return [Integer] + required :output_index, Integer + + # @!attribute sequence_number + # The sequence number of this event. + # + # @return [Integer] + required :sequence_number, Integer + + # @!attribute text + # The full text of the completed reasoning content. + # + # @return [String] + required :text, String + + # @!attribute type + # The type of the event. Always `response.reasoning_text.done`. + # + # @return [Symbol, :"response.reasoning_text.done"] + required :type, const: :"response.reasoning_text.done" + + # @!method initialize(content_index:, item_id:, output_index:, sequence_number:, text:, type: :"response.reasoning_text.done") + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ResponseReasoningTextDoneEvent} for more details. + # + # Emitted when a reasoning text is completed. + # + # @param content_index [Integer] The index of the reasoning content part. + # + # @param item_id [String] The ID of the item this reasoning text is associated with. + # + # @param output_index [Integer] The index of the output item this reasoning text is associated with. + # + # @param sequence_number [Integer] The sequence number of this event. + # + # @param text [String] The full text of the completed reasoning content. + # + # @param type [Symbol, :"response.reasoning_text.done"] The type of the event. Always `response.reasoning_text.done`. 
+ end + end + end +end diff --git a/lib/openai/models/responses/response_retrieve_params.rb b/lib/openai/models/responses/response_retrieve_params.rb index 979fe0b2..67aa9110 100644 --- a/lib/openai/models/responses/response_retrieve_params.rb +++ b/lib/openai/models/responses/response_retrieve_params.rb @@ -17,18 +17,31 @@ class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel # @return [Array, nil] optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] } + # @!attribute include_obfuscation + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + # + # @return [Boolean, nil] + optional :include_obfuscation, OpenAI::Internal::Type::Boolean + # @!attribute starting_after # The sequence number of the event after which to start streaming. # # @return [Integer, nil] optional :starting_after, Integer - # @!method initialize(include: nil, starting_after: nil, request_options: {}) + # @!method initialize(include: nil, include_obfuscation: nil, starting_after: nil, request_options: {}) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseRetrieveParams} for more details. # # @param include [Array] Additional fields to include in the response. See the `include` # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds + # # @param starting_after [Integer] The sequence number of the event after which to start streaming. 
# # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}] diff --git a/lib/openai/models/responses/response_stream_event.rb b/lib/openai/models/responses/response_stream_event.rb index 21dec3eb..d0bc4144 100644 --- a/lib/openai/models/responses/response_stream_event.rb +++ b/lib/openai/models/responses/response_stream_event.rb @@ -110,6 +110,12 @@ module ResponseStreamEvent variant :"response.reasoning_summary_text.done", -> { OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent } + # Emitted when a delta is added to a reasoning text. + variant :"response.reasoning_text.delta", -> { OpenAI::Responses::ResponseReasoningTextDeltaEvent } + + # Emitted when a reasoning text is completed. + variant :"response.reasoning_text.done", -> { OpenAI::Responses::ResponseReasoningTextDoneEvent } + # Emitted when there is a partial refusal text. variant :"response.refusal.delta", -> { OpenAI::Responses::ResponseRefusalDeltaEvent } @@ -191,20 +197,16 @@ module ResponseStreamEvent # Emitted when a response is queued and waiting to be processed. variant :"response.queued", -> { OpenAI::Responses::ResponseQueuedEvent } - # Emitted when there is a delta (partial update) to the reasoning summary content. - variant :"response.reasoning_summary.delta", - -> { - OpenAI::Responses::ResponseReasoningSummaryDeltaEvent - } + # Event representing a delta (partial update) to the input of a custom tool call. + variant :"response.custom_tool_call_input.delta", + -> { OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent } - # Emitted when the reasoning summary content is finalized for an item. - variant :"response.reasoning_summary.done", - -> { - OpenAI::Responses::ResponseReasoningSummaryDoneEvent - } + # Event indicating that input for a custom tool call is complete. 
+ variant :"response.custom_tool_call_input.done", + -> { OpenAI::Responses::ResponseCustomToolCallInputDoneEvent } # @!method self.variants - # @return [Array(OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, 
OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent)] + # @return [Array(OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, 
OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, 
OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDeltaEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDoneEvent)] end end end diff --git a/lib/openai/models/responses/response_text_config.rb b/lib/openai/models/responses/response_text_config.rb index 9a203e84..7017ea8f 100644 --- a/lib/openai/models/responses/response_text_config.rb +++ b/lib/openai/models/responses/response_text_config.rb @@ -22,7 +22,15 @@ class ResponseTextConfig < OpenAI::Internal::Type::BaseModel # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil] optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format - # @!method initialize(format_: nil) + # @!attribute verbosity + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + # + # @return [Symbol, OpenAI::Models::Responses::ResponseTextConfig::Verbosity, nil] + optional :verbosity, enum: -> { OpenAI::Responses::ResponseTextConfig::Verbosity }, nil?: true + + # @!method initialize(format_: nil, verbosity: nil) # Some parameter documentations has been truncated, see # {OpenAI::Models::Responses::ResponseTextConfig} for more details. 
# @@ -33,6 +41,24 @@ class ResponseTextConfig < OpenAI::Internal::Type::BaseModel # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) # # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. + # + # @param verbosity [Symbol, OpenAI::Models::Responses::ResponseTextConfig::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in + + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + # + # @see OpenAI::Models::Responses::ResponseTextConfig#verbosity + module Verbosity + extend OpenAI::Internal::Type::Enum + + LOW = :low + MEDIUM = :medium + HIGH = :high + + # @!method self.values + # @return [Array] + end end end end diff --git a/lib/openai/models/responses/tool.rb b/lib/openai/models/responses/tool.rb index 46add9fc..85a11b6e 100644 --- a/lib/openai/models/responses/tool.rb +++ b/lib/openai/models/responses/tool.rb @@ -33,6 +33,10 @@ module Tool # A tool that allows the model to execute shell commands in a local environment. variant :local_shell, -> { OpenAI::Responses::Tool::LocalShell } + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools). + variant :custom, -> { OpenAI::Responses::CustomTool } + # This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). 
variant -> { OpenAI::Responses::WebSearchTool } @@ -542,7 +546,7 @@ class LocalShell < OpenAI::Internal::Type::BaseModel end # @!method self.variants - # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool)] + # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool)] end end end diff --git a/lib/openai/models/responses/tool_choice_allowed.rb b/lib/openai/models/responses/tool_choice_allowed.rb new file mode 100644 index 00000000..228e4611 --- /dev/null +++ b/lib/openai/models/responses/tool_choice_allowed.rb @@ -0,0 +1,73 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceAllowed < OpenAI::Internal::Type::BaseModel + # @!attribute mode + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + # + # @return [Symbol, OpenAI::Models::Responses::ToolChoiceAllowed::Mode] + required :mode, enum: -> { OpenAI::Responses::ToolChoiceAllowed::Mode } + + # @!attribute tools + # A list of tool definitions that the model should be allowed to call. 
+ # + # For the Responses API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "name": "get_weather" }, + # { "type": "mcp", "server_label": "deepwiki" }, + # { "type": "image_generation" } + # ] + # ``` + # + # @return [Array<Hash{Symbol=>Object}>] + required :tools, + OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]] + + # @!attribute type + # Allowed tool configuration type. Always `allowed_tools`. + # + # @return [Symbol, :allowed_tools] + required :type, const: :allowed_tools + + # @!method initialize(mode:, tools:, type: :allowed_tools) + # Some parameter documentations has been truncated, see + # {OpenAI::Models::Responses::ToolChoiceAllowed} for more details. + # + # Constrains the tools available to the model to a pre-defined set. + # + # @param mode [Symbol, OpenAI::Models::Responses::ToolChoiceAllowed::Mode] Constrains the tools available to the model to a pre-defined set. + # + # @param tools [Array<Hash{Symbol=>Object}>] A list of tool definitions that the model should be allowed to call. + # + # @param type [Symbol, :allowed_tools] Allowed tool configuration type. Always `allowed_tools`. + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools.
+ # + # @see OpenAI::Models::Responses::ToolChoiceAllowed#mode + module Mode + extend OpenAI::Internal::Type::Enum + + AUTO = :auto + REQUIRED = :required + + # @!method self.values + # @return [Array] + end + end + end + end +end diff --git a/lib/openai/models/responses/tool_choice_custom.rb b/lib/openai/models/responses/tool_choice_custom.rb new file mode 100644 index 00000000..310413ed --- /dev/null +++ b/lib/openai/models/responses/tool_choice_custom.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +module OpenAI + module Models + module Responses + class ToolChoiceCustom < OpenAI::Internal::Type::BaseModel + # @!attribute name + # The name of the custom tool to call. + # + # @return [String] + required :name, String + + # @!attribute type + # For custom tool calling, the type is always `custom`. + # + # @return [Symbol, :custom] + required :type, const: :custom + + # @!method initialize(name:, type: :custom) + # Use this option to force the model to call a specific custom tool. + # + # @param name [String] The name of the custom tool to call. + # + # @param type [Symbol, :custom] For custom tool calling, the type is always `custom`. + end + end + end +end diff --git a/lib/openai/models/vector_store_search_params.rb b/lib/openai/models/vector_store_search_params.rb index c1ae6419..fdf4e91f 100644 --- a/lib/openai/models/vector_store_search_params.rb +++ b/lib/openai/models/vector_store_search_params.rb @@ -85,6 +85,7 @@ module Filters class RankingOptions < OpenAI::Internal::Type::BaseModel # @!attribute ranker + # Enable re-ranking; set to `none` to disable, which can help reduce latency. # # @return [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker, nil] optional :ranker, enum: -> { OpenAI::VectorStoreSearchParams::RankingOptions::Ranker } @@ -97,13 +98,17 @@ class RankingOptions < OpenAI::Internal::Type::BaseModel # @!method initialize(ranker: nil, score_threshold: nil) # Ranking options for search. 
# - # @param ranker [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker] + # @param ranker [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker] Enable re-ranking; set to `none` to disable, which can help reduce latency. + # # @param score_threshold [Float] + # Enable re-ranking; set to `none` to disable, which can help reduce latency. + # # @see OpenAI::Models::VectorStoreSearchParams::RankingOptions#ranker module Ranker extend OpenAI::Internal::Type::Enum + NONE = :none AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" diff --git a/lib/openai/resources/beta/assistants.rb b/lib/openai/resources/beta/assistants.rb index 8b8adf5a..77b58e66 100644 --- a/lib/openai/resources/beta/assistants.rb +++ b/lib/openai/resources/beta/assistants.rb @@ -21,7 +21,7 @@ class Assistants # # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https: # @@ -88,7 +88,7 @@ def retrieve(assistant_id, params = {}) # # @param name [String, nil] The name of the assistant. The maximum length is 256 characters. # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. 
Compatible with [GPT-4o](https: # diff --git a/lib/openai/resources/beta/threads/runs.rb b/lib/openai/resources/beta/threads/runs.rb index 6c1d4b54..37648e04 100644 --- a/lib/openai/resources/beta/threads/runs.rb +++ b/lib/openai/resources/beta/threads/runs.rb @@ -43,7 +43,7 @@ class Runs # # @param parallel_tool_calls [Boolean] Body param: Whether to enable [parallel function calling](https://platform.opena # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Body param: **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Body param: Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Body param: Specifies the format that the model must output. Compatible with [GP # @@ -113,7 +113,7 @@ def create(thread_id, params) # # @param parallel_tool_calls [Boolean] Body param: Whether to enable [parallel function calling](https://platform.opena # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Body param: **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Body param: Constrains effort on reasoning for # # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Body param: Specifies the format that the model must output. Compatible with [GP # diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 6765d8b4..a3518b48 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -30,7 +30,7 @@ class Completions # unsupported parameters in reasoning models, # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). 
# - # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {}) + # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # # @param messages [Array] A list of messages comprising the conversation so far. Depending on the # @@ -66,7 +66,7 @@ class Completions # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. 
# @@ -84,9 +84,9 @@ class Completions # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model. + # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. # - # @param tools [Array] A list of tools the model may call. Currently, only functions are supported as a + # @param tools [Array] A list of tools the model may call. You can provide either # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -94,6 +94,8 @@ class Completions # # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use # + # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. Lower values will result in + # # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response. # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] @@ -166,7 +168,8 @@ def create(params) raw[:choices]&.each do |choice| message = choice.fetch(:message) begin - parsed = JSON.parse(message.fetch(:content), symbolize_names: true) + content = message.fetch(:content) + parsed = content.nil? ? nil : JSON.parse(content, symbolize_names: true) rescue JSON::ParserError => e parsed = e end @@ -180,7 +183,8 @@ def create(params) next if (model = tool_models[func.fetch(:name)]).nil? 
begin - parsed = JSON.parse(func.fetch(:arguments), symbolize_names: true) + arguments = func.fetch(:arguments) + parsed = arguments.nil? ? nil : JSON.parse(arguments, symbolize_names: true) rescue JSON::ParserError => e parsed = e end @@ -230,7 +234,7 @@ def stream # unsupported parameters in reasoning models, # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). # - # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {}) + # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {}) # # @param messages [Array] A list of messages comprising the conversation so far. 
Depending on the # @@ -266,7 +270,7 @@ def stream # # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi # - # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only** + # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for # # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output. # @@ -284,9 +288,9 @@ def stream # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # - # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model. + # @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionAllowedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, OpenAI::Models::Chat::ChatCompletionNamedToolChoiceCustom] Controls which (if any) tool is called by the model. # - # @param tools [Array] A list of tools the model may call. Currently, only functions are supported as a + # @param tools [Array] A list of tools the model may call. You can provide either # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -294,6 +298,8 @@ def stream # # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use # + # @param verbosity [Symbol, OpenAI::Models::Chat::CompletionCreateParams::Verbosity, nil] Constrains the verbosity of the model's response. 
Lower values will result in + # # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response. # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 79e14f03..5f6b353b 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -23,13 +23,13 @@ class Responses # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use # your own data as input for the model's response. # - # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) + # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # # @param background [Boolean, nil] Whether to run the model response in the background. # # @param include [Array, nil] Specify additional output data to include in the model response. Currently # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. 
# # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -57,13 +57,15 @@ class Responses # # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via # + # @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`. + # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. You # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -118,31 +120,37 @@ def create(params = {}) # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use # your own data as input for the model's response. 
# - # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) - # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. - # - # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI + # @overload stream(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {}) # # @param background [Boolean, nil] Whether to run the model response in the background. # # @param include [Array, nil] Specify additional output data to include in the model response. Currently # + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in # + # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r + # # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. 
This can be # + # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI + # # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel. # - # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response. + # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response. # # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables. # + # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi + # # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only** # - # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is + # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi + # + # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request. # # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via # @@ -150,19 +158,21 @@ def create(params = {}) # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. 
Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating + # + # @param tools [Array] An array of tools the model may call while generating a response. You # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, # # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response. # - # @param user [String] A stable identifier for your end-users. + # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Helpers::Streaming::ResponseStream] + # @return [OpenAI::Internal::Stream] # # @see OpenAI::Models::Responses::ResponseCreateParams def stream(params) @@ -234,7 +244,7 @@ def stream(params) # # @param include [Array, nil] Specify additional output data to include in the model response. Currently # - # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. + # @param input [String, Array] Text, image, or file inputs to the model, used to generate a response. 
# # @param instructions [String, nil] A system (or developer) message inserted into the model's context. # @@ -262,13 +272,15 @@ def stream(params) # # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via # + # @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`. + # # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m # # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain # - # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating + # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating # - # @param tools [Array] An array of tools the model may call while generating a response. You + # @param tools [Array] An array of tools the model may call while generating a response. 
You # # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to # @@ -280,7 +292,7 @@ def stream(params) # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Internal::Stream] + # @return [OpenAI::Internal::Stream] # # @see OpenAI::Models::Responses::ResponseCreateParams def stream_raw(params = {}) @@ -309,12 +321,14 @@ def stream_raw(params = {}) # # Retrieves a model response with the given ID. # - # @overload retrieve(response_id, include: nil, starting_after: nil, request_options: {}) + # @overload retrieve(response_id, include: nil, include_obfuscation: nil, starting_after: nil, request_options: {}) # # @param response_id [String] The ID of the response to retrieve. # # @param include [Array] Additional fields to include in the response. See the `include` # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds + # # @param starting_after [Integer] The sequence number of the event after which to start streaming. # # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] @@ -344,17 +358,19 @@ def retrieve(response_id, params = {}) # # Retrieves a model response with the given ID. # - # @overload retrieve_streaming(response_id, include: nil, starting_after: nil, request_options: {}) + # @overload retrieve_streaming(response_id, include: nil, include_obfuscation: nil, starting_after: nil, request_options: {}) # # @param response_id [String] The ID of the response to retrieve. # # @param include [Array] Additional fields to include in the response. See the `include` # + # @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds + # # @param starting_after [Integer] The sequence number of the event after which to start streaming. 
# # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] # - # @return [OpenAI::Internal::Stream] + # @return [OpenAI::Internal::Stream] # # @see OpenAI::Models::Responses::ResponseRetrieveParams def retrieve_streaming(response_id, params = {}) diff --git a/lib/openai/version.rb b/lib/openai/version.rb index a619a057..182ed68d 100644 --- a/lib/openai/version.rb +++ b/lib/openai/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true module OpenAI - VERSION = "0.16.0" + VERSION = "0.17.0" end diff --git a/rbi/openai/internal/transport/base_client.rbi b/rbi/openai/internal/transport/base_client.rbi index d41cf4f8..095d4476 100644 --- a/rbi/openai/internal/transport/base_client.rbi +++ b/rbi/openai/internal/transport/base_client.rbi @@ -221,7 +221,7 @@ module OpenAI send_retry_header: T::Boolean ).returns([Integer, Net::HTTPResponse, T::Enumerable[String]]) end - private def send_request( + def send_request( request, redirect_count:, retry_count:, diff --git a/rbi/openai/models.rbi b/rbi/openai/models.rbi index c5a9d836..c2fa35a9 100644 --- a/rbi/openai/models.rbi +++ b/rbi/openai/models.rbi @@ -55,6 +55,8 @@ module OpenAI CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse + CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat + Embedding = OpenAI::Models::Embedding EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams @@ -172,6 +174,10 @@ module OpenAI ResponseFormatText = OpenAI::Models::ResponseFormatText + ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar + + ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython + Responses = OpenAI::Models::Responses ResponsesModel = OpenAI::Models::ResponsesModel diff --git a/rbi/openai/models/beta/assistant_create_params.rbi b/rbi/openai/models/beta/assistant_create_params.rbi index c04793b1..f4783b7e 100644 --- a/rbi/openai/models/beta/assistant_create_params.rbi +++ b/rbi/openai/models/beta/assistant_create_params.rbi @@ -45,12 +45,11 @@ 
module OpenAI sig { returns(T.nilable(String)) } attr_accessor :name - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -208,12 +207,11 @@ module OpenAI metadata: nil, # The name of the assistant. The maximum length is 256 characters. name: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/models/beta/assistant_update_params.rbi b/rbi/openai/models/beta/assistant_update_params.rbi index b044c34b..217e526e 100644 --- a/rbi/openai/models/beta/assistant_update_params.rbi +++ b/rbi/openai/models/beta/assistant_update_params.rbi @@ -65,12 +65,11 @@ module OpenAI sig { returns(T.nilable(String)) } attr_accessor :name - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -232,12 +231,11 @@ module OpenAI model: nil, # The name of the assistant. The maximum length is 256 characters. name: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Specifies the format that the model must output. 
Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -354,6 +352,36 @@ module OpenAI end OrSymbol = T.type_alias { T.any(Symbol, String) } + GPT_5 = + T.let( + :"gpt-5", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_MINI = + T.let( + :"gpt-5-mini", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_NANO = + T.let( + :"gpt-5-nano", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_2025_08_07 = + T.let( + :"gpt-5-2025-08-07", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_MINI_2025_08_07 = + T.let( + :"gpt-5-mini-2025-08-07", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) + GPT_5_NANO_2025_08_07 = + T.let( + :"gpt-5-nano-2025-08-07", + OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol + ) GPT_4_1 = T.let( :"gpt-4.1", diff --git a/rbi/openai/models/beta/threads/run_create_params.rbi b/rbi/openai/models/beta/threads/run_create_params.rbi index bb9d6882..04d643a2 100644 --- a/rbi/openai/models/beta/threads/run_create_params.rbi +++ b/rbi/openai/models/beta/threads/run_create_params.rbi @@ -111,12 +111,11 @@ module OpenAI sig { params(parallel_tool_calls: T::Boolean).void } attr_writer :parallel_tool_calls - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. 
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -330,12 +329,11 @@ module OpenAI # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) # during tool use. parallel_tool_calls: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi b/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi new file mode 100644 index 00000000..17d1cecc --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi @@ -0,0 +1,60 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice + + module Chat + class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Internal::AnyHash + ) + end + + # Constrains the tools available to the model to a pre-defined set. + sig { returns(OpenAI::Chat::ChatCompletionAllowedTools) } + attr_reader :allowed_tools + + sig do + params( + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools::OrHash + ).void + end + attr_writer :allowed_tools + + # Allowed tool configuration type. Always `allowed_tools`. 
+ sig { returns(Symbol) } + attr_accessor :type + + # Constrains the tools available to the model to a pre-defined set. + sig do + params( + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Constrains the tools available to the model to a pre-defined set. + allowed_tools:, + # Allowed tool configuration type. Always `allowed_tools`. + type: :allowed_tools + ) + end + + sig do + override.returns( + { + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_allowed_tools.rbi b/rbi/openai/models/chat/chat_completion_allowed_tools.rbi new file mode 100644 index 00000000..6dbb2e20 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_allowed_tools.rbi @@ -0,0 +1,118 @@ +# typed: strong + +module OpenAI + module Models + module Chat + class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionAllowedTools, + OpenAI::Internal::AnyHash + ) + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + sig do + returns(OpenAI::Chat::ChatCompletionAllowedTools::Mode::OrSymbol) + end + attr_accessor :mode + + # A list of tool definitions that the model should be allowed to call. + # + # For the Chat Completions API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "function": { "name": "get_weather" } }, + # { "type": "function", "function": { "name": "get_time" } } + # ] + # ``` + sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + attr_accessor :tools + + # Constrains the tools available to the model to a pre-defined set. 
+ sig do + params( + mode: OpenAI::Chat::ChatCompletionAllowedTools::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]] + ).returns(T.attached_class) + end + def self.new( + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + mode:, + # A list of tool definitions that the model should be allowed to call. + # + # For the Chat Completions API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "function": { "name": "get_weather" } }, + # { "type": "function", "function": { "name": "get_time" } } + # ] + # ``` + tools: + ) + end + + sig do + override.returns( + { + mode: OpenAI::Chat::ChatCompletionAllowedTools::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]] + } + ) + end + def to_hash + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. 
+ module Mode + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::ChatCompletionAllowedTools::Mode) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Chat::ChatCompletionAllowedTools::Mode::TaggedSymbol + ) + REQUIRED = + T.let( + :required, + OpenAI::Chat::ChatCompletionAllowedTools::Mode::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionAllowedTools::Mode::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi b/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi index b2233b53..59c05235 100644 --- a/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +++ b/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi @@ -85,7 +85,14 @@ module OpenAI # The tool calls generated by the model, such as function calls. 
sig do returns( - T.nilable(T::Array[OpenAI::Chat::ChatCompletionMessageToolCall]) + T.nilable( + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Chat::ChatCompletionMessageCustomToolCall + ) + ] + ) ) end attr_reader :tool_calls @@ -93,7 +100,12 @@ module OpenAI sig do params( tool_calls: - T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::OrHash] + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ] ).void end attr_writer :tool_calls @@ -116,7 +128,12 @@ module OpenAI name: String, refusal: T.nilable(String), tool_calls: - T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::OrHash], + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ], role: Symbol ).returns(T.attached_class) end @@ -160,7 +177,13 @@ module OpenAI ), name: String, refusal: T.nilable(String), - tool_calls: T::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + tool_calls: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Chat::ChatCompletionMessageCustomToolCall + ) + ] } ) end diff --git a/rbi/openai/models/chat/chat_completion_custom_tool.rbi b/rbi/openai/models/chat/chat_completion_custom_tool.rbi new file mode 100644 index 00000000..2ce1f84a --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_custom_tool.rbi @@ -0,0 +1,335 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionCustomTool = Chat::ChatCompletionCustomTool + + module Chat + class ChatCompletionCustomTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool, + OpenAI::Internal::AnyHash + ) + end + + # Properties of the custom tool. 
+ sig { returns(OpenAI::Chat::ChatCompletionCustomTool::Custom) } + attr_reader :custom + + sig do + params( + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom::OrHash + ).void + end + attr_writer :custom + + # The type of the custom tool. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # A custom tool that processes input using a specified format. + sig do + params( + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Properties of the custom tool. + custom:, + # The type of the custom tool. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + type: Symbol + } + ) + end + def to_hash + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The name of the custom tool, used to identify it in tool calls. + sig { returns(String) } + attr_accessor :name + + # Optional description of the custom tool, used to provide more context. + sig { returns(T.nilable(String)) } + attr_reader :description + + sig { params(description: String).void } + attr_writer :description + + # The input format for the custom tool. Default is unconstrained text. + sig do + returns( + T.nilable( + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + ) + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::OrHash + ) + ).void + end + attr_writer :format_ + + # Properties of the custom tool. 
+ sig do + params( + name: String, + description: String, + format_: + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::OrHash + ) + ).returns(T.attached_class) + end + def self.new( + # The name of the custom tool, used to identify it in tool calls. + name:, + # Optional description of the custom tool, used to provide more context. + description: nil, + # The input format for the custom tool. Default is unconstrained text. + format_: nil + ) + end + + sig do + override.returns( + { + name: String, + description: String, + format_: + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + ) + } + ) + end + def to_hash + end + + # The input format for the custom tool. Default is unconstrained text. + module Format + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + ) + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text, + OpenAI::Internal::AnyHash + ) + end + + # Unconstrained text format. Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # Unconstrained free-form text. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Unconstrained text format. Always `text`. + type: :text + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Grammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar, + OpenAI::Internal::AnyHash + ) + end + + # Your chosen grammar. 
+ sig do + returns( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar + ) + end + attr_reader :grammar + + sig do + params( + grammar: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::OrHash + ).void + end + attr_writer :grammar + + # Grammar format. Always `grammar`. + sig { returns(Symbol) } + attr_accessor :type + + # A grammar defined by the user. + sig do + params( + grammar: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Your chosen grammar. + grammar:, + # Grammar format. Always `grammar`. + type: :grammar + ) + end + + sig do + override.returns( + { + grammar: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + type: Symbol + } + ) + end + def to_hash + end + + class Grammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + OpenAI::Internal::AnyHash + ) + end + + # The grammar definition. + sig { returns(String) } + attr_accessor :definition + + # The syntax of the grammar definition. One of `lark` or `regex`. + sig do + returns( + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::OrSymbol + ) + end + attr_accessor :syntax + + # Your chosen grammar. + sig do + params( + definition: String, + syntax: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::OrSymbol + ).returns(T.attached_class) + end + def self.new( + # The grammar definition. + definition:, + # The syntax of the grammar definition. One of `lark` or `regex`. + syntax: + ) + end + + sig do + override.returns( + { + definition: String, + syntax: + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::OrSymbol + } + ) + end + def to_hash + end + + # The syntax of the grammar definition. One of `lark` or `regex`. 
+ module Syntax + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all( + Symbol, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax + ) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LARK = + T.let( + :lark, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::TaggedSymbol + ) + REGEX = + T.let( + :regex, + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::Syntax::TaggedSymbol + ] + ) + end + def self.values + end + end + end + end + + sig do + override.returns( + T::Array[ + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Variants + ] + ) + end + def self.variants + end + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_function_tool.rbi b/rbi/openai/models/chat/chat_completion_function_tool.rbi new file mode 100644 index 00000000..b8fa3e81 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_function_tool.rbi @@ -0,0 +1,51 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionFunctionTool = Chat::ChatCompletionFunctionTool + + module Chat + class ChatCompletionFunctionTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Internal::AnyHash + ) + end + + sig { returns(OpenAI::FunctionDefinition) } + attr_reader :function + + sig { params(function: OpenAI::FunctionDefinition::OrHash).void } + attr_writer :function + + # The type of the tool. Currently, only `function` is supported. + sig { returns(Symbol) } + attr_accessor :type + + # A function tool that can be used to generate a response. 
+ sig do + params( + function: OpenAI::FunctionDefinition::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + function:, + # The type of the tool. Currently, only `function` is supported. + type: :function + ) + end + + sig do + override.returns( + { function: OpenAI::FunctionDefinition, type: Symbol } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message.rbi b/rbi/openai/models/chat/chat_completion_message.rbi index 99aa4116..3f41659a 100644 --- a/rbi/openai/models/chat/chat_completion_message.rbi +++ b/rbi/openai/models/chat/chat_completion_message.rbi @@ -78,7 +78,9 @@ module OpenAI # The tool calls generated by the model, such as function calls. sig do returns( - T.nilable(T::Array[OpenAI::Chat::ChatCompletionMessageToolCall]) + T.nilable( + T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::Variants] + ) ) end attr_reader :tool_calls @@ -86,7 +88,12 @@ module OpenAI sig do params( tool_calls: - T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::OrHash] + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ] ).void end attr_writer :tool_calls @@ -102,7 +109,12 @@ module OpenAI function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall::OrHash, tool_calls: - T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::OrHash], + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::OrHash, + OpenAI::Chat::ChatCompletionMessageCustomToolCall::OrHash + ) + ], role: Symbol ).returns(T.attached_class) end @@ -138,7 +150,8 @@ module OpenAI T::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], audio: T.nilable(OpenAI::Chat::ChatCompletionAudio), function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: T::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + tool_calls: + T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::Variants] } ) 
end diff --git a/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi b/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi new file mode 100644 index 00000000..77a50d60 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi @@ -0,0 +1,105 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessageCustomToolCall = + Chat::ChatCompletionMessageCustomToolCall + + module Chat + class ChatCompletionMessageCustomToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageCustomToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # The custom tool that the model called. + sig do + returns(OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom) + end + attr_reader :custom + + sig do + params( + custom: + OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom::OrHash + ).void + end + attr_writer :custom + + # The type of the tool. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # A call to a custom tool created by the model. + sig do + params( + id: String, + custom: + OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the tool call. + id:, + # The custom tool that the model called. + custom:, + # The type of the tool. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + type: Symbol + } + ) + end + def to_hash + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The input for the custom tool call generated by the model. 
+ sig { returns(String) } + attr_accessor :input + + # The name of the custom tool to call. + sig { returns(String) } + attr_accessor :name + + # The custom tool that the model called. + sig { params(input: String, name: String).returns(T.attached_class) } + def self.new( + # The input for the custom tool call generated by the model. + input:, + # The name of the custom tool to call. + name: + ) + end + + sig { override.returns({ input: String, name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi b/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi new file mode 100644 index 00000000..d62f0f17 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi @@ -0,0 +1,118 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionMessageFunctionToolCall = + Chat::ChatCompletionMessageFunctionToolCall + + module Chat + class ChatCompletionMessageFunctionToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Internal::AnyHash + ) + end + + # The ID of the tool call. + sig { returns(String) } + attr_accessor :id + + # The function that the model called. + sig do + returns(OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function) + end + attr_reader :function + + sig do + params( + function: + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function::OrHash + ).void + end + attr_writer :function + + # The type of the tool. Currently, only `function` is supported. + sig { returns(Symbol) } + attr_accessor :type + + # A call to a function tool created by the model. + sig do + params( + id: String, + function: + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The ID of the tool call. + id:, + # The function that the model called. 
+ function:, + # The type of the tool. Currently, only `function` is supported. + type: :function + ) + end + + sig do + override.returns( + { + id: String, + function: + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + type: Symbol + } + ) + end + def to_hash + end + + class Function < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + OpenAI::Internal::AnyHash + ) + end + + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + sig { returns(String) } + attr_accessor :arguments + + # The parsed contents of the arguments. + sig { returns(T.anything) } + attr_accessor :parsed + + # The name of the function to call. + sig { returns(String) } + attr_accessor :name + + # The function that the model called. + sig do + params(arguments: String, name: String).returns(T.attached_class) + end + def self.new( + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. + arguments:, + # The name of the function to call. 
+ name: + ) + end + + sig { override.returns({ arguments: String, name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_message_tool_call.rbi b/rbi/openai/models/chat/chat_completion_message_tool_call.rbi index 97da5d08..6c99f830 100644 --- a/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +++ b/rbi/openai/models/chat/chat_completion_message_tool_call.rbi @@ -5,107 +5,24 @@ module OpenAI ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall module Chat - class ChatCompletionMessageToolCall < OpenAI::Internal::Type::BaseModel - OrHash = + # A call to a function tool created by the model. + module ChatCompletionMessageToolCall + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias do T.any( - OpenAI::Chat::ChatCompletionMessageToolCall, - OpenAI::Internal::AnyHash + OpenAI::Chat::ChatCompletionMessageFunctionToolCall, + OpenAI::Chat::ChatCompletionMessageCustomToolCall ) end - # The ID of the tool call. - sig { returns(String) } - attr_accessor :id - - # The function that the model called. - sig { returns(OpenAI::Chat::ChatCompletionMessageToolCall::Function) } - attr_reader :function - - sig do - params( - function: - OpenAI::Chat::ChatCompletionMessageToolCall::Function::OrHash - ).void - end - attr_writer :function - - # The type of the tool. Currently, only `function` is supported. - sig { returns(Symbol) } - attr_accessor :type - - sig do - params( - id: String, - function: - OpenAI::Chat::ChatCompletionMessageToolCall::Function::OrHash, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The ID of the tool call. - id:, - # The function that the model called. - function:, - # The type of the tool. Currently, only `function` is supported. 
- type: :function - ) - end - sig do override.returns( - { - id: String, - function: OpenAI::Chat::ChatCompletionMessageToolCall::Function, - type: Symbol - } + T::Array[OpenAI::Chat::ChatCompletionMessageToolCall::Variants] ) end - def to_hash - end - - class Function < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Chat::ChatCompletionMessageToolCall::Function, - OpenAI::Internal::AnyHash - ) - end - - # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. - sig { returns(String) } - attr_accessor :arguments - - # The parsed contents of the arguments. - sig { returns(T.anything) } - attr_accessor :parsed - - # The name of the function to call. - sig { returns(String) } - attr_accessor :name - - # The function that the model called. - sig do - params(arguments: String, name: String).returns(T.attached_class) - end - def self.new( - # The arguments to call the function with, as generated by the model in JSON - # format. Note that the model does not always generate valid JSON, and may - # hallucinate parameters not defined by your function schema. Validate the - # arguments in your code before calling your function. - arguments:, - # The name of the function to call. - name: - ) - end - - sig { override.returns({ arguments: String, name: String }) } - def to_hash - end + def self.variants end end end diff --git a/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi b/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi index daed342c..f85abf64 100644 --- a/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +++ b/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi @@ -25,7 +25,7 @@ module OpenAI end attr_writer :function - # The type of the tool. 
Currently, only `function` is supported. + # For function calling, the type is always `function`. sig { returns(Symbol) } attr_accessor :type @@ -40,7 +40,7 @@ module OpenAI end def self.new( function:, - # The type of the tool. Currently, only `function` is supported. + # For function calling, the type is always `function`. type: :function ) end diff --git a/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi b/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi new file mode 100644 index 00000000..0c4ba4f2 --- /dev/null +++ b/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi @@ -0,0 +1,89 @@ +# typed: strong + +module OpenAI + module Models + ChatCompletionNamedToolChoiceCustom = + Chat::ChatCompletionNamedToolChoiceCustom + + module Chat + class ChatCompletionNamedToolChoiceCustom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom, + OpenAI::Internal::AnyHash + ) + end + + sig do + returns(OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom) + end + attr_reader :custom + + sig do + params( + custom: + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom::OrHash + ).void + end + attr_writer :custom + + # For custom tool calling, the type is always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Specifies a tool the model should use. Use to force the model to call a specific + # custom tool. + sig do + params( + custom: + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom::OrHash, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + custom:, + # For custom tool calling, the type is always `custom`. 
+ type: :custom + ) + end + + sig do + override.returns( + { + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + type: Symbol + } + ) + end + def to_hash + end + + class Custom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + OpenAI::Internal::AnyHash + ) + end + + # The name of the custom tool to call. + sig { returns(String) } + attr_accessor :name + + sig { params(name: String).returns(T.attached_class) } + def self.new( + # The name of the custom tool to call. + name: + ) + end + + sig { override.returns({ name: String }) } + def to_hash + end + end + end + end + end +end diff --git a/rbi/openai/models/chat/chat_completion_stream_options.rbi b/rbi/openai/models/chat/chat_completion_stream_options.rbi index 7061b1ff..e970e19e 100644 --- a/rbi/openai/models/chat/chat_completion_stream_options.rbi +++ b/rbi/openai/models/chat/chat_completion_stream_options.rbi @@ -14,6 +14,18 @@ module OpenAI ) end + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_obfuscation + + sig { params(include_obfuscation: T::Boolean).void } + attr_writer :include_obfuscation + # If set, an additional chunk will be streamed before the `data: [DONE]` message. # The `usage` field on this chunk shows the token usage statistics for the entire # request, and the `choices` field will always be an empty array. 
@@ -28,8 +40,20 @@ module OpenAI attr_writer :include_usage # Options for streaming response. Only set this when you set `stream: true`. - sig { params(include_usage: T::Boolean).returns(T.attached_class) } + sig do + params( + include_obfuscation: T::Boolean, + include_usage: T::Boolean + ).returns(T.attached_class) + end def self.new( + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, # If set, an additional chunk will be streamed before the `data: [DONE]` message. # The `usage` field on this chunk shows the token usage statistics for the entire # request, and the `choices` field will always be an empty array. @@ -41,7 +65,11 @@ module OpenAI ) end - sig { override.returns({ include_usage: T::Boolean }) } + sig do + override.returns( + { include_obfuscation: T::Boolean, include_usage: T::Boolean } + ) + end def to_hash end end diff --git a/rbi/openai/models/chat/chat_completion_tool.rbi b/rbi/openai/models/chat/chat_completion_tool.rbi index 0fcacb06..4f687406 100644 --- a/rbi/openai/models/chat/chat_completion_tool.rbi +++ b/rbi/openai/models/chat/chat_completion_tool.rbi @@ -5,41 +5,22 @@ module OpenAI ChatCompletionTool = Chat::ChatCompletionTool module Chat - class ChatCompletionTool < OpenAI::Internal::Type::BaseModel - OrHash = + # A function tool that can be used to generate a response. 
+ module ChatCompletionTool + extend OpenAI::Internal::Type::Union + + Variants = T.type_alias do - T.any(OpenAI::Chat::ChatCompletionTool, OpenAI::Internal::AnyHash) + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Chat::ChatCompletionCustomTool + ) end - sig { returns(OpenAI::FunctionDefinition) } - attr_reader :function - - sig { params(function: OpenAI::FunctionDefinition::OrHash).void } - attr_writer :function - - # The type of the tool. Currently, only `function` is supported. - sig { returns(Symbol) } - attr_accessor :type - - sig do - params( - function: OpenAI::FunctionDefinition::OrHash, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - function:, - # The type of the tool. Currently, only `function` is supported. - type: :function - ) - end - sig do - override.returns( - { function: OpenAI::FunctionDefinition, type: Symbol } - ) + override.returns(T::Array[OpenAI::Chat::ChatCompletionTool::Variants]) end - def to_hash + def self.variants end end end diff --git a/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi b/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi index 5363a308..df0f67a7 100644 --- a/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +++ b/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi @@ -21,7 +21,9 @@ module OpenAI T.type_alias do T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::TaggedSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom ) end diff --git a/rbi/openai/models/chat/completion_create_params.rbi b/rbi/openai/models/chat/completion_create_params.rbi index 5b5298fb..edbf9656 100644 --- a/rbi/openai/models/chat/completion_create_params.rbi +++ b/rbi/openai/models/chat/completion_create_params.rbi @@ -225,12 +225,11 @@ module OpenAI sig { params(prompt_cache_key: String).void } attr_writer 
:prompt_cache_key - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :reasoning_effort @@ -368,7 +367,9 @@ module OpenAI T.nilable( T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom ) ) ) @@ -380,16 +381,29 @@ module OpenAI tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash ) ).void end attr_writer :tool_choice - # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. - sig { returns(T.nilable(T::Array[OpenAI::Chat::ChatCompletionTool])) } + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). 
+ sig do + returns( + T.nilable( + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Chat::ChatCompletionCustomTool + ) + ] + ) + ) + end attr_reader :tools sig do @@ -397,8 +411,9 @@ module OpenAI tools: T::Array[ T.any( - OpenAI::Chat::ChatCompletionTool::OrHash, - OpenAI::StructuredOutput::JsonSchemaConverter + OpenAI::StructuredOutput::JsonSchemaConverter, + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash ) ] ).void @@ -430,6 +445,16 @@ module OpenAI sig { params(user: String).void } attr_writer :user + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + sig do + returns( + T.nilable(OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol) + ) + end + attr_accessor :verbosity + # This tool searches the web for relevant results to use in a response. Learn more # about the # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
@@ -511,18 +536,25 @@ module OpenAI tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash ), tools: T::Array[ T.any( - OpenAI::Chat::ChatCompletionTool::OrHash, - OpenAI::StructuredOutput::JsonSchemaConverter + OpenAI::StructuredOutput::JsonSchemaConverter, + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash ) ], top_logprobs: T.nilable(Integer), top_p: T.nilable(Float), user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash, request_options: OpenAI::RequestOptions::OrHash @@ -631,12 +663,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # An object specifying the format that the model must output. # @@ -707,9 +738,9 @@ module OpenAI # `none` is the default when no tools are present. `auto` is the default if tools # are present. tool_choice: nil, - # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. 
A max of 128 functions are supported. + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. @@ -727,6 +758,10 @@ module OpenAI # similar requests and to help OpenAI detect and prevent abuse. # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). user: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil, # This tool searches the web for relevant results to use in a response. Learn more # about the # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
@@ -798,18 +833,25 @@ module OpenAI tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice + OpenAI::Chat::ChatCompletionAllowedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoice, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom ), tools: T::Array[ T.any( - OpenAI::Chat::ChatCompletionTool, - OpenAI::StructuredOutput::JsonSchemaConverter + OpenAI::StructuredOutput::JsonSchemaConverter, + OpenAI::Chat::ChatCompletionFunctionTool, + OpenAI::Chat::ChatCompletionCustomTool ) ], top_logprobs: T.nilable(Integer), top_p: T.nilable(Float), user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, request_options: OpenAI::RequestOptions @@ -1137,6 +1179,45 @@ module OpenAI ) end + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. 
+ module Verbosity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Chat::CompletionCreateParams::Verbosity) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Chat::CompletionCreateParams::Verbosity::TaggedSymbol + ] + ) + end + def self.values + end + end + class WebSearchOptions < OpenAI::Internal::Type::BaseModel OrHash = T.type_alias do diff --git a/rbi/openai/models/chat_model.rbi b/rbi/openai/models/chat_model.rbi index 26dc28c7..7ba1f29a 100644 --- a/rbi/openai/models/chat_model.rbi +++ b/rbi/openai/models/chat_model.rbi @@ -8,6 +8,17 @@ module OpenAI TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ChatModel) } OrSymbol = T.type_alias { T.any(Symbol, String) } + GPT_5 = T.let(:"gpt-5", OpenAI::ChatModel::TaggedSymbol) + GPT_5_MINI = T.let(:"gpt-5-mini", OpenAI::ChatModel::TaggedSymbol) + GPT_5_NANO = T.let(:"gpt-5-nano", OpenAI::ChatModel::TaggedSymbol) + GPT_5_2025_08_07 = + T.let(:"gpt-5-2025-08-07", OpenAI::ChatModel::TaggedSymbol) + GPT_5_MINI_2025_08_07 = + T.let(:"gpt-5-mini-2025-08-07", OpenAI::ChatModel::TaggedSymbol) + GPT_5_NANO_2025_08_07 = + T.let(:"gpt-5-nano-2025-08-07", OpenAI::ChatModel::TaggedSymbol) + GPT_5_CHAT_LATEST = + T.let(:"gpt-5-chat-latest", OpenAI::ChatModel::TaggedSymbol) GPT_4_1 = T.let(:"gpt-4.1", OpenAI::ChatModel::TaggedSymbol) GPT_4_1_MINI = T.let(:"gpt-4.1-mini", OpenAI::ChatModel::TaggedSymbol) GPT_4_1_NANO = T.let(:"gpt-4.1-nano", OpenAI::ChatModel::TaggedSymbol) diff --git a/rbi/openai/models/custom_tool_input_format.rbi b/rbi/openai/models/custom_tool_input_format.rbi new file mode 100644 index 00000000..d2f57068 --- 
/dev/null +++ b/rbi/openai/models/custom_tool_input_format.rbi @@ -0,0 +1,136 @@ +# typed: strong + +module OpenAI + module Models + # The input format for the custom tool. Default is unconstrained text. + module CustomToolInputFormat + extend OpenAI::Internal::Type::Union + + Variants = + T.type_alias do + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::CustomToolInputFormat::Grammar + ) + end + + class Text < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::Internal::AnyHash + ) + end + + # Unconstrained text format. Always `text`. + sig { returns(Symbol) } + attr_accessor :type + + # Unconstrained free-form text. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # Unconstrained text format. Always `text`. + type: :text + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + + class Grammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::CustomToolInputFormat::Grammar, + OpenAI::Internal::AnyHash + ) + end + + # The grammar definition. + sig { returns(String) } + attr_accessor :definition + + # The syntax of the grammar definition. One of `lark` or `regex`. + sig do + returns(OpenAI::CustomToolInputFormat::Grammar::Syntax::OrSymbol) + end + attr_accessor :syntax + + # Grammar format. Always `grammar`. + sig { returns(Symbol) } + attr_accessor :type + + # A grammar defined by the user. + sig do + params( + definition: String, + syntax: OpenAI::CustomToolInputFormat::Grammar::Syntax::OrSymbol, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The grammar definition. + definition:, + # The syntax of the grammar definition. One of `lark` or `regex`. + syntax:, + # Grammar format. Always `grammar`. 
+ type: :grammar + ) + end + + sig do + override.returns( + { + definition: String, + syntax: OpenAI::CustomToolInputFormat::Grammar::Syntax::OrSymbol, + type: Symbol + } + ) + end + def to_hash + end + + # The syntax of the grammar definition. One of `lark` or `regex`. + module Syntax + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::CustomToolInputFormat::Grammar::Syntax) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LARK = + T.let( + :lark, + OpenAI::CustomToolInputFormat::Grammar::Syntax::TaggedSymbol + ) + REGEX = + T.let( + :regex, + OpenAI::CustomToolInputFormat::Grammar::Syntax::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::CustomToolInputFormat::Grammar::Syntax::TaggedSymbol + ] + ) + end + def self.values + end + end + end + + sig do + override.returns(T::Array[OpenAI::CustomToolInputFormat::Variants]) + end + def self.variants + end + end + end +end diff --git a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi index d929b514..79dee373 100644 --- a/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +++ b/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi @@ -935,12 +935,16 @@ module OpenAI # A list of tools the model may call. Currently, only functions are supported as a # tool. Use this to provide a list of functions the model may generate JSON inputs # for. A max of 128 functions are supported. 
- sig { returns(T.nilable(T::Array[OpenAI::Chat::ChatCompletionTool])) } + sig do + returns( + T.nilable(T::Array[OpenAI::Chat::ChatCompletionFunctionTool]) + ) + end attr_reader :tools sig do params( - tools: T::Array[OpenAI::Chat::ChatCompletionTool::OrHash] + tools: T::Array[OpenAI::Chat::ChatCompletionFunctionTool::OrHash] ).void end attr_writer :tools @@ -963,7 +967,7 @@ module OpenAI ), seed: Integer, temperature: Float, - tools: T::Array[OpenAI::Chat::ChatCompletionTool::OrHash], + tools: T::Array[OpenAI::Chat::ChatCompletionFunctionTool::OrHash], top_p: Float ).returns(T.attached_class) end @@ -1006,7 +1010,7 @@ module OpenAI ), seed: Integer, temperature: Float, - tools: T::Array[OpenAI::Chat::ChatCompletionTool], + tools: T::Array[OpenAI::Chat::ChatCompletionFunctionTool], top_p: Float } ) diff --git a/rbi/openai/models/evals/run_cancel_response.rbi b/rbi/openai/models/evals/run_cancel_response.rbi index d9c6617c..13f94289 100644 --- a/rbi/openai/models/evals/run_cancel_response.rbi +++ b/rbi/openai/models/evals/run_cancel_response.rbi @@ -1174,6 +1174,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ] @@ -1205,6 +1206,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], diff --git a/rbi/openai/models/evals/run_create_params.rbi b/rbi/openai/models/evals/run_create_params.rbi index c4577765..e7ea35ca 100644 --- a/rbi/openai/models/evals/run_create_params.rbi +++ b/rbi/openai/models/evals/run_create_params.rbi @@ -1125,6 +1125,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + 
OpenAI::Responses::CustomTool, OpenAI::Responses::WebSearchTool ) ] @@ -1145,6 +1146,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ] @@ -1176,6 +1178,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], @@ -1233,6 +1236,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, OpenAI::Responses::WebSearchTool ) ], diff --git a/rbi/openai/models/evals/run_create_response.rbi b/rbi/openai/models/evals/run_create_response.rbi index 7126ad47..bf8ed611 100644 --- a/rbi/openai/models/evals/run_create_response.rbi +++ b/rbi/openai/models/evals/run_create_response.rbi @@ -1174,6 +1174,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ] @@ -1205,6 +1206,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], diff --git a/rbi/openai/models/evals/run_list_response.rbi b/rbi/openai/models/evals/run_list_response.rbi index 28b81204..ab3d4305 100644 --- a/rbi/openai/models/evals/run_list_response.rbi +++ b/rbi/openai/models/evals/run_list_response.rbi @@ -1170,6 +1170,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, 
OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ] @@ -1201,6 +1202,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], diff --git a/rbi/openai/models/evals/run_retrieve_response.rbi b/rbi/openai/models/evals/run_retrieve_response.rbi index 306630c7..8797d797 100644 --- a/rbi/openai/models/evals/run_retrieve_response.rbi +++ b/rbi/openai/models/evals/run_retrieve_response.rbi @@ -1176,6 +1176,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ] @@ -1207,6 +1208,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], diff --git a/rbi/openai/models/reasoning.rbi b/rbi/openai/models/reasoning.rbi index dc89cb60..5243471d 100644 --- a/rbi/openai/models/reasoning.rbi +++ b/rbi/openai/models/reasoning.rbi @@ -6,12 +6,11 @@ module OpenAI OrHash = T.type_alias { T.any(OpenAI::Reasoning, OpenAI::Internal::AnyHash) } - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. 
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) } attr_accessor :effort @@ -42,12 +41,11 @@ module OpenAI ).returns(T.attached_class) end def self.new( - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. effort: nil, # **Deprecated:** use `summary` instead. # diff --git a/rbi/openai/models/reasoning_effort.rbi b/rbi/openai/models/reasoning_effort.rbi index 30ff7a5f..fb0629b1 100644 --- a/rbi/openai/models/reasoning_effort.rbi +++ b/rbi/openai/models/reasoning_effort.rbi @@ -2,18 +2,18 @@ module OpenAI module Models - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. 
module ReasoningEffort extend OpenAI::Internal::Type::Enum TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ReasoningEffort) } OrSymbol = T.type_alias { T.any(Symbol, String) } + MINIMAL = T.let(:minimal, OpenAI::ReasoningEffort::TaggedSymbol) LOW = T.let(:low, OpenAI::ReasoningEffort::TaggedSymbol) MEDIUM = T.let(:medium, OpenAI::ReasoningEffort::TaggedSymbol) HIGH = T.let(:high, OpenAI::ReasoningEffort::TaggedSymbol) diff --git a/rbi/openai/models/response_format_text_grammar.rbi b/rbi/openai/models/response_format_text_grammar.rbi new file mode 100644 index 00000000..771688ea --- /dev/null +++ b/rbi/openai/models/response_format_text_grammar.rbi @@ -0,0 +1,35 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatTextGrammar, OpenAI::Internal::AnyHash) + end + + # The custom grammar for the model to follow. + sig { returns(String) } + attr_accessor :grammar + + # The type of response format being defined. Always `grammar`. + sig { returns(Symbol) } + attr_accessor :type + + # A custom grammar for the model to follow when generating text. Learn more in the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars). + sig { params(grammar: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The custom grammar for the model to follow. + grammar:, + # The type of response format being defined. Always `grammar`. 
+ type: :grammar + ) + end + + sig { override.returns({ grammar: String, type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/response_format_text_python.rbi b/rbi/openai/models/response_format_text_python.rbi new file mode 100644 index 00000000..cc36114b --- /dev/null +++ b/rbi/openai/models/response_format_text_python.rbi @@ -0,0 +1,30 @@ +# typed: strong + +module OpenAI + module Models + class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::ResponseFormatTextPython, OpenAI::Internal::AnyHash) + end + + # The type of response format being defined. Always `python`. + sig { returns(Symbol) } + attr_accessor :type + + # Configure the model to generate valid Python code. See the + # [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) + # for more details. + sig { params(type: Symbol).returns(T.attached_class) } + def self.new( + # The type of response format being defined. Always `python`. + type: :python + ) + end + + sig { override.returns({ type: Symbol }) } + def to_hash + end + end + end +end diff --git a/rbi/openai/models/responses/custom_tool.rbi b/rbi/openai/models/responses/custom_tool.rbi new file mode 100644 index 00000000..066ca268 --- /dev/null +++ b/rbi/openai/models/responses/custom_tool.rbi @@ -0,0 +1,96 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class CustomTool < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any(OpenAI::Responses::CustomTool, OpenAI::Internal::AnyHash) + end + + # The name of the custom tool, used to identify it in tool calls. + sig { returns(String) } + attr_accessor :name + + # The type of the custom tool. Always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Optional description of the custom tool, used to provide more context. 
+ sig { returns(T.nilable(String)) } + attr_reader :description + + sig { params(description: String).void } + attr_writer :description + + # The input format for the custom tool. Default is unconstrained text. + sig do + returns( + T.nilable( + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::CustomToolInputFormat::Grammar + ) + ) + ) + end + attr_reader :format_ + + sig do + params( + format_: + T.any( + OpenAI::CustomToolInputFormat::Text::OrHash, + OpenAI::CustomToolInputFormat::Grammar::OrHash + ) + ).void + end + attr_writer :format_ + + # A custom tool that processes input using a specified format. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools). + sig do + params( + name: String, + description: String, + format_: + T.any( + OpenAI::CustomToolInputFormat::Text::OrHash, + OpenAI::CustomToolInputFormat::Grammar::OrHash + ), + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The name of the custom tool, used to identify it in tool calls. + name:, + # Optional description of the custom tool, used to provide more context. + description: nil, + # The input format for the custom tool. Default is unconstrained text. + format_: nil, + # The type of the custom tool. Always `custom`. + type: :custom + ) + end + + sig do + override.returns( + { + name: String, + type: Symbol, + description: String, + format_: + T.any( + OpenAI::CustomToolInputFormat::Text, + OpenAI::CustomToolInputFormat::Grammar + ) + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response.rbi b/rbi/openai/models/responses/response.rbi index 3cdfaa54..8980d26b 100644 --- a/rbi/openai/models/responses/response.rbi +++ b/rbi/openai/models/responses/response.rbi @@ -116,8 +116,10 @@ module OpenAI # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). 
# - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. sig { returns(T::Array[OpenAI::Responses::Tool::Variants]) } attr_accessor :tools @@ -308,7 +310,8 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, - OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash ) ], parallel_tool_calls: T::Boolean, @@ -316,9 +319,11 @@ module OpenAI tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, - OpenAI::Responses::ToolChoiceMcp::OrHash + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash ), tools: T::Array[ @@ -330,6 +335,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], @@ -413,8 +419,10 @@ module OpenAI # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). 
+ # You can also use custom tools to call your own code. tools:, # An alternative to sampling with temperature, called nucleus sampling, where the # model considers the results of the tokens with top_p probability mass. So 0.1 @@ -684,9 +692,11 @@ module OpenAI T.type_alias do T.any( OpenAI::Responses::ToolChoiceOptions::TaggedSymbol, + OpenAI::Responses::ToolChoiceAllowed, OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, - OpenAI::Responses::ToolChoiceMcp + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom ) end diff --git a/rbi/openai/models/responses/response_create_params.rbi b/rbi/openai/models/responses/response_create_params.rbi index 734ac2b0..ad7a9dc9 100644 --- a/rbi/openai/models/responses/response_create_params.rbi +++ b/rbi/openai/models/responses/response_create_params.rbi @@ -208,6 +208,24 @@ module OpenAI sig { returns(T.nilable(T::Boolean)) } attr_accessor :store + # Options for streaming responses. Only set this when you set `stream: true`. + sig do + returns( + T.nilable(OpenAI::Responses::ResponseCreateParams::StreamOptions) + ) + end + attr_reader :stream_options + + sig do + params( + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ) + ).void + end + attr_writer :stream_options + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will # make the output more random, while lower values like 0.2 will make it more # focused and deterministic. 
We generally recommend altering this or `top_p` but @@ -242,9 +260,11 @@ module OpenAI T.nilable( T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed, OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, - OpenAI::Responses::ToolChoiceMcp + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom ) ) ) @@ -256,9 +276,11 @@ module OpenAI tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, - OpenAI::Responses::ToolChoiceMcp::OrHash + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash ) ).void end @@ -276,8 +298,10 @@ module OpenAI # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. 
sig do returns( T.nilable( @@ -290,6 +314,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, OpenAI::Responses::WebSearchTool ) ] @@ -310,6 +335,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ] @@ -386,14 +412,20 @@ module OpenAI OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol ), store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ), temperature: T.nilable(Float), text: OpenAI::Responses::ResponseTextConfig::OrHash, tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, - OpenAI::Responses::ToolChoiceMcp::OrHash + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash ), tools: T::Array[ @@ -405,6 +437,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], @@ -521,6 +554,8 @@ module OpenAI service_tier: nil, # Whether to store the generated model response for later retrieval via API. store: nil, + # Options for streaming responses. Only set this when you set `stream: true`. + stream_options: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will # make the output more random, while lower values like 0.2 will make it more # focused and deterministic. 
We generally recommend altering this or `top_p` but @@ -548,8 +583,10 @@ module OpenAI # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. @@ -608,14 +645,20 @@ module OpenAI OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol ), store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions + ), temperature: T.nilable(Float), text: OpenAI::Responses::ResponseTextConfig, tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed, OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, - OpenAI::Responses::ToolChoiceMcp + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom ), tools: T::Array[ @@ -627,6 +670,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, OpenAI::Responses::WebSearchTool ) ], @@ -739,6 +783,47 @@ module OpenAI end end + class StreamOptions < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCreateParams::StreamOptions, + OpenAI::Internal::AnyHash + ) + end + + # When true, stream obfuscation will be enabled. 
Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_obfuscation + + sig { params(include_obfuscation: T::Boolean).void } + attr_writer :include_obfuscation + + # Options for streaming responses. Only set this when you set `stream: true`. + sig do + params(include_obfuscation: T::Boolean).returns(T.attached_class) + end + def self.new( + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil + ) + end + + sig { override.returns({ include_obfuscation: T::Boolean }) } + def to_hash + end + end + # How the model should select which tool (or tools) to use when generating a # response. See the `tools` parameter to see how to specify which tools the model # can call. 
@@ -749,9 +834,11 @@ module OpenAI T.type_alias do T.any( OpenAI::Responses::ToolChoiceOptions::TaggedSymbol, + OpenAI::Responses::ToolChoiceAllowed, OpenAI::Responses::ToolChoiceTypes, OpenAI::Responses::ToolChoiceFunction, - OpenAI::Responses::ToolChoiceMcp + OpenAI::Responses::ToolChoiceMcp, + OpenAI::Responses::ToolChoiceCustom ) end diff --git a/rbi/openai/models/responses/response_custom_tool_call.rbi b/rbi/openai/models/responses/response_custom_tool_call.rbi new file mode 100644 index 00000000..89eb7f74 --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call.rbi @@ -0,0 +1,78 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCall, + OpenAI::Internal::AnyHash + ) + end + + # An identifier used to map this custom tool call to a tool call output. + sig { returns(String) } + attr_accessor :call_id + + # The input for the custom tool call generated by the model. + sig { returns(String) } + attr_accessor :input + + # The name of the custom tool being called. + sig { returns(String) } + attr_accessor :name + + # The type of the custom tool call. Always `custom_tool_call`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the custom tool call in the OpenAI platform. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # A call to a custom tool created by the model. + sig do + params( + call_id: String, + input: String, + name: String, + id: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # An identifier used to map this custom tool call to a tool call output. + call_id:, + # The input for the custom tool call generated by the model. + input:, + # The name of the custom tool being called. + name:, + # The unique ID of the custom tool call in the OpenAI platform. 
+ id: nil, + # The type of the custom tool call. Always `custom_tool_call`. + type: :custom_tool_call + ) + end + + sig do + override.returns( + { + call_id: String, + input: String, + name: String, + type: Symbol, + id: String + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi b/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi new file mode 100644 index 00000000..dbc175f0 --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The incremental input data (delta) for the custom tool call. + sig { returns(String) } + attr_accessor :delta + + # Unique identifier for the API item associated with this event. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output this delta applies to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The event type identifier. + sig { returns(Symbol) } + attr_accessor :type + + # Event representing a delta (partial update) to the input of a custom tool call. + sig do + params( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The incremental input data (delta) for the custom tool call. + delta:, + # Unique identifier for the API item associated with this event. + item_id:, + # The index of the output this delta applies to. + output_index:, + # The sequence number of this event. + sequence_number:, + # The event type identifier. 
+ type: :"response.custom_tool_call_input.delta" + ) + end + + sig do + override.returns( + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi b/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi new file mode 100644 index 00000000..cd3c485d --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi @@ -0,0 +1,75 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCallInputDoneEvent, + OpenAI::Internal::AnyHash + ) + end + + # The complete input data for the custom tool call. + sig { returns(String) } + attr_accessor :input + + # Unique identifier for the API item associated with this event. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output this event applies to. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The event type identifier. + sig { returns(Symbol) } + attr_accessor :type + + # Event indicating that input for a custom tool call is complete. + sig do + params( + input: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The complete input data for the custom tool call. + input:, + # Unique identifier for the API item associated with this event. + item_id:, + # The index of the output this event applies to. + output_index:, + # The sequence number of this event. + sequence_number:, + # The event type identifier. 
+ type: :"response.custom_tool_call_input.done" + ) + end + + sig do + override.returns( + { + input: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_custom_tool_call_output.rbi b/rbi/openai/models/responses/response_custom_tool_call_output.rbi new file mode 100644 index 00000000..b18c6a16 --- /dev/null +++ b/rbi/openai/models/responses/response_custom_tool_call_output.rbi @@ -0,0 +1,65 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Internal::AnyHash + ) + end + + # The call ID, used to map this custom tool call output to a custom tool call. + sig { returns(String) } + attr_accessor :call_id + + # The output from the custom tool call generated by your code. + sig { returns(String) } + attr_accessor :output + + # The type of the custom tool call output. Always `custom_tool_call_output`. + sig { returns(Symbol) } + attr_accessor :type + + # The unique ID of the custom tool call output in the OpenAI platform. + sig { returns(T.nilable(String)) } + attr_reader :id + + sig { params(id: String).void } + attr_writer :id + + # The output of a custom tool call from your code, being sent back to the model. + sig do + params( + call_id: String, + output: String, + id: String, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The call ID, used to map this custom tool call output to a custom tool call. + call_id:, + # The output from the custom tool call generated by your code. + output:, + # The unique ID of the custom tool call output in the OpenAI platform. + id: nil, + # The type of the custom tool call output. Always `custom_tool_call_output`. 
+ type: :custom_tool_call_output + ) + end + + sig do + override.returns( + { call_id: String, output: String, type: Symbol, id: String } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_input_item.rbi b/rbi/openai/models/responses/response_input_item.rbi index bbafe9cb..6b28bab2 100644 --- a/rbi/openai/models/responses/response_input_item.rbi +++ b/rbi/openai/models/responses/response_input_item.rbi @@ -32,6 +32,8 @@ module OpenAI OpenAI::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Responses::ResponseInputItem::McpCall, + OpenAI::Responses::ResponseCustomToolCallOutput, + OpenAI::Responses::ResponseCustomToolCall, OpenAI::Responses::ResponseInputItem::ItemReference ) end diff --git a/rbi/openai/models/responses/response_output_item.rbi b/rbi/openai/models/responses/response_output_item.rbi index 51542fc3..1f08b218 100644 --- a/rbi/openai/models/responses/response_output_item.rbi +++ b/rbi/openai/models/responses/response_output_item.rbi @@ -21,7 +21,8 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Responses::ResponseOutputItem::McpCall, OpenAI::Responses::ResponseOutputItem::McpListTools, - OpenAI::Responses::ResponseOutputItem::McpApprovalRequest + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest, + OpenAI::Responses::ResponseCustomToolCall ) end diff --git a/rbi/openai/models/responses/response_output_item_added_event.rbi b/rbi/openai/models/responses/response_output_item_added_event.rbi index 6479c80d..6ac51356 100644 --- a/rbi/openai/models/responses/response_output_item_added_event.rbi +++ b/rbi/openai/models/responses/response_output_item_added_event.rbi @@ -44,7 +44,8 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, - 
OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash ), output_index: Integer, sequence_number: Integer, diff --git a/rbi/openai/models/responses/response_output_item_done_event.rbi b/rbi/openai/models/responses/response_output_item_done_event.rbi index 7789f951..ba0ecc6d 100644 --- a/rbi/openai/models/responses/response_output_item_done_event.rbi +++ b/rbi/openai/models/responses/response_output_item_done_event.rbi @@ -44,7 +44,8 @@ module OpenAI OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpCall::OrHash, OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash, - OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash + OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash, + OpenAI::Responses::ResponseCustomToolCall::OrHash ), output_index: Integer, sequence_number: Integer, diff --git a/rbi/openai/models/responses/response_reasoning_item.rbi b/rbi/openai/models/responses/response_reasoning_item.rbi index 0317e535..328f2828 100644 --- a/rbi/openai/models/responses/response_reasoning_item.rbi +++ b/rbi/openai/models/responses/response_reasoning_item.rbi @@ -16,7 +16,7 @@ module OpenAI sig { returns(String) } attr_accessor :id - # Reasoning text contents. + # Reasoning summary content. sig do returns(T::Array[OpenAI::Responses::ResponseReasoningItem::Summary]) end @@ -26,6 +26,26 @@ module OpenAI sig { returns(Symbol) } attr_accessor :type + # Reasoning text content. 
+ sig do + returns( + T.nilable( + T::Array[OpenAI::Responses::ResponseReasoningItem::Content] + ) + ) + end + attr_reader :content + + sig do + params( + content: + T::Array[ + OpenAI::Responses::ResponseReasoningItem::Content::OrHash + ] + ).void + end + attr_writer :content + # The encrypted content of the reasoning item - populated when a response is # generated with `reasoning.encrypted_content` in the `include` parameter. sig { returns(T.nilable(String)) } @@ -60,6 +80,10 @@ module OpenAI T::Array[ OpenAI::Responses::ResponseReasoningItem::Summary::OrHash ], + content: + T::Array[ + OpenAI::Responses::ResponseReasoningItem::Content::OrHash + ], encrypted_content: T.nilable(String), status: OpenAI::Responses::ResponseReasoningItem::Status::OrSymbol, type: Symbol @@ -68,8 +92,10 @@ module OpenAI def self.new( # The unique identifier of the reasoning content. id:, - # Reasoning text contents. + # Reasoning summary content. summary:, + # Reasoning text content. + content: nil, # The encrypted content of the reasoning item - populated when a response is # generated with `reasoning.encrypted_content` in the `include` parameter. encrypted_content: nil, @@ -88,6 +114,8 @@ module OpenAI summary: T::Array[OpenAI::Responses::ResponseReasoningItem::Summary], type: Symbol, + content: + T::Array[OpenAI::Responses::ResponseReasoningItem::Content], encrypted_content: T.nilable(String), status: OpenAI::Responses::ResponseReasoningItem::Status::OrSymbol } @@ -105,7 +133,7 @@ module OpenAI ) end - # A short summary of the reasoning used by the model when generating the response. + # A summary of the reasoning output from the model so far. sig { returns(String) } attr_accessor :text @@ -115,7 +143,7 @@ module OpenAI sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new( - # A short summary of the reasoning used by the model when generating the response. + # A summary of the reasoning output from the model so far. text:, # The type of the object. 
Always `summary_text`. type: :summary_text @@ -127,6 +155,37 @@ module OpenAI end end + class Content < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningItem::Content, + OpenAI::Internal::AnyHash + ) + end + + # Reasoning text output from the model. + sig { returns(String) } + attr_accessor :text + + # The type of the object. Always `reasoning_text`. + sig { returns(Symbol) } + attr_accessor :type + + sig { params(text: String, type: Symbol).returns(T.attached_class) } + def self.new( + # Reasoning text output from the model. + text:, + # The type of the object. Always `reasoning_text`. + type: :reasoning_text + ) + end + + sig { override.returns({ text: String, type: Symbol }) } + def to_hash + end + end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. # Populated when items are returned via API. module Status diff --git a/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi b/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi deleted file mode 100644 index c92fd014..00000000 --- a/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +++ /dev/null @@ -1,85 +0,0 @@ -# typed: strong - -module OpenAI - module Models - module Responses - class ResponseReasoningSummaryDeltaEvent < OpenAI::Internal::Type::BaseModel - OrHash = - T.type_alias do - T.any( - OpenAI::Responses::ResponseReasoningSummaryDeltaEvent, - OpenAI::Internal::AnyHash - ) - end - - # The partial update to the reasoning summary content. - sig { returns(T.anything) } - attr_accessor :delta - - # The unique identifier of the item for which the reasoning summary is being - # updated. - sig { returns(String) } - attr_accessor :item_id - - # The index of the output item in the response's output array. - sig { returns(Integer) } - attr_accessor :output_index - - # The sequence number of this event. 
- sig { returns(Integer) } - attr_accessor :sequence_number - - # The index of the summary part within the output item. - sig { returns(Integer) } - attr_accessor :summary_index - - # The type of the event. Always 'response.reasoning_summary.delta'. - sig { returns(Symbol) } - attr_accessor :type - - # Emitted when there is a delta (partial update) to the reasoning summary content. - sig do - params( - delta: T.anything, - item_id: String, - output_index: Integer, - sequence_number: Integer, - summary_index: Integer, - type: Symbol - ).returns(T.attached_class) - end - def self.new( - # The partial update to the reasoning summary content. - delta:, - # The unique identifier of the item for which the reasoning summary is being - # updated. - item_id:, - # The index of the output item in the response's output array. - output_index:, - # The sequence number of this event. - sequence_number:, - # The index of the summary part within the output item. - summary_index:, - # The type of the event. Always 'response.reasoning_summary.delta'. - type: :"response.reasoning_summary.delta" - ) - end - - sig do - override.returns( - { - delta: T.anything, - item_id: String, - output_index: Integer, - sequence_number: Integer, - summary_index: Integer, - type: Symbol - } - ) - end - def to_hash - end - end - end - end -end diff --git a/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi b/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi new file mode 100644 index 00000000..54336041 --- /dev/null +++ b/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi @@ -0,0 +1,83 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ResponseReasoningTextDeltaEvent, + OpenAI::Internal::AnyHash + ) + end + + # The index of the reasoning content part this delta is associated with. 
+ sig { returns(Integer) } + attr_accessor :content_index + + # The text delta that was added to the reasoning content. + sig { returns(String) } + attr_accessor :delta + + # The ID of the item this reasoning text delta is associated with. + sig { returns(String) } + attr_accessor :item_id + + # The index of the output item this reasoning text delta is associated with. + sig { returns(Integer) } + attr_accessor :output_index + + # The sequence number of this event. + sig { returns(Integer) } + attr_accessor :sequence_number + + # The type of the event. Always `response.reasoning_text.delta`. + sig { returns(Symbol) } + attr_accessor :type + + # Emitted when a delta is added to a reasoning text. + sig do + params( + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # The index of the reasoning content part this delta is associated with. + content_index:, + # The text delta that was added to the reasoning content. + delta:, + # The ID of the item this reasoning text delta is associated with. + item_id:, + # The index of the output item this reasoning text delta is associated with. + output_index:, + # The sequence number of this event. + sequence_number:, + # The type of the event. Always `response.reasoning_text.delta`. 
+ type: :"response.reasoning_text.delta" + ) + end + + sig do + override.returns( + { + content_index: Integer, + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: Symbol + } + ) + end + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/responses/response_reasoning_summary_done_event.rbi b/rbi/openai/models/responses/response_reasoning_text_done_event.rbi similarity index 54% rename from rbi/openai/models/responses/response_reasoning_summary_done_event.rbi rename to rbi/openai/models/responses/response_reasoning_text_done_event.rbi index eead2395..2561422b 100644 --- a/rbi/openai/models/responses/response_reasoning_summary_done_event.rbi +++ b/rbi/openai/models/responses/response_reasoning_text_done_event.rbi @@ -3,20 +3,24 @@ module OpenAI module Models module Responses - class ResponseReasoningSummaryDoneEvent < OpenAI::Internal::Type::BaseModel + class ResponseReasoningTextDoneEvent < OpenAI::Internal::Type::BaseModel OrHash = T.type_alias do T.any( - OpenAI::Responses::ResponseReasoningSummaryDoneEvent, + OpenAI::Responses::ResponseReasoningTextDoneEvent, OpenAI::Internal::AnyHash ) end - # The unique identifier of the item for which the reasoning summary is finalized. + # The index of the reasoning content part. + sig { returns(Integer) } + attr_accessor :content_index + + # The ID of the item this reasoning text is associated with. sig { returns(String) } attr_accessor :item_id - # The index of the output item in the response's output array. + # The index of the output item this reasoning text is associated with. sig { returns(Integer) } attr_accessor :output_index @@ -24,52 +28,48 @@ module OpenAI sig { returns(Integer) } attr_accessor :sequence_number - # The index of the summary part within the output item. - sig { returns(Integer) } - attr_accessor :summary_index - - # The finalized reasoning summary text. + # The full text of the completed reasoning content. 
sig { returns(String) } attr_accessor :text - # The type of the event. Always 'response.reasoning_summary.done'. + # The type of the event. Always `response.reasoning_text.done`. sig { returns(Symbol) } attr_accessor :type - # Emitted when the reasoning summary content is finalized for an item. + # Emitted when a reasoning text is completed. sig do params( + content_index: Integer, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, text: String, type: Symbol ).returns(T.attached_class) end def self.new( - # The unique identifier of the item for which the reasoning summary is finalized. + # The index of the reasoning content part. + content_index:, + # The ID of the item this reasoning text is associated with. item_id:, - # The index of the output item in the response's output array. + # The index of the output item this reasoning text is associated with. output_index:, # The sequence number of this event. sequence_number:, - # The index of the summary part within the output item. - summary_index:, - # The finalized reasoning summary text. + # The full text of the completed reasoning content. text:, - # The type of the event. Always 'response.reasoning_summary.done'. - type: :"response.reasoning_summary.done" + # The type of the event. Always `response.reasoning_text.done`. + type: :"response.reasoning_text.done" ) end sig do override.returns( { + content_index: Integer, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, text: String, type: Symbol } diff --git a/rbi/openai/models/responses/response_retrieve_params.rbi b/rbi/openai/models/responses/response_retrieve_params.rbi index c25abeb6..f4d1f80c 100644 --- a/rbi/openai/models/responses/response_retrieve_params.rbi +++ b/rbi/openai/models/responses/response_retrieve_params.rbi @@ -31,6 +31,18 @@ module OpenAI end attr_writer :include + # When true, stream obfuscation will be enabled. 
Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + sig { returns(T.nilable(T::Boolean)) } + attr_reader :include_obfuscation + + sig { params(include_obfuscation: T::Boolean).void } + attr_writer :include_obfuscation + # The sequence number of the event after which to start streaming. sig { returns(T.nilable(Integer)) } attr_reader :starting_after @@ -41,6 +53,7 @@ module OpenAI sig do params( include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, starting_after: Integer, request_options: OpenAI::RequestOptions::OrHash ).returns(T.attached_class) @@ -49,6 +62,13 @@ module OpenAI # Additional fields to include in the response. See the `include` parameter for # Response creation above for more information. include: nil, + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, # The sequence number of the event after which to start streaming. 
starting_after: nil, request_options: {} @@ -60,6 +80,7 @@ module OpenAI { include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, starting_after: Integer, request_options: OpenAI::RequestOptions } diff --git a/rbi/openai/models/responses/response_stream_event.rbi b/rbi/openai/models/responses/response_stream_event.rbi index 8b5e96b2..0aba05b2 100644 --- a/rbi/openai/models/responses/response_stream_event.rbi +++ b/rbi/openai/models/responses/response_stream_event.rbi @@ -38,6 +38,8 @@ module OpenAI OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent, + OpenAI::Responses::ResponseReasoningTextDeltaEvent, + OpenAI::Responses::ResponseReasoningTextDoneEvent, OpenAI::Responses::ResponseRefusalDeltaEvent, OpenAI::Responses::ResponseRefusalDoneEvent, OpenAI::Responses::ResponseTextDeltaEvent, @@ -59,8 +61,8 @@ module OpenAI OpenAI::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Responses::ResponseQueuedEvent, - OpenAI::Responses::ResponseReasoningSummaryDeltaEvent, - OpenAI::Responses::ResponseReasoningSummaryDoneEvent + OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent, + OpenAI::Responses::ResponseCustomToolCallInputDoneEvent ) end diff --git a/rbi/openai/models/responses/response_text_config.rbi b/rbi/openai/models/responses/response_text_config.rbi index 5103891f..e36e1bac 100644 --- a/rbi/openai/models/responses/response_text_config.rbi +++ b/rbi/openai/models/responses/response_text_config.rbi @@ -50,6 +50,18 @@ module OpenAI end attr_writer :format_ + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. 
+ sig do + returns( + T.nilable( + OpenAI::Responses::ResponseTextConfig::Verbosity::OrSymbol + ) + ) + end + attr_accessor :verbosity + # Configuration options for a text response from the model. Can be plain text or # structured JSON data. Learn more: # @@ -62,6 +74,10 @@ module OpenAI OpenAI::ResponseFormatText::OrHash, OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash, OpenAI::ResponseFormatJSONObject::OrHash + ), + verbosity: + T.nilable( + OpenAI::Responses::ResponseTextConfig::Verbosity::OrSymbol ) ).returns(T.attached_class) end @@ -79,7 +95,11 @@ module OpenAI # Setting to `{ "type": "json_object" }` enables the older JSON mode, which # ensures the message the model generates is valid JSON. Using `json_schema` is # preferred for models that support it. - format_: nil + format_: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil ) end @@ -91,12 +111,55 @@ module OpenAI OpenAI::ResponseFormatText, OpenAI::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::ResponseFormatJSONObject + ), + verbosity: + T.nilable( + OpenAI::Responses::ResponseTextConfig::Verbosity::OrSymbol ) } ) end def to_hash end + + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. 
+ module Verbosity + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ResponseTextConfig::Verbosity) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + LOW = + T.let( + :low, + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ) + MEDIUM = + T.let( + :medium, + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ) + HIGH = + T.let( + :high, + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ) + + sig do + override.returns( + T::Array[ + OpenAI::Responses::ResponseTextConfig::Verbosity::TaggedSymbol + ] + ) + end + def self.values + end + end end end end diff --git a/rbi/openai/models/responses/tool.rbi b/rbi/openai/models/responses/tool.rbi index fa1bac73..b5b5d1b5 100644 --- a/rbi/openai/models/responses/tool.rbi +++ b/rbi/openai/models/responses/tool.rbi @@ -17,6 +17,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter, OpenAI::Responses::Tool::ImageGeneration, OpenAI::Responses::Tool::LocalShell, + OpenAI::Responses::CustomTool, OpenAI::Responses::WebSearchTool ) end diff --git a/rbi/openai/models/responses/tool_choice_allowed.rbi b/rbi/openai/models/responses/tool_choice_allowed.rbi new file mode 100644 index 00000000..47ee4cca --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_allowed.rbi @@ -0,0 +1,124 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceAllowed < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceAllowed, + OpenAI::Internal::AnyHash + ) + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. 
+ sig { returns(OpenAI::Responses::ToolChoiceAllowed::Mode::OrSymbol) } + attr_accessor :mode + + # A list of tool definitions that the model should be allowed to call. + # + # For the Responses API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "name": "get_weather" }, + # { "type": "mcp", "server_label": "deepwiki" }, + # { "type": "image_generation" } + # ] + # ``` + sig { returns(T::Array[T::Hash[Symbol, T.anything]]) } + attr_accessor :tools + + # Allowed tool configuration type. Always `allowed_tools`. + sig { returns(Symbol) } + attr_accessor :type + + # Constrains the tools available to the model to a pre-defined set. + sig do + params( + mode: OpenAI::Responses::ToolChoiceAllowed::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]], + type: Symbol + ).returns(T.attached_class) + end + def self.new( + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. + # + # `required` requires the model to call one or more of the allowed tools. + mode:, + # A list of tool definitions that the model should be allowed to call. + # + # For the Responses API, the list of tool definitions might look like: + # + # ```json + # [ + # { "type": "function", "name": "get_weather" }, + # { "type": "mcp", "server_label": "deepwiki" }, + # { "type": "image_generation" } + # ] + # ``` + tools:, + # Allowed tool configuration type. Always `allowed_tools`. + type: :allowed_tools + ) + end + + sig do + override.returns( + { + mode: OpenAI::Responses::ToolChoiceAllowed::Mode::OrSymbol, + tools: T::Array[T::Hash[Symbol, T.anything]], + type: Symbol + } + ) + end + def to_hash + end + + # Constrains the tools available to the model to a pre-defined set. + # + # `auto` allows the model to pick from among the allowed tools and generate a + # message. 
+ # + # `required` requires the model to call one or more of the allowed tools. + module Mode + extend OpenAI::Internal::Type::Enum + + TaggedSymbol = + T.type_alias do + T.all(Symbol, OpenAI::Responses::ToolChoiceAllowed::Mode) + end + OrSymbol = T.type_alias { T.any(Symbol, String) } + + AUTO = + T.let( + :auto, + OpenAI::Responses::ToolChoiceAllowed::Mode::TaggedSymbol + ) + REQUIRED = + T.let( + :required, + OpenAI::Responses::ToolChoiceAllowed::Mode::TaggedSymbol + ) + + sig do + override.returns( + T::Array[OpenAI::Responses::ToolChoiceAllowed::Mode::TaggedSymbol] + ) + end + def self.values + end + end + end + end + end +end diff --git a/rbi/openai/models/responses/tool_choice_custom.rbi b/rbi/openai/models/responses/tool_choice_custom.rbi new file mode 100644 index 00000000..d3944c11 --- /dev/null +++ b/rbi/openai/models/responses/tool_choice_custom.rbi @@ -0,0 +1,39 @@ +# typed: strong + +module OpenAI + module Models + module Responses + class ToolChoiceCustom < OpenAI::Internal::Type::BaseModel + OrHash = + T.type_alias do + T.any( + OpenAI::Responses::ToolChoiceCustom, + OpenAI::Internal::AnyHash + ) + end + + # The name of the custom tool to call. + sig { returns(String) } + attr_accessor :name + + # For custom tool calling, the type is always `custom`. + sig { returns(Symbol) } + attr_accessor :type + + # Use this option to force the model to call a specific custom tool. + sig { params(name: String, type: Symbol).returns(T.attached_class) } + def self.new( + # The name of the custom tool to call. + name:, + # For custom tool calling, the type is always `custom`. 
+ type: :custom + ) + end + + sig { override.returns({ name: String, type: Symbol }) } + def to_hash + end + end + end + end +end diff --git a/rbi/openai/models/vector_store_search_params.rbi b/rbi/openai/models/vector_store_search_params.rbi index 3d78d7c9..74b8d1bd 100644 --- a/rbi/openai/models/vector_store_search_params.rbi +++ b/rbi/openai/models/vector_store_search_params.rbi @@ -157,6 +157,7 @@ module OpenAI ) end + # Enable re-ranking; set to `none` to disable, which can help reduce latency. sig do returns( T.nilable( @@ -188,7 +189,11 @@ module OpenAI score_threshold: Float ).returns(T.attached_class) end - def self.new(ranker: nil, score_threshold: nil) + def self.new( + # Enable re-ranking; set to `none` to disable, which can help reduce latency. + ranker: nil, + score_threshold: nil + ) end sig do @@ -203,6 +208,7 @@ module OpenAI def to_hash end + # Enable re-ranking; set to `none` to disable, which can help reduce latency. module Ranker extend OpenAI::Internal::Type::Enum @@ -215,6 +221,11 @@ module OpenAI end OrSymbol = T.type_alias { T.any(Symbol, String) } + NONE = + T.let( + :none, + OpenAI::VectorStoreSearchParams::RankingOptions::Ranker::TaggedSymbol + ) AUTO = T.let( :auto, diff --git a/rbi/openai/resources/beta/assistants.rbi b/rbi/openai/resources/beta/assistants.rbi index d58ecbb1..6489d48e 100644 --- a/rbi/openai/resources/beta/assistants.rbi +++ b/rbi/openai/resources/beta/assistants.rbi @@ -60,12 +60,11 @@ module OpenAI metadata: nil, # The name of the assistant. The maximum length is 256 characters. name: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -188,12 +187,11 @@ module OpenAI model: nil, # The name of the assistant. The maximum length is 256 characters. name: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/resources/beta/threads/runs.rbi b/rbi/openai/resources/beta/threads/runs.rbi index ba97e4db..bab46d8b 100644 --- a/rbi/openai/resources/beta/threads/runs.rbi +++ b/rbi/openai/resources/beta/threads/runs.rbi @@ -123,12 +123,11 @@ module OpenAI # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) # during tool use. parallel_tool_calls: nil, - # Body param: **o-series models only** - # - # Constrains effort on reasoning for + # Body param: Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. 
Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Body param: Specifies the format that the model must output. Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -303,12 +302,11 @@ module OpenAI # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) # during tool use. parallel_tool_calls: nil, - # Body param: **o-series models only** - # - # Constrains effort on reasoning for + # Body param: Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # Body param: Specifies the format that the model must output. 
Compatible with # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/rbi/openai/resources/chat/completions.rbi b/rbi/openai/resources/chat/completions.rbi index 2df75cd8..3dfda2d3 100644 --- a/rbi/openai/resources/chat/completions.rbi +++ b/rbi/openai/resources/chat/completions.rbi @@ -89,18 +89,25 @@ module OpenAI tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash ), tools: T::Array[ T.any( - OpenAI::Chat::ChatCompletionTool::OrHash, - OpenAI::StructuredOutput::JsonSchemaConverter + OpenAI::StructuredOutput::JsonSchemaConverter, + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash ) ], top_logprobs: T.nilable(Integer), top_p: T.nilable(Float), user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash, stream: T.noreturn, @@ -210,12 +217,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # An object specifying the format that the model must output. 
# @@ -286,9 +292,9 @@ module OpenAI # `none` is the default when no tools are present. `auto` is the default if tools # are present. tool_choice: nil, - # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. + # A list of tools the model may call. You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. @@ -306,6 +312,10 @@ module OpenAI # similar requests and to help OpenAI detect and prevent abuse. # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). user: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil, # This tool searches the web for relevant results to use in a response. Learn more # about the # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
@@ -398,12 +408,24 @@ module OpenAI tool_choice: T.any( OpenAI::Chat::ChatCompletionToolChoiceOption::Auto::OrSymbol, - OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash + OpenAI::Chat::ChatCompletionAllowedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoice::OrHash, + OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::OrHash ), - tools: T::Array[OpenAI::Chat::ChatCompletionTool::OrHash], + tools: + T::Array[ + T.any( + OpenAI::Chat::ChatCompletionFunctionTool::OrHash, + OpenAI::Chat::ChatCompletionCustomTool::OrHash + ) + ], top_logprobs: T.nilable(Integer), top_p: T.nilable(Float), user: String, + verbosity: + T.nilable( + OpenAI::Chat::CompletionCreateParams::Verbosity::OrSymbol + ), web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions::OrHash, stream: T.noreturn, @@ -513,12 +535,11 @@ module OpenAI # hit rates. Replaces the `user` field. # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). prompt_cache_key: nil, - # **o-series models only** - # # Constrains effort on reasoning for # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - # result in faster responses and fewer tokens used on reasoning in a response. + # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + # effort can result in faster responses and fewer tokens used on reasoning in a + # response. reasoning_effort: nil, # An object specifying the format that the model must output. # @@ -589,9 +610,9 @@ module OpenAI # `none` is the default when no tools are present. `auto` is the default if tools # are present. tool_choice: nil, - # A list of tools the model may call. Currently, only functions are supported as a - # tool. Use this to provide a list of functions the model may generate JSON inputs - # for. A max of 128 functions are supported. + # A list of tools the model may call. 
You can provide either + # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + # or [function tools](https://platform.openai.com/docs/guides/function-calling). tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. @@ -609,6 +630,10 @@ module OpenAI # similar requests and to help OpenAI detect and prevent abuse. # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). user: nil, + # Constrains the verbosity of the model's response. Lower values will result in + # more concise responses, while higher values will result in more verbose + # responses. Currently supported values are `low`, `medium`, and `high`. + verbosity: nil, # This tool searches the web for relevant results to use in a response. Learn more # about the # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). 
diff --git a/rbi/openai/resources/responses.rbi b/rbi/openai/resources/responses.rbi index ab8d97e2..7a2577b3 100644 --- a/rbi/openai/resources/responses.rbi +++ b/rbi/openai/resources/responses.rbi @@ -48,6 +48,10 @@ module OpenAI OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol ), store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ), temperature: T.nilable(Float), text: T.any( @@ -57,9 +61,11 @@ module OpenAI tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, - OpenAI::Responses::ToolChoiceMcp::OrHash + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash ), tools: T::Array[ @@ -71,6 +77,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], @@ -188,6 +195,8 @@ module OpenAI service_tier: nil, # Whether to store the generated model response for later retrieval via API. store: nil, + # Options for streaming responses. Only set this when you set `stream: true`. + stream_options: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will # make the output more random, while lower values like 0.2 will make it more # focused and deterministic. We generally recommend altering this or `top_p` but @@ -215,8 +224,10 @@ module OpenAI # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. Learn more about + # the model to call your own code with strongly typed arguments and outputs. 
+ # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. @@ -290,6 +301,10 @@ module OpenAI OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol ), store: T.nilable(T::Boolean), + stream_options: + T.nilable( + OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash + ), temperature: T.nilable(Float), text: T.nilable( @@ -301,9 +316,11 @@ module OpenAI tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, OpenAI::Responses::ToolChoiceTypes::OrHash, OpenAI::Responses::ToolChoiceFunction::OrHash, - OpenAI::Responses::ToolChoiceMcp::OrHash + OpenAI::Responses::ToolChoiceMcp::OrHash, + OpenAI::Responses::ToolChoiceCustom::OrHash ), tools: T::Array[ @@ -315,6 +332,7 @@ module OpenAI OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, + OpenAI::Responses::CustomTool::OrHash, OpenAI::Responses::WebSearchTool::OrHash ) ], @@ -436,6 +454,8 @@ module OpenAI service_tier: nil, # Whether to store the generated model response for later retrieval via API. store: nil, + # Options for streaming responses. Only set this when you set `stream: true`. + stream_options: nil, # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will # make the output more random, while lower values like 0.2 will make it more # focused and deterministic. We generally recommend altering this or `top_p` but @@ -463,8 +483,10 @@ module OpenAI # Learn more about # [built-in tools](https://platform.openai.com/docs/guides/tools). # - **Function calls (custom tools)**: Functions that are defined by you, enabling - # the model to call your own code. 
Learn more about + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use custom tools to call your own code. tools: nil, # An integer between 0 and 20 specifying the number of most likely tokens to # return at each token position, each with an associated log probability. @@ -498,12 +520,30 @@ module OpenAI # See {OpenAI::Resources::Responses#create} for non-streaming counterpart. # - # Creates a model response with a higher-level streaming interface that provides - # helper methods for processing events and aggregating stream outputs. + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. 
sig do params( + background: T.nilable(T::Boolean), + include: + T.nilable( + T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] + ), input: T.nilable(OpenAI::Responses::ResponseCreateParams::Input::Variants), + instructions: T.nilable(String), + max_output_tokens: T.nilable(Integer), + max_tool_calls: T.nilable(Integer), + metadata: T.nilable(T::Hash[Symbol, String]), model: T.nilable( T.any( @@ -512,18 +552,12 @@ module OpenAI OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol ) ), - background: T.nilable(T::Boolean), - include: - T.nilable( - T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol] - ), - instructions: T.nilable(String), - max_output_tokens: T.nilable(Integer), - metadata: T.nilable(T::Hash[Symbol, String]), parallel_tool_calls: T.nilable(T::Boolean), previous_response_id: T.nilable(String), prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash), + prompt_cache_key: String, reasoning: T.nilable(OpenAI::Reasoning::OrHash), + safety_identifier: String, service_tier: T.nilable( OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol @@ -531,20 +565,25 @@ module OpenAI store: T.nilable(T::Boolean), temperature: T.nilable(Float), text: - T.any( - OpenAI::Responses::ResponseTextConfig::OrHash, - OpenAI::StructuredOutput::JsonSchemaConverter + T.nilable( + T.any( + OpenAI::Responses::ResponseTextConfig::OrHash, + OpenAI::StructuredOutput::JsonSchemaConverter + ) ), tool_choice: T.any( OpenAI::Responses::ToolChoiceOptions::OrSymbol, + OpenAI::Responses::ToolChoiceAllowed::OrHash, OpenAI::Responses::ToolChoiceTypes::OrHash, - OpenAI::Responses::ToolChoiceFunction::OrHash + OpenAI::Responses::ToolChoiceFunction::OrHash, + OpenAI::Responses::ToolChoiceMcp::OrHash ), tools: T.nilable( T::Array[ T.any( + OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Responses::FunctionTool::OrHash, OpenAI::Responses::FileSearchTool::OrHash, OpenAI::Responses::ComputerTool::OrHash, @@ -552,65 +591,184 @@ module OpenAI 
OpenAI::Responses::Tool::CodeInterpreter::OrHash, OpenAI::Responses::Tool::ImageGeneration::OrHash, OpenAI::Responses::Tool::LocalShell::OrHash, - OpenAI::Responses::WebSearchTool::OrHash, - OpenAI::StructuredOutput::JsonSchemaConverter + OpenAI::Responses::WebSearchTool::OrHash ) ] ), + top_logprobs: T.nilable(Integer), top_p: T.nilable(Float), truncation: T.nilable( OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol ), - user: T.nilable(String), - starting_after: T.nilable(Integer), - request_options: T.nilable(OpenAI::RequestOptions::OrHash) + user: String, + request_options: OpenAI::RequestOptions::OrHash ).returns(OpenAI::Streaming::ResponseStream) end def stream( - # Text, image, or file inputs to the model, used to generate a response. - input: nil, - # Model ID used to generate the response, like `gpt-4o` or `o3`. - model: nil, # Whether to run the model response in the background. + # [Learn more](https://platform.openai.com/docs/guides/background). background: nil, - # Specify additional output data to include in the model response. + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `code_interpreter_call.outputs`: Includes the outputs of python code execution + # in code interpreter tool call items. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `message.output_text.logprobs`: Include logprobs with assistant messages. + # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + # tokens in reasoning item outputs. 
This enables reasoning items to be used in + # multi-turn conversations when using the Responses API statelessly (like when + # the `store` parameter is set to `false`, or when an organization is enrolled + # in the zero data retention program). include: nil, + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) + input: nil, # A system (or developer) message inserted into the model's context. + # + # When using along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple to + # swap out system (or developer) messages in new responses. instructions: nil, - # An upper bound for the number of tokens that can be generated for a response. + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). max_output_tokens: nil, - # Set of 16 key-value pairs that can be attached to an object. + # The maximum number of total calls to built-in tools that can be processed in a + # response. This maximum number applies across all built-in tool calls, not per + # individual tool. Any further attempts to call a tool by the model will be + # ignored. + max_tool_calls: nil, + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. 
+ # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. metadata: nil, + # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. + model: nil, # Whether to allow the model to run tool calls in parallel. parallel_tool_calls: nil, # The unique ID of the previous response to the model. Use this to create - # multi-turn conversations. + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). previous_response_id: nil, # Reference to a prompt template and its variables. + # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). prompt: nil, - # Configuration options for reasoning models. + # Used by OpenAI to cache responses for similar requests to optimize your cache + # hit rates. Replaces the `user` field. + # [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + prompt_cache_key: nil, + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). reasoning: nil, - # Specifies the latency tier to use for processing the request. + # A stable identifier used to help detect users of your application that may be + # violating OpenAI's usage policies. The IDs should be a string that uniquely + # identifies each user. We recommend hashing their username or email address, in + # order to avoid sending us any identifying information. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + safety_identifier: nil, + # Specifies the processing type used for serving the request. 
+ # + # - If set to 'auto', then the request will be processed with the service tier + # configured in the Project settings. Unless otherwise configured, the Project + # will use 'default'. + # - If set to 'default', then the request will be processed with the standard + # pricing and performance for the selected model. + # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or + # 'priority', then the request will be processed with the corresponding service + # tier. [Contact sales](https://openai.com/contact-sales) to learn more about + # Priority processing. + # - When not set, the default behavior is 'auto'. + # + # When the `service_tier` parameter is set, the response body will include the + # `service_tier` value based on the processing mode actually used to serve the + # request. This response value may be different from the value set in the + # parameter. service_tier: nil, # Whether to store the generated model response for later retrieval via API. store: nil, - # What sampling temperature to use, between 0 and 2. + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. temperature: nil, - # Configuration options for a text response from the model. + # Configuration options for a text response from the model. Can be plain text, + # structured JSON data, or text that conforms to a custom grammar. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + # - [Custom grammars](https://platform.openai.com/docs/guides/custom-grammars) text: nil, - # How the model should select which tool (or tools) to use when generating a response. 
+ # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. tool_choice: nil, - # An array of tools the model may call while generating a response. + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Custom tools**: Free form tools which the model can call with flexible + # inputs and outputs. Learn more about + # [custom tools](https://platform.openai.com/docs/guides/custom-tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code with strongly typed arguments and outputs. + # Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). + # You can also use + # [custom tools](https://platform.openai.com/docs/guides/custom-tools) to call + # your own code. tools: nil, - # An alternative to sampling with temperature, called nucleus sampling. + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + top_logprobs: nil, + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. 
+ # + # We generally recommend altering this or `temperature` but not both. top_p: nil, # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. truncation: nil, - # A stable identifier for your end-users. + # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + # `prompt_cache_key` instead to maintain caching optimizations. A stable + # identifier for your end-users. Used to boost cache hit rates by better bucketing + # similar requests and to help OpenAI detect and prevent abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). user: nil, - # The sequence number of the event after which to start streaming (for resuming streams). - starting_after: nil, request_options: {} ) end @@ -622,6 +780,7 @@ module OpenAI params( response_id: String, include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, starting_after: Integer, stream: T.noreturn, request_options: OpenAI::RequestOptions::OrHash @@ -633,6 +792,13 @@ module OpenAI # Additional fields to include in the response. See the `include` parameter for # Response creation above for more information. include: nil, + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. 
You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, # The sequence number of the event after which to start streaming. starting_after: nil, # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or @@ -649,6 +815,7 @@ module OpenAI params( response_id: String, include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol], + include_obfuscation: T::Boolean, starting_after: Integer, stream: T.noreturn, request_options: OpenAI::RequestOptions::OrHash @@ -664,6 +831,13 @@ module OpenAI # Additional fields to include in the response. See the `include` parameter for # Response creation above for more information. include: nil, + # When true, stream obfuscation will be enabled. Stream obfuscation adds random + # characters to an `obfuscation` field on streaming delta events to normalize + # payload sizes as a mitigation to certain side-channel attacks. These obfuscation + # fields are included by default, but add a small amount of overhead to the data + # stream. You can set `include_obfuscation` to false to optimize for bandwidth if + # you trust the network links between your application and the OpenAI API. + include_obfuscation: nil, # The sequence number of the event after which to start streaming. starting_after: nil, # There is no need to provide `stream:`. 
Instead, use `#retrieve_streaming` or diff --git a/scripts/mock b/scripts/mock index d2814ae6..0b28f6ea 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & # Wait for server to come online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" fi diff --git a/sig/openai/internal/transport/base_client.rbs b/sig/openai/internal/transport/base_client.rbs index 109af718..db2e8ff4 100644 --- a/sig/openai/internal/transport/base_client.rbs +++ b/sig/openai/internal/transport/base_client.rbs @@ -99,7 +99,7 @@ module OpenAI retry_count: Integer ) -> Float - private def send_request: ( + def send_request: ( OpenAI::Internal::Transport::BaseClient::request_input request, redirect_count: Integer, retry_count: Integer, diff --git a/sig/openai/models.rbs b/sig/openai/models.rbs index 1c5e1e9d..67856129 100644 --- a/sig/openai/models.rbs +++ b/sig/openai/models.rbs @@ -53,6 +53,8 @@ module OpenAI class CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse + module CustomToolInputFormat = OpenAI::Models::CustomToolInputFormat + class Embedding = OpenAI::Models::Embedding class EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams @@ -167,6 +169,10 @@ module OpenAI class ResponseFormatText = OpenAI::Models::ResponseFormatText + class ResponseFormatTextGrammar = OpenAI::Models::ResponseFormatTextGrammar + + class ResponseFormatTextPython = OpenAI::Models::ResponseFormatTextPython + module Responses = OpenAI::Models::Responses module ResponsesModel = OpenAI::Models::ResponsesModel diff 
--git a/sig/openai/models/beta/assistant_update_params.rbs b/sig/openai/models/beta/assistant_update_params.rbs index 6ee6405d..9d1254f3 100644 --- a/sig/openai/models/beta/assistant_update_params.rbs +++ b/sig/openai/models/beta/assistant_update_params.rbs @@ -83,6 +83,12 @@ module OpenAI type model = String + | :"gpt-5" + | :"gpt-5-mini" + | :"gpt-5-nano" + | :"gpt-5-2025-08-07" + | :"gpt-5-mini-2025-08-07" + | :"gpt-5-nano-2025-08-07" | :"gpt-4.1" | :"gpt-4.1-mini" | :"gpt-4.1-nano" @@ -125,6 +131,12 @@ module OpenAI def self?.variants: -> ::Array[OpenAI::Models::Beta::AssistantUpdateParams::model] + GPT_5: :"gpt-5" + GPT_5_MINI: :"gpt-5-mini" + GPT_5_NANO: :"gpt-5-nano" + GPT_5_2025_08_07: :"gpt-5-2025-08-07" + GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07" + GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07" GPT_4_1: :"gpt-4.1" GPT_4_1_MINI: :"gpt-4.1-mini" GPT_4_1_NANO: :"gpt-4.1-nano" diff --git a/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs b/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs new file mode 100644 index 00000000..6de6e1c5 --- /dev/null +++ b/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs @@ -0,0 +1,29 @@ +module OpenAI + module Models + class ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice + + module Chat + type chat_completion_allowed_tool_choice = + { + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools, + type: :allowed_tools + } + + class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel + attr_accessor allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools + + attr_accessor type: :allowed_tools + + def initialize: ( + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools, + ?type: :allowed_tools + ) -> void + + def to_hash: -> { + allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools, + type: :allowed_tools + } + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_allowed_tools.rbs 
b/sig/openai/models/chat/chat_completion_allowed_tools.rbs new file mode 100644 index 00000000..0744b34f --- /dev/null +++ b/sig/openai/models/chat/chat_completion_allowed_tools.rbs @@ -0,0 +1,38 @@ +module OpenAI + module Models + module Chat + type chat_completion_allowed_tools = + { + mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode, + tools: ::Array[::Hash[Symbol, top]] + } + + class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel + attr_accessor mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode + + attr_accessor tools: ::Array[::Hash[Symbol, top]] + + def initialize: ( + mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode, + tools: ::Array[::Hash[Symbol, top]] + ) -> void + + def to_hash: -> { + mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode, + tools: ::Array[::Hash[Symbol, top]] + } + + type mode = :auto | :required + + module Mode + extend OpenAI::Internal::Type::Enum + + AUTO: :auto + REQUIRED: :required + + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAllowedTools::mode] + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_assistant_message_param.rbs b/sig/openai/models/chat/chat_completion_assistant_message_param.rbs index afc4b011..0b86be4a 100644 --- a/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +++ b/sig/openai/models/chat/chat_completion_assistant_message_param.rbs @@ -11,7 +11,7 @@ module OpenAI function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?, name: String, refusal: String?, - tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] } class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel @@ -29,11 +29,11 @@ module OpenAI attr_accessor refusal: String? - attr_reader tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]? 
+ attr_reader tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]? def tool_calls=: ( - ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] - ) -> ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] + ) -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] def initialize: ( ?audio: OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio?, @@ -41,7 +41,7 @@ module OpenAI ?function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?, ?name: String, ?refusal: String?, - ?tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall], + ?tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call], ?role: :assistant ) -> void @@ -52,7 +52,7 @@ module OpenAI function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?, name: String, refusal: String?, - tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] } type audio = { id: String } diff --git a/sig/openai/models/chat/chat_completion_custom_tool.rbs b/sig/openai/models/chat/chat_completion_custom_tool.rbs new file mode 100644 index 00000000..ac57cae7 --- /dev/null +++ b/sig/openai/models/chat/chat_completion_custom_tool.rbs @@ -0,0 +1,137 @@ +module OpenAI + module Models + class ChatCompletionCustomTool = Chat::ChatCompletionCustomTool + + module Chat + type chat_completion_custom_tool = + { + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + type: :custom + } + + class ChatCompletionCustomTool < OpenAI::Internal::Type::BaseModel + attr_accessor custom: OpenAI::Chat::ChatCompletionCustomTool::Custom + + attr_accessor type: :custom + + def initialize: ( + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + ?type: :custom + ) -> void + + def to_hash: -> { + custom: OpenAI::Chat::ChatCompletionCustomTool::Custom, + type: :custom + } + + type custom = + { + 
name: String, + description: String, + format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_reader description: String? + + def description=: (String) -> String + + attr_reader format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_? + + def format_=: ( + OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + ) -> OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + + def initialize: ( + name: String, + ?description: String, + ?format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + ) -> void + + def to_hash: -> { + name: String, + description: String, + format_: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_ + } + + type format_ = + OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Text + | OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar + + module Format + extend OpenAI::Internal::Type::Union + + type text = { type: :text } + + class Text < OpenAI::Internal::Type::BaseModel + attr_accessor type: :text + + def initialize: (?type: :text) -> void + + def to_hash: -> { type: :text } + end + + type grammar = + { + grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + type: :grammar + } + + class Grammar < OpenAI::Internal::Type::BaseModel + attr_accessor grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar + + attr_accessor type: :grammar + + def initialize: ( + grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + ?type: :grammar + ) -> void + + def to_hash: -> { + grammar: OpenAI::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar, + type: :grammar + } + + type grammar = + { + definition: String, + syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + } + + class Grammar < OpenAI::Internal::Type::BaseModel + 
attr_accessor definition: String + + attr_accessor syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + + def initialize: ( + definition: String, + syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + ) -> void + + def to_hash: -> { + definition: String, + syntax: OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax + } + + type syntax = :lark | :regex + + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK: :lark + REGEX: :regex + + def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::Format::Grammar::Grammar::syntax] + end + end + end + + def self?.variants: -> ::Array[OpenAI::Models::Chat::ChatCompletionCustomTool::Custom::format_] + end + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_function_tool.rbs b/sig/openai/models/chat/chat_completion_function_tool.rbs new file mode 100644 index 00000000..03d0abce --- /dev/null +++ b/sig/openai/models/chat/chat_completion_function_tool.rbs @@ -0,0 +1,26 @@ +module OpenAI + module Models + class ChatCompletionFunctionTool = Chat::ChatCompletionFunctionTool + + module Chat + type chat_completion_function_tool = + { function: OpenAI::FunctionDefinition, type: :function } + + class ChatCompletionFunctionTool < OpenAI::Internal::Type::BaseModel + attr_accessor function: OpenAI::FunctionDefinition + + attr_accessor type: :function + + def initialize: ( + function: OpenAI::FunctionDefinition, + ?type: :function + ) -> void + + def to_hash: -> { + function: OpenAI::FunctionDefinition, + type: :function + } + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_message.rbs b/sig/openai/models/chat/chat_completion_message.rbs index 7225d0a0..dbf4b405 100644 --- a/sig/openai/models/chat/chat_completion_message.rbs +++ b/sig/openai/models/chat/chat_completion_message.rbs @@ -11,7 +11,7 @@ module OpenAI annotations: 
::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], audio: OpenAI::Chat::ChatCompletionAudio?, function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] } class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel @@ -35,11 +35,11 @@ module OpenAI OpenAI::Chat::ChatCompletionMessage::FunctionCall ) -> OpenAI::Chat::ChatCompletionMessage::FunctionCall - attr_reader tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]? + attr_reader tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]? def tool_calls=: ( - ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] - ) -> ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] + ) -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] def initialize: ( content: String?, @@ -47,7 +47,7 @@ module OpenAI ?annotations: ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], ?audio: OpenAI::Chat::ChatCompletionAudio?, ?function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, - ?tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall], + ?tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call], ?role: :assistant ) -> void @@ -58,7 +58,7 @@ module OpenAI annotations: ::Array[OpenAI::Chat::ChatCompletionMessage::Annotation], audio: OpenAI::Chat::ChatCompletionAudio?, function_call: OpenAI::Chat::ChatCompletionMessage::FunctionCall, - tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall] + tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] } type annotation = diff --git a/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs b/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs new file mode 100644 index 00000000..b3852753 --- /dev/null +++ 
b/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs @@ -0,0 +1,46 @@ +module OpenAI + module Models + class ChatCompletionMessageCustomToolCall = Chat::ChatCompletionMessageCustomToolCall + + module Chat + type chat_completion_message_custom_tool_call = + { + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + type: :custom + } + + class ChatCompletionMessageCustomToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom + + attr_accessor type: :custom + + def initialize: ( + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + ?type: :custom + ) -> void + + def to_hash: -> { + id: String, + custom: OpenAI::Chat::ChatCompletionMessageCustomToolCall::Custom, + type: :custom + } + + type custom = { input: String, name: String } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor input: String + + attr_accessor name: String + + def initialize: (input: String, name: String) -> void + + def to_hash: -> { input: String, name: String } + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs b/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs new file mode 100644 index 00000000..ebd90f7f --- /dev/null +++ b/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs @@ -0,0 +1,46 @@ +module OpenAI + module Models + class ChatCompletionMessageFunctionToolCall = Chat::ChatCompletionMessageFunctionToolCall + + module Chat + type chat_completion_message_function_tool_call = + { + id: String, + function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + type: :function + } + + class ChatCompletionMessageFunctionToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor id: String + + attr_accessor function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function + + attr_accessor 
type: :function + + def initialize: ( + id: String, + function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + ?type: :function + ) -> void + + def to_hash: -> { + id: String, + function: OpenAI::Chat::ChatCompletionMessageFunctionToolCall::Function, + type: :function + } + + type function = { arguments: String, name: String } + + class Function < OpenAI::Internal::Type::BaseModel + attr_accessor arguments: String + + attr_accessor name: String + + def initialize: (arguments: String, name: String) -> void + + def to_hash: -> { arguments: String, name: String } + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_message_tool_call.rbs b/sig/openai/models/chat/chat_completion_message_tool_call.rbs index c787ea9c..446c9f1e 100644 --- a/sig/openai/models/chat/chat_completion_message_tool_call.rbs +++ b/sig/openai/models/chat/chat_completion_message_tool_call.rbs @@ -1,45 +1,16 @@ module OpenAI module Models - class ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall + module ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall module Chat type chat_completion_message_tool_call = - { - id: String, - function: OpenAI::Chat::ChatCompletionMessageToolCall::Function, - type: :function - } + OpenAI::Chat::ChatCompletionMessageFunctionToolCall + | OpenAI::Chat::ChatCompletionMessageCustomToolCall - class ChatCompletionMessageToolCall < OpenAI::Internal::Type::BaseModel - attr_accessor id: String + module ChatCompletionMessageToolCall + extend OpenAI::Internal::Type::Union - attr_accessor function: OpenAI::Chat::ChatCompletionMessageToolCall::Function - - attr_accessor type: :function - - def initialize: ( - id: String, - function: OpenAI::Chat::ChatCompletionMessageToolCall::Function, - ?type: :function - ) -> void - - def to_hash: -> { - id: String, - function: OpenAI::Chat::ChatCompletionMessageToolCall::Function, - type: :function - } - - type function = { arguments: String, name: String } - - 
class Function < OpenAI::Internal::Type::BaseModel - attr_accessor arguments: String - - attr_accessor name: String - - def initialize: (arguments: String, name: String) -> void - - def to_hash: -> { arguments: String, name: String } - end + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call] end end end diff --git a/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs b/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs new file mode 100644 index 00000000..ec6ae0e9 --- /dev/null +++ b/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs @@ -0,0 +1,39 @@ +module OpenAI + module Models + class ChatCompletionNamedToolChoiceCustom = Chat::ChatCompletionNamedToolChoiceCustom + + module Chat + type chat_completion_named_tool_choice_custom = + { + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + type: :custom + } + + class ChatCompletionNamedToolChoiceCustom < OpenAI::Internal::Type::BaseModel + attr_accessor custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom + + attr_accessor type: :custom + + def initialize: ( + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + ?type: :custom + ) -> void + + def to_hash: -> { + custom: OpenAI::Chat::ChatCompletionNamedToolChoiceCustom::Custom, + type: :custom + } + + type custom = { name: String } + + class Custom < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + def initialize: (name: String) -> void + + def to_hash: -> { name: String } + end + end + end + end +end diff --git a/sig/openai/models/chat/chat_completion_stream_options.rbs b/sig/openai/models/chat/chat_completion_stream_options.rbs index 6905d394..7217a030 100644 --- a/sig/openai/models/chat/chat_completion_stream_options.rbs +++ b/sig/openai/models/chat/chat_completion_stream_options.rbs @@ -3,16 +3,24 @@ module OpenAI class ChatCompletionStreamOptions = Chat::ChatCompletionStreamOptions module Chat - type 
chat_completion_stream_options = { include_usage: bool } + type chat_completion_stream_options = + { include_obfuscation: bool, include_usage: bool } class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel + attr_reader include_obfuscation: bool? + + def include_obfuscation=: (bool) -> bool + attr_reader include_usage: bool? def include_usage=: (bool) -> bool - def initialize: (?include_usage: bool) -> void + def initialize: ( + ?include_obfuscation: bool, + ?include_usage: bool + ) -> void - def to_hash: -> { include_usage: bool } + def to_hash: -> { include_obfuscation: bool, include_usage: bool } end end end diff --git a/sig/openai/models/chat/chat_completion_tool.rbs b/sig/openai/models/chat/chat_completion_tool.rbs index 23153c68..34abaf37 100644 --- a/sig/openai/models/chat/chat_completion_tool.rbs +++ b/sig/openai/models/chat/chat_completion_tool.rbs @@ -1,25 +1,16 @@ module OpenAI module Models - class ChatCompletionTool = Chat::ChatCompletionTool + module ChatCompletionTool = Chat::ChatCompletionTool module Chat type chat_completion_tool = - { function: OpenAI::FunctionDefinition, type: :function } + OpenAI::Chat::ChatCompletionFunctionTool + | OpenAI::Chat::ChatCompletionCustomTool - class ChatCompletionTool < OpenAI::Internal::Type::BaseModel - attr_accessor function: OpenAI::FunctionDefinition + module ChatCompletionTool + extend OpenAI::Internal::Type::Union - attr_accessor type: :function - - def initialize: ( - function: OpenAI::FunctionDefinition, - ?type: :function - ) -> void - - def to_hash: -> { - function: OpenAI::FunctionDefinition, - type: :function - } + def self?.variants: -> ::Array[OpenAI::Models::Chat::chat_completion_tool] end end end diff --git a/sig/openai/models/chat/chat_completion_tool_choice_option.rbs b/sig/openai/models/chat/chat_completion_tool_choice_option.rbs index 523db9a4..383ccb44 100644 --- a/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +++ 
b/sig/openai/models/chat/chat_completion_tool_choice_option.rbs @@ -5,7 +5,9 @@ module OpenAI module Chat type chat_completion_tool_choice_option = OpenAI::Models::Chat::ChatCompletionToolChoiceOption::auto + | OpenAI::Chat::ChatCompletionAllowedToolChoice | OpenAI::Chat::ChatCompletionNamedToolChoice + | OpenAI::Chat::ChatCompletionNamedToolChoiceCustom module ChatCompletionToolChoiceOption extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/chat/completion_create_params.rbs b/sig/openai/models/chat/completion_create_params.rbs index 298d3d31..e02095c4 100644 --- a/sig/openai/models/chat/completion_create_params.rbs +++ b/sig/openai/models/chat/completion_create_params.rbs @@ -30,10 +30,11 @@ module OpenAI stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, temperature: Float?, tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - tools: ::Array[OpenAI::Chat::ChatCompletionTool], + tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], top_logprobs: Integer?, top_p: Float?, user: String, + verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions } & OpenAI::Internal::Type::request_parameters @@ -118,11 +119,11 @@ module OpenAI OpenAI::Models::Chat::chat_completion_tool_choice_option ) -> OpenAI::Models::Chat::chat_completion_tool_choice_option - attr_reader tools: ::Array[OpenAI::Chat::ChatCompletionTool]? + attr_reader tools: ::Array[OpenAI::Models::Chat::chat_completion_tool]? def tools=: ( - ::Array[OpenAI::Chat::ChatCompletionTool] - ) -> ::Array[OpenAI::Chat::ChatCompletionTool] + ::Array[OpenAI::Models::Chat::chat_completion_tool] + ) -> ::Array[OpenAI::Models::Chat::chat_completion_tool] attr_accessor top_logprobs: Integer? @@ -132,6 +133,8 @@ module OpenAI def user=: (String) -> String + attr_accessor verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity? 
+ attr_reader web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions? def web_search_options=: ( @@ -166,10 +169,11 @@ module OpenAI ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, ?temperature: Float?, ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - ?tools: ::Array[OpenAI::Chat::ChatCompletionTool], + ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], ?top_logprobs: Integer?, ?top_p: Float?, ?user: String, + ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, ?web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, ?request_options: OpenAI::request_opts ) -> void @@ -202,10 +206,11 @@ module OpenAI stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, temperature: Float?, tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - tools: ::Array[OpenAI::Chat::ChatCompletionTool], + tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], top_logprobs: Integer?, top_p: Float?, user: String, + verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, request_options: OpenAI::RequestOptions } @@ -318,6 +323,18 @@ module OpenAI StringArray: OpenAI::Internal::Type::Converter end + type verbosity = :low | :medium | :high + + module Verbosity + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + + def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::verbosity] + end + type web_search_options = { search_context_size: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::search_context_size, diff --git a/sig/openai/models/chat_model.rbs b/sig/openai/models/chat_model.rbs index bbe91426..afba9c16 100644 --- a/sig/openai/models/chat_model.rbs +++ b/sig/openai/models/chat_model.rbs @@ -1,7 +1,14 @@ module OpenAI module Models type chat_model = - :"gpt-4.1" + :"gpt-5" + | :"gpt-5-mini" + | 
:"gpt-5-nano" + | :"gpt-5-2025-08-07" + | :"gpt-5-mini-2025-08-07" + | :"gpt-5-nano-2025-08-07" + | :"gpt-5-chat-latest" + | :"gpt-4.1" | :"gpt-4.1-mini" | :"gpt-4.1-nano" | :"gpt-4.1-2025-04-14" @@ -60,6 +67,13 @@ module OpenAI module ChatModel extend OpenAI::Internal::Type::Enum + GPT_5: :"gpt-5" + GPT_5_MINI: :"gpt-5-mini" + GPT_5_NANO: :"gpt-5-nano" + GPT_5_2025_08_07: :"gpt-5-2025-08-07" + GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07" + GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07" + GPT_5_CHAT_LATEST: :"gpt-5-chat-latest" GPT_4_1: :"gpt-4.1" GPT_4_1_MINI: :"gpt-4.1-mini" GPT_4_1_NANO: :"gpt-4.1-nano" diff --git a/sig/openai/models/custom_tool_input_format.rbs b/sig/openai/models/custom_tool_input_format.rbs new file mode 100644 index 00000000..b0898e18 --- /dev/null +++ b/sig/openai/models/custom_tool_input_format.rbs @@ -0,0 +1,61 @@ +module OpenAI + module Models + type custom_tool_input_format = + OpenAI::CustomToolInputFormat::Text + | OpenAI::CustomToolInputFormat::Grammar + + module CustomToolInputFormat + extend OpenAI::Internal::Type::Union + + type text = { type: :text } + + class Text < OpenAI::Internal::Type::BaseModel + attr_accessor type: :text + + def initialize: (?type: :text) -> void + + def to_hash: -> { type: :text } + end + + type grammar = + { + definition: String, + syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax, + type: :grammar + } + + class Grammar < OpenAI::Internal::Type::BaseModel + attr_accessor definition: String + + attr_accessor syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax + + attr_accessor type: :grammar + + def initialize: ( + definition: String, + syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax, + ?type: :grammar + ) -> void + + def to_hash: -> { + definition: String, + syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax, + type: :grammar + } + + type syntax = :lark | :regex + + module Syntax + extend OpenAI::Internal::Type::Enum + + LARK: :lark + REGEX: :regex + 
+ def self?.values: -> ::Array[OpenAI::Models::CustomToolInputFormat::Grammar::syntax] + end + end + + def self?.variants: -> ::Array[OpenAI::Models::custom_tool_input_format] + end + end +end diff --git a/sig/openai/models/evals/create_eval_completions_run_data_source.rbs b/sig/openai/models/evals/create_eval_completions_run_data_source.rbs index a4e24364..d3806378 100644 --- a/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +++ b/sig/openai/models/evals/create_eval_completions_run_data_source.rbs @@ -335,7 +335,7 @@ module OpenAI response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, seed: Integer, temperature: Float, - tools: ::Array[OpenAI::Chat::ChatCompletionTool], + tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool], top_p: Float } @@ -358,11 +358,11 @@ module OpenAI def temperature=: (Float) -> Float - attr_reader tools: ::Array[OpenAI::Chat::ChatCompletionTool]? + attr_reader tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool]? def tools=: ( - ::Array[OpenAI::Chat::ChatCompletionTool] - ) -> ::Array[OpenAI::Chat::ChatCompletionTool] + ::Array[OpenAI::Chat::ChatCompletionFunctionTool] + ) -> ::Array[OpenAI::Chat::ChatCompletionFunctionTool] attr_reader top_p: Float? 
@@ -373,7 +373,7 @@ module OpenAI ?response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, ?seed: Integer, ?temperature: Float, - ?tools: ::Array[OpenAI::Chat::ChatCompletionTool], + ?tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool], ?top_p: Float ) -> void @@ -382,7 +382,7 @@ module OpenAI response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format, seed: Integer, temperature: Float, - tools: ::Array[OpenAI::Chat::ChatCompletionTool], + tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool], top_p: Float } diff --git a/sig/openai/models/reasoning_effort.rbs b/sig/openai/models/reasoning_effort.rbs index d449d634..2245e639 100644 --- a/sig/openai/models/reasoning_effort.rbs +++ b/sig/openai/models/reasoning_effort.rbs @@ -1,10 +1,11 @@ module OpenAI module Models - type reasoning_effort = :low | :medium | :high + type reasoning_effort = :minimal | :low | :medium | :high module ReasoningEffort extend OpenAI::Internal::Type::Enum + MINIMAL: :minimal LOW: :low MEDIUM: :medium HIGH: :high diff --git a/sig/openai/models/response_format_text_grammar.rbs b/sig/openai/models/response_format_text_grammar.rbs new file mode 100644 index 00000000..5a49c0bd --- /dev/null +++ b/sig/openai/models/response_format_text_grammar.rbs @@ -0,0 +1,15 @@ +module OpenAI + module Models + type response_format_text_grammar = { grammar: String, type: :grammar } + + class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel + attr_accessor grammar: String + + attr_accessor type: :grammar + + def initialize: (grammar: String, ?type: :grammar) -> void + + def to_hash: -> { grammar: String, type: :grammar } + end + end +end diff --git a/sig/openai/models/response_format_text_python.rbs b/sig/openai/models/response_format_text_python.rbs new file mode 100644 index 00000000..ac13e843 --- /dev/null +++ b/sig/openai/models/response_format_text_python.rbs @@ -0,0 +1,13 @@ +module 
OpenAI + module Models + type response_format_text_python = { type: :python } + + class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel + attr_accessor type: :python + + def initialize: (?type: :python) -> void + + def to_hash: -> { type: :python } + end + end +end diff --git a/sig/openai/models/responses/custom_tool.rbs b/sig/openai/models/responses/custom_tool.rbs new file mode 100644 index 00000000..6d529cea --- /dev/null +++ b/sig/openai/models/responses/custom_tool.rbs @@ -0,0 +1,43 @@ +module OpenAI + module Models + module Responses + type custom_tool = + { + name: String, + type: :custom, + description: String, + format_: OpenAI::Models::custom_tool_input_format + } + + class CustomTool < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_accessor type: :custom + + attr_reader description: String? + + def description=: (String) -> String + + attr_reader format_: OpenAI::Models::custom_tool_input_format? + + def format_=: ( + OpenAI::Models::custom_tool_input_format + ) -> OpenAI::Models::custom_tool_input_format + + def initialize: ( + name: String, + ?description: String, + ?format_: OpenAI::Models::custom_tool_input_format, + ?type: :custom + ) -> void + + def to_hash: -> { + name: String, + type: :custom, + description: String, + format_: OpenAI::Models::custom_tool_input_format + } + end + end + end +end diff --git a/sig/openai/models/responses/response.rbs b/sig/openai/models/responses/response.rbs index 5e6ab27f..6bf78a3c 100644 --- a/sig/openai/models/responses/response.rbs +++ b/sig/openai/models/responses/response.rbs @@ -220,9 +220,11 @@ module OpenAI type tool_choice = OpenAI::Models::Responses::tool_choice_options + | OpenAI::Responses::ToolChoiceAllowed | OpenAI::Responses::ToolChoiceTypes | OpenAI::Responses::ToolChoiceFunction | OpenAI::Responses::ToolChoiceMcp + | OpenAI::Responses::ToolChoiceCustom module ToolChoice extend OpenAI::Internal::Type::Union diff --git 
a/sig/openai/models/responses/response_create_params.rbs b/sig/openai/models/responses/response_create_params.rbs index bde15b88..83f641b4 100644 --- a/sig/openai/models/responses/response_create_params.rbs +++ b/sig/openai/models/responses/response_create_params.rbs @@ -19,6 +19,7 @@ module OpenAI safety_identifier: String, service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, store: bool?, + stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, temperature: Float?, text: OpenAI::Responses::ResponseTextConfig, tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, @@ -78,6 +79,8 @@ module OpenAI attr_accessor store: bool? + attr_accessor stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions? + attr_accessor temperature: Float? attr_reader text: OpenAI::Responses::ResponseTextConfig? @@ -125,6 +128,7 @@ module OpenAI ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, ?store: bool?, + ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, ?temperature: Float?, ?text: OpenAI::Responses::ResponseTextConfig, ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, @@ -153,6 +157,7 @@ module OpenAI safety_identifier: String, service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, store: bool?, + stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, temperature: Float?, text: OpenAI::Responses::ResponseTextConfig, tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, @@ -186,11 +191,25 @@ module OpenAI def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::service_tier] end + type stream_options = { include_obfuscation: bool } + + class StreamOptions < OpenAI::Internal::Type::BaseModel + attr_reader include_obfuscation: bool? 
+ + def include_obfuscation=: (bool) -> bool + + def initialize: (?include_obfuscation: bool) -> void + + def to_hash: -> { include_obfuscation: bool } + end + type tool_choice = OpenAI::Models::Responses::tool_choice_options + | OpenAI::Responses::ToolChoiceAllowed | OpenAI::Responses::ToolChoiceTypes | OpenAI::Responses::ToolChoiceFunction | OpenAI::Responses::ToolChoiceMcp + | OpenAI::Responses::ToolChoiceCustom module ToolChoice extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/responses/response_custom_tool_call.rbs b/sig/openai/models/responses/response_custom_tool_call.rbs new file mode 100644 index 00000000..16916b4f --- /dev/null +++ b/sig/openai/models/responses/response_custom_tool_call.rbs @@ -0,0 +1,44 @@ +module OpenAI + module Models + module Responses + type response_custom_tool_call = + { + call_id: String, + input: String, + name: String, + type: :custom_tool_call, + id: String + } + + class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel + attr_accessor call_id: String + + attr_accessor input: String + + attr_accessor name: String + + attr_accessor type: :custom_tool_call + + attr_reader id: String? 
+ + def id=: (String) -> String + + def initialize: ( + call_id: String, + input: String, + name: String, + ?id: String, + ?type: :custom_tool_call + ) -> void + + def to_hash: -> { + call_id: String, + input: String, + name: String, + type: :custom_tool_call, + id: String + } + end + end + end +end diff --git a/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs b/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs new file mode 100644 index 00000000..030f7237 --- /dev/null +++ b/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs @@ -0,0 +1,42 @@ +module OpenAI + module Models + module Responses + type response_custom_tool_call_input_delta_event = + { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.custom_tool_call_input.delta" + } + + class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel + attr_accessor delta: String + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.custom_tool_call_input.delta" + + def initialize: ( + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.custom_tool_call_input.delta" + ) -> void + + def to_hash: -> { + delta: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.custom_tool_call_input.delta" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs b/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs new file mode 100644 index 00000000..2378e7ae --- /dev/null +++ b/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs @@ -0,0 +1,42 @@ +module OpenAI + module Models + module Responses + type response_custom_tool_call_input_done_event = + { + input: String, + item_id: String, + 
output_index: Integer, + sequence_number: Integer, + type: :"response.custom_tool_call_input.done" + } + + class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel + attr_accessor input: String + + attr_accessor item_id: String + + attr_accessor output_index: Integer + + attr_accessor sequence_number: Integer + + attr_accessor type: :"response.custom_tool_call_input.done" + + def initialize: ( + input: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + ?type: :"response.custom_tool_call_input.done" + ) -> void + + def to_hash: -> { + input: String, + item_id: String, + output_index: Integer, + sequence_number: Integer, + type: :"response.custom_tool_call_input.done" + } + end + end + end +end diff --git a/sig/openai/models/responses/response_custom_tool_call_output.rbs b/sig/openai/models/responses/response_custom_tool_call_output.rbs new file mode 100644 index 00000000..d9c9486a --- /dev/null +++ b/sig/openai/models/responses/response_custom_tool_call_output.rbs @@ -0,0 +1,39 @@ +module OpenAI + module Models + module Responses + type response_custom_tool_call_output = + { + call_id: String, + output: String, + type: :custom_tool_call_output, + id: String + } + + class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel + attr_accessor call_id: String + + attr_accessor output: String + + attr_accessor type: :custom_tool_call_output + + attr_reader id: String? 
+ + def id=: (String) -> String + + def initialize: ( + call_id: String, + output: String, + ?id: String, + ?type: :custom_tool_call_output + ) -> void + + def to_hash: -> { + call_id: String, + output: String, + type: :custom_tool_call_output, + id: String + } + end + end + end +end diff --git a/sig/openai/models/responses/response_input_item.rbs b/sig/openai/models/responses/response_input_item.rbs index 35bf8908..08461d42 100644 --- a/sig/openai/models/responses/response_input_item.rbs +++ b/sig/openai/models/responses/response_input_item.rbs @@ -20,6 +20,8 @@ module OpenAI | OpenAI::Responses::ResponseInputItem::McpApprovalRequest | OpenAI::Responses::ResponseInputItem::McpApprovalResponse | OpenAI::Responses::ResponseInputItem::McpCall + | OpenAI::Responses::ResponseCustomToolCallOutput + | OpenAI::Responses::ResponseCustomToolCall | OpenAI::Responses::ResponseInputItem::ItemReference module ResponseInputItem diff --git a/sig/openai/models/responses/response_output_item.rbs b/sig/openai/models/responses/response_output_item.rbs index dc0254bd..13b7bedc 100644 --- a/sig/openai/models/responses/response_output_item.rbs +++ b/sig/openai/models/responses/response_output_item.rbs @@ -14,6 +14,7 @@ module OpenAI | OpenAI::Responses::ResponseOutputItem::McpCall | OpenAI::Responses::ResponseOutputItem::McpListTools | OpenAI::Responses::ResponseOutputItem::McpApprovalRequest + | OpenAI::Responses::ResponseCustomToolCall module ResponseOutputItem extend OpenAI::Internal::Type::Union diff --git a/sig/openai/models/responses/response_reasoning_item.rbs b/sig/openai/models/responses/response_reasoning_item.rbs index 620ee9dc..e4e79c73 100644 --- a/sig/openai/models/responses/response_reasoning_item.rbs +++ b/sig/openai/models/responses/response_reasoning_item.rbs @@ -6,6 +6,7 @@ module OpenAI id: String, summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary], type: :reasoning, + content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content], 
encrypted_content: String?, status: OpenAI::Models::Responses::ResponseReasoningItem::status } @@ -17,6 +18,12 @@ module OpenAI attr_accessor type: :reasoning + attr_reader content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content]? + + def content=: ( + ::Array[OpenAI::Responses::ResponseReasoningItem::Content] + ) -> ::Array[OpenAI::Responses::ResponseReasoningItem::Content] + attr_accessor encrypted_content: String? attr_reader status: OpenAI::Models::Responses::ResponseReasoningItem::status? @@ -28,6 +35,7 @@ module OpenAI def initialize: ( id: String, summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary], + ?content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content], ?encrypted_content: String?, ?status: OpenAI::Models::Responses::ResponseReasoningItem::status, ?type: :reasoning @@ -37,6 +45,7 @@ module OpenAI id: String, summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary], type: :reasoning, + content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content], encrypted_content: String?, status: OpenAI::Models::Responses::ResponseReasoningItem::status } @@ -53,6 +62,18 @@ module OpenAI def to_hash: -> { text: String, type: :summary_text } end + type content = { text: String, type: :reasoning_text } + + class Content < OpenAI::Internal::Type::BaseModel + attr_accessor text: String + + attr_accessor type: :reasoning_text + + def initialize: (text: String, ?type: :reasoning_text) -> void + + def to_hash: -> { text: String, type: :reasoning_text } + end + type status = :in_progress | :completed | :incomplete module Status diff --git a/sig/openai/models/responses/response_reasoning_summary_delta_event.rbs b/sig/openai/models/responses/response_reasoning_text_delta_event.rbs similarity index 50% rename from sig/openai/models/responses/response_reasoning_summary_delta_event.rbs rename to sig/openai/models/responses/response_reasoning_text_delta_event.rbs index 4e613606..a8d33a4f 100644 --- 
a/sig/openai/models/responses/response_reasoning_summary_delta_event.rbs +++ b/sig/openai/models/responses/response_reasoning_text_delta_event.rbs @@ -1,18 +1,20 @@ module OpenAI module Models module Responses - type response_reasoning_summary_delta_event = + type response_reasoning_text_delta_event = { - delta: top, + content_index: Integer, + delta: String, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, - type: :"response.reasoning_summary.delta" + type: :"response.reasoning_text.delta" } - class ResponseReasoningSummaryDeltaEvent < OpenAI::Internal::Type::BaseModel - attr_accessor delta: top + class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel + attr_accessor content_index: Integer + + attr_accessor delta: String attr_accessor item_id: String @@ -20,26 +22,24 @@ module OpenAI attr_accessor sequence_number: Integer - attr_accessor summary_index: Integer - - attr_accessor type: :"response.reasoning_summary.delta" + attr_accessor type: :"response.reasoning_text.delta" def initialize: ( - delta: top, + content_index: Integer, + delta: String, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, - ?type: :"response.reasoning_summary.delta" + ?type: :"response.reasoning_text.delta" ) -> void def to_hash: -> { - delta: top, + content_index: Integer, + delta: String, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, - type: :"response.reasoning_summary.delta" + type: :"response.reasoning_text.delta" } end end diff --git a/sig/openai/models/responses/response_reasoning_summary_done_event.rbs b/sig/openai/models/responses/response_reasoning_text_done_event.rbs similarity index 58% rename from sig/openai/models/responses/response_reasoning_summary_done_event.rbs rename to sig/openai/models/responses/response_reasoning_text_done_event.rbs index cb56e84f..9e3712b0 100644 --- 
a/sig/openai/models/responses/response_reasoning_summary_done_event.rbs +++ b/sig/openai/models/responses/response_reasoning_text_done_event.rbs @@ -1,45 +1,45 @@ module OpenAI module Models module Responses - type response_reasoning_summary_done_event = + type response_reasoning_text_done_event = { + content_index: Integer, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, text: String, - type: :"response.reasoning_summary.done" + type: :"response.reasoning_text.done" } - class ResponseReasoningSummaryDoneEvent < OpenAI::Internal::Type::BaseModel + class ResponseReasoningTextDoneEvent < OpenAI::Internal::Type::BaseModel + attr_accessor content_index: Integer + attr_accessor item_id: String attr_accessor output_index: Integer attr_accessor sequence_number: Integer - attr_accessor summary_index: Integer - attr_accessor text: String - attr_accessor type: :"response.reasoning_summary.done" + attr_accessor type: :"response.reasoning_text.done" def initialize: ( + content_index: Integer, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, text: String, - ?type: :"response.reasoning_summary.done" + ?type: :"response.reasoning_text.done" ) -> void def to_hash: -> { + content_index: Integer, item_id: String, output_index: Integer, sequence_number: Integer, - summary_index: Integer, text: String, - type: :"response.reasoning_summary.done" + type: :"response.reasoning_text.done" } end end diff --git a/sig/openai/models/responses/response_retrieve_params.rbs b/sig/openai/models/responses/response_retrieve_params.rbs index 56f3ed6a..66b490f5 100644 --- a/sig/openai/models/responses/response_retrieve_params.rbs +++ b/sig/openai/models/responses/response_retrieve_params.rbs @@ -4,6 +4,7 @@ module OpenAI type response_retrieve_params = { include: ::Array[OpenAI::Models::Responses::response_includable], + include_obfuscation: bool, starting_after: Integer } & 
OpenAI::Internal::Type::request_parameters @@ -18,18 +19,24 @@ module OpenAI ::Array[OpenAI::Models::Responses::response_includable] ) -> ::Array[OpenAI::Models::Responses::response_includable] + attr_reader include_obfuscation: bool? + + def include_obfuscation=: (bool) -> bool + attr_reader starting_after: Integer? def starting_after=: (Integer) -> Integer def initialize: ( ?include: ::Array[OpenAI::Models::Responses::response_includable], + ?include_obfuscation: bool, ?starting_after: Integer, ?request_options: OpenAI::request_opts ) -> void def to_hash: -> { include: ::Array[OpenAI::Models::Responses::response_includable], + include_obfuscation: bool, starting_after: Integer, request_options: OpenAI::RequestOptions } diff --git a/sig/openai/models/responses/response_stream_event.rbs b/sig/openai/models/responses/response_stream_event.rbs index 21677586..b9a88468 100644 --- a/sig/openai/models/responses/response_stream_event.rbs +++ b/sig/openai/models/responses/response_stream_event.rbs @@ -30,6 +30,8 @@ module OpenAI | OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent | OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent | OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent + | OpenAI::Responses::ResponseReasoningTextDeltaEvent + | OpenAI::Responses::ResponseReasoningTextDoneEvent | OpenAI::Responses::ResponseRefusalDeltaEvent | OpenAI::Responses::ResponseRefusalDoneEvent | OpenAI::Responses::ResponseTextDeltaEvent @@ -51,8 +53,8 @@ module OpenAI | OpenAI::Responses::ResponseMcpListToolsInProgressEvent | OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent | OpenAI::Responses::ResponseQueuedEvent - | OpenAI::Responses::ResponseReasoningSummaryDeltaEvent - | OpenAI::Responses::ResponseReasoningSummaryDoneEvent + | OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent + | OpenAI::Responses::ResponseCustomToolCallInputDoneEvent module ResponseStreamEvent extend OpenAI::Internal::Type::Union diff --git 
a/sig/openai/models/responses/response_text_config.rbs b/sig/openai/models/responses/response_text_config.rbs index e60dae0f..3f0a6608 100644 --- a/sig/openai/models/responses/response_text_config.rbs +++ b/sig/openai/models/responses/response_text_config.rbs @@ -2,7 +2,10 @@ module OpenAI module Models module Responses type response_text_config = - { format_: OpenAI::Models::Responses::response_format_text_config } + { + format_: OpenAI::Models::Responses::response_format_text_config, + verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity? + } class ResponseTextConfig < OpenAI::Internal::Type::BaseModel attr_reader format_: OpenAI::Models::Responses::response_format_text_config? @@ -11,13 +14,29 @@ module OpenAI OpenAI::Models::Responses::response_format_text_config ) -> OpenAI::Models::Responses::response_format_text_config + attr_accessor verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity? + def initialize: ( - ?format_: OpenAI::Models::Responses::response_format_text_config + ?format_: OpenAI::Models::Responses::response_format_text_config, + ?verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity? ) -> void def to_hash: -> { - format_: OpenAI::Models::Responses::response_format_text_config + format_: OpenAI::Models::Responses::response_format_text_config, + verbosity: OpenAI::Models::Responses::ResponseTextConfig::verbosity? 
} + + type verbosity = :low | :medium | :high + + module Verbosity + extend OpenAI::Internal::Type::Enum + + LOW: :low + MEDIUM: :medium + HIGH: :high + + def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseTextConfig::verbosity] + end end end end diff --git a/sig/openai/models/responses/tool.rbs b/sig/openai/models/responses/tool.rbs index fffb117b..71600464 100644 --- a/sig/openai/models/responses/tool.rbs +++ b/sig/openai/models/responses/tool.rbs @@ -9,6 +9,7 @@ module OpenAI | OpenAI::Responses::Tool::CodeInterpreter | OpenAI::Responses::Tool::ImageGeneration | OpenAI::Responses::Tool::LocalShell + | OpenAI::Responses::CustomTool | OpenAI::Responses::WebSearchTool module Tool diff --git a/sig/openai/models/responses/tool_choice_allowed.rbs b/sig/openai/models/responses/tool_choice_allowed.rbs new file mode 100644 index 00000000..add7a8ce --- /dev/null +++ b/sig/openai/models/responses/tool_choice_allowed.rbs @@ -0,0 +1,43 @@ +module OpenAI + module Models + module Responses + type tool_choice_allowed = + { + mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode, + tools: ::Array[::Hash[Symbol, top]], + type: :allowed_tools + } + + class ToolChoiceAllowed < OpenAI::Internal::Type::BaseModel + attr_accessor mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode + + attr_accessor tools: ::Array[::Hash[Symbol, top]] + + attr_accessor type: :allowed_tools + + def initialize: ( + mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode, + tools: ::Array[::Hash[Symbol, top]], + ?type: :allowed_tools + ) -> void + + def to_hash: -> { + mode: OpenAI::Models::Responses::ToolChoiceAllowed::mode, + tools: ::Array[::Hash[Symbol, top]], + type: :allowed_tools + } + + type mode = :auto | :required + + module Mode + extend OpenAI::Internal::Type::Enum + + AUTO: :auto + REQUIRED: :required + + def self?.values: -> ::Array[OpenAI::Models::Responses::ToolChoiceAllowed::mode] + end + end + end + end +end diff --git 
a/sig/openai/models/responses/tool_choice_custom.rbs b/sig/openai/models/responses/tool_choice_custom.rbs new file mode 100644 index 00000000..9848fb4f --- /dev/null +++ b/sig/openai/models/responses/tool_choice_custom.rbs @@ -0,0 +1,17 @@ +module OpenAI + module Models + module Responses + type tool_choice_custom = { name: String, type: :custom } + + class ToolChoiceCustom < OpenAI::Internal::Type::BaseModel + attr_accessor name: String + + attr_accessor type: :custom + + def initialize: (name: String, ?type: :custom) -> void + + def to_hash: -> { name: String, type: :custom } + end + end + end +end diff --git a/sig/openai/models/vector_store_search_params.rbs b/sig/openai/models/vector_store_search_params.rbs index 0ad7493a..aecf7fd7 100644 --- a/sig/openai/models/vector_store_search_params.rbs +++ b/sig/openai/models/vector_store_search_params.rbs @@ -99,11 +99,12 @@ module OpenAI score_threshold: Float } - type ranker = :auto | :"default-2024-11-15" + type ranker = :none | :auto | :"default-2024-11-15" module Ranker extend OpenAI::Internal::Type::Enum + NONE: :none AUTO: :auto DEFAULT_2024_11_15: :"default-2024-11-15" diff --git a/sig/openai/resources/chat/completions.rbs b/sig/openai/resources/chat/completions.rbs index 79a01c27..a4237ff1 100644 --- a/sig/openai/resources/chat/completions.rbs +++ b/sig/openai/resources/chat/completions.rbs @@ -32,10 +32,11 @@ module OpenAI ?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, ?temperature: Float?, ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - ?tools: ::Array[OpenAI::Chat::ChatCompletionTool], + ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], ?top_logprobs: Integer?, ?top_p: Float?, ?user: String, + ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, ?web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, ?request_options: OpenAI::request_opts ) -> OpenAI::Chat::ChatCompletion @@ -68,10 +69,11 @@ module OpenAI 
?stream_options: OpenAI::Chat::ChatCompletionStreamOptions?, ?temperature: Float?, ?tool_choice: OpenAI::Models::Chat::chat_completion_tool_choice_option, - ?tools: ::Array[OpenAI::Chat::ChatCompletionTool], + ?tools: ::Array[OpenAI::Models::Chat::chat_completion_tool], ?top_logprobs: Integer?, ?top_p: Float?, ?user: String, + ?verbosity: OpenAI::Models::Chat::CompletionCreateParams::verbosity?, ?web_search_options: OpenAI::Chat::CompletionCreateParams::WebSearchOptions, ?request_options: OpenAI::request_opts ) -> OpenAI::Internal::Stream[OpenAI::Chat::ChatCompletionChunk] diff --git a/sig/openai/resources/responses.rbs b/sig/openai/resources/responses.rbs index e0d85fee..502b7174 100644 --- a/sig/openai/resources/responses.rbs +++ b/sig/openai/resources/responses.rbs @@ -20,6 +20,7 @@ module OpenAI ?safety_identifier: String, ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, ?store: bool?, + ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, ?temperature: Float?, ?text: OpenAI::Responses::ResponseTextConfig, ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, @@ -32,6 +33,35 @@ module OpenAI ) -> OpenAI::Responses::Response def stream_raw: ( + ?background: bool?, + ?include: ::Array[OpenAI::Models::Responses::response_includable]?, + ?input: OpenAI::Models::Responses::ResponseCreateParams::input, + ?instructions: String?, + ?max_output_tokens: Integer?, + ?max_tool_calls: Integer?, + ?metadata: OpenAI::Models::metadata?, + ?model: OpenAI::Models::responses_model, + ?parallel_tool_calls: bool?, + ?previous_response_id: String?, + ?prompt: OpenAI::Responses::ResponsePrompt?, + ?prompt_cache_key: String, + ?reasoning: OpenAI::Reasoning?, + ?safety_identifier: String, + ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?, + ?store: bool?, + ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?, + ?temperature: Float?, + ?text: 
OpenAI::Responses::ResponseTextConfig, + ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice, + ?tools: ::Array[OpenAI::Models::Responses::tool], + ?top_logprobs: Integer?, + ?top_p: Float?, + ?truncation: OpenAI::Models::Responses::ResponseCreateParams::truncation?, + ?user: String, + ?request_options: OpenAI::request_opts + ) -> OpenAI::Internal::Stream[OpenAI::Models::Responses::response_stream_event] + + def stream: ( ?background: bool?, ?include: ::Array[OpenAI::Models::Responses::response_includable]?, ?input: OpenAI::Models::Responses::ResponseCreateParams::input, @@ -62,6 +92,7 @@ module OpenAI def retrieve: ( String response_id, ?include: ::Array[OpenAI::Models::Responses::response_includable], + ?include_obfuscation: bool, ?starting_after: Integer, ?request_options: OpenAI::request_opts ) -> OpenAI::Responses::Response @@ -69,6 +100,7 @@ module OpenAI def retrieve_streaming: ( String response_id, ?include: ::Array[OpenAI::Models::Responses::response_includable], + ?include_obfuscation: bool, ?starting_after: Integer, ?request_options: OpenAI::request_opts ) -> OpenAI::Internal::Stream[OpenAI::Models::Responses::response_stream_event] diff --git a/test/openai/client_test.rb b/test/openai/client_test.rb index 85ec0868..8ba8bd2d 100644 --- a/test/openai/client_test.rb +++ b/test/openai/client_test.rb @@ -40,7 +40,7 @@ def test_client_default_request_default_retry_attempts openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end assert_requested(:any, /./, times: 3) @@ -52,7 +52,7 @@ def test_client_given_request_default_retry_attempts openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 3) 
assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end assert_requested(:any, /./, times: 4) @@ -66,7 +66,7 @@ def test_client_default_request_given_retry_attempts assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {max_retries: 3} ) end @@ -82,7 +82,7 @@ def test_client_given_request_given_retry_attempts assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {max_retries: 4} ) end @@ -100,7 +100,7 @@ def test_client_retry_after_seconds openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end assert_requested(:any, /./, times: 2) @@ -118,7 +118,7 @@ def test_client_retry_after_date assert_raises(OpenAI::Errors::InternalServerError) do Thread.current.thread_variable_set(:time_now, Time.now) - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") Thread.current.thread_variable_set(:time_now, nil) end @@ -136,7 +136,7 @@ def test_client_retry_after_ms openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key", max_retries: 1) assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: 
[{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end assert_requested(:any, /./, times: 2) @@ -149,7 +149,7 @@ def test_retry_count_header openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") assert_raises(OpenAI::Errors::InternalServerError) do - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") end 3.times do @@ -165,7 +165,7 @@ def test_omit_retry_count_header assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {extra_headers: {"x-stainless-retry-count" => nil}} ) end @@ -183,7 +183,7 @@ def test_overwrite_retry_count_header assert_raises(OpenAI::Errors::InternalServerError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {extra_headers: {"x-stainless-retry-count" => "42"}} ) end @@ -207,7 +207,7 @@ def test_client_redirect_307 assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {extra_headers: {}} ) end @@ -240,7 +240,7 @@ def test_client_redirect_303 assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {extra_headers: {}} ) end @@ -268,7 +268,7 @@ def test_client_redirect_auth_keep_same_origin assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", 
request_options: {extra_headers: {"authorization" => "Bearer xyz"}} ) end @@ -299,7 +299,7 @@ def test_client_redirect_auth_strip_cross_origin assert_raises(OpenAI::Errors::APIConnectionError) do openai.chat.completions.create( messages: [{content: "string", role: :developer}], - model: :"gpt-4.1", + model: :"gpt-5", request_options: {extra_headers: {"authorization" => "Bearer xyz"}} ) end @@ -315,7 +315,7 @@ def test_default_headers openai = OpenAI::Client.new(base_url: "http://localhost", api_key: "My API Key") - openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") assert_requested(:any, /./) do |req| headers = req.headers.transform_keys(&:downcase).fetch_values("accept", "content-type") diff --git a/test/openai/resources/beta/assistants_test.rb b/test/openai/resources/beta/assistants_test.rb index ea241550..d10b07fd 100644 --- a/test/openai/resources/beta/assistants_test.rb +++ b/test/openai/resources/beta/assistants_test.rb @@ -4,7 +4,7 @@ class OpenAI::Test::Resources::Beta::AssistantsTest < OpenAI::Test::ResourceTest def test_create_required_params - response = @openai.beta.assistants.create(model: :"gpt-4.1") + response = @openai.beta.assistants.create(model: :"gpt-5") assert_pattern do response => OpenAI::Beta::Assistant diff --git a/test/openai/resources/chat/completions_test.rb b/test/openai/resources/chat/completions_test.rb index 2bb2db4b..e4e742e8 100644 --- a/test/openai/resources/chat/completions_test.rb +++ b/test/openai/resources/chat/completions_test.rb @@ -5,7 +5,7 @@ class OpenAI::Test::Resources::Chat::CompletionsTest < OpenAI::Test::ResourceTest def test_create_required_params response = - @openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-4.1") + @openai.chat.completions.create(messages: [{content: "string", role: :developer}], model: :"gpt-5") 
assert_pattern do response => OpenAI::Chat::ChatCompletion