Skip to content

Commit 027b99b

Browse files
Making completion object properties optional
1 parent 287c37c commit 027b99b

File tree

5 files changed

+24
-24
lines changed

5 files changed

+24
-24
lines changed

Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatDemo/ChatProvider.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,9 @@ import SwiftOpenAI
2929
let response = try await service.startChat(parameters: parameters)
3030
let choices = response.choices
3131
let chatUsage = response.usage
32-
let logprobs = choices.compactMap(\.logprobs)
32+
let logprobs = choices?.compactMap(\.logprobs)
3333
dump(logprobs)
34-
self.messages = choices.compactMap(\.message.content)
34+
self.messages = choices?.compactMap(\.message?.content) ?? []
3535
dump(chatUsage)
3636
self.usage = chatUsage
3737
} catch APIError.responseUnsuccessful(let description, let statusCode) {

Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatFunctionsCall/Completion/ChatFunctionCallProvider.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ enum FunctionCallDefinition: String, CaseIterable {
103103
do {
104104
let chat = try await service.startChat(parameters: parameters)
105105

106-
guard let assistantMessage = chat.choices.first?.message else { return }
106+
guard let assistantMessage = chat.choices?.first?.message else { return }
107107

108108
let content = assistantMessage.content ?? ""
109109

@@ -157,7 +157,7 @@ enum FunctionCallDefinition: String, CaseIterable {
157157
model: .gpt41106Preview)
158158
do {
159159
let chat = try await service.startChat(parameters: paramsForChat)
160-
guard let assistantMessage = chat.choices.first?.message else { return }
160+
guard let assistantMessage = chat.choices?.first?.message else { return }
161161
await updateLastAssistantMessage(.init(content: .content(.init(text: assistantMessage.content)), origin: .received(.gpt)))
162162
} catch {
163163
// If an error occurs, update the UI to display the error message.

Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructureOutputTool/ChatStructuredOutputToolProvider.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ final class ChatStructuredOutputToolProvider {
107107
do {
108108

109109
let chat = try await service.startChat(parameters: parameters)
110-
guard let assistantMessage = chat.choices.first?.message else { return }
110+
guard let assistantMessage = chat.choices?.first?.message else { return }
111111
let content = assistantMessage.content ?? ""
112112
await updateLastAssistantMessage(.init(content: .content(.init(text: content)), origin: .received(.gpt)))
113113
if let toolCalls = assistantMessage.toolCalls {
@@ -241,7 +241,7 @@ extension ChatStructuredOutputToolProvider {
241241
model: .gpt4o)
242242
do {
243243
let chat = try await service.startChat(parameters: paramsForChat)
244-
guard let assistantMessage = chat.choices.first?.message else { return }
244+
guard let assistantMessage = chat.choices?.first?.message else { return }
245245
await updateLastAssistantMessage(.init(content: .content(.init(text: assistantMessage.content)), origin: .received(.gpt)))
246246
} catch {
247247
// If an error occurs, update the UI to display the error message.

Examples/SwiftOpenAIExample/SwiftOpenAIExample/ChatStructuredOutputs/ChatStructuredOutputProvider.swift

+2-2
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,9 @@ final class ChatStructuredOutputProvider {
3030
{
3131
do {
3232
let choices = try await service.startChat(parameters: parameters).choices
33-
self.messages = choices.compactMap(\.message.content).map { $0.asJsonFormatted() }
33+
self.messages = choices?.compactMap(\.message?.content).map { $0.asJsonFormatted() } ?? []
3434
assert(messages.count == 1)
35-
self.errorMessage = choices.first?.message.refusal ?? ""
35+
self.errorMessage = choices?.first?.message?.refusal ?? ""
3636
} catch APIError.responseUnsuccessful(let description, let statusCode) {
3737
self.errorMessage = "Network error with status code: \(statusCode) and description: \(description)"
3838
} catch {

Sources/OpenAI/Public/ResponseModels/Chat/ChatCompletionObject.swift

+16-16
Original file line numberDiff line numberDiff line change
@@ -11,20 +11,20 @@ import Foundation
1111
public struct ChatCompletionObject: Decodable {
1212

1313
/// A unique identifier for the chat completion.
14-
public let id: String
14+
public let id: String?
1515
/// A list of chat completion choices. Can be more than one if n is greater than 1.
16-
public let choices: [ChatChoice]
16+
public let choices: [ChatChoice]?
1717
/// The Unix timestamp (in seconds) of when the chat completion was created.
18-
public let created: Int
18+
public let created: Int?
1919
/// The model used for the chat completion.
20-
public let model: String
20+
public let model: String?
2121
/// The service tier used for processing the request. This field is only included if the service_tier parameter is specified in the request.
2222
public let serviceTier: String?
2323
/// This fingerprint represents the backend configuration that the model runs with.
2424
/// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
2525
public let systemFingerprint: String?
2626
/// The object type, which is always chat.completion.
27-
public let object: String
27+
public let object: String?
2828
/// Usage statistics for the completion request.
2929
public let usage: ChatUsage?
3030

@@ -33,9 +33,9 @@ public struct ChatCompletionObject: Decodable {
3333
/// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
3434
public let finishReason: IntOrStringValue?
3535
/// The index of the choice in the list of choices.
36-
public let index: Int
36+
public let index: Int?
3737
/// A chat completion message generated by the model.
38-
public let message: ChatMessage
38+
public let message: ChatMessage?
3939
/// Log probability information for the choice.
4040
public let logprobs: LogProb?
4141

@@ -49,7 +49,7 @@ public struct ChatCompletionObject: Decodable {
4949
@available(*, deprecated, message: "Deprecated and replaced by `tool_calls`")
5050
public let functionCall: FunctionCall?
5151
/// The role of the author of this message.
52-
public let role: String
52+
public let role: String?
5353
/// The reasoning content generated by the model, if available.
5454
public let reasoningContent: String?
5555
/// Provided by the Vision API.
@@ -61,12 +61,12 @@ public struct ChatCompletionObject: Decodable {
6161

6262
/// Provided by the Vision API.
6363
public struct FinishDetails: Decodable {
64-
let type: String
64+
let type: String?
6565
}
6666

6767
public struct Audio: Decodable {
6868
/// Unique identifier for this audio response.
69-
public let id: String
69+
public let id: String?
7070
/// The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.
7171
public let expiresAt: Int?
7272
/// Base64 encoded audio bytes generated by the model, in the format specified in the request.
@@ -96,18 +96,18 @@ public struct ChatCompletionObject: Decodable {
9696

9797
public struct LogProb: Decodable {
9898
/// A list of message content tokens with log probability information.
99-
let content: [TokenDetail]
99+
let content: [TokenDetail]?
100100
}
101101

102102
public struct TokenDetail: Decodable {
103103
/// The token.
104-
let token: String
104+
let token: String?
105105
/// The log probability of this token.
106-
let logprob: Double
106+
let logprob: Double?
107107
/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
108108
let bytes: [Int]?
109109
/// List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
110-
let topLogprobs: [TopLogProb]
110+
let topLogprobs: [TopLogProb]?
111111

112112
enum CodingKeys: String, CodingKey {
113113
case token, logprob, bytes
@@ -116,9 +116,9 @@ public struct ChatCompletionObject: Decodable {
116116

117117
struct TopLogProb: Decodable {
118118
/// The token.
119-
let token: String
119+
let token: String?
120120
/// The log probability of this token.
121-
let logprob: Double
121+
let logprob: Double?
122122
/// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
123123
let bytes: [Int]?
124124
}

0 commit comments

Comments
 (0)