Commit 842e56a

release: 0.28.0 (#245)
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Co-authored-by: Graden Rea <[email protected]>
1 parent 64bbdcd commit 842e56a

10 files changed (+67 additions, −5 deletions)


.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 {
-  ".": "0.27.0"
+  ".": "0.28.0"
 }

.stats.yml

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 17
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-d6462cdccc879903e070222172f392feb1487969a39fb8399983e57f0a731fdd.yml
-openapi_spec_hash: 00207ab94189df55f0229c088475d4ad
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-ea5b03626ef6c5d62792b87e2bef9acff936112791abe056db5d0a7a66ba39d7.yml
+openapi_spec_hash: 841fe2e4ce7db3d377f7ad03cd973344
 config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89

CHANGELOG.md

Lines changed: 15 additions & 0 deletions
@@ -1,5 +1,20 @@
 # Changelog
 
+## 0.28.0 (2025-06-12)
+
+Full Changelog: [v0.27.0...v0.28.0](https://github.com/groq/groq-python/compare/v0.27.0...v0.28.0)
+
+### Features
+
+* **api:** api update ([1b088b1](https://github.com/groq/groq-python/commit/1b088b11e42ddd9b04c3fa9fc0d1cf957696529a))
+* **api:** api update ([2a96d4f](https://github.com/groq/groq-python/commit/2a96d4fbcf6dbff9a8125a0f5f9d73ffe97d5223))
+* **api:** api update ([089ab40](https://github.com/groq/groq-python/commit/089ab404ada560ca59048ec23981252f439165ea))
+
+
+### Chores
+
+* fix streaming overloads ([bb31101](https://github.com/groq/groq-python/commit/bb3110118ca7fe8258d7eaed6856a1bed560e533))
+
 ## 0.27.0 (2025-06-10)
 
 Full Changelog: [v0.26.0...v0.27.0](https://github.com/groq/groq-python/compare/v0.26.0...v0.27.0)

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "groq"
-version = "0.27.0"
+version = "0.28.0"
 description = "The official Python library for the groq API"
 dynamic = ["readme"]
 license = "Apache-2.0"

src/groq/_version.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "groq"
-__version__ = "0.27.0"  # x-release-please-version
+__version__ = "0.28.0"  # x-release-please-version

src/groq/resources/chat/completions.py

Lines changed: 16 additions & 0 deletions
@@ -68,6 +68,7 @@ def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -110,6 +111,7 @@ def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -152,6 +154,7 @@ def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -203,6 +206,7 @@ def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -285,6 +289,9 @@ def create(
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

+          reasoning_effort: this field is only available for qwen3 models. Set to 'none' to disable
+              reasoning. Set to 'default' or null to let Qwen reason.
+
           reasoning_format: Specifies how to output reasoning tokens

           response_format: An object specifying the format that the model must output. Setting to
@@ -377,6 +384,7 @@ def create(
                     "n": n,
                     "parallel_tool_calls": parallel_tool_calls,
                     "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
                     "reasoning_format": reasoning_format,
                     "response_format": response_format,
                     "search_settings": search_settings,
@@ -442,6 +450,7 @@ async def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -484,6 +493,7 @@ async def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -526,6 +536,7 @@ async def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -577,6 +588,7 @@ async def create(
         n: Optional[int] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        reasoning_effort: Optional[Literal["none", "default"]] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
         search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
@@ -659,6 +671,9 @@ async def create(
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

+          reasoning_effort: this field is only available for qwen3 models. Set to 'none' to disable
+              reasoning. Set to 'default' or null to let Qwen reason.
+
           reasoning_format: Specifies how to output reasoning tokens

           response_format: An object specifying the format that the model must output. Setting to
@@ -751,6 +766,7 @@ async def create(
                     "n": n,
                     "parallel_tool_calls": parallel_tool_calls,
                     "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
                     "reasoning_format": reasoning_format,
                     "response_format": response_format,
                     "search_settings": search_settings,

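The hunks above add a reasoning_effort request parameter to both the sync and async create overloads. A minimal usage sketch (not part of the diff), assuming a qwen3 model is available; the model id below is illustrative:

    import os

    from groq import Groq

    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

    completion = client.chat.completions.create(
        model="qwen/qwen3-32b",  # illustrative qwen3 model id; substitute one you have access to
        messages=[{"role": "user", "content": "Summarize the CAP theorem in one sentence."}],
        reasoning_effort="none",  # new in 0.28.0: disable reasoning on qwen3 models
    )
    print(completion.choices[0].message.content)
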
src/groq/types/chat/chat_completion_assistant_message_param.py

Lines changed: 6 additions & 0 deletions
@@ -47,5 +47,11 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
     role.
     """

+    reasoning: Optional[str]
+    """
+    The reasoning output by the assistant if reasoning_format was set to 'parsed'.
+    This field is only useable with qwen3 models.
+    """
+
     tool_calls: Iterable[ChatCompletionMessageToolCallParam]
     """The tool calls generated by the model, such as function calls."""

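The new optional reasoning key lets a prior assistant turn's parsed reasoning be passed back with that message. A hedged sketch (not from the diff), assuming reasoning_format="parsed", an illustrative qwen3 model id, and that the response message exposes its parsed reasoning as message.reasoning:

    from groq import Groq

    client = Groq()
    model = "qwen/qwen3-32b"  # illustrative qwen3 model id
    question = {"role": "user", "content": "Is 7919 prime?"}

    first = client.chat.completions.create(
        model=model,
        messages=[question],
        reasoning_format="parsed",
    )
    assistant = first.choices[0].message

    # Replay the turn, sending the parsed reasoning back via the new optional key.
    followup = client.chat.completions.create(
        model=model,
        messages=[
            question,
            {
                "role": "assistant",
                "content": assistant.content,
                "reasoning": assistant.reasoning,  # assumed to be populated when reasoning_format="parsed"
            },
            {"role": "user", "content": "And 7921?"},
        ],
    )
    print(followup.choices[0].message.content)
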
src/groq/types/chat/chat_completion_message.py

Lines changed: 17 additions & 0 deletions
@@ -25,15 +25,32 @@ class ExecutedToolCodeResultChartElement(BaseModel):
     angle: Optional[float] = None
     """The angle for this element"""

+    first_quartile: Optional[float] = None
+    """The first quartile value for this element"""
+
     group: Optional[str] = None
     """The group this element belongs to"""

+    max: Optional[float] = None
+
+    median: Optional[float] = None
+    """The median value for this element"""
+
+    min: Optional[float] = None
+    """The minimum value for this element"""
+
+    outliers: Optional[List[float]] = None
+    """The outliers for this element"""
+
     points: Optional[List[List[float]]] = None
     """The points for this element"""

     radius: Optional[float] = None
     """The radius for this element"""

+    third_quartile: Optional[float] = None
+    """The third quartile value for this element"""
+
     value: Optional[float] = None
     """The value for this element"""

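These additions extend the chart element model with box-plot statistics. A small illustrative sketch (not from the diff) constructing the model directly, assuming the class is importable from the module shown above; the values are made up:

    from groq.types.chat.chat_completion_message import ExecutedToolCodeResultChartElement

    # Hypothetical box-plot element exercising the new fields.
    element = ExecutedToolCodeResultChartElement(
        min=0.5,
        first_quartile=1.0,
        median=2.5,
        third_quartile=4.0,
        max=6.0,
        outliers=[9.0, 10.2],
    )
    print(element.median, element.outliers)
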
src/groq/types/chat/completion_create_params.py

Lines changed: 6 additions & 0 deletions
@@ -132,6 +132,12 @@ class CompletionCreateParams(TypedDict, total=False):
     far, increasing the model's likelihood to talk about new topics.
     """

+    reasoning_effort: Optional[Literal["none", "default"]]
+    """
+    this field is only available for qwen3 models. Set to 'none' to disable
+    reasoning. Set to 'default' or null to let Qwen reason.
+    """
+
     reasoning_format: Optional[Literal["hidden", "raw", "parsed"]]
     """Specifies how to output reasoning tokens"""

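For callers that build requests programmatically, the same key is available on the CompletionCreateParams TypedDict shown above. A brief sketch (the model id is illustrative, and the params-dict style is an assumption about usage, not part of the diff):

    from groq import Groq
    from groq.types.chat import completion_create_params

    params: completion_create_params.CompletionCreateParams = {
        "model": "qwen/qwen3-32b",  # illustrative qwen3 model id
        "messages": [{"role": "user", "content": "hello"}],
        "reasoning_effort": "default",  # new key in 0.28.0
    }

    client = Groq()
    completion = client.chat.completions.create(**params)
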
tests/api_resources/chat/test_completions.py

Lines changed: 2 additions & 0 deletions
@@ -60,6 +60,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None:
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
+            reasoning_effort="none",
             reasoning_format="hidden",
             response_format={"type": "text"},
             search_settings={
@@ -173,6 +174,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> None:
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
+            reasoning_effort="none",
             reasoning_format="hidden",
             response_format={"type": "text"},
             search_settings={
