Commit d9bdc57

Merge pull request #13 from groq/release-please--branches--main--changes--next

release: 0.4.1

2 parents: 9638e4f + f9edc0a

14 files changed: +88 -66 lines

.devcontainer/Dockerfile

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@ FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT}
 
 USER vscode
 
-RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.15.2" RYE_INSTALL_OPTION="--yes" bash
+RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.24.0" RYE_INSTALL_OPTION="--yes" bash
 ENV PATH=/home/vscode/.rye/shims:$PATH
 
 RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc
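The same Rye pin is applied in the two workflow diffs below; keeping all three in lockstep is the point of this change. As a sanity check, a minimal sketch (assumptions: Python is available in the container, and `rye --version` prints a first line of the form `rye <version>`) that asserts the toolchain on PATH matches the pinned version:

import subprocess

# Hypothetical helper, not part of this repo: confirm the Rye binary on
# PATH matches the version pinned in the Dockerfile and CI workflows.
out = subprocess.run(["rye", "--version"], capture_output=True, text=True, check=True)
installed = out.stdout.splitlines()[0].split()[1]  # "rye 0.24.0" -> "0.24.0"
assert installed == "0.24.0", f"expected rye 0.24.0, got {installed}"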

.github/workflows/ci.yml

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ jobs:
           curl -sSf https://rye-up.com/get | bash
           echo "$HOME/.rye/shims" >> $GITHUB_PATH
         env:
-          RYE_VERSION: 0.15.2
+          RYE_VERSION: 0.24.0
           RYE_INSTALL_OPTION: "--yes"
 
       - name: Install dependencies

.github/workflows/publish-pypi.yml

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ jobs:
           curl -sSf https://rye-up.com/get | bash
           echo "$HOME/.rye/shims" >> $GITHUB_PATH
         env:
-          RYE_VERSION: 0.15.2
+          RYE_VERSION: 0.24.0
           RYE_INSTALL_OPTION: "--yes"
 
       - name: Publish to PyPI

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion

@@ -1,3 +1,3 @@
 {
-  ".": "0.4.0"
+  ".": "0.4.1"
 }
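release-please reads this manifest to learn the last version it released for each path in the repository; the "." entry covers the repository root, so this bump records 0.4.1 as the current release. A minimal sketch (assuming Python and the file above) of reading the recorded version:

import json

# Load the release-please manifest and print the version recorded for
# the repository root (the "." key).
with open(".release-please-manifest.json") as f:
    manifest = json.load(f)

print(manifest["."])  # after this commit: 0.4.1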

CHANGELOG.md

Lines changed: 8 additions & 2 deletions

@@ -1,21 +1,27 @@
 # Changelog
 
-## 0.4.0 (2024-02-15)
+## 0.4.1 (2024-02-21)
 
-Full Changelog: [v0.1.0...v0.4.0](https://github.com/groq/groq-python/compare/v0.1.0...v0.4.0)
+Full Changelog: [v0.1.0...v0.4.1](https://github.com/groq/groq-python/compare/v0.1.0...v0.4.1)
 
 ### Features
 
+* Add initial Stainless SDK ([4de80db](https://github.com/groq/groq-python/commit/4de80dbe4c27d6ecbcc2d2b6192d27ade5da3866))
 * Add initial Stainless SDK ([d5a8512](https://github.com/groq/groq-python/commit/d5a851262e04e625dde130367ed91d8f95683599))
 * Add initial Stainless SDK ([316de2c](https://github.com/groq/groq-python/commit/316de2ccfeb76e36fe34bb8656ea90a8d42a7d00))
 * create default branch ([7e00266](https://github.com/groq/groq-python/commit/7e00266e3c691d92d508e753e2c14c03297c09f9))
+* update via SDK Studio ([#10](https://github.com/groq/groq-python/issues/10)) ([0c0d204](https://github.com/groq/groq-python/commit/0c0d20405a96167f060a03a2b8a58a49d9a1c7c8))
+* update via SDK Studio ([#14](https://github.com/groq/groq-python/issues/14)) ([153744e](https://github.com/groq/groq-python/commit/153744e8ca331900adb0bd88a688afedf9a4bf00))
 * update via SDK Studio ([#3](https://github.com/groq/groq-python/issues/3)) ([8d92c08](https://github.com/groq/groq-python/commit/8d92c086e320c2715e02bc79807ff872e84c0b0f))
 
 
 ### Chores
 
 * go live ([#2](https://github.com/groq/groq-python/issues/2)) ([ba81c42](https://github.com/groq/groq-python/commit/ba81c42d6d0fd6d47819e0d58962235cb70ca4f1))
+* go live ([#5](https://github.com/groq/groq-python/issues/5)) ([75ea081](https://github.com/groq/groq-python/commit/75ea081a84bbcf15702dcb53fd6411c8de497c83))
 * go live ([#5](https://github.com/groq/groq-python/issues/5)) ([af9a838](https://github.com/groq/groq-python/commit/af9a838e240bb0f7385bc33fb18ce246427ca2f7))
+* update branch ([#12](https://github.com/groq/groq-python/issues/12)) ([28f34e1](https://github.com/groq/groq-python/commit/28f34e1cc08b974d31744fd94bf31eafc9e6e867))
+* update branch ([#8](https://github.com/groq/groq-python/issues/8)) ([b9b55b4](https://github.com/groq/groq-python/commit/b9b55b41cb158efd155f9cda829808c877493afd))
 
 ## 0.1.0 (2024-02-10)
 
examples/chat_completion.py

Lines changed: 1 addition & 11 deletions

@@ -10,43 +10,33 @@
         # Set an optional system message. This sets the behavior of the
         # assistant and can be used to provide specific instructions for
         # how it should behave throughout the conversation.
-        {
-            "role": "system",
-            "content": "you are a helpful assistant."
-        },
+        {"role": "system", "content": "you are a helpful assistant."},
         # Set a user message for the assistant to respond to.
         {
             "role": "user",
             "content": "Explain the importance of low latency LLMs",
         },
     ],
-
     # The language model which will generate the completion.
     model="mixtral-8x7b-32768",
-
     #
     # Optional parameters
     #
-
     # Controls randomness: lowering results in less random completions.
     # As the temperature approaches zero, the model will become deterministic
     # and repetitive.
     temperature=0.5,
-
     # The maximum number of tokens to generate. Requests can use up to
     # 2048 tokens shared between prompt and completion.
     max_tokens=1024,
-
     # Controls diversity via nucleus sampling: 0.5 means half of all
     # likelihood-weighted options are considered.
     top_p=1,
-
     # A stop sequence is a predefined or user-specified text string that
     # signals an AI to stop generating content, ensuring its responses
     # remain focused and concise. Examples include punctuation marks and
     # markers like "[end]".
     stop=None,
-
     # If set, partial message deltas will be sent.
     stream=False,
 )
examples/chat_completion_async.py

Lines changed: 1 addition & 11 deletions

@@ -14,43 +14,33 @@ async def main() -> None:
             # Set an optional system message. This sets the behavior of the
             # assistant and can be used to provide specific instructions for
             # how it should behave throughout the conversation.
-            {
-                "role": "system",
-                "content": "you are a helpful assistant."
-            },
+            {"role": "system", "content": "you are a helpful assistant."},
             # Set a user message for the assistant to respond to.
             {
                 "role": "user",
                 "content": "Explain the importance of low latency LLMs",
             },
         ],
-
         # The language model which will generate the completion.
         model="mixtral-8x7b-32768",
-
         #
         # Optional parameters
         #
-
         # Controls randomness: lowering results in less random completions.
         # As the temperature approaches zero, the model will become
         # deterministic and repetitive.
         temperature=0.5,
-
         # The maximum number of tokens to generate. Requests can use up to
         # 2048 tokens shared between prompt and completion.
         max_tokens=1024,
-
         # Controls diversity via nucleus sampling: 0.5 means half of all
         # likelihood-weighted options are considered.
         top_p=1,
-
         # A stop sequence is a predefined or user-specified text string that
         # signals an AI to stop generating content, ensuring its responses
         # remain focused and concise. Examples include punctuation marks and
         # markers like "[end]".
         stop=None,
-
         # If set, partial message deltas will be sent.
         stream=False,
     )
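The async variant changes only formatting as well. Assuming the SDK exposes an async client as `AsyncGroq` (an assumption, since the import sits outside this hunk), a sketch of driving the example end to end:

import asyncio

from groq import AsyncGroq  # assumption: async counterpart of Groq


async def main() -> None:
    client = AsyncGroq()
    chat_completion = await client.chat.completions.create(
        messages=[
            {"role": "system", "content": "you are a helpful assistant."},
            {"role": "user", "content": "Explain the importance of low latency LLMs"},
        ],
        model="mixtral-8x7b-32768",
        temperature=0.5,
        max_tokens=1024,
        stream=False,
    )
    print(chat_completion.choices[0].message.content)


asyncio.run(main())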

examples/chat_completion_async_streaming.py

Lines changed: 1 addition & 10 deletions

@@ -14,39 +14,30 @@ async def main() -> None:
             # Set an optional system message. This sets the behavior of the
             # assistant and can be used to provide specific instructions for
             # how it should behave throughout the conversation.
-            {
-                "role": "system",
-                "content": "you are a helpful assistant."
-            },
+            {"role": "system", "content": "you are a helpful assistant."},
             # Set a user message for the assistant to respond to.
             {
                 "role": "user",
                 "content": "Explain the importance of low latency LLMs",
             },
         ],
-
         # The language model which will generate the completion.
         model="mixtral-8x7b-32768",
-
         #
         # Optional parameters
         #
-
         # Controls randomness: lowering results in less random completions.
         # As the temperature approaches zero, the model will become
         # deterministic and repetitive.
         temperature=0.5,
-
         # The maximum number of tokens to generate. Requests can use up to
         # 2048 tokens shared between prompt and completion.
         max_tokens=1024,
-
         # A stop sequence is a predefined or user-specified text string that
         # signals an AI to stop generating content, ensuring its responses
         # remain focused and concise. Examples include punctuation marks and
         # markers like "[end]".
         stop=None,
-
         # Controls diversity via nucleus sampling: 0.5 means half of all
         # likelihood-weighted options are considered.
         stream=True,
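With stream=True the call returns an iterator of partial results instead of a single completion. A minimal consumption sketch (assumptions: an `AsyncGroq` client, and OpenAI-style chunks whose deltas carry incremental text):

import asyncio

from groq import AsyncGroq  # assumption: async counterpart of Groq


async def main() -> None:
    client = AsyncGroq()
    stream = await client.chat.completions.create(
        messages=[
            {"role": "system", "content": "you are a helpful assistant."},
            {"role": "user", "content": "Explain the importance of low latency LLMs"},
        ],
        model="mixtral-8x7b-32768",
        stream=True,
    )
    # Print each partial message delta as it arrives; the final chunk's
    # delta content may be None, hence the `or ""`.
    async for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")


asyncio.run(main())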

examples/chat_completion_stop.py

Lines changed: 2 additions & 12 deletions

@@ -10,37 +10,28 @@
         # Set an optional system message. This sets the behavior of the
         # assistant and can be used to provide specific instructions for
         # how it should behave throughout the conversation.
-        {
-            "role": "system",
-            "content": "you are a helpful assistant."
-        },
+        {"role": "system", "content": "you are a helpful assistant."},
         # Set a user message for the assistant to respond to.
         {
             "role": "user",
-            "content": "Count to 10. Your response must begin with \"1, \". example: 1, 2, 3, ...",
+            "content": 'Count to 10. Your response must begin with "1, ". example: 1, 2, 3, ...',
         },
     ],
-
     # The language model which will generate the completion.
     model="mixtral-8x7b-32768",
-
     #
     # Optional parameters
     #
-
     # Controls randomness: lowering results in less random completions.
     # As the temperature approaches zero, the model will become deterministic
     # and repetitive.
    temperature=0.5,
-
     # The maximum number of tokens to generate. Requests can use up to
     # 2048 tokens shared between prompt and completion.
     max_tokens=1024,
-
     # Controls diversity via nucleus sampling: 0.5 means half of all
     # likelihood-weighted options are considered.
     top_p=1,
-
     # A stop sequence is a predefined or user-specified text string that
     # signals an AI to stop generating content, ensuring its responses
     # remain focused and concise. Examples include punctuation marks and
@@ -49,7 +40,6 @@
     # If multiple stop values are needed, an array of string may be passed,
     # stop=[", 6", ", six", ", Six"]
     stop=", 6",
-
     # If set, partial message deltas will be sent.
     stream=False,
 )
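As the comments in the hunk note, stop accepts either a single string or a list of strings. A small sketch of the two forms, using the example values from the comments themselves:

# Single stop sequence: generation halts before ", 6", so the model
# counts "1, 2, 3, 4, 5" and then stops.
stop = ", 6"

# Multiple stop sequences, per the comment's example:
stop = [", 6", ", six", ", Six"]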

examples/chat_completion_streaming.py

Lines changed: 1 addition & 11 deletions

@@ -10,43 +10,33 @@
         # Set an optional system message. This sets the behavior of the
         # assistant and can be used to provide specific instructions for
         # how it should behave throughout the conversation.
-        {
-            "role": "system",
-            "content": "you are a helpful assistant."
-        },
+        {"role": "system", "content": "you are a helpful assistant."},
         # Set a user message for the assistant to respond to.
         {
             "role": "user",
             "content": "Explain the importance of low latency LLMs",
         },
     ],
-
     # The language model which will generate the completion.
     model="mixtral-8x7b-32768",
-
     #
     # Optional parameters
     #
-
     # Controls randomness: lowering results in less random completions.
     # As the temperature approaches zero, the model will become deterministic
     # and repetitive.
     temperature=0.5,
-
     # The maximum number of tokens to generate. Requests can use up to
     # 2048 tokens shared between prompt and completion.
     max_tokens=1024,
-
     # Controls diversity via nucleus sampling: 0.5 means half of all
     # likelihood-weighted options are considered.
     top_p=1,
-
     # A stop sequence is a predefined or user-specified text string that
     # signals an AI to stop generating content, ensuring its responses
     # remain focused and concise. Examples include punctuation marks and
     # markers like "[end]".
     stop=None,
-
     # If set, partial message deltas will be sent.
     stream=True,
 )
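The synchronous streaming example mirrors the async one. A minimal sketch of consuming the stream (assumptions: `Groq()` reads `GROQ_API_KEY` from the environment, and chunks follow the usual delta shape):

from groq import Groq

client = Groq()  # assumption: picks up GROQ_API_KEY from the environment

stream = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "you are a helpful assistant."},
        {"role": "user", "content": "Explain the importance of low latency LLMs"},
    ],
    model="mixtral-8x7b-32768",
    stream=True,
)

# Print partial message deltas as they arrive; a delta's content may be
# None on the final chunk, hence the `or ""`.
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")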
