Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "0.30.0"
".": "0.31.0"
}
6 changes: 3 additions & 3 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 17
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-4543b558a0a546fc45d3300535b9b535f9cf251f4284bc255d3bc337727e5a50.yml
openapi_spec_hash: 09235cb11f84f84a07819c2b3f0a6d6a
config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-a95a90928412afdb9cf5101b7fbb67ef2abbc4ecaa51ff18fa04643f9e8d2c95.yml
openapi_spec_hash: d2e5cb1562a2b2beb8673256252b9bf5
config_hash: 961b4995e909aef11a454befa56ad3d2
27 changes: 27 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,32 @@
# Changelog

## 0.31.0 (2025-09-02)

Full Changelog: [v0.30.0...v0.31.0](https://github.com/groq/groq-typescript/compare/v0.30.0...v0.31.0)

### Features

* **api:** api update ([be48d38](https://github.com/groq/groq-typescript/commit/be48d38f749dc4599ab71633ff226583f6e74b02))
* **api:** api update ([44424c2](https://github.com/groq/groq-typescript/commit/44424c26018f43b69682bebca7e9c734b5d0f1e0))
* **api:** api update ([f772446](https://github.com/groq/groq-typescript/commit/f772446f655ff905c14f40f45cafc4109ef7f912))
* **api:** api update ([2e12bd1](https://github.com/groq/groq-typescript/commit/2e12bd1022f8cd168d41b62d753615b3900ee1f1))
* **api:** api update ([61a9e99](https://github.com/groq/groq-typescript/commit/61a9e99c984e43432d823388435893408d84ce0f))


### Bug Fixes

* update example model from decommissioned models to gpt-oss-20b ([1c69897](https://github.com/groq/groq-typescript/commit/1c69897c7fe09a0d0df5f041749a17a95ed878cd))


### Chores

* **deps:** update dependency node-fetch to v2.6.13 ([4938496](https://github.com/groq/groq-typescript/commit/49384963049a83b196e87595046e7686fcdfcca0))
* **internal:** formatting change ([a287d5e](https://github.com/groq/groq-typescript/commit/a287d5e41f1b6bba338b0cc3e1b9212a35b4e824))
* **internal:** move publish config ([5fe1890](https://github.com/groq/groq-typescript/commit/5fe1890402075a67d6ea4472b6196273cd0b0f9e))
* **internal:** update comment in script ([2a69a41](https://github.com/groq/groq-typescript/commit/2a69a4195d773bd0c6c1dcc0e49824ed6f50b24d))
* update @stainless-api/prism-cli to v5.15.0 ([35cd683](https://github.com/groq/groq-typescript/commit/35cd68309611e5e77f04974ce18b516116f3346d))
* update CI script ([e863bc0](https://github.com/groq/groq-typescript/commit/e863bc04c0324e2b5cdf2fab8abe36e2c4469907))

## 0.30.0 (2025-08-05)

Full Changelog: [v0.29.0...v0.30.0](https://github.com/groq/groq-typescript/compare/v0.29.0...v0.30.0)
Expand Down
16 changes: 8 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ const client = new Groq({

const chatCompletion = await client.chat.completions.create({
messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
model: 'llama3-8b-8192',
model: 'openai/gpt-oss-20b',
});

console.log(chatCompletion.choices[0].message.content);
Expand All @@ -51,7 +51,7 @@ const params: Groq.Chat.CompletionCreateParams = {
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'llama3-8b-8192',
model: 'openai/gpt-oss-20b',
};
const chatCompletion: Groq.Chat.ChatCompletion = await client.chat.completions.create(params);
```
Expand Down Expand Up @@ -117,7 +117,7 @@ const chatCompletion = await client.chat.completions
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'llama3-8b-8192',
model: 'openai/gpt-oss-20b',
})
.catch(async (err) => {
if (err instanceof Groq.APIError) {
Expand Down Expand Up @@ -159,7 +159,7 @@ const client = new Groq({
});

// Or, configure per-request:
await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'llama3-8b-8192' }, {
await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'openai/gpt-oss-20b' }, {
maxRetries: 5,
});
```
Expand All @@ -176,7 +176,7 @@ const client = new Groq({
});

// Override per-request:
await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'llama3-8b-8192' }, {
await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'openai/gpt-oss-20b' }, {
timeout: 5 * 1000,
});
```
Expand All @@ -203,7 +203,7 @@ const response = await client.chat.completions
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'llama3-8b-8192',
model: 'openai/gpt-oss-20b',
})
.asResponse();
console.log(response.headers.get('X-My-Header'));
Expand All @@ -215,7 +215,7 @@ const { data: chatCompletion, response: raw } = await client.chat.completions
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'llama3-8b-8192',
model: 'openai/gpt-oss-20b',
})
.withResponse();
console.log(raw.headers.get('X-My-Header'));
Expand Down Expand Up @@ -329,7 +329,7 @@ await client.chat.completions.create(
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Explain the importance of low latency LLMs' },
],
model: 'llama3-8b-8192',
model: 'openai/gpt-oss-20b',
},
{
httpAgent: new http.Agent({ keepAlive: false }),
Expand Down
2 changes: 1 addition & 1 deletion bin/publish-npm
Original file line number Diff line number Diff line change
Expand Up @@ -58,4 +58,4 @@ else
fi

# Publish with the appropriate tag
yarn publish --access public --tag "$TAG"
yarn publish --tag "$TAG"
5 changes: 4 additions & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "groq-sdk",
"version": "0.30.0",
"version": "0.31.0",
"description": "The official TypeScript library for the Groq API",
"author": "Groq <[email protected]>",
"types": "dist/index.d.ts",
Expand All @@ -13,6 +13,9 @@
"**/*"
],
"private": false,
"publishConfig": {
"access": "public"
},
"scripts": {
"test": "./scripts/test",
"build": "./scripts/build",
Expand Down
4 changes: 2 additions & 2 deletions scripts/mock
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}"

# Run prism mock on the given spec
if [ "$1" == "--daemon" ]; then
npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log &
npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &

# Wait for server to come online
echo -n "Waiting for server"
Expand All @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then

echo
else
npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL"
npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
fi
2 changes: 1 addition & 1 deletion scripts/test
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ elif ! prism_is_running ; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the prism command:"
echo
echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}"
echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}"
echo

exit 1
Expand Down
2 changes: 1 addition & 1 deletion scripts/utils/upload-artifact.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ if [[ "$SIGNED_URL" == "null" ]]; then
exit 1
fi

UPLOAD_RESPONSE=$(tar -cz dist | curl -v -X PUT \
UPLOAD_RESPONSE=$(tar -cz "${BUILD_PATH:-dist}" | curl -v -X PUT \
-H "Content-Type: application/gzip" \
--data-binary @- "$SIGNED_URL" 2>&1)

Expand Down
1 change: 1 addition & 0 deletions src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -218,6 +218,7 @@ Groq.Audio = Audio;
Groq.Models = Models;
Groq.Batches = Batches;
Groq.Files = Files;

export declare namespace Groq {
export type RequestOptions = Core.RequestOptions;

Expand Down
63 changes: 53 additions & 10 deletions src/resources/chat/completions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1409,9 +1409,10 @@ export namespace ChatCompletionTokenLogprob {

export interface ChatCompletionTool {
/**
* The type of the tool. Currently, only `function` is supported.
* The type of the tool. `function`, `browser_search`, and `code_interpreter` are
* supported.
*/
type: 'function' | 'browser_search' | 'code_interpreter';
type: 'function' | 'browser_search' | 'code_interpreter' | (string & {});

function?: Shared.FunctionDefinition;
}
Expand Down Expand Up @@ -1487,6 +1488,17 @@ export interface ChatCompletionCreateParamsBase {
| 'llama3-70b-8192'
| 'llama3-8b-8192';

/**
* Custom configuration of models and tools for Compound.
*/
compound_custom?: CompletionCreateParams.CompoundCustom | null;

/**
* A list of documents to provide context for the conversation. Each document
* contains text that can be referenced by the model.
*/
documents?: Array<CompletionCreateParams.Document> | null;

/**
* @deprecated Deprecated: Use search_settings.exclude_domains instead. A list of
* domains to exclude from the search results when the model uses a web search
Expand All @@ -1495,9 +1507,9 @@ export interface ChatCompletionCreateParamsBase {
exclude_domains?: Array<string> | null;

/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
* This is not yet supported by any of our models. Number between -2.0 and 2.0.
* Positive values penalize new tokens based on their existing frequency in the
* text so far, decreasing the model's likelihood to repeat the same line verbatim.
*/
frequency_penalty?: number | null;

Expand Down Expand Up @@ -1580,15 +1592,18 @@ export interface ChatCompletionCreateParamsBase {
parallel_tool_calls?: boolean | null;

/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
* This is not yet supported by any of our models. Number between -2.0 and 2.0.
* Positive values penalize new tokens based on whether they appear in the text so
* far, increasing the model's likelihood to talk about new topics.
*/
presence_penalty?: number | null;

/**
* this field is only available for qwen3 models. Set to 'none' to disable
* reasoning. Set to 'default' or null to let Qwen reason.
 * qwen3 models support the following values: Set to 'none' to disable reasoning.
 * Set to 'default' or null to let Qwen reason.
*
* openai/gpt-oss-20b and openai/gpt-oss-120b support 'low', 'medium', or 'high'.
* 'medium' is the default value.
*/
reasoning_effort?: 'none' | 'default' | 'low' | 'medium' | 'high' | null;

Expand Down Expand Up @@ -1707,6 +1722,34 @@ export interface ChatCompletionCreateParamsBase {
}

export namespace CompletionCreateParams {
/**
* Custom configuration of models and tools for Compound.
*/
export interface CompoundCustom {
models?: CompoundCustom.Models | null;
}

export namespace CompoundCustom {
export interface Models {
/**
* Custom model to use for answering.
*/
answering_model?: string | null;

/**
* Custom model to use for reasoning.
*/
reasoning_model?: string | null;
}
}

export interface Document {
/**
* The text content of the document.
*/
text: string;
}

/**
* @deprecated
*/
Expand Down
2 changes: 1 addition & 1 deletion src/version.ts
Original file line number Diff line number Diff line change
@@ -1 +1 @@
export const VERSION = '0.30.0'; // x-release-please-version
export const VERSION = '0.31.0'; // x-release-please-version
2 changes: 2 additions & 0 deletions tests/api-resources/chat/completions.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ describe('resource completions', () => {
const response = await client.chat.completions.create({
messages: [{ content: 'string', role: 'system', name: 'name' }],
model: 'meta-llama/llama-4-scout-17b-16e-instruct',
compound_custom: { models: { answering_model: 'answering_model', reasoning_model: 'reasoning_model' } },
documents: [{ text: 'text' }],
exclude_domains: ['string'],
frequency_penalty: -2,
function_call: 'none',
Expand Down
Loading