Skip to content

Commit 1c69897

Browse files
fix: update example model from decommissioned models to gpt-oss-20b
1 parent: 523355d · commit: 1c69897

File tree

2 files changed

+9
-9
lines changed

2 files changed

+9
-9
lines changed

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1,4 +1,4 @@
 configured_endpoints: 17
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-a95a90928412afdb9cf5101b7fbb67ef2abbc4ecaa51ff18fa04643f9e8d2c95.yml
 openapi_spec_hash: d2e5cb1562a2b2beb8673256252b9bf5
-config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89
+config_hash: 961b4995e909aef11a454befa56ad3d2

README.md

Lines changed: 8 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -28,7 +28,7 @@ const client = new Groq({

 const chatCompletion = await client.chat.completions.create({
   messages: [{ role: 'user', content: 'Explain the importance of low latency LLMs' }],
-  model: 'llama3-8b-8192',
+  model: 'openai/gpt-oss-20b',
 });

 console.log(chatCompletion.choices[0].message.content);
@@ -51,7 +51,7 @@ const params: Groq.Chat.CompletionCreateParams = {
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Explain the importance of low latency LLMs' },
   ],
-  model: 'llama3-8b-8192',
+  model: 'openai/gpt-oss-20b',
 };
 const chatCompletion: Groq.Chat.ChatCompletion = await client.chat.completions.create(params);
 ```
@@ -117,7 +117,7 @@ const chatCompletion = await client.chat.completions
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Explain the importance of low latency LLMs' },
   ],
-  model: 'llama3-8b-8192',
+  model: 'openai/gpt-oss-20b',
 })
 .catch(async (err) => {
   if (err instanceof Groq.APIError) {
@@ -159,7 +159,7 @@ const client = new Groq({
 });

 // Or, configure per-request:
-await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'llama3-8b-8192' }, {
+await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'openai/gpt-oss-20b' }, {
   maxRetries: 5,
 });
 ```
@@ -176,7 +176,7 @@ const client = new Groq({
 });

 // Override per-request:
-await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'llama3-8b-8192' }, {
+await client.chat.completions.create({ messages: [{ role: 'system', content: 'You are a helpful assistant.' }, { role: 'user', content: 'Explain the importance of low latency LLMs' }], model: 'openai/gpt-oss-20b' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -203,7 +203,7 @@ const response = await client.chat.completions
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Explain the importance of low latency LLMs' },
   ],
-  model: 'llama3-8b-8192',
+  model: 'openai/gpt-oss-20b',
 })
 .asResponse();
 console.log(response.headers.get('X-My-Header'));
@@ -215,7 +215,7 @@ const { data: chatCompletion, response: raw } = await client.chat.completions
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Explain the importance of low latency LLMs' },
   ],
-  model: 'llama3-8b-8192',
+  model: 'openai/gpt-oss-20b',
 })
 .withResponse();
 console.log(raw.headers.get('X-My-Header'));
@@ -329,7 +329,7 @@ await client.chat.completions.create(
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Explain the importance of low latency LLMs' },
   ],
-  model: 'llama3-8b-8192',
+  model: 'openai/gpt-oss-20b',
 },
 {
   httpAgent: new http.Agent({ keepAlive: false }),

0 commit comments

Comments (0)