chore: update a few add'l processor references to extractor (#4496)
shaper authored Jan 24, 2025
1 parent 149d2a5 commit 361fd08
Showing 4 changed files with 30 additions and 23 deletions.
6 changes: 6 additions & 0 deletions .changeset/short-actors-sort.md
@@ -0,0 +1,6 @@
+---
+'@ai-sdk/openai-compatible': patch
+'@ai-sdk/deepseek': patch
+---
+
+chore: update a few add'l processor references to extractor
4 changes: 2 additions & 2 deletions content/providers/02-openai-compatible-providers/index.mdx
@@ -193,7 +193,7 @@ Metadata extractors work with both streaming and non-streaming chat completions
Here's an example metadata extractor that captures both standard and custom provider data:

```typescript
-const MyMetadataExtractor: MetadataExtractor = {
+const myMetadataExtractor: MetadataExtractor = {
// Process complete, non-streaming responses
extractMetadata: ({ parsedBody }) => {
// You have access to the complete raw response
@@ -247,7 +247,7 @@ const provider = createOpenAICompatible({
name: 'my-provider',
apiKey: process.env.PROVIDER_API_KEY,
baseURL: 'https://api.provider.com/v1',
-metadataExtractor: MyMetadataExtractor,
+metadataExtractor: myMetadataExtractor,
});
```

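For orientation, here is a minimal sketch of what the renamed `myMetadataExtractor` could look like end to end. The raw-response field (`my_custom_field`) and the `'my-provider'` metadata key are illustrative assumptions, and the import assumes `MetadataExtractor` is exported from `@ai-sdk/openai-compatible` as the docs example implies; only the `extractMetadata({ parsedBody })` and `createStreamExtractor()` shapes come from this diff.

```typescript
import { type MetadataExtractor } from '@ai-sdk/openai-compatible';

// Sketch only: `my_custom_field` and the `'my-provider'` key are hypothetical.
const myMetadataExtractor: MetadataExtractor = {
  // Complete, non-streaming responses: read directly from the parsed body.
  extractMetadata: ({ parsedBody }) => {
    const body = parsedBody as { my_custom_field?: number } | null;
    if (body?.my_custom_field == null) return undefined;
    return { 'my-provider': { customField: body.my_custom_field } };
  },

  // Streaming responses: accumulate across chunks, then build the metadata.
  createStreamExtractor: () => {
    let customField: number | undefined;
    return {
      processChunk: (chunk: unknown) => {
        const parsed = chunk as { my_custom_field?: number } | null;
        if (parsed?.my_custom_field != null) {
          customField = parsed.my_custom_field;
        }
      },
      buildMetadata: () =>
        customField == null
          ? undefined
          : { 'my-provider': { customField } },
    };
  },
};
```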
36 changes: 18 additions & 18 deletions packages/deepseek/src/deepseek-metadata-extractor.test.ts
@@ -45,25 +45,25 @@ describe('buildMetadataFromResponse', () => {
});
});

-describe('streaming metadata processor', () => {
+describe('streaming metadata extractor', () => {
it('should process streaming chunks and build final metadata', () => {
-const processor = deepSeekMetadataExtractor.createStreamExtractor();
+const extractor = deepSeekMetadataExtractor.createStreamExtractor();

// Process initial chunks without usage data
-processor.processChunk({
+extractor.processChunk({
choices: [{ finish_reason: null }],
});

// Process final chunk with usage data
-processor.processChunk({
+extractor.processChunk({
choices: [{ finish_reason: 'stop' }],
usage: {
prompt_cache_hit_tokens: 100,
prompt_cache_miss_tokens: 50,
},
});

-const finalMetadata = processor.buildMetadata();
+const finalMetadata = extractor.buildMetadata();

expect(finalMetadata).toEqual({
deepseek: {
@@ -74,32 +74,32 @@ describe('streaming metadata processor', () => {
});

it('should handle streaming chunks without usage data', () => {
-const processor = deepSeekMetadataExtractor.createStreamExtractor();
+const extractor = deepSeekMetadataExtractor.createStreamExtractor();

-processor.processChunk({
+extractor.processChunk({
choices: [{ finish_reason: 'stop' }],
});

-const finalMetadata = processor.buildMetadata();
+const finalMetadata = extractor.buildMetadata();

expect(finalMetadata).toBeUndefined();
});

it('should handle invalid streaming chunks', () => {
-const processor = deepSeekMetadataExtractor.createStreamExtractor();
+const extractor = deepSeekMetadataExtractor.createStreamExtractor();

-processor.processChunk('invalid chunk');
+extractor.processChunk('invalid chunk');

-const finalMetadata = processor.buildMetadata();
+const finalMetadata = extractor.buildMetadata();

expect(finalMetadata).toBeUndefined();
});

it('should only capture usage data from final chunk with stop reason', () => {
-const processor = deepSeekMetadataExtractor.createStreamExtractor();
+const extractor = deepSeekMetadataExtractor.createStreamExtractor();

// Process chunk with usage but no stop reason
-processor.processChunk({
+extractor.processChunk({
choices: [{ finish_reason: null }],
usage: {
prompt_cache_hit_tokens: 50,
@@ -108,15 +108,15 @@ });
});

// Process final chunk with different usage data
-processor.processChunk({
+extractor.processChunk({
choices: [{ finish_reason: 'stop' }],
usage: {
prompt_cache_hit_tokens: 100,
prompt_cache_miss_tokens: 50,
},
});

-const finalMetadata = processor.buildMetadata();
+const finalMetadata = extractor.buildMetadata();

expect(finalMetadata).toEqual({
deepseek: {
@@ -127,17 +127,17 @@ });
});

it('should handle null values in usage data', () => {
-const processor = deepSeekMetadataExtractor.createStreamExtractor();
+const extractor = deepSeekMetadataExtractor.createStreamExtractor();

-processor.processChunk({
+extractor.processChunk({
choices: [{ finish_reason: 'stop' }],
usage: {
prompt_cache_hit_tokens: null,
prompt_cache_miss_tokens: 50,
},
});

-const finalMetadata = processor.buildMetadata();
+const finalMetadata = extractor.buildMetadata();

expect(finalMetadata).toEqual({
deepseek: {
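Reading these tests, the stream extractor captures usage only from the final chunk whose `finish_reason` is `'stop'` and returns `undefined` when no usage was seen. A rough sketch of logic consistent with that behavior follows; the metadata key names (`promptCacheHitTokens`, `promptCacheMissTokens`) and the `NaN` fallback for null values are assumptions, since the expected objects are truncated in this view.

```typescript
// Sketch of behavior consistent with the tests above; not the actual source.
type DeepSeekUsage = {
  prompt_cache_hit_tokens?: number | null;
  prompt_cache_miss_tokens?: number | null;
};

const createStreamExtractor = () => {
  let usage: DeepSeekUsage | undefined;

  return {
    processChunk: (chunk: unknown) => {
      const parsed = chunk as {
        choices?: Array<{ finish_reason?: string | null }>;
        usage?: DeepSeekUsage;
      } | null;
      // Only the final chunk (finish_reason === 'stop') contributes usage data;
      // earlier chunks and malformed input are ignored.
      if (parsed?.choices?.[0]?.finish_reason === 'stop' && parsed.usage != null) {
        usage = parsed.usage;
      }
    },
    buildMetadata: () =>
      usage == null
        ? undefined
        : {
            deepseek: {
              // Key names and the NaN fallback for nulls are assumptions.
              promptCacheHitTokens: usage.prompt_cache_hit_tokens ?? NaN,
              promptCacheMissTokens: usage.prompt_cache_miss_tokens ?? NaN,
            },
          },
  };
};
```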
@@ -21,10 +21,11 @@ export type MetadataExtractor = {
}) => LanguageModelV1ProviderMetadata | undefined;

/**
-* Creates a streaming metadata processor that can accumulate and process chunks
-* of a streaming response. Used to build metadata progressively during streaming.
+* Creates an extractor for handling streaming responses. The returned object provides
+* methods to process individual chunks and build the final metadata from the accumulated
+* stream data.
*
-* @returns A new StreamingMetadataProcessor instance
+* @returns An object with methods to process chunks and build metadata from a stream
*/
createStreamExtractor: () => {
/**
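Taken together, the type this file describes has roughly the following shape. The option and chunk parameter types beyond `parsedBody` are assumptions, as is the `@ai-sdk/provider` import path for `LanguageModelV1ProviderMetadata`.

```typescript
import type { LanguageModelV1ProviderMetadata } from '@ai-sdk/provider';

// Approximate shape of MetadataExtractor; details beyond this diff are assumptions.
export type MetadataExtractor = {
  // Extract provider metadata from a complete, non-streaming response body.
  extractMetadata: (options: {
    parsedBody: unknown;
  }) => LanguageModelV1ProviderMetadata | undefined;

  // Create an extractor for a streaming response: feed it chunks as they
  // arrive, then build the final metadata once the stream has finished.
  createStreamExtractor: () => {
    processChunk: (chunk: unknown) => void;
    buildMetadata: () => LanguageModelV1ProviderMetadata | undefined;
  };
};
```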
