Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
78 changes: 78 additions & 0 deletions core/bifrost.go
Original file line number Diff line number Diff line change
Expand Up @@ -1260,6 +1260,84 @@ func (bifrost *Bifrost) BatchResultsRequest(ctx context.Context, req *schemas.Bi
return response, nil
}

// BatchDeleteRequest deletes a batch job on the provider named in req.
// It validates the request, resolves the provider and its configuration,
// selects an API key when the provider requires one, and executes the delete
// with the configured retry policy. All error paths — including early
// validation failures — populate ExtraFields (RequestType and, when known,
// Provider) so clients can correlate errors in logs and handlers.
func (bifrost *Bifrost) BatchDeleteRequest(ctx context.Context, req *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
	if req == nil {
		return nil, &schemas.BifrostError{
			IsBifrostError: false,
			Error: &schemas.ErrorField{
				Message: "batch delete request is nil",
			},
			ExtraFields: schemas.BifrostErrorExtraFields{
				RequestType: schemas.BatchDeleteRequest,
			},
		}
	}
	if req.Provider == "" {
		return nil, &schemas.BifrostError{
			IsBifrostError: false,
			Error: &schemas.ErrorField{
				Message: "provider is required for batch delete request",
			},
			ExtraFields: schemas.BifrostErrorExtraFields{
				RequestType: schemas.BatchDeleteRequest,
			},
		}
	}
	if req.BatchID == "" {
		return nil, &schemas.BifrostError{
			IsBifrostError: false,
			Error: &schemas.ErrorField{
				Message: "batch_id is required for batch delete request",
			},
			ExtraFields: schemas.BifrostErrorExtraFields{
				RequestType: schemas.BatchDeleteRequest,
				Provider:    req.Provider,
			},
		}
	}
	if ctx == nil {
		ctx = bifrost.ctx
	}

	provider := bifrost.getProviderByKey(req.Provider)
	if provider == nil {
		return nil, &schemas.BifrostError{
			IsBifrostError: false,
			Error: &schemas.ErrorField{
				Message: "provider not found for batch delete request",
			},
			ExtraFields: schemas.BifrostErrorExtraFields{
				RequestType: schemas.BatchDeleteRequest,
				Provider:    req.Provider,
			},
		}
	}

	config, err := bifrost.account.GetConfigForProvider(req.Provider)
	if err != nil {
		bifrostErr := newBifrostErrorFromMsg(fmt.Sprintf("failed to get config for provider %s: %v", req.Provider, err.Error()))
		bifrostErr.ExtraFields = schemas.BifrostErrorExtraFields{
			RequestType: schemas.BatchDeleteRequest,
			Provider:    req.Provider,
		}
		return nil, bifrostErr
	}
	if config == nil {
		bifrostErr := newBifrostErrorFromMsg(fmt.Sprintf("config is nil for provider %s", req.Provider))
		bifrostErr.ExtraFields = schemas.BifrostErrorExtraFields{
			RequestType: schemas.BatchDeleteRequest,
			Provider:    req.Provider,
		}
		return nil, bifrostErr
	}

	// Determine the base provider type for key requirement checks; custom
	// providers inherit the key rules of the provider they proxy.
	baseProvider := req.Provider
	if config.CustomProviderConfig != nil && config.CustomProviderConfig.BaseProviderType != "" {
		baseProvider = config.CustomProviderConfig.BaseProviderType
	}

	var key schemas.Key
	if providerRequiresKey(baseProvider, config.CustomProviderConfig) {
		keys, keyErr := bifrost.getAllSupportedKeys(&ctx, req.Provider, baseProvider)
		if keyErr != nil {
			return nil, newBifrostError(keyErr)
		}
		if len(keys) > 0 {
			key = keys[0]
		}
	}

	response, bifrostErr := executeRequestWithRetries(&ctx, config, func() (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
		return provider.BatchDelete(ctx, key, req)
	}, schemas.BatchDeleteRequest, req.Provider, "")
	if bifrostErr != nil {
		bifrostErr.ExtraFields = schemas.BifrostErrorExtraFields{
			RequestType: schemas.BatchDeleteRequest,
			Provider:    req.Provider,
		}
		return nil, bifrostErr
	}
	return response, nil
}
Comment on lines +1263 to +1339
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Add ExtraFields.RequestType/Provider to early validation errors for BatchDeleteRequest.
Right now nil/provider/batch_id validation returns don’t populate ExtraFields, which makes client-side error handling/log correlation weaker.

 func (bifrost *Bifrost) BatchDeleteRequest(ctx context.Context, req *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
 	if req == nil {
 		return nil, &schemas.BifrostError{
 			IsBifrostError: false,
 			Error: &schemas.ErrorField{
 				Message: "batch delete request is nil",
 			},
+			ExtraFields: schemas.BifrostErrorExtraFields{
+				RequestType: schemas.BatchDeleteRequest,
+			},
 		}
 	}
 	if req.Provider == "" {
 		return nil, &schemas.BifrostError{
 			IsBifrostError: false,
 			Error: &schemas.ErrorField{
 				Message: "provider is required for batch delete request",
 			},
+			ExtraFields: schemas.BifrostErrorExtraFields{
+				RequestType: schemas.BatchDeleteRequest,
+			},
 		}
 	}
 	if req.BatchID == "" {
 		return nil, &schemas.BifrostError{
 			IsBifrostError: false,
 			Error: &schemas.ErrorField{
 				Message: "batch_id is required for batch delete request",
 			},
+			ExtraFields: schemas.BifrostErrorExtraFields{
+				RequestType: schemas.BatchDeleteRequest,
+				Provider:    req.Provider,
+			},
 		}
 	}
🤖 Prompt for AI Agents
In core/bifrost.go around lines 1249 to 1325, the early validation and
pre-flight error returns in BatchDeleteRequest do not populate
BifrostError.ExtraFields (RequestType and Provider); update each early return
(nil req, missing provider, missing batch_id, provider not found, and any
config-get errors before executeRequestWithRetries) to set ExtraFields =
schemas.BifrostErrorExtraFields{RequestType: schemas.BatchDeleteRequest,
Provider: <use req.Provider when available or empty string>} so clients can
correlate errors by request type and provider; ensure the
newBifrostError/newBifrostErrorFromMsg returns include these ExtraFields before
returning.


// FileUploadRequest uploads a file to the specified provider.
func (bifrost *Bifrost) FileUploadRequest(ctx context.Context, req *schemas.BifrostFileUploadRequest) (*schemas.BifrostFileUploadResponse, *schemas.BifrostError) {
if req == nil {
Expand Down
5 changes: 5 additions & 0 deletions core/providers/anthropic/batch.go
Original file line number Diff line number Diff line change
Expand Up @@ -377,3 +377,8 @@ func formatAnthropicTimestamp(unixTime int64) string {
}
return time.Unix(unixTime, 0).UTC().Format(time.RFC3339)
}

// BatchDelete is not supported by Anthropic provider.
// Parameters are blank-identified to match the other unsupported batch stubs.
func (provider *AnthropicProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
	return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
Comment on lines +380 to +384
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Build break: context.Context used but context isn’t imported.

BatchDelete uses context.Context, but the import block only includes time, providerUtils, schemas, fasthttp. Add the missing import.

 import (
+	"context"
 	"time"

 	providerUtils "github.com/maximhq/bifrost/core/providers/utils"
 	"github.com/maximhq/bifrost/core/schemas"
 	"github.com/valyala/fasthttp"
 )

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In core/providers/anthropic/batch.go around lines 377 to 381, the BatchDelete
signature references context.Context but the file's import block doesn't include
the "context" package; add "context" to the imports for this file (alongside
time, providerUtils, schemas, fasthttp) so the code compiles.

5 changes: 5 additions & 0 deletions core/providers/cerebras/cerebras.go
Original file line number Diff line number Diff line change
Expand Up @@ -260,3 +260,8 @@ func (provider *CerebrasProvider) BatchCancel(_ context.Context, _ schemas.Key,
func (provider *CerebrasProvider) BatchResults(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchResultsRequest) (*schemas.BifrostBatchResultsResponse, *schemas.BifrostError) {
return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchResultsRequest, provider.GetProviderKey())
}

// BatchDelete is not supported by Cerebras provider.
// Parameters are blank-identified to match the other unsupported batch stubs.
func (provider *CerebrasProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
	return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
Comment on lines +264 to +267
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

cat -n core/providers/cerebras/cerebras.go

Repository: maximhq/bifrost

Length of output: 15578


Use _ for unused parameters in unsupported stub to match recent batch operation methods.

- func (provider *CerebrasProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
+ func (provider *CerebrasProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
  	return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
  }

All other batch operation stubs (BatchCreate, BatchList, BatchRetrieve, BatchCancel, BatchResults) and file operation stubs consistently use _ for unused parameters. BatchDelete should follow the same pattern.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
// BatchDelete is not supported by Cerebras provider.
func (provider *CerebrasProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
// BatchDelete is not supported by Cerebras provider.
func (provider *CerebrasProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
🤖 Prompt for AI Agents
In core/providers/cerebras/cerebras.go around lines 264 to 267, the BatchDelete
stub lists unused parameters by name; change the method signature to use
underscores for unused parameters (e.g., replace named params with _ types) to
match the other batch operation stubs, leaving the body returning the
unsupported operation error unchanged.

6 changes: 6 additions & 0 deletions core/providers/cohere/cohere.go
Original file line number Diff line number Diff line change
Expand Up @@ -866,6 +866,12 @@ func (provider *CohereProvider) BatchResults(_ context.Context, _ schemas.Key, _
return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchResultsRequest, provider.GetProviderKey())
}

// BatchDelete is not supported by Cohere provider.
// Parameters are blank-identified to match the other unsupported batch stubs.
func (provider *CohereProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
	return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
Comment on lines +869 to +872
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fix unused parameters in Cohere BatchDelete stub.

ctx, key, and request are unused and will cause a compile failure. Match the surrounding unsupported methods:

-// BatchDelete is not supported by Cohere provider.
-func (provider *CohereProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
+// BatchDelete is not supported by Cohere provider.
+func (provider *CohereProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
 	return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
// BatchDelete is not supported by Cohere provider.
func (provider *CohereProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
// BatchDelete is not supported by Cohere provider.
func (provider *CohereProvider) BatchDelete(_ context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
return nil, providerUtils.NewUnsupportedOperationError(schemas.BatchDeleteRequest, provider.GetProviderKey())
}
🤖 Prompt for AI Agents
In core/providers/cohere/cohere.go around lines 869-872, the BatchDelete stub
declares named parameters ctx, key, and request but does not use them causing a
compile error; change the signature to use unnamed parameters or blank
identifiers (e.g., func (provider *CohereProvider) BatchDelete(_
context.Context, _ schemas.Key, _ *schemas.BifrostBatchDeleteRequest) ...) so
the parameters are ignored, keeping the same return values and body (returning
the unsupported operation error) to match surrounding unsupported methods.



// FileUpload is not supported by Cohere provider.
func (provider *CohereProvider) FileUpload(_ context.Context, _ schemas.Key, _ *schemas.BifrostFileUploadRequest) (*schemas.BifrostFileUploadResponse, *schemas.BifrostError) {
return nil, providerUtils.NewUnsupportedOperationError(schemas.FileUploadRequest, provider.GetProviderKey())
Expand Down
143 changes: 143 additions & 0 deletions core/providers/gemini/batch.go
Original file line number Diff line number Diff line change
Expand Up @@ -240,3 +240,146 @@ func extractGeminiUsageMetadata(geminiResponse *GenerateContentResponse) (int, i
}
return inputTokens, outputTokens, totalTokens
}

// ==================== SDK RESPONSE CONVERTERS ====================
// These functions convert Bifrost batch responses to Google GenAI SDK format.

// ToGeminiJobState converts Bifrost batch status to Gemini SDK job state.
// Unrecognized statuses map to the pending state.
func ToGeminiJobState(status schemas.BatchStatus) string {
	switch status {
	case schemas.BatchStatusInProgress, schemas.BatchStatusFinalizing:
		return GeminiJobStateRunning
	case schemas.BatchStatusCompleted:
		return GeminiJobStateSucceeded
	case schemas.BatchStatusFailed, schemas.BatchStatusExpired:
		return GeminiJobStateFailed
	case schemas.BatchStatusCancelling:
		return GeminiJobStateCancelling
	case schemas.BatchStatusCancelled:
		return GeminiJobStateCancelled
	case schemas.BatchStatusValidating:
		return GeminiJobStatePending
	default:
		return GeminiJobStatePending
	}
}
Comment on lines +248 to +269
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Fix BatchStats computation to avoid negative pending counts

The SDK converters generally look good, but there’s a corner case in the stats math:

  • In BatchRetrieve, BifrostBatchRetrieveResponse.RequestCounts is populated with Completed and Failed, while Total is left at its zero value.
  • ToGeminiBatchRetrieveResponse and ToGeminiBatchListResponse currently use resp.RequestCounts.Total directly to derive RequestCount and PendingRequestCount.

When Total == 0 and Completed > 0, PendingRequestCount = Total - Completed becomes negative, which is invalid.

Consider defensively deriving totals and pending counts, for example:

-	result.Metadata = &GeminiBatchMetadata{
-		Name:       resp.ID,
-		State:      ToGeminiJobState(resp.Status),
-		CreateTime: time.Unix(resp.CreatedAt, 0).Format(time.RFC3339),
-		BatchStats: &GeminiBatchStats{
-			RequestCount:           resp.RequestCounts.Total,
-			PendingRequestCount:    resp.RequestCounts.Total - resp.RequestCounts.Completed,
-			SuccessfulRequestCount: resp.RequestCounts.Completed - resp.RequestCounts.Failed,
-		},
-	}
+	total := resp.RequestCounts.Total
+	if total == 0 {
+		total = resp.RequestCounts.Completed + resp.RequestCounts.Failed
+	}
+	success := resp.RequestCounts.Completed
+	if success < 0 {
+		success = 0
+	}
+	pending := total - (success + resp.RequestCounts.Failed)
+	if pending < 0 {
+		pending = 0
+	}
+
+	result.Metadata = &GeminiBatchMetadata{
+		Name:       resp.ID,
+		State:      ToGeminiJobState(resp.Status),
+		CreateTime: time.Unix(resp.CreatedAt, 0).Format(time.RFC3339),
+		BatchStats: &GeminiBatchStats{
+			RequestCount:           total,
+			PendingRequestCount:    pending,
+			SuccessfulRequestCount: success,
+		},
+	}

and mirror the same pattern inside ToGeminiBatchListResponse when populating BatchStats, so the invariants RequestCount ≥ 0, PendingRequestCount ≥ 0, and RequestCount ≈ success + failures + pending always hold.

Also applies to: 899-919, 941-958

🤖 Prompt for AI Agents
core/providers/gemini/batch.go lines ~847-868 (and similarly at 899-919,
941-958): the current stats math can produce negative PendingRequestCount when
resp.RequestCounts.Total is zero but Completed/Failed are non-zero; instead
derive totals defensively: compute total := resp.RequestCounts.Total; if total
== 0 { total = resp.RequestCounts.Completed + resp.RequestCounts.Failed +
resp.RequestCounts.Pending }; compute pending := total -
resp.RequestCounts.Completed - resp.RequestCounts.Failed; if pending < 0 {
pending = 0 }; then set RequestCount = total and PendingRequestCount = pending
(ensuring RequestCount ≥ 0, PendingRequestCount ≥ 0 and RequestCount ≈ success +
failures + pending). Apply the same change to ToGeminiBatchListResponse and any
other places noted.


// ToGeminiBatchJobResponse converts a BifrostBatchCreateResponse to Gemini SDK format.
func ToGeminiBatchJobResponse(resp *schemas.BifrostBatchCreateResponse) *GeminiBatchJobResponseSDK {
if resp == nil {
return nil
}

result := &GeminiBatchJobResponseSDK{
Name: resp.ID,
State: ToGeminiJobState(resp.Status),
}

// Add metadata if available
if resp.CreatedAt > 0 {
result.Metadata = &GeminiBatchMetadata{
Name: resp.ID,
State: ToGeminiJobState(resp.Status),
CreateTime: time.Unix(resp.CreatedAt, 0).Format(time.RFC3339),
BatchStats: &GeminiBatchStats{
RequestCount: resp.RequestCounts.Total,
PendingRequestCount: resp.RequestCounts.Total - resp.RequestCounts.Completed,
SuccessfulRequestCount: resp.RequestCounts.Completed - resp.RequestCounts.Failed,
},
Comment on lines +288 to +292
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Fix BatchStats computation to avoid negative counts.

When resp.RequestCounts.Total is zero but Completed or Failed are non-zero, the computed PendingRequestCount = Total - Completed becomes negative. Derive totals defensively to maintain invariants.

Apply this pattern to all three converters (ToGeminiBatchJobResponse, ToGeminiBatchRetrieveResponse, ToGeminiBatchListResponse):

-		BatchStats: &GeminiBatchStats{
-			RequestCount:           resp.RequestCounts.Total,
-			PendingRequestCount:    resp.RequestCounts.Total - resp.RequestCounts.Completed,
-			SuccessfulRequestCount: resp.RequestCounts.Completed - resp.RequestCounts.Failed,
-		},
+		// Derive totals defensively to prevent negative counts
+		total := resp.RequestCounts.Total
+		if total == 0 && (resp.RequestCounts.Completed > 0 || resp.RequestCounts.Failed > 0) {
+			total = resp.RequestCounts.Completed + resp.RequestCounts.Failed
+		}
+		pending := total - resp.RequestCounts.Completed - resp.RequestCounts.Failed
+		if pending < 0 {
+			pending = 0
+		}
+		success := resp.RequestCounts.Completed - resp.RequestCounts.Failed
+		if success < 0 {
+			success = 0
+		}
+		BatchStats: &GeminiBatchStats{
+			RequestCount:           total,
+			PendingRequestCount:    pending,
+			SuccessfulRequestCount: success,
+		},

Also applies to: 316-320, 355-359

🤖 Prompt for AI Agents
In core/providers/gemini/batch.go around lines 289-293 (and similarly at 316-320
and 355-359), the BatchStats math can produce negative counts when telemetry is
inconsistent; replace the direct uses with defensive computations: set
RequestCount = max(resp.RequestCounts.Total, resp.RequestCounts.Completed,
resp.RequestCounts.Failed), PendingRequestCount = max(0, RequestCount -
resp.RequestCounts.Completed), and SuccessfulRequestCount = max(0,
resp.RequestCounts.Completed - resp.RequestCounts.Failed). Apply this same
pattern in ToGeminiBatchJobResponse, ToGeminiBatchRetrieveResponse, and
ToGeminiBatchListResponse so all three converters compute non-negative,
consistent batch stats.

}
}

return result
}

// ToGeminiBatchRetrieveResponse converts a BifrostBatchRetrieveResponse to Gemini SDK format.
func ToGeminiBatchRetrieveResponse(resp *schemas.BifrostBatchRetrieveResponse) *GeminiBatchJobResponseSDK {
if resp == nil {
return nil
}

result := &GeminiBatchJobResponseSDK{
Name: resp.ID,
State: ToGeminiJobState(resp.Status),
}

// Add metadata
result.Metadata = &GeminiBatchMetadata{
Name: resp.ID,
State: ToGeminiJobState(resp.Status),
CreateTime: time.Unix(resp.CreatedAt, 0).Format(time.RFC3339),
BatchStats: &GeminiBatchStats{
Comment on lines +310 to +315
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Avoid emitting 1970 timestamps when CreatedAt == 0.

In ToGeminiBatchRetrieveResponse / ToGeminiBatchListResponse, consider guarding CreatedAt > 0 (like ToGeminiBatchJobResponse already does) before setting CreateTime.

Also applies to: 353-358

🤖 Prompt for AI Agents
In core/providers/gemini/batch.go around lines 314-319 (and similarly 353-358),
the code unconditionally formats resp.CreatedAt which emits a 1970 timestamp
when CreatedAt == 0; update both places to check if resp.CreatedAt > 0 before
setting result.Metadata.CreateTime (e.g., only set CreateTime =
time.Unix(resp.CreatedAt, 0).Format(time.RFC3339) when > 0), otherwise leave
CreateTime empty or nil to avoid the 1970-01-01 value.

RequestCount: resp.RequestCounts.Total,
PendingRequestCount: resp.RequestCounts.Total - resp.RequestCounts.Completed,
SuccessfulRequestCount: resp.RequestCounts.Completed - resp.RequestCounts.Failed,
},
}

if resp.CompletedAt != nil {
result.Metadata.EndTime = time.Unix(*resp.CompletedAt, 0).Format(time.RFC3339)
}

// Add output file info if available
if resp.OutputFileID != nil {
result.Dest = &GeminiBatchDest{
FileName: *resp.OutputFileID,
}
}

return result
}

// ToGeminiBatchListResponse converts a BifrostBatchListResponse to Gemini SDK format.
// Per-job batch statistics are derived defensively so pending/success counts
// never go negative when Total is missing or inconsistent, and CreateTime is
// only emitted when CreatedAt is set (avoids 1970-01-01 timestamps).
func ToGeminiBatchListResponse(resp *schemas.BifrostBatchListResponse) *GeminiBatchListResponseSDK {
	if resp == nil {
		return nil
	}

	jobs := make([]GeminiBatchJobResponseSDK, 0, len(resp.Data))
	for _, batch := range resp.Data {
		job := GeminiBatchJobResponseSDK{
			Name:  batch.ID,
			State: ToGeminiJobState(batch.Status),
		}

		// Derive totals defensively: never report negative pending/success counts.
		total := batch.RequestCounts.Total
		if total < batch.RequestCounts.Completed+batch.RequestCounts.Failed {
			total = batch.RequestCounts.Completed + batch.RequestCounts.Failed
		}
		pending := total - batch.RequestCounts.Completed - batch.RequestCounts.Failed
		if pending < 0 {
			pending = 0
		}
		success := batch.RequestCounts.Completed - batch.RequestCounts.Failed
		if success < 0 {
			success = 0
		}

		// Add metadata
		job.Metadata = &GeminiBatchMetadata{
			Name:  batch.ID,
			State: ToGeminiJobState(batch.Status),
			BatchStats: &GeminiBatchStats{
				RequestCount:           total,
				PendingRequestCount:    pending,
				SuccessfulRequestCount: success,
			},
		}
		// Guard against emitting the Unix epoch when CreatedAt is unset.
		if batch.CreatedAt > 0 {
			job.Metadata.CreateTime = time.Unix(batch.CreatedAt, 0).Format(time.RFC3339)
		}

		jobs = append(jobs, job)
	}

	result := &GeminiBatchListResponseSDK{
		BatchJobs: jobs,
	}

	if resp.NextCursor != nil {
		result.NextPageToken = *resp.NextCursor
	}

	return result
}

// ToGeminiBatchCancelResponse converts a BifrostBatchCancelResponse to Gemini SDK format.
// Returns nil for a nil input.
func ToGeminiBatchCancelResponse(resp *schemas.BifrostBatchCancelResponse) *GeminiBatchJobResponseSDK {
	if resp == nil {
		return nil
	}

	job := GeminiBatchJobResponseSDK{
		Name:  resp.ID,
		State: ToGeminiJobState(resp.Status),
	}
	return &job
}
59 changes: 59 additions & 0 deletions core/providers/gemini/gemini.go
Original file line number Diff line number Diff line change
Expand Up @@ -2101,6 +2101,65 @@ func (provider *GeminiProvider) BatchResults(ctx context.Context, key schemas.Ke
return batchResultsResp, nil
}

// BatchDelete deletes a batch job for Gemini.
// It issues an HTTP DELETE to the batches endpoint, treating both 200 and 204
// as success. Error responses are parsed with request metadata attached so
// they carry the same provider/request-type shape as the other Gemini
// operations in this file.
func (provider *GeminiProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
	if err := providerUtils.CheckOperationAllowed(schemas.Gemini, provider.customProviderConfig, schemas.BatchDeleteRequest); err != nil {
		return nil, err
	}

	providerName := provider.GetProviderKey()

	if request.BatchID == "" {
		return nil, providerUtils.NewBifrostOperationError("batch_id is required", nil, providerName)
	}

	// Create HTTP request
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	// Build URL for delete operation; the caller may pass either a bare ID or
	// the fully-qualified "batches/<id>" resource name.
	batchID := request.BatchID
	var url string
	if strings.HasPrefix(batchID, "batches/") {
		url = fmt.Sprintf("%s/%s", provider.networkConfig.BaseURL, batchID)
	} else {
		url = fmt.Sprintf("%s/batches/%s", provider.networkConfig.BaseURL, batchID)
	}

	provider.logger.Debug("gemini batch delete url: " + url)
	providerUtils.SetExtraHeaders(ctx, req, provider.networkConfig.ExtraHeaders, nil)
	req.SetRequestURI(url)
	req.Header.SetMethod(http.MethodDelete)
	if key.Value != "" {
		req.Header.Set("x-goog-api-key", key.Value)
	}
	req.Header.SetContentType("application/json")

	// Make request
	latency, bifrostErr := providerUtils.MakeRequestWithContext(ctx, provider.client, req, resp)
	if bifrostErr != nil {
		return nil, bifrostErr
	}

	// Handle response; pass request metadata so the parsed error matches the
	// shape produced by the other Gemini operations.
	if resp.StatusCode() != fasthttp.StatusOK && resp.StatusCode() != fasthttp.StatusNoContent {
		return nil, parseGeminiError(resp, &providerUtils.RequestMetadata{
			Provider:    providerName,
			RequestType: schemas.BatchDeleteRequest,
		})
	}

	return &schemas.BifrostBatchDeleteResponse{
		ID:      request.BatchID,
		Object:  "batch",
		Deleted: true,
		ExtraFields: schemas.BifrostResponseExtraFields{
			RequestType: schemas.BatchDeleteRequest,
			Provider:    providerName,
			Latency:     latency.Milliseconds(),
		},
	}, nil
}
Comment on lines +2104 to +2161
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fix likely compile error + preserve error metadata in Gemini BatchDelete.

parseGeminiError is called with a different arity here than elsewhere in this file; also you probably want RequestMetadata for consistent error shaping.

 	// Handle response
 	if resp.StatusCode() != fasthttp.StatusOK && resp.StatusCode() != fasthttp.StatusNoContent {
-		return nil, parseGeminiError(resp)
+		return nil, parseGeminiError(resp, &providerUtils.RequestMetadata{
+			Provider:    providerName,
+			RequestType: schemas.BatchDeleteRequest,
+		})
 	}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
// BatchDelete deletes a batch job for Gemini.
func (provider *GeminiProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
if err := providerUtils.CheckOperationAllowed(schemas.Gemini, provider.customProviderConfig, schemas.BatchDeleteRequest); err != nil {
return nil, err
}
providerName := provider.GetProviderKey()
if request.BatchID == "" {
return nil, providerUtils.NewBifrostOperationError("batch_id is required", nil, providerName)
}
// Create HTTP request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseRequest(req)
defer fasthttp.ReleaseResponse(resp)
// Build URL for delete operation
batchID := request.BatchID
var url string
if strings.HasPrefix(batchID, "batches/") {
url = fmt.Sprintf("%s/%s", provider.networkConfig.BaseURL, batchID)
} else {
url = fmt.Sprintf("%s/batches/%s", provider.networkConfig.BaseURL, batchID)
}
provider.logger.Debug("gemini batch delete url: " + url)
providerUtils.SetExtraHeaders(ctx, req, provider.networkConfig.ExtraHeaders, nil)
req.SetRequestURI(url)
req.Header.SetMethod(http.MethodDelete)
if key.Value != "" {
req.Header.Set("x-goog-api-key", key.Value)
}
req.Header.SetContentType("application/json")
// Make request
latency, bifrostErr := providerUtils.MakeRequestWithContext(ctx, provider.client, req, resp)
if bifrostErr != nil {
return nil, bifrostErr
}
// Handle response
if resp.StatusCode() != fasthttp.StatusOK && resp.StatusCode() != fasthttp.StatusNoContent {
return nil, parseGeminiError(resp)
}
return &schemas.BifrostBatchDeleteResponse{
ID: request.BatchID,
Object: "batch",
Deleted: true,
ExtraFields: schemas.BifrostResponseExtraFields{
RequestType: schemas.BatchDeleteRequest,
Provider: providerName,
Latency: latency.Milliseconds(),
},
}, nil
}
// BatchDelete deletes a batch job for Gemini.
func (provider *GeminiProvider) BatchDelete(ctx context.Context, key schemas.Key, request *schemas.BifrostBatchDeleteRequest) (*schemas.BifrostBatchDeleteResponse, *schemas.BifrostError) {
if err := providerUtils.CheckOperationAllowed(schemas.Gemini, provider.customProviderConfig, schemas.BatchDeleteRequest); err != nil {
return nil, err
}
providerName := provider.GetProviderKey()
if request.BatchID == "" {
return nil, providerUtils.NewBifrostOperationError("batch_id is required", nil, providerName)
}
// Create HTTP request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer fasthttp.ReleaseRequest(req)
defer fasthttp.ReleaseResponse(resp)
// Build URL for delete operation
batchID := request.BatchID
var url string
if strings.HasPrefix(batchID, "batches/") {
url = fmt.Sprintf("%s/%s", provider.networkConfig.BaseURL, batchID)
} else {
url = fmt.Sprintf("%s/batches/%s", provider.networkConfig.BaseURL, batchID)
}
provider.logger.Debug("gemini batch delete url: " + url)
providerUtils.SetExtraHeaders(ctx, req, provider.networkConfig.ExtraHeaders, nil)
req.SetRequestURI(url)
req.Header.SetMethod(http.MethodDelete)
if key.Value != "" {
req.Header.Set("x-goog-api-key", key.Value)
}
req.Header.SetContentType("application/json")
// Make request
latency, bifrostErr := providerUtils.MakeRequestWithContext(ctx, provider.client, req, resp)
if bifrostErr != nil {
return nil, bifrostErr
}
// Handle response
if resp.StatusCode() != fasthttp.StatusOK && resp.StatusCode() != fasthttp.StatusNoContent {
return nil, parseGeminiError(resp, &providerUtils.RequestMetadata{
Provider: providerName,
RequestType: schemas.BatchDeleteRequest,
})
}
return &schemas.BifrostBatchDeleteResponse{
ID: request.BatchID,
Object: "batch",
Deleted: true,
ExtraFields: schemas.BifrostResponseExtraFields{
RequestType: schemas.BatchDeleteRequest,
Provider: providerName,
Latency: latency.Milliseconds(),
},
}, nil
}
🤖 Prompt for AI Agents
In core/providers/gemini/gemini.go around lines 2093-2150, the call to
parseGeminiError(resp) here uses the wrong arity and omits RequestMetadata;
update the error path to call parseGeminiError with the same parameters used
elsewhere in this file (pass resp, providerName, and a RequestMetadata that sets
RequestType to schemas.BatchDeleteRequest) so the code compiles and the returned
error includes consistent provider and request metadata.


// FileUpload uploads a file to Gemini.
func (provider *GeminiProvider) FileUpload(ctx context.Context, key schemas.Key, request *schemas.BifrostFileUploadRequest) (*schemas.BifrostFileUploadResponse, *schemas.BifrostError) {
if err := providerUtils.CheckOperationAllowed(schemas.Gemini, provider.customProviderConfig, schemas.FileUploadRequest); err != nil {
Expand Down
Loading