Merged — changes from all commits (20 commits)
a895149
fix(claude): prevent final events when no content streamed
hkfires Dec 10, 2025
76c563d
fix(executor): increase buffer size for stream scanners to 50MB acros…
sususu98 Dec 10, 2025
1da03bf
Merge pull request #479 from router-for-me/claude
luispater Dec 11, 2025
a03d514
feat(registry): add thinking metadata for models
hkfires Dec 11, 2025
3ffd120
feat(runtime): add thinking config normalization
hkfires Dec 11, 2025
d06d0ea
fix(util): centralize reasoning effort normalization
hkfires Dec 11, 2025
169f429
fix(util): align reasoning effort handling with registry
hkfires Dec 11, 2025
519da2e
fix(runtime): validate reasoning effort levels
hkfires Dec 11, 2025
3a81ab2
fix(runtime): unify reasoning effort metadata overrides
hkfires Dec 11, 2025
007572b
fix(util): do not strip thinking suffix on registered models
hkfires Dec 11, 2025
f6300c7
fix(runtime): validate thinking config in iflow and qwen
hkfires Dec 11, 2025
21bbcec
docs(runtime): document reasoning effort precedence
hkfires Dec 11, 2025
6285459
fix(runtime): unify claude thinking config resolution
hkfires Dec 11, 2025
facfe7c
refactor(thinking): use bracket tags for thinking meta
hkfires Dec 11, 2025
2760989
Merge pull request #485 from router-for-me/think
luispater Dec 11, 2025
e79f65f
refactor(thinking): use parentheses for metadata suffix
hkfires Dec 11, 2025
88bdd25
fix(amp): set status on claude stream errors
hkfires Dec 11, 2025
564bcba
Merge pull request #487 from router-for-me/amp
luispater Dec 11, 2025
a74ee3f
Merge pull request #481 from sususu98/fix/increase-buffer-size
luispater Dec 11, 2025
4ce7c61
Merge branch 'main' into plus
luispater Dec 11, 2025
config.example.yaml (2 changes: 1 addition & 1 deletion)
@@ -105,7 +105,7 @@ ws-auth: false
# excluded-models:
# - "claude-opus-4-5-20251101" # exclude specific models (exact match)
# - "claude-3-*" # wildcard matching prefix (e.g. claude-3-7-sonnet-20250219)
# - "*-think" # wildcard matching suffix (e.g. claude-opus-4-5-thinking)
# - "*-thinking" # wildcard matching suffix (e.g. claude-opus-4-5-thinking)
# - "*haiku*" # wildcard matching substring (e.g. claude-3-5-haiku-20241022)

# Kiro (AWS CodeWhisperer) configuration
internal/registry/model_definitions.go (23 changes: 19 additions & 4 deletions)
@@ -16,6 +16,7 @@ func GetClaudeModels() []*ModelInfo {
DisplayName: "Claude 4.5 Haiku",
ContextLength: 200000,
MaxCompletionTokens: 64000,
// Thinking: not supported for Haiku models
},
{
ID: "claude-sonnet-4-5-20250929",
@@ -49,6 +50,7 @@ func GetClaudeModels() []*ModelInfo {
DisplayName: "Claude 4.1 Opus",
ContextLength: 200000,
MaxCompletionTokens: 32000,
Thinking: &ThinkingSupport{Min: 1024, Max: 100000, ZeroAllowed: false, DynamicAllowed: true},
},
{
ID: "claude-opus-4-20250514",
@@ -59,6 +61,7 @@ func GetClaudeModels() []*ModelInfo {
DisplayName: "Claude 4 Opus",
ContextLength: 200000,
MaxCompletionTokens: 32000,
Thinking: &ThinkingSupport{Min: 1024, Max: 100000, ZeroAllowed: false, DynamicAllowed: true},
},
{
ID: "claude-sonnet-4-20250514",
@@ -69,6 +72,7 @@ func GetClaudeModels() []*ModelInfo {
DisplayName: "Claude 4 Sonnet",
ContextLength: 200000,
MaxCompletionTokens: 64000,
Thinking: &ThinkingSupport{Min: 1024, Max: 100000, ZeroAllowed: false, DynamicAllowed: true},
},
{
ID: "claude-3-7-sonnet-20250219",
@@ -79,6 +83,7 @@ func GetClaudeModels() []*ModelInfo {
DisplayName: "Claude 3.7 Sonnet",
ContextLength: 128000,
MaxCompletionTokens: 8192,
Thinking: &ThinkingSupport{Min: 1024, Max: 100000, ZeroAllowed: false, DynamicAllowed: true},
},
{
ID: "claude-3-5-haiku-20241022",
@@ -89,6 +94,7 @@ func GetClaudeModels() []*ModelInfo {
DisplayName: "Claude 3.5 Haiku",
ContextLength: 128000,
MaxCompletionTokens: 8192,
// Thinking: not supported for Haiku models
},
}
}
@@ -476,6 +482,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"minimal", "low", "medium", "high"}},
},
{
ID: "gpt-5-codex",
@@ -489,6 +496,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
Comment on lines 496 to +499

medium

There are multiple instances of &ThinkingSupport{Levels: []string{"low", "medium", "high"}} and similar ThinkingSupport structs created throughout this file. To improve maintainability and reduce duplication, consider defining these common configurations as package-level variables (Go does not allow struct constants).

For example:

var thinkingLevelsLowMediumHigh = &ThinkingSupport{Levels: []string{"low", "medium", "high"}}

// ... in GetOpenAIModels()
{
    ID:                  "gpt-5-codex",
    // ...
    Thinking:            thinkingLevelsLowMediumHigh,
},

This would apply to several other model definitions in this file as well.

},
{
ID: "gpt-5-codex-mini",
@@ -502,6 +510,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
},
{
ID: "gpt-5.1",
@@ -515,6 +524,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high"}},
},
{
ID: "gpt-5.1-codex",
@@ -528,6 +538,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
},
{
ID: "gpt-5.1-codex-mini",
@@ -541,6 +552,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
},
{
ID: "gpt-5.1-codex-max",
@@ -554,6 +566,7 @@ func GetOpenAIModels() []*ModelInfo {
ContextLength: 400000,
MaxCompletionTokens: 128000,
SupportedParameters: []string{"tools"},
Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high", "xhigh"}},
},
}
}
@@ -610,6 +623,7 @@ func GetIFlowModels() []*ModelInfo {
DisplayName string
Description string
Created int64
Thinking *ThinkingSupport
}{
{ID: "tstars2.0", DisplayName: "TStars-2.0", Description: "iFlow TStars-2.0 multimodal assistant", Created: 1746489600},
{ID: "qwen3-coder-plus", DisplayName: "Qwen3-Coder-Plus", Description: "Qwen3 Coder Plus code generation", Created: 1753228800},
@@ -619,17 +633,17 @@ func GetIFlowModels() []*ModelInfo {
{ID: "kimi-k2-0905", DisplayName: "Kimi-K2-Instruct-0905", Description: "Moonshot Kimi K2 instruct 0905", Created: 1757030400},
{ID: "glm-4.6", DisplayName: "GLM-4.6", Description: "Zhipu GLM 4.6 general model", Created: 1759190400},
{ID: "kimi-k2", DisplayName: "Kimi-K2", Description: "Moonshot Kimi K2 general model", Created: 1752192000},
{ID: "kimi-k2-thinking", DisplayName: "Kimi-K2-Thinking", Description: "Moonshot Kimi K2 general model", Created: 1762387200},
{ID: "kimi-k2-thinking", DisplayName: "Kimi-K2-Thinking", Description: "Moonshot Kimi K2 thinking model", Created: 1762387200, Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}}},
{ID: "deepseek-v3.2-chat", DisplayName: "DeepSeek-V3.2", Description: "DeepSeek V3.2", Created: 1764576000},
{ID: "deepseek-v3.2", DisplayName: "DeepSeek-V3.2-Exp", Description: "DeepSeek V3.2 experimental", Created: 1759104000},
{ID: "deepseek-v3.1", DisplayName: "DeepSeek-V3.1-Terminus", Description: "DeepSeek V3.1 Terminus", Created: 1756339200},
{ID: "deepseek-r1", DisplayName: "DeepSeek-R1", Description: "DeepSeek reasoning model R1", Created: 1737331200},
{ID: "deepseek-r1", DisplayName: "DeepSeek-R1", Description: "DeepSeek reasoning model R1", Created: 1737331200, Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}}},
{ID: "deepseek-v3", DisplayName: "DeepSeek-V3-671B", Description: "DeepSeek V3 671B", Created: 1734307200},
{ID: "qwen3-32b", DisplayName: "Qwen3-32B", Description: "Qwen3 32B", Created: 1747094400},
{ID: "qwen3-235b-a22b-thinking-2507", DisplayName: "Qwen3-235B-A22B-Thinking", Description: "Qwen3 235B A22B Thinking (2507)", Created: 1753401600},
{ID: "qwen3-235b-a22b-thinking-2507", DisplayName: "Qwen3-235B-A22B-Thinking", Description: "Qwen3 235B A22B Thinking (2507)", Created: 1753401600, Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}}},
{ID: "qwen3-235b-a22b-instruct", DisplayName: "Qwen3-235B-A22B-Instruct", Description: "Qwen3 235B A22B Instruct", Created: 1753401600},
{ID: "qwen3-235b", DisplayName: "Qwen3-235B-A22B", Description: "Qwen3 235B A22B", Created: 1753401600},
{ID: "minimax-m2", DisplayName: "MiniMax-M2", Description: "MiniMax M2", Created: 1758672000},
{ID: "minimax-m2", DisplayName: "MiniMax-M2", Description: "MiniMax M2", Created: 1758672000, Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}}},
}
models := make([]*ModelInfo, 0, len(entries))
for _, entry := range entries {
@@ -641,6 +655,7 @@ func GetIFlowModels() []*ModelInfo {
Type: "iflow",
DisplayName: entry.DisplayName,
Description: entry.Description,
Thinking: entry.Thinking,
})
}
return models
internal/registry/model_registry.go (3 changes: 3 additions & 0 deletions)
@@ -63,6 +63,9 @@ type ThinkingSupport struct {
ZeroAllowed bool `json:"zero_allowed,omitempty"`
// DynamicAllowed indicates whether -1 is a valid value (dynamic thinking budget).
DynamicAllowed bool `json:"dynamic_allowed,omitempty"`
// Levels defines discrete reasoning effort levels (e.g., "low", "medium", "high").
// When set, the model uses level-based reasoning instead of token budgets.
Levels []string `json:"levels,omitempty"`
}

// ModelRegistration tracks a model's availability
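Note on the new Levels field: it makes ThinkingSupport cover two mutually exclusive shapes, budget-based models (token counts bounded by Min/Max, with the ZeroAllowed/DynamicAllowed flags) and level-based models (discrete efforts). A minimal sketch of how a consumer might distinguish the two; this helper is illustrative and not part of the PR:

// thinkingShape is a hypothetical helper, not from this diff: it reports
// which shape of thinking support a model advertises.
func thinkingShape(s *ThinkingSupport) string {
	switch {
	case s == nil:
		return "none" // model does not support thinking
	case len(s.Levels) > 0:
		return "levels" // discrete efforts such as "low", "medium", "high"
	default:
		return "budget" // token budget bounded by Min/Max
	}
}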
internal/runtime/executor/antigravity_executor.go (2 changes: 1 addition & 1 deletion)
@@ -39,7 +39,7 @@ const (
defaultAntigravityAgent = "antigravity/1.11.5 windows/amd64"
antigravityAuthType = "antigravity"
refreshSkew = 3000 * time.Second
streamScannerBuffer int = 20_971_520
streamScannerBuffer int = 52_428_800 // 50MB
)

var (
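For context on why the limit is raised: bufio.Scanner caps a single token at 64KB by default and fails with bufio.ErrTooLong beyond that, which long SSE lines can exceed. A minimal sketch of the pattern used throughout these executors, assuming a hypothetical handleLine helper:

// Allow single scanned lines up to streamScannerBuffer (50MB) instead of
// bufio's 64KB default; a nil initial buffer grows on demand.
scanner := bufio.NewScanner(resp.Body)
scanner.Buffer(nil, streamScannerBuffer)
for scanner.Scan() {
	handleLine(scanner.Bytes()) // hypothetical per-line handler
}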
internal/runtime/executor/claude_executor.go (60 changes: 8 additions & 52 deletions)
@@ -254,7 +254,7 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
// If from == to (Claude → Claude), directly forward the SSE stream without translation
if from == to {
scanner := bufio.NewScanner(decodedBody)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB

medium

The buffer size 52_428_800 is hardcoded here and in several other executors within this package. A constant streamScannerBuffer is defined in antigravity_executor.go but is not used at these sites. To avoid magic numbers and improve maintainability, please use this constant.

This comment also applies to codex_executor.go, gemini_cli_executor.go, gemini_executor.go, gemini_vertex_executor.go, iflow_executor.go, and openai_compat_executor.go.

Suggested change
scanner.Buffer(nil, 52_428_800) // 50MB
scanner.Buffer(nil, streamScannerBuffer)

for scanner.Scan() {
line := scanner.Bytes()
appendAPIResponseChunk(ctx, e.cfg, line)
Expand All @@ -277,7 +277,7 @@ func (e *ClaudeExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A

// For other formats, use translation
scanner := bufio.NewScanner(decodedBody)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()
@@ -450,59 +450,15 @@ func extractAndRemoveBetas(body []byte) ([]string, []byte) {
return betas, body
}

// injectThinkingConfig adds thinking configuration based on metadata or legacy suffixes.
// injectThinkingConfig adds thinking configuration based on metadata using the unified flow.
// It uses util.ResolveClaudeThinkingConfig which internally calls ResolveThinkingConfigFromMetadata
// and NormalizeThinkingBudget, ensuring consistency with other executors like Gemini.
func (e *ClaudeExecutor) injectThinkingConfig(modelName string, metadata map[string]any, body []byte) []byte {
// Only inject if thinking config is not already present
if gjson.GetBytes(body, "thinking").Exists() {
budget, ok := util.ResolveClaudeThinkingConfig(modelName, metadata)
if !ok {
return body
}

budgetTokens, ok := resolveClaudeThinkingBudget(modelName, metadata)
if !ok || budgetTokens <= 0 {
return body
}

body, _ = sjson.SetBytes(body, "thinking.type", "enabled")
body, _ = sjson.SetBytes(body, "thinking.budget_tokens", budgetTokens)
return body
}

func resolveClaudeThinkingBudget(modelName string, metadata map[string]any) (int, bool) {
budget, include, effort, matched := util.ThinkingFromMetadata(metadata)
if matched {
if include != nil && !*include {
return 0, false
}
if budget != nil {
normalized := util.NormalizeThinkingBudget(modelName, *budget)
if normalized > 0 {
return normalized, true
}
return 0, false
}
if effort != nil {
if derived, ok := util.ThinkingEffortToBudget(modelName, *effort); ok && derived > 0 {
return derived, true
}
}
}
return claudeBudgetFromSuffix(modelName)
}

func claudeBudgetFromSuffix(modelName string) (int, bool) {
lower := strings.ToLower(strings.TrimSpace(modelName))
switch {
case strings.HasSuffix(lower, "-thinking-low"):
return 1024, true
case strings.HasSuffix(lower, "-thinking-medium"):
return 8192, true
case strings.HasSuffix(lower, "-thinking-high"):
return 24576, true
case strings.HasSuffix(lower, "-thinking"):
return 8192, true
default:
return 0, false
}
return util.ApplyClaudeThinkingConfig(body, budget)
}

// ensureMaxTokensForThinking ensures max_tokens > thinking.budget_tokens when thinking is enabled.
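util.ResolveClaudeThinkingConfig and util.ApplyClaudeThinkingConfig are not shown in this diff. Judging from the inline logic they replace, the apply step presumably reduces to the following sketch; the signature and behavior are inferred, not confirmed by the PR:

// Inferred sketch of util.ApplyClaudeThinkingConfig, mirroring the deleted
// inline code: enable thinking and set the normalized token budget.
func ApplyClaudeThinkingConfig(body []byte, budget int) []byte {
	if budget <= 0 {
		return body
	}
	body, _ = sjson.SetBytes(body, "thinking.type", "enabled")
	body, _ = sjson.SetBytes(body, "thinking.budget_tokens", budget)
	return body
}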
internal/runtime/executor/codex_executor.go (16 changes: 12 additions & 4 deletions)
@@ -54,7 +54,11 @@ func (e *CodexExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
from := opts.SourceFormat
to := sdktranslator.FromString("codex")
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
body = applyReasoningEffortMetadata(body, req.Metadata, req.Model)
body = applyReasoningEffortMetadata(body, req.Metadata, req.Model, "reasoning.effort")
body = normalizeThinkingConfig(body, upstreamModel)
if errValidate := validateThinkingConfig(body, upstreamModel); errValidate != nil {
return resp, errValidate
}
body = applyPayloadConfig(e.cfg, req.Model, body)
body, _ = sjson.SetBytes(body, "model", upstreamModel)
body, _ = sjson.SetBytes(body, "stream", true)
@@ -148,7 +152,11 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
to := sdktranslator.FromString("codex")
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)

body = applyReasoningEffortMetadata(body, req.Metadata, req.Model)
body = applyReasoningEffortMetadata(body, req.Metadata, req.Model, "reasoning.effort")
body = normalizeThinkingConfig(body, upstreamModel)
if errValidate := validateThinkingConfig(body, upstreamModel); errValidate != nil {
return nil, errValidate
}
body = applyPayloadConfig(e.cfg, req.Model, body)
body, _ = sjson.DeleteBytes(body, "previous_response_id")
body, _ = sjson.SetBytes(body, "model", upstreamModel)
@@ -208,7 +216,7 @@ func (e *CodexExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
}
}()
scanner := bufio.NewScanner(httpResp.Body)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()
@@ -246,7 +254,7 @@ func (e *CodexExecutor) CountTokens(ctx context.Context, auth *cliproxyauth.Auth

modelForCounting := req.Model

body = applyReasoningEffortMetadata(body, req.Metadata, req.Model)
body = applyReasoningEffortMetadata(body, req.Metadata, req.Model, "reasoning.effort")
body, _ = sjson.SetBytes(body, "model", upstreamModel)
body, _ = sjson.DeleteBytes(body, "previous_response_id")
body, _ = sjson.SetBytes(body, "stream", false)
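validateThinkingConfig is called here without its definition appearing in the diff. Given the registry's new Levels metadata, a plausible shape for the level check is sketched below; the function name and lookup are assumptions, not the PR's actual code:

// Assumed validation sketch: reject reasoning efforts that a level-based
// model does not advertise in its registry metadata.
func validateEffortLevel(support *registry.ThinkingSupport, effort string) error {
	if support == nil || len(support.Levels) == 0 || effort == "" {
		return nil // nothing to check for budget-based models
	}
	for _, level := range support.Levels {
		if level == effort {
			return nil
		}
	}
	return fmt.Errorf("invalid reasoning effort %q; supported levels: %v", effort, support.Levels)
}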
internal/runtime/executor/gemini_cli_executor.go (2 changes: 1 addition & 1 deletion)
@@ -309,7 +309,7 @@ func (e *GeminiCLIExecutor) ExecuteStream(ctx context.Context, auth *cliproxyaut
}()
if opts.Alt == "" {
scanner := bufio.NewScanner(resp.Body)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()
internal/runtime/executor/gemini_executor.go (2 changes: 1 addition & 1 deletion)
@@ -249,7 +249,7 @@ func (e *GeminiExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.A
}
}()
scanner := bufio.NewScanner(httpResp.Body)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()
internal/runtime/executor/gemini_vertex_executor.go (4 changes: 2 additions & 2 deletions)
@@ -579,7 +579,7 @@ func (e *GeminiVertexExecutor) executeStreamWithServiceAccount(ctx context.Conte
}
}()
scanner := bufio.NewScanner(httpResp.Body)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()
@@ -696,7 +696,7 @@ func (e *GeminiVertexExecutor) executeStreamWithAPIKey(ctx context.Context, auth
}
}()
scanner := bufio.NewScanner(httpResp.Body)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()
internal/runtime/executor/iflow_executor.go (20 changes: 15 additions & 5 deletions)
@@ -57,10 +57,15 @@ func (e *IFlowExecutor) Execute(ctx context.Context, auth *cliproxyauth.Auth, re
from := opts.SourceFormat
to := sdktranslator.FromString("openai")
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
body = applyReasoningEffortMetadataChatCompletions(body, req.Metadata, req.Model)
if upstreamModel := util.ResolveOriginalModel(req.Model, req.Metadata); upstreamModel != "" {
body = applyReasoningEffortMetadata(body, req.Metadata, req.Model, "reasoning_effort")
upstreamModel := util.ResolveOriginalModel(req.Model, req.Metadata)
if upstreamModel != "" {
body, _ = sjson.SetBytes(body, "model", upstreamModel)
}
body = normalizeThinkingConfig(body, upstreamModel)
if errValidate := validateThinkingConfig(body, upstreamModel); errValidate != nil {
return resp, errValidate
}
body = applyPayloadConfig(e.cfg, req.Model, body)

endpoint := strings.TrimSuffix(baseURL, "/") + iflowDefaultEndpoint
@@ -143,10 +148,15 @@ func (e *IFlowExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
to := sdktranslator.FromString("openai")
body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)

body = applyReasoningEffortMetadataChatCompletions(body, req.Metadata, req.Model)
if upstreamModel := util.ResolveOriginalModel(req.Model, req.Metadata); upstreamModel != "" {
body = applyReasoningEffortMetadata(body, req.Metadata, req.Model, "reasoning_effort")
upstreamModel := util.ResolveOriginalModel(req.Model, req.Metadata)
if upstreamModel != "" {
body, _ = sjson.SetBytes(body, "model", upstreamModel)
}
body = normalizeThinkingConfig(body, upstreamModel)
if errValidate := validateThinkingConfig(body, upstreamModel); errValidate != nil {
return nil, errValidate
}
// Ensure tools array exists to avoid provider quirks similar to Qwen's behaviour.
toolsResult := gjson.GetBytes(body, "tools")
if toolsResult.Exists() && toolsResult.IsArray() && len(toolsResult.Array()) == 0 {
@@ -209,7 +219,7 @@ func (e *IFlowExecutor) ExecuteStream(ctx context.Context, auth *cliproxyauth.Au
}()

scanner := bufio.NewScanner(httpResp.Body)
scanner.Buffer(nil, 20_971_520)
scanner.Buffer(nil, 52_428_800) // 50MB
var param any
for scanner.Scan() {
line := scanner.Bytes()