Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions docs/cli/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -214,6 +214,16 @@ In addition to a project settings file, a project's `.gemini` directory can cont
```json
"enableOpenAILogging": true
```
- **`ollama.timeoutMs`** (number):
- **Description:** Overrides the default 120,000 ms (2-minute) request timeout used for OpenAI/Ollama-compatible API calls. Useful for very large prompts or models that take a long time to produce the first token.
- **Default:** `120000` (2 minutes) if not set.
- **Example:**

```json
"ollama": {
"timeoutMs": 86400000
}
```

### Example `settings.json`:

Expand Down Expand Up @@ -272,6 +282,9 @@ The CLI automatically loads environment variables from an `.env` file. The loadi
- Specifies the default Gemini model to use.
- Overrides the hardcoded default
- Example: `export GEMINI_MODEL="gemini-2.5-flash"`
- **`OPENAI_TIMEOUT_MS`** / **`OLLAMA_TIMEOUT_MS`**:
- Sets the request timeout (in milliseconds) for OpenAI/Ollama-compatible calls. `OPENAI_TIMEOUT_MS` takes precedence if both are defined.
- Example: `export OPENAI_TIMEOUT_MS=86400000`
- **`GOOGLE_API_KEY`**:
- Your Google Cloud API key.
- Required for using Vertex AI in express mode.
Expand Down Expand Up @@ -354,6 +367,8 @@ Arguments passed directly when running the CLI can override other configurations
- Displays the version of the CLI.
- **`--openai-logging`**:
- Enables logging of OpenAI API calls for debugging and analysis. This flag overrides the `enableOpenAILogging` setting in `settings.json`.
- **`--openai-timeout-ms <milliseconds>`**:
  - Overrides the request timeout for OpenAI/Ollama-compatible calls for the current session. Accepts a non-negative number of milliseconds (e.g., `86400000` for one day).

## Context Files (Hierarchical Instructional Context)

Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

52 changes: 52 additions & 0 deletions packages/cli/src/config/config.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,16 +50,22 @@ vi.mock('@tcsenpai/ollama-code', async () => {
),
Config: class MockConfig extends actualServer.Config {
private enableOpenAILogging: boolean;
private timeoutMs?: number;

constructor(params: ConfigParameters) {
super(params);
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
this.timeoutMs = params.contentGeneratorTimeoutMs;
}

getEnableOpenAILogging(): boolean {
return this.enableOpenAILogging;
}

getContentGeneratorTimeoutMs(): number | undefined {
return this.timeoutMs;
}

// Override other methods to ensure they work correctly
getShowMemoryUsage(): boolean {
return (
Expand Down Expand Up @@ -243,6 +249,52 @@ describe('loadCliConfig', () => {
const config = await loadCliConfig(settings, [], 'test-session', argv);
expect(config.getShowMemoryUsage()).toBe(true);
});

it('should prioritize CLI timeout over environment and settings', async () => {
  // CLI flag, environment variable, and settings all disagree; the flag must win.
  // NOTE(review): process.argv / process.env mutations appear to rely on
  // suite-level setup/teardown for restoration — verify the enclosing describe.
  process.argv = ['node', 'script.js', '--openai-timeout-ms', '86400000'];
  const parsedArgs = await parseArguments();
  process.env.OPENAI_TIMEOUT_MS = '60000';
  const timeoutSettings: Settings = { ollama: { timeoutMs: 30000 } };
  const loaded = await loadCliConfig(
    timeoutSettings,
    [],
    'test-session',
    parsedArgs,
  );
  const withTimeoutAccessor = loaded as unknown as {
    getContentGeneratorTimeoutMs(): number | undefined;
  };
  expect(withTimeoutAccessor.getContentGeneratorTimeoutMs()).toBe(86400000);
});

it('should use environment timeout when CLI flag is absent', async () => {
  // No --openai-timeout-ms flag, so the environment variable should apply
  // ahead of the settings-file value.
  process.argv = ['node', 'script.js'];
  process.env.OPENAI_TIMEOUT_MS = '123456';
  const parsedArgs = await parseArguments();
  const timeoutSettings: Settings = { ollama: { timeoutMs: 789 } };
  const loaded = await loadCliConfig(
    timeoutSettings,
    [],
    'test-session',
    parsedArgs,
  );
  const withTimeoutAccessor = loaded as unknown as {
    getContentGeneratorTimeoutMs(): number | undefined;
  };
  expect(withTimeoutAccessor.getContentGeneratorTimeoutMs()).toBe(123456);
});

it('should fall back to settings timeout when no CLI or environment override exists', async () => {
  // With neither flag nor environment variables set, the settings-file value
  // is the last source in the precedence chain and should be used.
  process.argv = ['node', 'script.js'];
  delete process.env.OPENAI_TIMEOUT_MS;
  delete process.env.OLLAMA_TIMEOUT_MS;
  const parsedArgs = await parseArguments();
  const timeoutSettings: Settings = { ollama: { timeoutMs: 456789 } };
  const loaded = await loadCliConfig(
    timeoutSettings,
    [],
    'test-session',
    parsedArgs,
  );
  const withTimeoutAccessor = loaded as unknown as {
    getContentGeneratorTimeoutMs(): number | undefined;
  };
  expect(withTimeoutAccessor.getContentGeneratorTimeoutMs()).toBe(456789);
});
});

describe('loadCliConfig telemetry', () => {
Expand Down
51 changes: 51 additions & 0 deletions packages/cli/src/config/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,27 @@ const logger = {
error: (...args: any[]) => console.error('[ERROR]', ...args),
};

/**
 * Parses a timeout override (flag, environment variable, or settings value)
 * into a non-negative number of milliseconds.
 *
 * @param value - Raw value from the CLI, environment, or settings file.
 * @param sourceDescription - Human-readable origin, used in the warning
 *   emitted when the value is rejected.
 * @returns The parsed timeout in milliseconds, or `undefined` when the value
 *   is absent/blank or invalid (invalid values are warned about so the caller
 *   can fall through to the next source in the precedence chain).
 */
function parseTimeoutMs(
  value: unknown,
  sourceDescription: string,
): number | undefined {
  // Absent or empty values mean "no override here" — not an error.
  if (value === undefined || value === null || value === '') {
    return undefined;
  }

  let numeric: number;
  if (typeof value === 'number') {
    numeric = value;
  } else {
    const text = String(value).trim();
    // Whitespace-only input is treated like the empty string above
    // (Number('') would otherwise coerce it to 0).
    if (text === '') {
      return undefined;
    }
    // Number() is deliberately used instead of parseFloat(): parseFloat
    // silently truncates trailing garbage ('60s' -> 60) and mis-parses hex
    // ('0x10' -> 0), which would mask user typos instead of warning on them.
    numeric = Number(text);
  }

  if (!Number.isFinite(numeric) || numeric < 0) {
    logger.warn(
      `Ignoring invalid timeout value "${value}" from ${sourceDescription}. Expected a non-negative number.`,
    );
    return undefined;
  }

  return numeric;
}

export interface CliArgs {
model: string | undefined;
sandbox: boolean | string | undefined;
Expand All @@ -59,6 +80,7 @@ export interface CliArgs {
openaiLogging: boolean | undefined;
openaiApiKey: string | undefined;
openaiBaseUrl: string | undefined;
openaiTimeoutMs: number | undefined;
}

export async function parseArguments(): Promise<CliArgs> {
Expand Down Expand Up @@ -197,6 +219,11 @@ export async function parseArguments(): Promise<CliArgs> {
type: 'string',
description: 'OpenAI base URL (for custom endpoints)',
})
.option('openai-timeout-ms', {
type: 'number',
description:
'Timeout in milliseconds for OpenAI/Ollama streaming requests (overrides settings and environment)',
})

.version(await getCliVersion()) // This will enable the --version flag based on package.json
.alias('v', 'version')
Expand Down Expand Up @@ -330,6 +357,29 @@ export async function loadCliConfig(

const sandboxConfig = await loadSandboxConfig(settings, argv);

const cliTimeoutMs = parseTimeoutMs(
argv.openaiTimeoutMs,
'--openai-timeout-ms flag',
);
let envTimeoutMs: number | undefined;
if (process.env.OPENAI_TIMEOUT_MS !== undefined) {
envTimeoutMs = parseTimeoutMs(
process.env.OPENAI_TIMEOUT_MS,
'environment variable OPENAI_TIMEOUT_MS',
);
} else if (process.env.OLLAMA_TIMEOUT_MS !== undefined) {
envTimeoutMs = parseTimeoutMs(
process.env.OLLAMA_TIMEOUT_MS,
'environment variable OLLAMA_TIMEOUT_MS',
);
}
const settingsTimeoutMs = parseTimeoutMs(
settings.ollama?.timeoutMs,
'settings.ollama.timeoutMs',
);
const contentGeneratorTimeoutMs =
cliTimeoutMs ?? envTimeoutMs ?? settingsTimeoutMs;

return new Config({
sessionId,
embeddingModel: DEFAULT_GEMINI_EMBEDDING_MODEL,
Expand Down Expand Up @@ -393,6 +443,7 @@ export async function loadCliConfig(
(typeof argv.openaiLogging === 'undefined'
? settings.enableOpenAILogging
: argv.openaiLogging) ?? false,
contentGeneratorTimeoutMs,
sampling_params: settings.sampling_params,
});
}
Expand Down
8 changes: 8 additions & 0 deletions packages/cli/src/config/settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ export interface OllamaConfig {
baseUrl?: string;
model?: string;
apiKey?: string;
timeoutMs?: number;
}

export interface Settings {
Expand Down Expand Up @@ -297,6 +298,13 @@ export function loadOllamaConfig(): OllamaConfig {
if (config.apiKey && !process.env.OLLAMA_API_KEY && !process.env.OPENAI_API_KEY) {
process.env.OLLAMA_API_KEY = config.apiKey;
}
if (
config.timeoutMs !== undefined &&
process.env.OPENAI_TIMEOUT_MS === undefined &&
process.env.OLLAMA_TIMEOUT_MS === undefined
) {
process.env.OPENAI_TIMEOUT_MS = String(config.timeoutMs);
}

return config;
}
Expand Down
19 changes: 19 additions & 0 deletions packages/core/src/config/config.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,25 @@ describe('Server Config (config.ts)', () => {
expect(config.getModel()).toBe(newModel); // getModel() should return the updated model
expect(GeminiClient).toHaveBeenCalledWith(config);
});

it('should apply configured timeout when provided', async () => {
const timeoutMs = 86400000;
const config = new Config({
...baseParams,
contentGeneratorTimeoutMs: timeoutMs,
});
const mockContentConfig = {
model: MODEL,
};

(createContentGeneratorConfig as Mock).mockResolvedValueOnce(
mockContentConfig,
);

await config.refreshAuth(AuthType.USE_OPENAI);

expect(config.getContentGeneratorConfig().timeout).toBe(timeoutMs);
});
});

it('Config constructor should store userMemory correctly', () => {
Expand Down
7 changes: 7 additions & 0 deletions packages/core/src/config/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ export interface ConfigParameters {
noBrowser?: boolean;
ideMode?: boolean;
enableOpenAILogging?: boolean;
contentGeneratorTimeoutMs?: number;
sampling_params?: {
top_p?: number;
top_k?: number;
Expand Down Expand Up @@ -195,6 +196,7 @@ export class Config {
private readonly noBrowser: boolean;
private readonly ideMode: boolean;
private readonly enableOpenAILogging: boolean;
private readonly contentGeneratorTimeoutMs?: number;
private readonly sampling_params?: {
top_p?: number;
top_k?: number;
Expand Down Expand Up @@ -257,6 +259,7 @@ export class Config {
this.noBrowser = params.noBrowser ?? false;
this.ideMode = params.ideMode ?? false;
this.enableOpenAILogging = params.enableOpenAILogging ?? false;
this.contentGeneratorTimeoutMs = params.contentGeneratorTimeoutMs;
this.sampling_params = params.sampling_params;

if (params.contextFileName) {
Expand Down Expand Up @@ -297,6 +300,10 @@ export class Config {
this.contentGeneratorConfig.samplingParams = this.sampling_params;
}

if (this.contentGeneratorTimeoutMs !== undefined) {
this.contentGeneratorConfig.timeout = this.contentGeneratorTimeoutMs;
}

this.geminiClient = new GeminiClient(this);
await this.geminiClient.initialize(this.contentGeneratorConfig);

Expand Down