Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 73 additions & 0 deletions litellm/proxy/management_endpoints/key_management_endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -2841,6 +2841,79 @@ async def list_keys(
code=status.HTTP_500_INTERNAL_SERVER_ERROR,
)

@router.get(
    "/key/aliases",
    tags=["key management"],
    dependencies=[Depends(user_api_key_auth)],
)
@management_endpoint_wrapper
async def key_aliases() -> Dict[str, List[str]]:
    """
    Lists all key aliases.

    Queries the LiteLLM_VerificationToken table and returns the distinct,
    non-empty ``key_alias`` values, deduplicated and in ascending order.

    Returns:
        {
            "aliases": List[str]
        }

    Raises:
        ProxyException: if the database is not connected or the query fails.
    """
    try:
        from litellm.proxy.proxy_server import prisma_client

        verbose_proxy_logger.debug("Entering key_aliases function")

        if prisma_client is None:
            verbose_proxy_logger.error("Database not connected")
            raise Exception("Database not connected")

        where: Dict[str, Any] = {}
        try:
            # Exclude UI session tokens so only real API keys are listed.
            where.update(_get_condition_to_filter_out_ui_session_tokens())
        except NameError:
            # Helper may not exist in some builds; ignore if missing
            pass

        rows = await prisma_client.db.litellm_verificationtoken.find_many(
            where=where,
            order=[{"key_alias": "asc"}],
        )

        # Deduplicate while preserving the DB's ascending order.
        seen = set()
        aliases: List[str] = []
        for row in rows:
            # Rows may be model objects or plain dicts depending on the client.
            alias = getattr(row, "key_alias", None)
            if alias is None and isinstance(row, dict):
                alias = row.get("key_alias")

            if not alias:
                continue

            alias_str = str(alias).strip()
            if alias_str and alias_str not in seen:
                seen.add(alias_str)
                aliases.append(alias_str)

        # Lazy %-args: formatting is skipped when debug logging is disabled.
        verbose_proxy_logger.debug("Returning %d key aliases", len(aliases))

        return {"aliases": aliases}

    except HTTPException as e:
        verbose_proxy_logger.exception("Error in key_aliases: %s", e)
        raise ProxyException(
            message=getattr(e, "detail", f"error({str(e)})"),
            type=ProxyErrorTypes.internal_server_error,
            param=getattr(e, "param", "None"),
            code=getattr(e, "status_code", status.HTTP_500_INTERNAL_SERVER_ERROR),
        )
    except ProxyException:
        raise
    except Exception as e:
        verbose_proxy_logger.exception("Error in key_aliases: %s", e)
        raise ProxyException(
            # Accurate message: this is an internal error, not an auth failure.
            message="Internal Server Error, " + str(e),
            type=ProxyErrorTypes.internal_server_error,
            param=getattr(e, "param", "None"),
            code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )


def _validate_sort_params(
sort_by: Optional[str], sort_order: str
Expand Down
56 changes: 53 additions & 3 deletions tests/proxy_unit_tests/test_key_generate_prisma.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@
list_keys,
regenerate_key_fn,
update_key_fn,
key_aliases,
)
from litellm.proxy.management_endpoints.team_endpoints import (
new_team,
Expand Down Expand Up @@ -151,7 +152,6 @@ def prisma_client():
@pytest.mark.flaky(retries=6, delay=1)
async def test_new_user_response(prisma_client):
try:

print("prisma client=", prisma_client)

setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
Expand Down Expand Up @@ -424,7 +424,6 @@ async def test_call_with_valid_model_using_all_models(prisma_client):
setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
try:

await litellm.proxy.proxy_server.prisma_client.connect()

team_request = NewTeamRequest(
Expand Down Expand Up @@ -1789,7 +1788,6 @@ async def test_call_with_key_over_model_budget(
litellm.callbacks.append(model_budget_limiter)

try:

# set budget for chatgpt-v-3 to 0.000001, expect the next request to fail
model_max_budget = {
"gpt-4o-mini": {
Expand Down Expand Up @@ -3531,6 +3529,58 @@ async def test_list_keys(prisma_client):
assert _key in response["keys"]


@pytest.mark.asyncio
async def test_key_aliases(prisma_client):
    """
    Exercise the key_aliases endpoint handler:
    - returns a list under the "aliases" key
    - contains the alias of a freshly created key
    - result is deduplicated and sorted ascending
    """
    import asyncio
    import uuid
    import litellm
    from litellm.proxy._types import LitellmUserRoles

    # Point the proxy server at the test database
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    await litellm.proxy.proxy_server.prisma_client.connect()

    # Sanity-check the basic response shape
    initial = await key_aliases()
    assert "aliases" in initial
    assert isinstance(initial["aliases"], list)

    # Register a user whose key carries a unique alias
    suffix = str(uuid.uuid4())
    expected_alias = f"key-aliases-test-{suffix}"

    await new_user(
        data=NewUserRequest(
            user_id=f"key-aliases-user-{suffix}",
            user_role=LitellmUserRoles.INTERNAL_USER,
            key_alias=expected_alias,
        ),
        user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN),
    )

    # Give the async DB writes time to land
    await asyncio.sleep(2)

    result = (await key_aliases())["aliases"]

    # The new alias is visible
    assert expected_alias in result

    # Endpoint dedupes and returns ascending order
    assert len(result) == len(set(result))
    assert result == sorted(result)


@pytest.mark.asyncio
async def test_auth_vertex_ai_route(prisma_client):
"""
Expand Down
Original file line number Diff line number Diff line change
@@ -1,54 +1,26 @@
import { keyListCall, teamListCall, organizationListCall } from "../networking";
import { teamListCall, organizationListCall, keyAliasesCall } from "../networking"
import { Team } from "./key_list";
import { Organization } from "../networking";

/**
* Fetches all key aliases across all pages
* Fetches all key aliases via the dedicated /key/aliases endpoint
* @param accessToken The access token for API authentication
* @returns Array of all unique key aliases
*/
export const fetchAllKeyAliases = async (accessToken: string | null): Promise<string[]> => {
  if (!accessToken) return [];

  try {
    // Single round-trip to the dedicated /key/aliases endpoint
    // (replaces the old page-by-page scan of /key/list).
    // Note: `string` is assignable to `String`, so no cast is needed here.
    const { aliases } = await keyAliasesCall(accessToken);
    // Defensive dedupe & null-guard in case the server returns duplicates/nulls
    return Array.from(new Set((aliases || []).filter(Boolean)));
  } catch (error) {
    console.error("Error fetching all key aliases:", error);
    return [];
  }
};


/**
* Fetches all teams across all pages
* @param accessToken The access token for API authentication
Expand Down
35 changes: 35 additions & 0 deletions ui/litellm-dashboard/src/components/networking.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -3102,6 +3102,41 @@ export const keyListCall = async (
}
};

export const keyAliasesCall = async (
  accessToken: String
): Promise<{ aliases: string[] }> => {
  /**
   * Get all key aliases from proxy
   */
  try {
    const url = proxyBaseUrl ? `${proxyBaseUrl}/key/aliases` : `/key/aliases`;
    console.log("in keyAliasesCall");

    const response = await fetch(url, {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
        [globalLitellmHeaderName]: `Bearer ${accessToken}`,
      },
    });

    // Surface server-side failures through the shared error helpers
    if (!response.ok) {
      const errorMessage = deriveErrorMessage(await response.json());
      handleError(errorMessage);
      throw new Error(errorMessage);
    }

    const payload = await response.json();
    console.log("/key/aliases API Response:", payload);
    return payload; // { aliases: string[] }
  } catch (error) {
    console.error("Failed to fetch key aliases:", error);
    throw error;
  }
};


export const spendUsersCall = async (accessToken: String, userID: String) => {
try {
const url = proxyBaseUrl ? `${proxyBaseUrl}/spend/users` : `/spend/users`;
Expand Down
Loading