@router.get(
    "/key/aliases",
    tags=["key management"],
    dependencies=[Depends(user_api_key_auth)],
)
@management_endpoint_wrapper
async def key_aliases() -> Dict[str, List[str]]:
    """
    List every distinct, non-empty key alias on the proxy.

    Aliases are returned deduplicated and in ascending alphabetical order:
    the DB query orders by ``key_alias`` and the Python-side dedup preserves
    that order.

    Returns:
        {"aliases": List[str]}

    Raises:
        ProxyException: on any failure (database not connected, query error),
            surfaced to the client as HTTP 500.
    """
    try:
        from litellm.proxy.proxy_server import prisma_client

        verbose_proxy_logger.debug("Entering key_aliases function")

        if prisma_client is None:
            verbose_proxy_logger.error("Database not connected")
            raise Exception("Database not connected")

        where: Dict[str, Any] = {}
        try:
            # Exclude LiteLLM UI session tokens from the listing.
            where.update(_get_condition_to_filter_out_ui_session_tokens())
        except NameError:
            # Helper may not exist in some builds; ignore if missing
            pass

        rows = await prisma_client.db.litellm_verificationtoken.find_many(
            where=where,
            order=[{"key_alias": "asc"}],
        )

        seen = set()
        aliases: List[str] = []
        for row in rows:
            # Prisma rows are objects, but tolerate plain dicts too
            # (e.g. mocked clients in tests).
            alias = getattr(row, "key_alias", None)
            if alias is None and isinstance(row, dict):
                alias = row.get("key_alias")

            if not alias:
                continue

            alias_str = str(alias).strip()
            if alias_str and alias_str not in seen:
                seen.add(alias_str)
                aliases.append(alias_str)

        verbose_proxy_logger.debug(f"Returning {len(aliases)} key aliases")

        return {"aliases": aliases}

    except Exception as e:
        verbose_proxy_logger.exception(f"Error in key_aliases: {e}")
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "detail", f"error({str(e)})"),
                type=ProxyErrorTypes.internal_server_error,
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", status.HTTP_500_INTERNAL_SERVER_ERROR),
            )
        elif isinstance(e, ProxyException):
            raise e
        raise ProxyException(
            # BUG FIX: previously said "Authentication Error, ..." while the
            # type/code below are internal_server_error — misleading message
            # for a non-auth failure (e.g. "Database not connected").
            message="Internal Server Error, " + str(e),
            type=ProxyErrorTypes.internal_server_error,
            param=getattr(e, "param", "None"),
            code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
@pytest.mark.asyncio
async def test_key_aliases(prisma_client):
    """
    Test the key_aliases function:
    - Returns a list
    - Includes alias from a newly created key
    - Aliases are unique and sorted
    """
    import asyncio
    import uuid
    import litellm
    from litellm.proxy._types import LitellmUserRoles

    # Point the proxy server at the test prisma client and connect.
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    await litellm.proxy.proxy_server.prisma_client.connect()

    # Sanity check: the endpoint answers with a list under "aliases".
    initial = await key_aliases()
    assert "aliases" in initial
    assert isinstance(initial["aliases"], list)

    # Create a new user (and key) carrying a unique alias.
    suffix = str(uuid.uuid4())
    alias_under_test = f"key-aliases-test-{suffix}"
    created_user_id = f"key-aliases-user-{suffix}"

    await new_user(
        data=NewUserRequest(
            user_id=created_user_id,
            user_role=LitellmUserRoles.INTERNAL_USER,
            key_alias=alias_under_test,
        ),
        user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN),
    )

    # Allow async DB writes to settle
    await asyncio.sleep(2)

    # Re-query and validate the fresh alias is visible.
    refreshed = await key_aliases()
    observed = refreshed["aliases"]

    # Contains the new alias
    assert alias_under_test in observed

    # Unique & sorted (endpoint dedupes and orders ascending)
    assert len(set(observed)) == len(observed)
    assert sorted(observed) == observed
Array of all unique key aliases */ @@ -11,44 +11,16 @@ export const fetchAllKeyAliases = async (accessToken: string | null): Promise key.key_alias).filter(Boolean) as string[]; - - allAliases = [...allAliases, ...pageAliases]; - - // Check if there are more pages - if (currentPage < response.total_pages) { - currentPage++; - } else { - hasMorePages = false; - } - } - - // Remove duplicates - return Array.from(new Set(allAliases)); + const { aliases } = await keyAliasesCall(accessToken as unknown as String); + // Defensive dedupe & null-guard + return Array.from(new Set((aliases || []).filter(Boolean))); } catch (error) { console.error("Error fetching all key aliases:", error); return []; } }; + /** * Fetches all teams across all pages * @param accessToken The access token for API authentication diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx index 5f27c277d43b..5eb0eb0ad6e4 100644 --- a/ui/litellm-dashboard/src/components/networking.tsx +++ b/ui/litellm-dashboard/src/components/networking.tsx @@ -3102,6 +3102,41 @@ export const keyListCall = async ( } }; +export const keyAliasesCall = async ( + accessToken: String +): Promise<{ aliases: string[] }> => { + /** + * Get all key aliases from proxy + */ + try { + let url = proxyBaseUrl ? 
`${proxyBaseUrl}/key/aliases` : `/key/aliases`; + console.log("in keyAliasesCall"); + + const response = await fetch(url, { + method: "GET", + headers: { + [globalLitellmHeaderName]: `Bearer ${accessToken}`, + "Content-Type": "application/json", + }, + }); + + if (!response.ok) { + const errorData = await response.json(); + const errorMessage = deriveErrorMessage(errorData); + handleError(errorMessage); + throw new Error(errorMessage); + } + + const data = await response.json(); + console.log("/key/aliases API Response:", data); + return data; // { aliases: string[] } + } catch (error) { + console.error("Failed to fetch key aliases:", error); + throw error; + } +}; + + export const spendUsersCall = async (accessToken: String, userID: String) => { try { const url = proxyBaseUrl ? `${proxyBaseUrl}/spend/users` : `/spend/users`;