diff --git a/README.md b/README.md index 5c69052..662e3c8 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,9 @@ cd w07-solution ``` 2. Install dependencies: ```bash - pip install -r requirements.txt + python3 -m venv .venv + source .venv/bin/activate + pip3 install -r requirements.txt ``` ## Running the Application diff --git a/client/src/app.css b/client/src/app.css index 6e08f4d..42511be 100644 --- a/client/src/app.css +++ b/client/src/app.css @@ -295,4 +295,63 @@ header h1 { .like-button.liked:hover { background-color: rgba(255, 23, 68, 0.1); +} + +/* Recommendation Banner Styles */ +.recommendation-banner { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + border-radius: 0.75rem; + margin-bottom: 2rem; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); + overflow: hidden; +} + +.recommendation-banner.loading { + background: linear-gradient(135deg, #f3f4f6 0%, #e5e7eb 100%); + animation: pulse 2s ease-in-out infinite alternate; +} + +.recommendation-banner.empty { + background: linear-gradient(135deg, #f9fafb 0%, #f3f4f6 100%); + border: 2px dashed #d1d5db; +} + +.recommendation-content { + padding: 1.5rem; +} + +.recommendation-banner h3 { + color: white; + font-size: 1.1rem; + font-weight: 600; + margin-bottom: 0.5rem; + display: flex; + align-items: center; + gap: 0.5rem; +} + +.recommendation-banner.loading h3, +.recommendation-banner.empty h3 { + color: #4b5563; +} + +.recommendation-banner p { + color: rgba(255, 255, 255, 0.9); + font-size: 0.95rem; + line-height: 1.5; + margin: 0; +} + +.recommendation-banner.loading p, +.recommendation-banner.empty p { + color: #6b7280; +} + +@keyframes pulse { + 0% { + opacity: 1; + } + 100% { + opacity: 0.7; + } } \ No newline at end of file diff --git a/client/src/lib/types.ts b/client/src/lib/types.ts index 49ea78a..88b2b08 100644 --- a/client/src/lib/types.ts +++ b/client/src/lib/types.ts @@ -8,4 +8,8 @@ export type Meal = { export type UserPreferences = { username: string; favoriteMeals: string[]; +}; 
+ +export type Recommendation = { + recommendation: string; }; \ No newline at end of file diff --git a/client/src/routes/+page.svelte b/client/src/routes/+page.svelte index e248842..ae5b0de 100644 --- a/client/src/routes/+page.svelte +++ b/client/src/routes/+page.svelte @@ -5,11 +5,14 @@ import FoodCard from './FoodCard.svelte'; import type {Meal} from '$lib/types'; import type {PageProps} from "./$types"; + import { BaseURL } from '$lib/env'; + import { getCookie } from '$lib'; let {data}: PageProps = $props(); // For more information on runes and reactivity, see: https://svelte.dev/docs/svelte/what-are-runes let meals: Meal[] = $state(data.meals); + let recommendation: string | undefined = data.recommendation?.recommendation;
@@ -18,14 +21,31 @@

Today's menu offerings

+ + {#if recommendation} +
+
+

🤖 AI Recommendation

+

{recommendation}

+
+
+ {:else} +
+
+

🤖 AI Recommendation

+

No recommendations available. Try adding some favorite meals first!

+
+
+ {/if} + {#if meals.length === 0}

Loading menu items...

{:else}
- {#each meals as meal} - + {#each meals as {}, i} + {/each}
{/if} diff --git a/client/src/routes/+page.ts b/client/src/routes/+page.ts index 56bb597..00f4125 100644 --- a/client/src/routes/+page.ts +++ b/client/src/routes/+page.ts @@ -1,6 +1,6 @@ import type { PageLoad } from './$types'; import { BaseURL} from '$lib/env'; -import type { Meal, UserPreferences } from '$lib/types'; +import type { Meal, Recommendation, UserPreferences } from '$lib/types'; import { getCookie, setCookie } from '$lib'; export const ssr = false; @@ -22,19 +22,26 @@ export const load: PageLoad = async ({ fetch }) => { } } - const res = await fetch(`${BaseURL}/mensa-garching/today`); - const meals: Meal[] = await res.json(); + // Execute all requests in parallel, don't wait for slow ones to complete + const [mealsResult, preferencesResult, recommendationResult] = await Promise.allSettled([ + fetch(`${BaseURL}/mensa-garching/today`).then(res => res.json()), + fetch(`${BaseURL}/preferences/${username}`).then(res => res.json()), + fetch(`${BaseURL}/recommend/${username}`).then(res => res.json()) + ]); - const res2 = await fetch(`${BaseURL}/preferences/${username}`); - const preferences: UserPreferences = await res2.json(); + // Extract successful results or provide defaults + const meals: Meal[] = mealsResult.status === 'fulfilled' ? mealsResult.value : []; + const preferences: UserPreferences = preferencesResult.status === 'fulfilled' ? preferencesResult.value : { favoriteMeals: [] }; + const recommendation: Recommendation | null = recommendationResult.status === 'fulfilled' ? 
recommendationResult.value : null; console.log('Meals:', meals); console.log('Preferences:', preferences); + console.log('Recommendation:', recommendation); // Set the boolean favorite property for each meal meals.forEach((meal: any) => { meal.favorite = preferences.favoriteMeals.includes(meal.name); }); - return { meals}; + return { meals, recommendation }; }; \ No newline at end of file diff --git a/client/src/routes/FoodCard.svelte b/client/src/routes/FoodCard.svelte index c829411..b6e4c21 100644 --- a/client/src/routes/FoodCard.svelte +++ b/client/src/routes/FoodCard.svelte @@ -4,7 +4,7 @@ import { BaseURL } from "$lib/env"; import { getCookie } from "$lib"; - let { meal }: { meal: Meal } = $props(); // List of favorite meal + let { meal = $bindable() }: { meal: Meal } = $props(); // List of favorite meal //toggle favorite when heart button is clicked const toggleFavorite = async () => { diff --git a/docs/CanteenApp Bruno/Get LLM recommendation.bru b/docs/CanteenApp Bruno/Get LLM recommendation.bru new file mode 100644 index 0000000..f2c6a6a --- /dev/null +++ b/docs/CanteenApp Bruno/Get LLM recommendation.bru @@ -0,0 +1,29 @@ +meta { + name: Get LLM recommendation + type: http + seq: 5 +} + +post { + url: http://localhost:5000/recommend + body: json + auth: inherit +} + +body:json { + { + "favorite_menu": [ + "Chicken Alfredo", + "Caesar Salad", + "Margherita Pizza", + "Grilled Salmon" + ], + "todays_menu": [ + "Beef Tacos", + "Chicken Alfredo Pasta", + "Greek Salad", + "Vegetarian Pizza", + "Fish and Chips" + ] + } +} diff --git a/docs/CanteenApp Bruno/Get recommendation.bru b/docs/CanteenApp Bruno/Get recommendation.bru new file mode 100644 index 0000000..8f130be --- /dev/null +++ b/docs/CanteenApp Bruno/Get recommendation.bru @@ -0,0 +1,11 @@ +meta { + name: Get recommendation + type: http + seq: 6 +} + +get { + url: http://localhost:8080/api/recommend/test + body: none + auth: inherit +} diff --git a/llm/main.py b/llm/main.py index 474aa26..29c8f50 100644 
--- a/llm/main.py +++ b/llm/main.py @@ -1,17 +1,24 @@ import os -import asyncpg +import json +import requests +from typing import Dict, Any, List, Optional from fastapi import FastAPI, HTTPException -from pydantic import BaseModel -from langchain import LLMChain, PromptTemplate -from langchain.llms import AzureOpenAI +from pydantic import BaseModel, Field +from langchain.llms.base import LLM +from langchain.chains import LLMChain +from langchain_core.prompts import PromptTemplate +from langchain.callbacks.manager import CallbackManagerForLLMRun + +# Environment configuration CHAIR_API_KEY = os.getenv("CHAIR_API_KEY") +API_URL = "https://gpu.aet.cit.tum.de/api/chat/completions" + # Create FastAPI application instance app = FastAPI( title="LLM Recommendation Service", description="Service that generates personalized food recommendations using an LLM", version="1.0.0" ) -API_URL = "https://gpu.aet.cit.tum.de/api/chat/completions" class RecommendRequest(BaseModel): @@ -19,10 +26,11 @@ class RecommendRequest(BaseModel): Request schema for recommendation endpoint. Attributes: - userId (str): Unique identifier for the user requesting a recommendation. + favorite_menu (List[str]): User's favorite meal names + todays_menu (List[str]): Today's available meal names """ - favorite_menu: str - todays_menu: dict #I am not sure of this? + favorite_menu: List[str] = Field(..., description="User's favorite meal names") + todays_menu: List[str] = Field(..., description="Today's available meal names") class RecommendResponse(BaseModel): @@ -32,67 +40,217 @@ class RecommendResponse(BaseModel): Attributes: recommendation (str): The personalized recommendation string. 
""" - recommendation: str - -def chat_with_model(messages: list[dict]) -> dict: - headers = { - "Authorization": f"Bearer {CHAIR_API_KEY}", - "Content-Type": "application/json", - } - payload = { - "model": "tinyllama", - "messages": messages - } - resp = requests.post(API_URL, headers=headers, json=payload, timeout=30) - resp.raise_for_status() - return resp.json() - + recommendation: str = Field(..., description="Personalized food recommendation") + + +class OpenWebUILLM(LLM): + """ + Custom LangChain LLM wrapper for Open WebUI API. + + This class integrates the Open WebUI API with LangChain's LLM interface, + allowing us to use the API in LangChain chains and pipelines. + """ + + api_url: str = API_URL + api_key: str = CHAIR_API_KEY + model_name: str = "llama3:latest" + + @property + def _llm_type(self) -> str: + return "open_webui" + + def _call( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """ + Call the Open WebUI API to generate a response. 
+ + Args: + prompt: The input prompt to send to the model + stop: Optional list of stop sequences + run_manager: Optional callback manager for LangChain + **kwargs: Additional keyword arguments + + Returns: + The generated response text + + Raises: + Exception: If API call fails + """ + if not self.api_key: + raise ValueError("CHAIR_API_KEY environment variable is required") + + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + } + + # Build messages for chat completion + messages = [ + {"role": "user", "content": prompt} + ] + + payload = { + "model": self.model_name, + "messages": messages, + } + + try: + response = requests.post( + self.api_url, + headers=headers, + json=payload, + timeout=30 + ) + response.raise_for_status() + + result = response.json() + + # Extract the response content + if "choices" in result and len(result["choices"]) > 0: + content = result["choices"][0]["message"]["content"] + return content.strip() + else: + raise ValueError("Unexpected response format from API") + + except requests.RequestException as e: + raise Exception(f"API request failed: {str(e)}") + except (KeyError, IndexError, ValueError) as e: + raise Exception(f"Failed to parse API response: {str(e)}") + + +# Initialize the LLM +llm = OpenWebUILLM() + +# Create the prompt template +recommendation_prompt = PromptTemplate( + input_variables=["favorite_menu", "todays_menu"], + template="""You are a helpful food recommendation assistant. Your task is to suggest exactly one dish from today's menu based on the user's preferences. + +User's favorite meals: {favorite_menu} + +Today's available meals: {todays_menu} + +Based on the user's favorite meals, please recommend exactly ONE meal from today's available options. +Consider: +- Similarity to the user's favorite meals +- Flavor profiles that match their preferences +- Availability in today's menu + +IMPORTANT: You must respond with ONLY the exact name of one dish from today's menu. 
Do not include any explanations, additional text, punctuation, or formatting. Just return the dish name exactly as it appears in today's menu. + +Example format: Spaghetti Carbonara + +Recommendation:""" +) + +# Create the LLM chain +recommendation_chain = LLMChain( + llm=llm, + prompt=recommendation_prompt, + verbose=True +) + +@app.get("/health") +async def health_check(): + """Health check endpoint.""" + return {"status": "healthy", "service": "LLM Recommendation Service"} + @app.post( "/recommend", response_model=RecommendResponse, summary="Generate personalized food recommendation", - description="Accepts a userId, fetches order history, and returns a dish recommendation via LLM." + description="Accepts user's favorite meals and today's menu, returns a personalized meal recommendation via the Open WebUI API." ) async def recommend(req: RecommendRequest) -> RecommendResponse: - # build a simple chat history - messages = [ - {"role": "system", "content": "You are a helpful assistant that suggests food."}, - { - "role": "user", - "content": ( - f"User's favorite menu is: {req.favorite_menu}\n" - f"Here is today's menu: {req.todays_menu}\n" - "Based on the user's favorite menu, suggest exactly one dish from today's menu." - ) - } - ] - + """ + Generate a personalized food recommendation using LangChain and the Open WebUI API.
+ + Args: + req: Request containing user's favorite meals and today's menu + + Returns: + RecommendResponse containing the recommendation + + Raises: + HTTPException: If the API call fails or other errors occur + """ try: - result = chat_with_model(messages) - # extract the model's reply - recommendation = result["choices"][0]["message"]["content"].strip() - except requests.HTTPError as http_err: - raise HTTPException(status_code=502, detail=f"Upstream API error: {http_err}") + # Validate input + if not req.favorite_menu: + raise HTTPException( + status_code=400, + detail="favorite_menu cannot be empty" + ) + + if not req.todays_menu: + raise HTTPException( + status_code=400, + detail="todays_menu cannot be empty" + ) + + # Format arrays as comma-separated strings for better processing + favorite_meals_str = ", ".join(req.favorite_menu) + todays_meals_str = ", ".join(req.todays_menu) + + # Use LangChain to generate recommendation + recommendation = recommendation_chain.run( + favorite_menu=favorite_meals_str, + todays_menu=todays_meals_str + ) + + return RecommendResponse(recommendation=recommendation) + + except HTTPException: + # Re-raise HTTP exceptions as-is + raise except Exception as e: - raise HTTPException(status_code=500, detail=f"Internal error: {e}") + # Log the error (in production, use proper logging) + print(f"Error generating recommendation: {str(e)}") + raise HTTPException( + status_code=500, + detail=f"Failed to generate recommendation: {str(e)}" + ) - return RecommendResponse(recommendation=recommendation) - -# so that we can call python main.py directly + +@app.get("/") +async def root(): + """Root endpoint with service information.""" + return { + "service": "LLM Recommendation Service", + "version": "1.0.0", + "description": "Generates personalized food recommendations using LangChain and Open WebUI", + "endpoints": { + "health": "/health", + "recommend": "/recommend", + "docs": "/docs" + } + } + +# Entry point for direct execution if __name__ 
== "__main__": """ Entry point for `python main.py` invocation. Starts Uvicorn server serving this FastAPI app. - Honors PORT environment variable (default: 8000). + Honors PORT environment variable (default: 5000). Reload=True enables live-reload during development. """ import uvicorn + port = int(os.getenv("PORT", 5000)) + + print(f"Starting LLM Recommendation Service on port {port}") + print(f"API Documentation available at: http://localhost:{port}/docs") + uvicorn.run( "main:app", host="0.0.0.0", - port=int(os.getenv("PORT", 5000)), + port=port, reload=True ) diff --git a/llm/requirements.txt b/llm/requirements.txt index c682eb0..2de581c 100644 --- a/llm/requirements.txt +++ b/llm/requirements.txt @@ -1,5 +1,6 @@ -fastapi -uvicorn # for import inside main.py -asyncpg -langchain -pydantic +fastapi>=0.100.0 +uvicorn[standard]>=0.23.0 +requests>=2.31.0 +pydantic>=2.0.0 +langchain>=0.3.0 +langchain-core>=0.3.0 diff --git a/server/src/main/java/de/tum/aet/devops25/w07/client/LLMRestClient.java b/server/src/main/java/de/tum/aet/devops25/w07/client/LLMRestClient.java new file mode 100644 index 0000000..213829a --- /dev/null +++ b/server/src/main/java/de/tum/aet/devops25/w07/client/LLMRestClient.java @@ -0,0 +1,64 @@ +package de.tum.aet.devops25.w07.client; + +import de.tum.aet.devops25.w07.model.RecommendRequest; +import de.tum.aet.devops25.w07.model.RecommendResponse; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.http.HttpEntity; +import org.springframework.http.HttpHeaders; +import org.springframework.http.HttpMethod; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.stereotype.Component; +import org.springframework.web.client.RestTemplate; + +import java.util.List; + +@Component +public class LLMRestClient { + + private final RestTemplate restTemplate; + private final String llmServiceUrl; + + public LLMRestClient(RestTemplate restTemplate, + 
@Value("${llm.service.url:http://localhost:5000}") String llmServiceUrl) { + this.restTemplate = restTemplate; + this.llmServiceUrl = llmServiceUrl; + } + + /** + * Generate recommendations using the REST LLM service + * @param favoriteMenu list of user's favorite meal names + * @param todaysMenu list of today's available meal names + * @return recommendation string + */ + public String generateRecommendations(List favoriteMenu, List todaysMenu) { + try { + // Create request body + RecommendRequest request = new RecommendRequest(favoriteMenu, todaysMenu); + + // Set headers + HttpHeaders headers = new HttpHeaders(); + headers.setContentType(MediaType.APPLICATION_JSON); + + // Create HTTP entity + HttpEntity entity = new HttpEntity<>(request, headers); + + // Make REST call + String url = llmServiceUrl + "/recommend"; + ResponseEntity response = restTemplate.exchange( + url, + HttpMethod.POST, + entity, + RecommendResponse.class + ); + + // Extract recommendation from response + RecommendResponse responseBody = response.getBody(); + return responseBody != null ? 
responseBody.recommendation() : ""; + + } catch (Exception e) { + System.err.println("Error calling LLM REST service: " + e.getMessage()); + return ""; + } + } +} diff --git a/server/src/main/java/de/tum/aet/devops25/w07/controller/MealRecommendationController.java b/server/src/main/java/de/tum/aet/devops25/w07/controller/RecommendationController.java similarity index 78% rename from server/src/main/java/de/tum/aet/devops25/w07/controller/MealRecommendationController.java rename to server/src/main/java/de/tum/aet/devops25/w07/controller/RecommendationController.java index 045205a..58b9dce 100644 --- a/server/src/main/java/de/tum/aet/devops25/w07/controller/MealRecommendationController.java +++ b/server/src/main/java/de/tum/aet/devops25/w07/controller/RecommendationController.java @@ -12,31 +12,31 @@ import org.springframework.web.bind.annotation.RestController; import java.util.List; +import java.util.Map; @RestController -public class MealRecommendationController { +public class RecommendationController { private final LLMRecommendationService llmRecommendationService; private final UserPreferenceService userPreferenceService; private final CanteenService canteenService; - public MealRecommendationController(LLMRecommendationService llmRecommendationService, UserPreferenceService userPreferenceService, CanteenService canteenService) { + public RecommendationController(LLMRecommendationService llmRecommendationService, UserPreferenceService userPreferenceService, CanteenService canteenService) { this.llmRecommendationService = llmRecommendationService; this.userPreferenceService = userPreferenceService; this.canteenService = canteenService; } - @GetMapping("/meal/recommend/{userId}") - public ResponseEntity getRecommendation(@PathVariable String userId) { + @GetMapping("/recommend/{name}") + public ResponseEntity> getRecommendation(@PathVariable String name) { //Get the favorite meal from database - UserPreferences userPreferences = 
userPreferenceService.getPreferences(userId); + UserPreferences userPreferences = userPreferenceService.getPreferences(name); if (userPreferences == null || userPreferences.getFavoriteMeals() == null || userPreferences.getFavoriteMeals().isEmpty()) { return ResponseEntity.noContent().build(); // No favorites found } //Get the today's menu from canteenService.getTodayMeals - //TODO: Hard coded as mensa-garching, not good but temp solution. List todaysMeals = canteenService.getTodayMeals("mensa-garching"); //call LLM service to get recommendation based on the user's favorites @@ -46,7 +46,7 @@ public ResponseEntity getRecommendation(@PathVariable String userId) { return ResponseEntity.noContent().build(); } - return ResponseEntity.ok(responseFromLLMService); + return ResponseEntity.ok(Map.of("recommendation", responseFromLLMService)); } diff --git a/server/src/main/java/de/tum/aet/devops25/w07/model/RecommendRequest.java b/server/src/main/java/de/tum/aet/devops25/w07/model/RecommendRequest.java new file mode 100644 index 0000000..9155421 --- /dev/null +++ b/server/src/main/java/de/tum/aet/devops25/w07/model/RecommendRequest.java @@ -0,0 +1,10 @@ +package de.tum.aet.devops25.w07.model; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.List; + +public record RecommendRequest( + @JsonProperty("favorite_menu") List favoriteMenu, + @JsonProperty("todays_menu") List todaysMenu +) {} diff --git a/server/src/main/java/de/tum/aet/devops25/w07/model/RecommendResponse.java b/server/src/main/java/de/tum/aet/devops25/w07/model/RecommendResponse.java new file mode 100644 index 0000000..e38a410 --- /dev/null +++ b/server/src/main/java/de/tum/aet/devops25/w07/model/RecommendResponse.java @@ -0,0 +1,7 @@ +package de.tum.aet.devops25.w07.model; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public record RecommendResponse( + @JsonProperty("recommendation") String recommendation +) {} diff --git 
a/server/src/main/java/de/tum/aet/devops25/w07/service/LLMRecommendationService.java b/server/src/main/java/de/tum/aet/devops25/w07/service/LLMRecommendationService.java index c835866..3719897 100644 --- a/server/src/main/java/de/tum/aet/devops25/w07/service/LLMRecommendationService.java +++ b/server/src/main/java/de/tum/aet/devops25/w07/service/LLMRecommendationService.java @@ -1,53 +1,36 @@ package de.tum.aet.devops25.w07.service; -import de.tum.aet.devops25.w07.grpc.LLMGrpcClient; +import de.tum.aet.devops25.w07.client.LLMRestClient; import de.tum.aet.devops25.w07.model.Dish; -import llm.LLM; import org.springframework.stereotype.Service; -import java.time.LocalDateTime; -import java.time.format.DateTimeFormatter; import java.util.List; import java.util.stream.Collectors; @Service public class LLMRecommendationService { - private final LLMGrpcClient llmGrpcClient; + private final LLMRestClient llmRestClient; - public LLMRecommendationService(LLMGrpcClient llmGrpcClient) { - this.llmGrpcClient = llmGrpcClient; + public LLMRecommendationService(LLMRestClient llmRestClient) { + this.llmRestClient = llmRestClient; } /** - * Get recommendation from LLM service using gRPC + * Get recommendation from LLM service using REST API * @param favoriteMeals list of user's favorite meal names * @param todayMeals list of today's available dishes - * @return recommendation as a string (comma-separated list of recommended dish names) + * @return recommendation as a string */ public String getRecommendationFromLLM(List favoriteMeals, List todayMeals) { try { - // Convert favorite meals to FavoriteItem objects - String currentTime = LocalDateTime.now().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME); - List favoriteItems = favoriteMeals.stream() - .map(mealName -> llmGrpcClient.createFavoriteItem( - generateId(mealName), - mealName, - "Meal", // Default category - currentTime - )) + // Convert today's dishes to meal names + List todayMealNames = todayMeals.stream() + .map(Dish::name) 
.collect(Collectors.toList()); - // Generate a user cookie (in real implementation, this would come from the session) - String userCookie = "user-" + System.currentTimeMillis(); - - // Call gRPC service - List recommendedIds = llmGrpcClient.generateRecommendations(userCookie, favoriteItems); - - // Convert recommended IDs back to dish names - // In a real implementation, you would map IDs to actual dish names - // For now, we'll return the IDs as recommendations - return String.join(", ", recommendedIds); + // Call REST service + return llmRestClient.generateRecommendations(favoriteMeals, todayMealNames); } catch (Exception e) { System.err.println("Error fetching recommendation from LLM service: " + e.getMessage()); @@ -55,11 +38,4 @@ public String getRecommendationFromLLM(List favoriteMeals, List to } } - /** - * Generate a simple ID from meal name - */ - private String generateId(String mealName) { - return mealName.toLowerCase().replaceAll("[^a-zA-Z0-9]", "-"); - } - } \ No newline at end of file diff --git a/server/src/main/resources/application.properties b/server/src/main/resources/application.properties index f7a1e63..e8f1c73 100644 --- a/server/src/main/resources/application.properties +++ b/server/src/main/resources/application.properties @@ -17,5 +17,9 @@ spring.jpa.hibernate.ddl-auto=update spring.jpa.show-sql=true spring.sql.init.mode=always +# LLM Service Configuration +llm.service.url=${LLM_SERVICE_URL:http://localhost:5000} + +# gRPC Configuration (kept for compatibility) grpc.client.llm-service.address=static://localhost:9090 grpc.client.llm-service.negotiation-type=plaintext diff --git a/server/src/test/java/de/tum/aet/devops25/w07/service/LLMRecommendationServiceTest.java b/server/src/test/java/de/tum/aet/devops25/w07/service/LLMRecommendationServiceTest.java new file mode 100644 index 0000000..0bb6b83 --- /dev/null +++ b/server/src/test/java/de/tum/aet/devops25/w07/service/LLMRecommendationServiceTest.java @@ -0,0 +1,89 @@ +package 
de.tum.aet.devops25.w07.service; + +import de.tum.aet.devops25.w07.client.LLMRestClient; +import de.tum.aet.devops25.w07.model.Dish; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +class LLMRecommendationServiceTest { + + @Mock + private LLMRestClient llmRestClient; + + private LLMRecommendationService llmRecommendationService; + + @BeforeEach + void setUp() { + llmRecommendationService = new LLMRecommendationService(llmRestClient); + } + + @Test + void shouldGetRecommendationFromLLM() { + // Given + List favoriteMeals = Arrays.asList("Pizza", "Pasta"); + List todayMeals = Arrays.asList( + new Dish("Margherita Pizza", "main", Arrays.asList("vegetarian")), + new Dish("Chicken Curry", "main", Arrays.asList("spicy")), + new Dish("Caesar Salad", "salad", Arrays.asList("fresh")) + ); + + String expectedRecommendation = "Margherita Pizza"; + + when(llmRestClient.generateRecommendations( + eq(favoriteMeals), + eq(Arrays.asList("Margherita Pizza", "Chicken Curry", "Caesar Salad")) + )).thenReturn(expectedRecommendation); + + // When + String result = llmRecommendationService.getRecommendationFromLLM(favoriteMeals, todayMeals); + + // Then + assertThat(result).isEqualTo(expectedRecommendation); + } + + @Test + void shouldReturnEmptyStringWhenExceptionOccurs() { + // Given + List favoriteMeals = Arrays.asList("Pizza"); + List todayMeals = Arrays.asList( + new Dish("Margherita Pizza", "main", Arrays.asList("vegetarian")) + ); + + when(llmRestClient.generateRecommendations(any(), any())) + .thenThrow(new RuntimeException("Service 
unavailable")); + + // When + String result = llmRecommendationService.getRecommendationFromLLM(favoriteMeals, todayMeals); + + // Then + assertThat(result).isEmpty(); + } + + @Test + void shouldHandleEmptyMealLists() { + // Given + List favoriteMeals = Arrays.asList(); + List todayMeals = Arrays.asList(); + + when(llmRestClient.generateRecommendations(eq(favoriteMeals), eq(Arrays.asList()))) + .thenReturn(""); + + // When + String result = llmRecommendationService.getRecommendationFromLLM(favoriteMeals, todayMeals); + + // Then + assertThat(result).isEmpty(); + } +}