diff --git a/examples/bookmind_app/README.md b/examples/bookmind_app/README.md
new file mode 100644
index 000000000..6d4404e47
--- /dev/null
+++ b/examples/bookmind_app/README.md
@@ -0,0 +1,112 @@
+# BookMind
+
+[Watch the BookMind demo video](https://youtu.be/DL4-DswxfEM)
+
+BookMind is a web application that allows users to explore character relationships and storylines in books using AI-powered visualizations. The application provides interactive mind maps, AI chatbots for deep questions, book summaries, and community contributions.
+
+## Features
+
+- Interactive Mind Maps: Visualize relationships between characters and plot elements.
+- AI Chatbot: Ask deep questions about the book and get insightful answers.
+- Book Summaries: Get concise overviews of plots and themes.
+- Community Contributions: Add and refine maps with fellow book lovers.
+
+## Prerequisites
+
+- Node.js
+- Python >= 3.10
+- LlamaStack server running locally
+- Environment variables:
+ - LLAMA_STACK_PORT
+ - INFERENCE_MODEL
+ - REACT_APP_GOOGLE_BOOKS_API_KEY
+
+## Getting Started
+
+### Run llama-stack
+
+1. Set up the Ollama server
+ Please check the [Ollama Documentation](https://github.com/ollama/ollama) on how to install and run Ollama. After installing Ollama, you need to run `ollama serve` to start the server.
+
+```
+export INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct"
+
+# ollama names this model differently, and we must use the ollama name when loading the model
+export OLLAMA_INFERENCE_MODEL="llama3.2:3b-instruct-fp16"
+ollama run $OLLAMA_INFERENCE_MODEL --keepalive 60m
+```
+
+2. Running `llama-stack` server
+
+```
+pip install llama-stack
+
+export LLAMA_STACK_PORT=5000
+
+# This builds llamastack-ollama conda environment
+llama stack build --template ollama --image-type conda
+
+conda activate llamastack-ollama
+
+llama stack run \
+ --port $LLAMA_STACK_PORT \
+ --env INFERENCE_MODEL=$INFERENCE_MODEL \
+ --env OLLAMA_URL=http://localhost:11434 \
+ ollama
+```
+
+### Backend Setup
+
+1. Install dependencies:
+
+```
+cd server
+pip install -r requirements.txt
+```
+
+2. Set `.env` in the `server` directory
+
+Rename `example.env` to `.env` in the `server` directory.
+**Replace the INFERENCE_MODEL value in `.env` with the model you are running.**
+
+3. Run the server:
+
+```
+python server.py
+```
+
+### Frontend Setup
+
+1. Set up GOOGLE_BOOKS_API_KEY:
+
+Rename `example.env` to `.env` and replace `{YOUR_API_KEY}` with your [Google Books API key](https://developers.google.com/books/docs/v1/using) after obtaining one.
+
+```
+REACT_APP_GOOGLE_BOOKS_API_KEY={YOUR_API_KEY}
+```
+
+2. Install dependencies and run the application:
+
+```
+npm install
+npm start
+```
+
+## Usage
+
+1. Initialize Memory: Upload your book or choose from the library to initialize memory.
+2. AI Analysis: The AI analyzes the book and generates a mind map.
+3. Explore Insights: Explore relationships, themes, and Q&A insights.
+
+## How did we use Llama Stack in BookMind?
+
+1️⃣ Llama inference models: you can easily start an LLM application using various LLM services.
+2️⃣ RAG with FAISS: We leveraged FAISS in Llama-stack for Retrieval-Augmented Generation, enabling real-time responses to character relationship questions.
+3️⃣ Multi-Hop Reasoning: Our system performs sequential inference—first extracting characters and relationships, then generating graph-structured mind map data in JSON for visual storytelling.
+
+## Contributors
+
+- [Original Repo](https://github.com/seyeong-han/BookMind)
+- [seyeong-han](https://github.com/seyeong-han)
+- [sunjinj](https://github.com/SunjinJ)
+- [WonHaLee](https://github.com/WonHaLee)
diff --git a/examples/bookmind_app/example.env b/examples/bookmind_app/example.env
new file mode 100644
index 000000000..41d80c076
--- /dev/null
+++ b/examples/bookmind_app/example.env
@@ -0,0 +1 @@
+REACT_APP_GOOGLE_BOOKS_API_KEY={YOUR_API_KEY}
\ No newline at end of file
diff --git a/examples/bookmind_app/package.json b/examples/bookmind_app/package.json
new file mode 100644
index 000000000..ec233d2bb
--- /dev/null
+++ b/examples/bookmind_app/package.json
@@ -0,0 +1,48 @@
+{
+ "name": "bookmind",
+ "version": "0.1.0",
+ "private": true,
+ "dependencies": {
+ "@testing-library/jest-dom": "^5.17.0",
+ "@testing-library/react": "^13.4.0",
+ "@testing-library/user-event": "^13.5.0",
+ "axios": "^1.7.7",
+ "fs": "^0.0.1-security",
+ "lottie-react": "^2.4.0",
+ "lucide-react": "^0.460.0",
+ "react": "^18.3.1",
+ "react-dom": "^18.3.1",
+ "react-force-graph": "^1.44.7",
+ "react-force-graph-2d": "^1.25.8",
+ "react-router-dom": "^7.0.1",
+ "react-scripts": "^5.0.1",
+ "web-vitals": "^2.1.4"
+ },
+ "scripts": {
+ "start": "react-scripts start",
+ "build": "react-scripts build",
+ "test": "react-scripts test",
+ "eject": "react-scripts eject"
+ },
+ "eslintConfig": {
+ "extends": [
+ "react-app",
+ "react-app/jest"
+ ]
+ },
+ "browserslist": {
+ "production": [
+ ">0.2%",
+ "not dead",
+ "not op_mini all"
+ ],
+ "development": [
+ "last 1 chrome version",
+ "last 1 firefox version",
+ "last 1 safari version"
+ ]
+ },
+ "devDependencies": {
+ "tailwindcss": "^3.4.15"
+ }
+}
diff --git a/examples/bookmind_app/public/favicon.ico b/examples/bookmind_app/public/favicon.ico
new file mode 100644
index 000000000..a11777cc4
Binary files /dev/null and b/examples/bookmind_app/public/favicon.ico differ
diff --git a/examples/bookmind_app/public/index.html b/examples/bookmind_app/public/index.html
new file mode 100644
index 000000000..aa069f27c
--- /dev/null
+++ b/examples/bookmind_app/public/index.html
@@ -0,0 +1,43 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ React App
+
+
+
+
+
+
+
diff --git a/examples/bookmind_app/public/logo192.png b/examples/bookmind_app/public/logo192.png
new file mode 100644
index 000000000..fc44b0a37
Binary files /dev/null and b/examples/bookmind_app/public/logo192.png differ
diff --git a/examples/bookmind_app/public/logo512.png b/examples/bookmind_app/public/logo512.png
new file mode 100644
index 000000000..a4e47a654
Binary files /dev/null and b/examples/bookmind_app/public/logo512.png differ
diff --git a/examples/bookmind_app/public/manifest.json b/examples/bookmind_app/public/manifest.json
new file mode 100644
index 000000000..080d6c77a
--- /dev/null
+++ b/examples/bookmind_app/public/manifest.json
@@ -0,0 +1,25 @@
+{
+ "short_name": "React App",
+ "name": "Create React App Sample",
+ "icons": [
+ {
+ "src": "favicon.ico",
+ "sizes": "64x64 32x32 24x24 16x16",
+ "type": "image/x-icon"
+ },
+ {
+ "src": "logo192.png",
+ "type": "image/png",
+ "sizes": "192x192"
+ },
+ {
+ "src": "logo512.png",
+ "type": "image/png",
+ "sizes": "512x512"
+ }
+ ],
+ "start_url": ".",
+ "display": "standalone",
+ "theme_color": "#000000",
+ "background_color": "#ffffff"
+}
diff --git a/examples/bookmind_app/public/robots.txt b/examples/bookmind_app/public/robots.txt
new file mode 100644
index 000000000..e9e57dc4d
--- /dev/null
+++ b/examples/bookmind_app/public/robots.txt
@@ -0,0 +1,3 @@
+# https://www.robotstxt.org/robotstxt.html
+User-agent: *
+Disallow:
diff --git a/examples/bookmind_app/server/README.md b/examples/bookmind_app/server/README.md
new file mode 100644
index 000000000..15c62a8e1
--- /dev/null
+++ b/examples/bookmind_app/server/README.md
@@ -0,0 +1,27 @@
+# Book Character Graph API
+
+A Flask-based API server that analyzes books to create character relationship graphs and provides an interactive query interface using LlamaStack.
+
+## Features
+
+- Character and relationship extraction from books
+- Graph generation of character relationships
+- Memory-based query system for book details
+- Interactive Q&A about book characters and plots
+
+## Prerequisites
+
+- Python >= 3.10
+- LlamaStack server running locally
+- Environment variables:
+ - `LLAMA_STACK_PORT`
+ - `INFERENCE_MODEL`
+
+## Get Started
+
+```bash
+# Install dependencies
+pip install -r requirements.txt
+
+python server.py
+```
diff --git a/examples/bookmind_app/server/example.env b/examples/bookmind_app/server/example.env
new file mode 100644
index 000000000..ebf82724c
--- /dev/null
+++ b/examples/bookmind_app/server/example.env
@@ -0,0 +1,3 @@
+LLAMA_STACK_PORT=5000
+INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct"
+SERVER_PORT=5001
\ No newline at end of file
diff --git a/examples/bookmind_app/server/requirements.txt b/examples/bookmind_app/server/requirements.txt
new file mode 100644
index 000000000..fdbf142b4
--- /dev/null
+++ b/examples/bookmind_app/server/requirements.txt
@@ -0,0 +1,5 @@
+flask
+flask-cors
+llama_stack_client
+# NOTE: asyncio is part of the Python standard library; the PyPI "asyncio" package is an obsolete backport and must not be installed
+werkzeug
\ No newline at end of file
diff --git a/examples/bookmind_app/server/server.py b/examples/bookmind_app/server/server.py
new file mode 100644
index 000000000..d474a0011
--- /dev/null
+++ b/examples/bookmind_app/server/server.py
@@ -0,0 +1,281 @@
+import os
+import json
+import asyncio
+import logging
+from flask import Flask, request, jsonify
+from flask_cors import CORS
+from llama_stack_client import LlamaStackClient
+from llama_stack_client.lib.agents.agent import Agent
+from llama_stack_client.lib.agents.event_logger import EventLogger
+from llama_stack_client.types.agent_create_params import AgentConfig
+
+# Get env variables
+INFERENCE_MODEL = os.environ["INFERENCE_MODEL"]
+LLAMA_STACK_PORT = os.environ["LLAMA_STACK_PORT"]
+SERVER_PORT = os.environ["SERVER_PORT"]
+
+# Flask setup
+app = Flask(__name__)
+CORS(app)
+
+# Memory initialization
+active_sessions = {}
+
+
+@app.route("/initialize", methods=["POST"])
+def initialize_memory():
+ """
+ Initialize memory for a new book. Queries LlamaStack for characters and relationships,
+ and sets up memory for the user to interact with.
+ """
+ data = request.json
+ book_title = data.get("title")
+ if not book_title:
+ return jsonify({"error": "Book title is required"}), 400
+
+ # Clear the previous session if any
+ active_sessions.clear()
+
+ # Create a new memory agent and process the book
+ response = asyncio.run(process_book(book_title))
+ return jsonify(response), 200
+
+
+async def async_generator_wrapper(sync_gen):
+ for item in sync_gen:
+ yield item
+
+
+async def get_graph_response(text_response, client):
+ # Create prompt for graph conversion
+ graph_prompt = f"""
+ Convert this character description into a graph format with nodes and links.
+ Format the response as a JSON object with "nodes" and "links" arrays.
+ Each node should have "id", "name", and "val" properties.
+ Each link should have "source" and "target" properties using the node ids.
+ Set all node "val" to 10.
+
+ Text to convert:
+ {text_response}
+
+ Expected format example (return only the JSON object, no additional text):
+ {{
+ "nodes": [
+ {{"id": id1, "name": "Character Name", "val": 10}}
+ ],
+ "links": [
+ {{"source": id1, "target": "id2"}}
+ ]
+ }}
+ id1 and id2 are example variables and you should not generate those exact values.
+ """
+
+ # Get graph structure from LlamaStack
+ graph_response = client.inference.chat_completion(
+ model_id=INFERENCE_MODEL,
+ messages=[
+ {"role": "system", "content": "You are a data structure expert. Convert text descriptions into graph JSON format."},
+ {"role": "user", "content": graph_prompt}
+ ]
+ )
+ return graph_response
+
+
+def jsonify_graph_response(response):
+ """Extract and parse JSON content from graph response."""
+ try:
+ content = response.completion_message.content
+ print("content: ", content)
+ # Find indices of first { and last }
+ start_idx = content.find('{')
+ end_idx = content.rfind('}')
+
+ if start_idx == -1 or end_idx == -1:
+ raise ValueError("No valid JSON object found in response")
+
+ # Extract JSON string
+ json_str = content[start_idx:end_idx + 1]
+
+ # Parse JSON
+ return json.loads(json_str)
+
+ except Exception as e:
+ logging.error(f"Error parsing graph response: {e}")
+ return None
+
+
+async def process_book(book_title):
+ """
+ Process the book title, query LlamaStack for characters and relationships,
+ and initialize memory.
+ """
+ client = LlamaStackClient(base_url=f"http://localhost:{LLAMA_STACK_PORT}")
+ agent_config = AgentConfig(
+ model=INFERENCE_MODEL,
+ instructions="You are a helpful assistant",
+ tools=[{"type": "memory"}], # Enable memory
+ enable_session_persistence=True,
+ max_infer_iters=5,
+ )
+
+ # Create the agent and session
+ agent = Agent(client, agent_config)
+ session_id = agent.create_session(f"{book_title}-session")
+ active_sessions["agent"] = agent
+ active_sessions["session_id"] = session_id
+ logging.info(f"Created session_id={session_id} for book: {book_title}")
+
+ # Query LlamaStack for characters and relationships
+ prompt = f"Who are the characters in the book '{book_title}', and what are their relationships?"
+
+ response = client.inference.chat_completion(
+ model_id=os.environ["INFERENCE_MODEL"],
+ messages=[
+ {"role": "system", "content": "You are a knowledgeable book expert. Provide detailed information about characters and their relationships in the book."},
+ {"role": "user", "content": prompt}
+ ]
+ )
+ text_response = response.completion_message.content
+
+ file_name = f"{book_title.replace(' ', '_').lower()}_memory.txt"
+ with open(file_name, "w") as f:
+ f.write(text_response)
+
+ graph_response = await get_graph_response(text_response, client)
+
+ print("graph_response: ", graph_response)
+
+ graph_data = ""
+ try:
+ graph_data = jsonify_graph_response(graph_response)
+ logging.info("Graph data generated:", json.dumps(graph_data, indent=2))
+ except json.JSONDecodeError as e:
+ logging.error(f"Error parsing graph response: {e}")
+
+ # Push to memory agent (optional if further memory setup is needed)
+ memory_prompt = "Save this knowledge about the book into memory for future questions."
+ memory_response = agent.create_turn(
+ messages=[{"role": "user", "content": memory_prompt}],
+ attachments=[
+ {"content": text_response, "mime_type": "text/plain"}
+ ],
+ session_id=session_id,
+ )
+
+ async for log in async_generator_wrapper(EventLogger().log(memory_response)):
+ log.print()
+
+ return graph_data
+
+
+def convert_to_graph_format(entities, relationships):
+ """
+ Converts entities and relationships into a graph dictionary format.
+ """
+ nodes = []
+ links = []
+ node_id_map = {}
+ node_counter = 1
+
+ # Add entities as nodes
+ for entity in entities:
+ name = entity["text"]
+ if name not in node_id_map:
+ node_id_map[name] = f"id{node_counter}"
+ nodes.append({"id": f"id{node_counter}", "name": name, "val": 10})
+ node_counter += 1
+
+ # Add relationships as links
+ for relationship in relationships.split("\n"): # Assuming relationships are line-separated
+ parts = relationship.split(" and ")
+ if len(parts) == 2:
+ source_name = parts[0].strip()
+ target_name = parts[1].split(" are")[0].strip()
+
+ if source_name in node_id_map and target_name in node_id_map:
+ links.append({
+ "source": node_id_map[source_name],
+ "target": node_id_map[target_name]
+ })
+
+ return {"nodes": nodes, "links": links}
+
+
+def convert_to_graph_format_old(response):
+ """
+ Converts LlamaStack's text response into a graph dictionary format.
+ """
+ nodes = []
+ links = []
+
+ # Simplified parsing logic (replace with actual NLP or regex parsing)
+ lines = [line for line in response if line.strip()]
+ node_id_map = {}
+ node_counter = 1
+
+ for line in lines:
+ if " and " in line: # Assumes relationships are described as "X and Y are friends"
+ parts = line.split(" and ")
+ if len(parts) == 2:
+ source_name = parts[0].strip()
+ target_name = parts[1].split(" are")[0].strip()
+
+ # Add nodes if they don't already exist
+ for name in [source_name, target_name]:
+ if name not in node_id_map:
+ node_id_map[name] = f"id{node_counter}"
+ nodes.append({"id": f"id{node_counter}", "name": name, "val": 10})
+ node_counter += 1
+
+ # Add the relationship as a link
+ links.append({
+ "source": node_id_map[source_name],
+ "target": node_id_map[target_name]
+ })
+
+ return {"nodes": nodes, "links": links}
+
+
+@app.route("/query", methods=["POST"])
+def query_memory():
+ """
+ Handles user queries and returns answers based on memory.
+ """
+ data = request.json
+ query = data.get("query")
+ if not query:
+ return jsonify({"error": "Query parameter is missing"}), 400
+
+ # Query the memory agent
+ response = asyncio.run(query_llama_stack(query))
+ return jsonify({"response": response})
+
+
+async def query_llama_stack(prompt):
+ """
+ Queries the active LlamaStack session with a user prompt.
+ """
+ if "agent" not in active_sessions:
+ return "No active agent session. Please initialize a book first."
+
+ agent = active_sessions["agent"]
+ session_id = active_sessions["session_id"]
+
+ response = agent.create_turn(
+ messages=[{"role": "user", "content": prompt}],
+ session_id=session_id,
+ )
+
+ # Process response logs
+ result = []
+ async for log in async_generator_wrapper(EventLogger().log(response)):
+ result.append(str(log))
+
+ inference_start_idx = result.index("inference> ")
+ inference_logs = result[inference_start_idx + 1:]
+
+ return "\n".join(inference_logs)
+
+
+if __name__ == "__main__":
+ app.run(debug=True, port=SERVER_PORT)
diff --git a/examples/bookmind_app/server/test.py b/examples/bookmind_app/server/test.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/bookmind_app/src/App.css b/examples/bookmind_app/src/App.css
new file mode 100644
index 000000000..74b5e0534
--- /dev/null
+++ b/examples/bookmind_app/src/App.css
@@ -0,0 +1,38 @@
+.App {
+ text-align: center;
+}
+
+.App-logo {
+ height: 40vmin;
+ pointer-events: none;
+}
+
+@media (prefers-reduced-motion: no-preference) {
+ .App-logo {
+ animation: App-logo-spin infinite 20s linear;
+ }
+}
+
+.App-header {
+ background-color: #282c34;
+ min-height: 100vh;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ font-size: calc(10px + 2vmin);
+ color: white;
+}
+
+.App-link {
+ color: #61dafb;
+}
+
+@keyframes App-logo-spin {
+ from {
+ transform: rotate(0deg);
+ }
+ to {
+ transform: rotate(360deg);
+ }
+}
diff --git a/examples/bookmind_app/src/App.js b/examples/bookmind_app/src/App.js
new file mode 100644
index 000000000..d3271465c
--- /dev/null
+++ b/examples/bookmind_app/src/App.js
@@ -0,0 +1,17 @@
+import { BrowserRouter as Router, Routes, Route } from "react-router-dom";
+import Home from "./homePage/index";
+import SearchPage from "./bookPage/components/SearchPage";
+
+function App() {
+ return (
+
+
+ {/* Define routes for Home and SearchPage */}
+ } />
+ } />
+
+
+ );
+}
+
+export default App;
diff --git a/examples/bookmind_app/src/App.test.js b/examples/bookmind_app/src/App.test.js
new file mode 100644
index 000000000..1f03afeec
--- /dev/null
+++ b/examples/bookmind_app/src/App.test.js
@@ -0,0 +1,8 @@
+import { render, screen } from '@testing-library/react';
+import App from './App';
+
+test('renders learn react link', () => {
+ render();
+ const linkElement = screen.getByText(/learn react/i);
+ expect(linkElement).toBeInTheDocument();
+});
diff --git a/examples/bookmind_app/src/approuter.jsx b/examples/bookmind_app/src/approuter.jsx
new file mode 100644
index 000000000..7a80f93cf
--- /dev/null
+++ b/examples/bookmind_app/src/approuter.jsx
@@ -0,0 +1,26 @@
+/* eslint-disable no-extra-semi */
+/* eslint-disable react/prop-types */
+/* eslint-disable no-unused-vars */
+import { Route, BrowserRouter as Router, Routes } from "react-router-dom";
+
+import Home from "./pages/homePage";
+import BookPage from "./pages/bookPage";
+
+const AppRouter = function () {
+ return (
+ <>
+
+ } />
+ } />
+
+ >
+ );
+};
+
+const App = () => (
+
+
+
+);
+
+export default App;
diff --git a/examples/bookmind_app/src/index.css b/examples/bookmind_app/src/index.css
new file mode 100644
index 000000000..b5c61c956
--- /dev/null
+++ b/examples/bookmind_app/src/index.css
@@ -0,0 +1,3 @@
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
diff --git a/examples/bookmind_app/src/index.js b/examples/bookmind_app/src/index.js
new file mode 100644
index 000000000..6e917b174
--- /dev/null
+++ b/examples/bookmind_app/src/index.js
@@ -0,0 +1,13 @@
+import React from "react";
+import ReactDOM from "react-dom/client";
+import "./index.css";
+import AppRouter from "./approuter";
+import reportWebVitals from "./reportWebVitals";
+
+const root = ReactDOM.createRoot(document.getElementById("root"));
+root.render();
+
+// If you want to start measuring performance in your app, pass a function
+// to log results (for example: reportWebVitals(console.log))
+// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
+reportWebVitals();
diff --git a/examples/bookmind_app/src/logo.svg b/examples/bookmind_app/src/logo.svg
new file mode 100644
index 000000000..9dfc1c058
--- /dev/null
+++ b/examples/bookmind_app/src/logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/examples/bookmind_app/src/pages/Layout.jsx b/examples/bookmind_app/src/pages/Layout.jsx
new file mode 100644
index 000000000..da6868d42
--- /dev/null
+++ b/examples/bookmind_app/src/pages/Layout.jsx
@@ -0,0 +1,19 @@
+import "../index.css";
+
+export const metadata = {
+ title: "BookMind - Unravel Stories, One Map at a Time",
+ description:
+ "Explore character relationships and storylines with AI-powered visualizations.",
+};
+
+export default function RootLayout({ children }) {
+ return (
+
+
+ {metadata.title}
+
+
+ {children}
+
+ );
+}
diff --git a/examples/bookmind_app/src/pages/MindMap/index.jsx b/examples/bookmind_app/src/pages/MindMap/index.jsx
new file mode 100644
index 000000000..a621fc396
--- /dev/null
+++ b/examples/bookmind_app/src/pages/MindMap/index.jsx
@@ -0,0 +1,127 @@
+import React, { useState } from "react";
+import CytoscapeComponent from "react-cytoscapejs";
+
+const MindMap = () => {
+ const [hoveredNode, setHoveredNode] = useState(null);
+
+ // Graph data: Nodes and Edges
+ const elements = [
+ { data: { id: "Harry", label: "Harry Potter" } },
+ { data: { id: "Hermione", label: "Hermione Granger" } },
+ { data: { id: "Ron", label: "Ron Weasley" } },
+ { data: { id: "Dumbledore", label: "Albus Dumbledore" } },
+ {
+ data: {
+ id: "friendship1",
+ source: "Harry",
+ target: "Hermione",
+ label: "Friends",
+ },
+ },
+ {
+ data: {
+ id: "friendship2",
+ source: "Harry",
+ target: "Ron",
+ label: "Best Friends",
+ },
+ },
+ {
+ data: {
+ id: "mentor",
+ source: "Dumbledore",
+ target: "Harry",
+ label: "Mentor",
+ },
+ },
+ ];
+
+ // Cytoscape Styles for Nodes and Edges
+ const style = [
+ {
+ selector: "node",
+ style: {
+ "background-color": "#0074D9",
+ label: "data(label)",
+ "text-valign": "center",
+ "text-halign": "center",
+ color: "#ffffff",
+ "font-size": "10px",
+ width: "40px",
+ height: "40px",
+ },
+ },
+ {
+ selector: "edge",
+ style: {
+ "line-color": "#AAAAAA",
+ "target-arrow-color": "#AAAAAA",
+ "target-arrow-shape": "triangle",
+ "curve-style": "bezier",
+ label: "data(label)",
+ "font-size": "8px",
+ color: "#333333",
+ "text-outline-color": "#ffffff",
+ "text-outline-width": "1px",
+ },
+ },
+ {
+ selector: ":selected",
+ style: {
+ "background-color": "#FF4136",
+ "line-color": "#FF4136",
+ "target-arrow-color": "#FF4136",
+ "source-arrow-color": "#FF4136",
+ },
+ },
+ ];
+
+ // Handle hover events
+ const handleMouseOver = (event) => {
+ const node = event.target.data();
+ setHoveredNode(node);
+ };
+
+ const handleMouseOut = () => {
+ setHoveredNode(null);
+ };
+
+ return (
+