
Commit d65bbbd
Committed Dec 17, 2024 · 1 parent 7313027

28 files changed (+147, -3731 lines)
 

Diff for: .gitattributes (-2)
This file was deleted.

Diff for: .gitignore (+1, -1)

@@ -9,7 +9,7 @@
 
 # Ignore local python virtual environment
 
-.venv
+venv
 env/
 
 # Ignore streamlit_chat_app.py conversations pickle

Diff for: Dockerfile (+13, -13)

@@ -1,20 +1,20 @@
-# Use an official Python runtime as a parent image
-FROM python:3.10-slim
+# Use the official Node.js image as a base
+FROM node:14
 
-# Set the working directory in the container
+# Set the working directory
 WORKDIR /app
 
-# Copy the current directory contents into the container at /app
-COPY . /app
+# Copy package.json and package-lock.json
+COPY frontend/package*.json ./
 
-# Install any needed packages specified in requirements.txt
-RUN pip install --no-cache-dir -r requirements.txt
+# Install dependencies
+RUN npm install
 
-# Make port 8501 available to the world outside this container
-EXPOSE 8501
+# Copy the rest of the application code
+COPY . .
 
-# Define environment variable
-ENV NAME DARKCODER
+# Expose the port the app runs on
+EXPOSE 3000
 
-# Run app.py when the container launches
-CMD ["streamlit", "run", "app.py", "--server.port", "8501"]
+# Command to run the application
+CMD ["npm", "run", "dev"]
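For reference, a minimal sketch of building and running this image from Python; the image tag chat-frontend and the host port mapping are assumptions, not part of the commit:

import subprocess

# Build the image from the repository root, where this Dockerfile lives.
subprocess.run(["docker", "build", "-t", "chat-frontend", "."], check=True)

# Run it, publishing the exposed dev-server port 3000 on the host.
subprocess.run(["docker", "run", "--rm", "-p", "3000:3000", "chat-frontend"], check=True)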

Diff for: README.md (-59)
This file was deleted.

Diff for: app.py (-83)
This file was deleted.

Diff for: backend/app/__init__.py (+1)

@@ -0,0 +1 @@
+# App initialization

Diff for: backend/app/api/endpoints.py (+18)

@@ -0,0 +1,18 @@
+from flask import Blueprint, request, jsonify
+from app.core.llm_pipeline import process_query
+
+chat_bp = Blueprint('chat', __name__)
+
+@chat_bp.route('/chat', methods=['POST'])
+def chat():
+    """
+    Process a chat query and return a response.
+    """
+    data = request.json
+    query = data.get('query')
+
+    if not query:
+        return jsonify({'error': 'No query provided'}), 400
+
+    response = process_query(query)
+    return jsonify({'response': response}), 200
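A minimal client sketch for this route, assuming the app factory in backend/app/app.py is serving on Flask's default port 5000 (host, port, and the example query are illustrative):

import requests

# The blueprint is registered under the /api prefix in backend/app/app.py.
resp = requests.post(
    "http://localhost:5000/api/chat",
    json={"query": "Summarise the uploaded document"},
)
print(resp.status_code, resp.json())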

Diff for: backend/app/api/upload.py (+31)

@@ -0,0 +1,31 @@
+from flask import Blueprint, request, jsonify
+from app.core.vector_store import VectorStore
+from app.core.llm_pipeline import process_query
+import os
+
+upload_bp = Blueprint('upload', __name__)
+
+@upload_bp.route('/upload', methods=['POST'])
+def upload_document():
+    """
+    Upload a document and process it into the vector store.
+    """
+    if 'file' not in request.files:
+        return jsonify({'error': 'No file part'}), 400
+
+    file = request.files['file']
+    if file.filename == '':
+        return jsonify({'error': 'No selected file'}), 400
+
+    # Save the file temporarily
+    file_path = os.path.join('temp', file.filename)
+    file.save(file_path)
+
+    # Process the file into the vector store
+    vector_store = VectorStore()
+    vector_store.add_document(file_path)
+
+    # Clean up the temporary file
+    os.remove(file_path)
+
+    return jsonify({'message': 'File uploaded and processed successfully'}), 200
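And a matching sketch for the upload route; the handler saves into a local temp/ directory, which is assumed to already exist on the server (the file name below is illustrative):

import requests

# Send the document as multipart/form-data; the field name must be 'file'.
with open("notes.txt", "rb") as fh:
    resp = requests.post(
        "http://localhost:5000/api/upload",
        files={"file": fh},
    )
print(resp.status_code, resp.json())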

Diff for: backend/app/app.py (+16)

@@ -0,0 +1,16 @@
+from flask import Flask
+from app.api.upload import upload_bp
+from app.api.endpoints import chat_bp
+
+def create_app():
+    app = Flask(__name__)
+
+    # Register blueprints
+    app.register_blueprint(upload_bp, url_prefix='/api')
+    app.register_blueprint(chat_bp, url_prefix='/api')
+
+    return app
+
+if __name__ == "__main__":
+    app = create_app()
+    app.run(debug=True)
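The factory can also be exercised without a running server via Flask's test client; a minimal sketch, assuming backend/ is on the import path as the package-style imports above imply (the request body is illustrative):

from app.app import create_app

# Build the app through the factory and drive it with the built-in test client.
app = create_app()
client = app.test_client()

resp = client.post("/api/chat", json={"query": "hello"})
print(resp.status_code, resp.get_json())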

Diff for: backend/app/core/llm_pipeline.py (+9)

@@ -0,0 +1,9 @@
+from langchain.llms import Groq
+
+def process_query(query):
+    """
+    Process a query using the Groq model and return the response.
+    """
+    model = Groq(model_name="Llama-3")  # or Mixtral
+    response = model.generate(query)
+    return response
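langchain itself does not ship a Groq class under langchain.llms; given that requirements.txt pins langchain-groq, the ChatGroq chat model is the more likely interface. A hedged sketch under that assumption (the model name is illustrative, and GROQ_API_KEY is read from the environment):

from langchain_groq import ChatGroq

def process_query(query):
    # ChatGroq picks up GROQ_API_KEY from the environment by default.
    model = ChatGroq(model="llama3-8b-8192")
    result = model.invoke(query)   # returns an AIMessage
    return result.content          # plain text, ready for jsonify()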

Diff for: backend/app/core/text_pipeline.py (+7)

@@ -0,0 +1,7 @@
+from app.core.vector_store import VectorStore
+
+def generate_embedding(text):
+    # Generate embeddings for the given text
+    vector_store = VectorStore()
+    embedding = vector_store.store_embedding(text)
+    return embedding

Diff for: backend/app/core/vector_store.py (+14)

@@ -0,0 +1,14 @@
+from langchain.vectorstores import VectorStore
+from langchain.embeddings import OpenAIEmbeddings
+
+class CustomVectorStore(VectorStore):
+    def __init__(self):
+        self.embeddings = OpenAIEmbeddings()
+        self.documents = []
+
+    def add_document(self, file_path):
+        # Process the document and add it to the vector store
+        with open(file_path, 'r') as file:
+            content = file.read()
+        embedding = self.embeddings.embed(content)
+        self.documents.append((content, embedding))
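Two hedged caveats here: LangChain's VectorStore base class is an abstract interface (concrete stores normally implement add_texts, similarity_search, and from_texts), and OpenAIEmbeddings exposes embed_documents/embed_query rather than embed. Also, text_pipeline.py above calls a store_embedding method that this class does not define. A standalone sketch that sidesteps the base class and covers both uses:

from langchain.embeddings import OpenAIEmbeddings

class InMemoryDocumentStore:
    """Minimal stand-in for CustomVectorStore: raw text plus its embedding."""

    def __init__(self):
        self.embeddings = OpenAIEmbeddings()
        self.documents = []  # list of (content, vector) pairs

    def add_document(self, file_path):
        # Read the file and embed its full contents in one call.
        with open(file_path, 'r') as file:
            content = file.read()
        return self.store_embedding(content)

    def store_embedding(self, text):
        # embed_query() is the single-string embedding call on OpenAIEmbeddings.
        vector = self.embeddings.embed_query(text)
        self.documents.append((text, vector))
        return vector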

Diff for: backend/app/models/document.py (+13)

@@ -0,0 +1,13 @@
+from sqlalchemy import Column, Integer, String
+from app.database import Base
+
+class Document(Base):
+    __tablename__ = 'documents'
+
+    id = Column(Integer, primary_key=True, index=True)
+    title = Column(String, index=True)
+    content = Column(String)
+    embedding = Column(String)
+
+    def __repr__(self):
+        return f"<Document(title={self.title}, id={self.id})>"
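app.database is not part of this commit; assuming Base there is a standard SQLAlchemy declarative_base(), a minimal sketch of creating the table and persisting a row against an in-memory SQLite engine (the field values are illustrative):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from app.database import Base              # assumed: a plain declarative_base()
from app.models.document import Document

# In-memory engine for illustration; the real engine/URL is not defined here.
engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

Session = sessionmaker(bind=engine)
with Session() as session:
    doc = Document(title="example", content="hello world", embedding="[0.1, 0.2]")
    session.add(doc)
    session.commit()
    print(session.query(Document).first())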

Diff for: backend/app/services/text_to_speech.py (+7)

@@ -0,0 +1,7 @@
+import openai
+
+def handle_audio_upload(file):
+    # Process the audio file and convert to text using OpenAI Whisper
+    audio_content = file.read()
+    response = openai.Audio.transcribe("whisper-1", audio_content)
+    return response['text']
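One caveat, hedged since requirements.txt does not pin an openai version: the pre-1.0 openai.Audio.transcribe(model, file) call expects a file-like object whose name carries the audio extension, so passing raw bytes is likely to fail. A sketch that wraps the upload accordingly (the fallback file name is illustrative); the same applies to handle_video_upload below:

import io

import openai

def handle_audio_upload(file):
    # Wrap the uploaded bytes in a named buffer so the SDK can infer the format.
    buffer = io.BytesIO(file.read())
    buffer.name = file.filename or "audio.mp3"
    response = openai.Audio.transcribe("whisper-1", buffer)
    return response['text']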

Diff for: backend/app/services/video_handler.py (+7)

@@ -0,0 +1,7 @@
+import openai
+
+def handle_video_upload(file):
+    # Process the video file and convert to text using OpenAI Whisper
+    video_content = file.read()
+    response = openai.Audio.transcribe("whisper-1", video_content)
+    return response['text']

Diff for: backend/config/__init__.py (+1)

@@ -0,0 +1 @@
+# Configuration settings

Diff for: backend/requirements.txt (+9)

@@ -0,0 +1,9 @@
+Flask
+langchain
+langchain-groq
+openai
+chromadb
+sqlalchemy
+protobuf==3.20.3
+typing-extensions==4.5.0
+tokenizers==0.13.3

Diff for: chat_history.csv (-3,122)
This file was deleted.

Diff for: chat_history.db (-8.13 MB)
Binary file not shown.

Diff for: main (-77)
This file was deleted.

Diff for: my-avatar.png (-8.81 KB)
Binary file not shown.

Diff for: requirements.txt (-3.76 KB)
Binary file not shown.

Diff for: start (-1)
This file was deleted.

Diff for: style.css (-22)
This file was deleted.

Diff for: utils/DarkGPT.py (-122)
This file was deleted.

Diff for: utils/__init__.py
Whitespace-only changes.

Diff for: utils/get_response.py (-48)
This file was deleted.

Diff for: utils/summarize.py (-181)
This file was deleted.
