diff --git a/.gitignore b/.gitignore index acd2382..a067e99 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .DS_Store -nohup.out \ No newline at end of file +nohup.out +backend/openui-backend.tar diff --git a/helm/Chart.yaml b/helm/Chart.yaml new file mode 100644 index 0000000..7ecdef3 --- /dev/null +++ b/helm/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +name: openui +version: 0.1.0 +description: A Helm chart for deploying the OpenUI application diff --git a/helm/README.md b/helm/README.md new file mode 100644 index 0000000..eb93b29 --- /dev/null +++ b/helm/README.md @@ -0,0 +1,105 @@ +## Deployment Instructions for OpenUI Helm Chart + +This guide provides detailed instructions on how to deploy the "OpenUI" Helm chart into a Kubernetes cluster. Before proceeding, ensure you have the following prerequisites met: + +### Prerequisites +- **Kubernetes Cluster**: Ensure you have access to a Kubernetes cluster and you have `kubectl` installed and configured to communicate with your cluster. +- **Helm Installed**: You need Helm installed on your machine. Helm is a tool for managing Kubernetes charts. Charts are packages of pre-configured Kubernetes resources. + +### Step 1: Clone the Repository +First, clone the repository containing the Helm chart to your local machine and go to the `helm` subdirectory: + +```bash +git clone https://github.com/wandb/openui +cd openui/helm +``` + +### Step 2: Create the Kubernetes Namespace +Create a namespace in your Kubernetes cluster where the resources will be deployed. This step is optional but recommended to keep your cluster organized: + +```bash +kubectl create namespace openui +``` + +### Step 3: Managing the Kubernetes Secret + +The OpenUI application requires the Kubernetes secret `OPENAI_API_KEY`. Ensure that this secret is created within the same namespace. 
When creating secrets manually, specify the namespace: + +```bash +kubectl create secret generic openai-api-key-secret \ + --from-literal=OPENAI_API_KEY='your_openai_api_key_here' \ + --namespace openui +``` + +### Step 4: Create the folder for persistent volumes +Create a directory for Ollama persistent volumes. For example, `/mnt/data/ollama`. + +```bash +mkdir -p /mnt/data +mkdir -p /mnt/data/ollama +``` +Do not forget to change the path to this directory in the `values.yaml` file on the next step. + +### Step 5: Configure the Helm Chart +Edit the `values.yaml` file to customize the deployment settings like the path to the persistent volume, image tags, resource limits, or other configurations: + +```bash +nano values.yaml +``` +You can skip this step if you use the default values. + +### Step 6: Deploy the Helm Chart +Deploy the Helm chart using the following command. This command assumes you are still in the `helm` chart directory: + +```bash +helm install openui . --namespace openui +``` + +This command deploys the "OpenUI" application to the "openui" namespace in your Kubernetes cluster. + +### Step 7: Verify the Deployment +Check the status of the deployment to ensure everything is running as expected: + +```bash +kubectl get all -n openui +``` + +This command will show all the resources deployed in the "openui" namespace, allowing you to verify that your application components are up and running. + +### Step 8: Accessing the Application +To access the deployed application, you might need to set up port forwarding or an ingress, depending on how the service is exposed: + +- **For development purposes** (using port forwarding): + + ```bash + kubectl port-forward service/backend-service 7878:7878 -n openui + ``` + + Now, access the application via [http://localhost:7878](http://localhost:7878). + +- **For production environments** (using an ingress): + + Add an ingress controller to your Helm chart and ensure that it is properly configured in `values.yaml`. 
Access the application via the URL configured in your ingress. + +### Step 9: Clean Up (Optional) +If you need to delete the deployment, use the following command: + +```bash +helm uninstall openui -n openui +``` + +And if you want to remove the namespace: + +```bash +kubectl delete namespace openui +``` + +### Troubleshooting +- If you encounter issues during deployment, you can check the logs of your pods or describe the resources for more detailed error messages: + + ```bash + kubectl logs pod-name -n openui + kubectl describe pod pod-name -n openui + ``` + +Replace `pod-name` with the name of the pod that is experiencing issues. diff --git a/helm/templates/backend-deployment.yaml b/helm/templates/backend-deployment.yaml new file mode 100644 index 0000000..cd5d7cb --- /dev/null +++ b/helm/templates/backend-deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.backend.name }} + namespace: {{ .Values.global.namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Values.backend.name }} + template: + metadata: + labels: + app: {{ .Values.backend.name }} + spec: + containers: + - name: backend + image: "{{ .Values.backend.image }}:{{ .Values.backend.tag }}" + imagePullPolicy: IfNotPresent + ports: + - containerPort: {{ .Values.backend.port }} + env: + - name: OLLAMA_HOST + value: {{ .Values.backend.ollamaHost }} + - name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + name: openai-api-key-secret + key: OPENAI_API_KEY diff --git a/helm/templates/backend-service.yaml b/helm/templates/backend-service.yaml new file mode 100644 index 0000000..fdae489 --- /dev/null +++ b/helm/templates/backend-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.backend.name }}-service + namespace: {{ .Values.global.namespace }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.backend.port }} + targetPort: {{ .Values.backend.port }} + protocol: TCP + selector: 
+ app: {{ .Values.backend.name }} diff --git a/helm/templates/ollama-deployment.yaml b/helm/templates/ollama-deployment.yaml new file mode 100644 index 0000000..215cb51 --- /dev/null +++ b/helm/templates/ollama-deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.ollama.name }} + namespace: {{ .Values.global.namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Values.ollama.name }} + template: + metadata: + labels: + app: {{ .Values.ollama.name }} + spec: + volumes: + - name: ollama-storage + persistentVolumeClaim: + claimName: {{ .Values.ollama.name }}-pvc + containers: + - name: ollama + image: "{{ .Values.ollama.image }}:{{ .Values.ollama.tag }}" + ports: + - containerPort: {{ .Values.ollama.port }} + volumeMounts: + - name: ollama-storage + mountPath: /root/.ollama diff --git a/helm/templates/ollama-pv-pvc.yaml.yaml b/helm/templates/ollama-pv-pvc.yaml.yaml new file mode 100644 index 0000000..73317c1 --- /dev/null +++ b/helm/templates/ollama-pv-pvc.yaml.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ .Values.ollama.name }}-pv + namespace: {{ .Values.global.namespace }} +spec: + capacity: + storage: {{ .Values.ollama.volumeSize }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: {{ .Values.ollama.volumePath }} + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.ollama.name }}-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.ollama.volumeSize }} diff --git a/helm/templates/ollama-service.yaml b/helm/templates/ollama-service.yaml new file mode 100644 index 0000000..e400449 --- /dev/null +++ b/helm/templates/ollama-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.ollama.name }}-service + namespace: {{ .Values.global.namespace }} +spec: + type: ClusterIP + ports: + - port: {{ 
.Values.ollama.port }} + targetPort: {{ .Values.ollama.port }} + protocol: TCP + selector: + app: {{ .Values.ollama.name }} diff --git a/helm/values.yaml b/helm/values.yaml new file mode 100644 index 0000000..9ec179a --- /dev/null +++ b/helm/values.yaml @@ -0,0 +1,20 @@ +global: + namespace: openui + +ollama: + name: ollama + image: ollama/ollama + tag: latest + port: 11434 + volumePath: /mnt/data/ollama + volumeSize: 2Gi + +backend: + name: backend + image: openui-backend # Adjust this with your actual image path + tag: latest + port: 7878 + ollamaHost: "http://ollama:11434" + +# Common settings or configurations that apply to both components +replicaCount: 1 diff --git a/kubernetes/README.md b/kubernetes/README.md new file mode 100644 index 0000000..8ec91dc --- /dev/null +++ b/kubernetes/README.md @@ -0,0 +1,99 @@ +Deploying to a Kubernetes cluster involves several steps. Below is a step-by-step guide that assumes you have a Kubernetes cluster already set up and configured, and you have `kubectl` installed and configured to communicate with your cluster. This guide also assumes you have already built and pushed your Docker images to a container registry that your Kubernetes cluster can access. + +### Step 1: Create Persistent Volume and Claim + +1. **Go to the kubernetes subfolder** + ```bash + cd ./kubernetes + ``` + +2. **Create the Persistent Volume and Persistent Volume Claim for Ollama** + - Deploy to your cluster both `ollama-pv.yaml` and `ollama-pvc.yaml` files: + ```bash + kubectl apply -f ollama-pv.yaml + kubectl apply -f ollama-pvc.yaml + ``` + +### Step 2: Deploy Ollama Service + +1. **Create the Deployment and Service for Ollama** + - Deploy `ollama-deployment.yaml` and `ollama-service.yaml` files: + ```bash + kubectl apply -f ollama-deployment.yaml + kubectl apply -f ollama-service.yaml + ``` + +### Step 3: Deploy Backend Service + +1. 
**Ensure your Docker image for the backend is built and pushed to a registry** + - If not already done, build your Docker image from the backend directory and push it to your container registry. Make sure the image is available to your Kubernetes cluster. For example, for local microk8s deployment: + ```bash + cd ../backend + docker build . -t openui-backend --load + docker save openui-backend:latest > openui-backend.tar + microk8s ctr image import openui-backend.tar + rm -f openui-backend.tar + cd ../kubernetes + ``` + - Update the backend deployment YAML with the correct image name. + +2. **Create the Deployment and Service for Backend** + - Deploy `backend-deployment.yaml` and `backend-service.yaml` files: + ```bash + kubectl apply -f backend-deployment.yaml + kubectl apply -f backend-service.yaml + ``` + +### Step 4: Create Kubernetes Secret for Environment Variables + +1. **Encode your API Key in Base64** + - Encode your `OPENAI_API_KEY`: + ```bash + echo -n 'your_openai_api_key_here' | base64 + ``` + - Replace `YOUR_BASE64_ENCODED_API_KEY` in the secret YAML with the output from the above command. + +2. **Create the Secret** + - Save the secret YAML configuration to a file named `openai-api-key-secret.yaml`. + - Deploy it: + ```bash + kubectl apply -f openai-api-key-secret.yaml + ``` + +### Step 5: Verify the Deployment + +1. **Check the status of your deployments** + - To see if the deployments are running and their statuses run: + ```bash + kubectl get deployments + ``` + +2. **Check the status of your services** + - To see if the services are running and to check their internal IP addresses and ports: + ```bash + kubectl get services + ``` + +3. **Check the status of your pods** + - This command helps you verify if the pods are running correctly: + ```bash + kubectl get pods + ``` + +4. 
**View logs for troubleshooting** + - If a pod isn’t starting or behaving as expected, view its logs (replace `<pod-name>` with the name of the pod) for more information: + ```bash + kubectl logs <pod-name> + ``` + +### Step 6: Accessing Your Application + +Depending on how your Kubernetes cluster is configured (e.g., if you're using Minikube, a cloud provider, etc.), accessing services externally will vary. For services only exposed internally, you can set up port forwarding: + +```bash +kubectl port-forward service/backend-service 7878:7878 +``` + +This command allows you to access the backend service via `localhost:7878` on your local machine. + +By following these steps, you should be able to deploy and run your application on a Kubernetes cluster. Adjust the steps based on your specific environment and configuration needs. diff --git a/kubernetes/backend-deployment.yaml b/kubernetes/backend-deployment.yaml new file mode 100644 index 0000000..cb07d6e --- /dev/null +++ b/kubernetes/backend-deployment.yaml @@ -0,0 +1,29 @@ +# Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: backend-deployment +spec: + selector: + matchLabels: + app: backend + replicas: 1 + template: + metadata: + labels: + app: backend + spec: + containers: + - name: backend + image: openui-backend:latest # Update this with your actual image path + imagePullPolicy: IfNotPresent + ports: + - containerPort: 7878 + env: + - name: OLLAMA_HOST + value: "http://ollama-service:11434" + - name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + name: openai-api-key-secret + key: OPENAI_API_KEY diff --git a/kubernetes/backend-service.yaml b/kubernetes/backend-service.yaml new file mode 100644 index 0000000..464a625 --- /dev/null +++ b/kubernetes/backend-service.yaml @@ -0,0 +1,12 @@ +# Service +apiVersion: v1 +kind: Service +metadata: + name: backend-service +spec: + selector: + app: backend + ports: + - protocol: TCP + port: 7878 + targetPort: 7878 diff --git a/kubernetes/ollama-deployment.yaml b/kubernetes/ollama-deployment.yaml new file mode 100644 
index 0000000..49e04ed --- /dev/null +++ b/kubernetes/ollama-deployment.yaml @@ -0,0 +1,27 @@ +# Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ollama-deployment +spec: + selector: + matchLabels: + app: ollama + replicas: 1 + template: + metadata: + labels: + app: ollama + spec: + containers: + - name: ollama + image: ollama/ollama:latest + ports: + - containerPort: 11434 + volumeMounts: + - name: ollama-storage + mountPath: /root/.ollama + volumes: + - name: ollama-storage + persistentVolumeClaim: + claimName: ollama-pvc diff --git a/kubernetes/ollama-pv.yaml b/kubernetes/ollama-pv.yaml new file mode 100644 index 0000000..6838a12 --- /dev/null +++ b/kubernetes/ollama-pv.yaml @@ -0,0 +1,13 @@ +# PersistentVolume +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ollama-pv +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: /mnt/data/ollama # Adjust the path based on your host's configuration diff --git a/kubernetes/ollama-pvc.yaml b/kubernetes/ollama-pvc.yaml new file mode 100644 index 0000000..98301fd --- /dev/null +++ b/kubernetes/ollama-pvc.yaml @@ -0,0 +1,12 @@ +# PersistentVolumeClaim +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ollama-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + volumeName: ollama-pv diff --git a/kubernetes/ollama-service.yaml b/kubernetes/ollama-service.yaml new file mode 100644 index 0000000..2b31b1a --- /dev/null +++ b/kubernetes/ollama-service.yaml @@ -0,0 +1,12 @@ +# Service +apiVersion: v1 +kind: Service +metadata: + name: ollama-service +spec: + selector: + app: ollama + ports: + - protocol: TCP + port: 11434 + targetPort: 11434 diff --git a/kubernetes/openai-api-key-secret.yaml b/kubernetes/openai-api-key-secret.yaml new file mode 100644 index 0000000..3bd4583 --- /dev/null +++ b/kubernetes/openai-api-key-secret.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: 
+ name: openai-api-key-secret +type: Opaque +data: + OPENAI_API_KEY: YOUR_BASE64_ENCODED_API_KEY # Replace with your base64-encoded API key