From 969ae8ec75049383e092d0e9e4364104875badcc Mon Sep 17 00:00:00 2001 From: David Cannan <104325852+Cdaprod@users.noreply.github.com> Date: Mon, 28 Oct 2024 20:25:26 -0400 Subject: [PATCH] Add full steps workflow for CI/CD process --- .github/workflows/full_step_workflow.yml.dev | 95 ++++++++ docs/00-CLUSTERING-MINIO.md | 243 +++++++++++++++++++ docs/01-PROVISON-KUBERNETES-STORAGE.md | 232 ++++++++++++++++++ 3 files changed, 570 insertions(+) create mode 100644 .github/workflows/full_step_workflow.yml.dev create mode 100644 docs/00-CLUSTERING-MINIO.md create mode 100644 docs/01-PROVISON-KUBERNETES-STORAGE.md diff --git a/.github/workflows/full_step_workflow.yml.dev b/.github/workflows/full_step_workflow.yml.dev new file mode 100644 index 0000000..3f121a7 --- /dev/null +++ b/.github/workflows/full_step_workflow.yml.dev @@ -0,0 +1,95 @@ +# .github/workflows/main.yml +name: Full Steps Workflow + +on: + push: + branches: + - '**' + pull_request: + branches: + - '**' + +jobs: + build-test-tag-push-deploy: + runs-on: ubuntu-latest + strategy: + matrix: + service: + - name: 'app-frontend' + context: './app-frontend' + image: 'cdaprod/app-frontend' + port: '3000' + - name: 'middleware-registry' + context: './middleware-registry' + image: 'cdaprod/middleware-registry' + port: '8080' + - name: 'middleware-infrastructure' + context: './middleware-infrastructure' + image: 'cdaprod/middleware-infrastructure' + port: '8081' + steps: + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build Docker Image + run: | + docker build -t ${{ matrix.service.image }}:latest ${{ matrix.service.context }} + + - name: Run Tests + run: | + # Replace with your test command + docker run --rm ${{ matrix.service.image }}:latest sh -c "npm test || go test 
./... || echo 'No tests specified'"
+
+      - name: Get Git Version
+        id: get_version
+        run: |
+          git_version=$(git rev-parse --short HEAD)
+          echo "git_version=${git_version}" >> $GITHUB_OUTPUT
+
+      - name: Tag Docker Image
+        run: |
+          docker tag ${{ matrix.service.image }}:latest ${{ matrix.service.image }}:${{ steps.get_version.outputs.git_version }}
+
+      - name: Push Docker Images
+        run: |
+          docker push ${{ matrix.service.image }}:latest
+          docker push ${{ matrix.service.image }}:${{ steps.get_version.outputs.git_version }}
+
+      - name: Deploy to Host
+        run: |
+          ssh -o StrictHostKeyChecking=no ${{ secrets.SSH_USERNAME }}@${{ secrets.SSH_HOST }} \
+            "docker service update --image ${{ matrix.service.image }}:${{ steps.get_version.outputs.git_version }} ${{ matrix.service.name }}_service"
+
+      - name: Notify on Failure
+        if: ${{ failure() }}
+        run: |
+          echo "An error occurred while processing service: ${{ matrix.service.name }}"
+
+      - name: Take Screenshot
+        if: ${{ success() && matrix.service.name == 'app-frontend' }}
+        uses: mxschmitt/action-take-screenshot@v1
+        with:
+          url: 'http://your-app-url.com'
+          output: screenshot_${{ matrix.service.name }}.png
+
+      - name: Upload Screenshot
+        if: ${{ success() && matrix.service.name == 'app-frontend' }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: screenshot_${{ matrix.service.name }}
+          path: screenshot_${{ matrix.service.name }}.png
+
+      - name: Clean up Docker resources
+        if: always()
+        run: |
+          docker system prune -af
+          docker builder prune -af
\ No newline at end of file
diff --git a/docs/00-CLUSTERING-MINIO.md b/docs/00-CLUSTERING-MINIO.md
new file mode 100644
index 0000000..046ee73
--- /dev/null
+++ b/docs/00-CLUSTERING-MINIO.md
@@ -0,0 +1,243 @@
+To optimize the Docker Swarm setup for improved storage management while integrating the recommended MinIO setup, we need to combine elements from your current `docker-compose.yaml` and the recommended MinIO configuration.
Additionally, we'll ensure that storage-intensive services utilize the NVMe SSD or expanded storage effectively. + +### Combined and Optimized `docker-compose.yaml` + +#### Key Changes: +1. **MinIO Configuration**: Set up MinIO in a distributed mode across multiple nodes. +2. **Storage Optimization**: Ensure data volumes are placed on nodes with the most storage capacity (e.g., NVMe SSD). +3. **Service Placement**: Use Docker placement constraints to control where services run, leveraging nodes with additional storage. + +Here’s the optimized `docker-compose.yaml`: + +```yaml +version: '3.8' + +services: + web: + image: cdaprod/cda-minio-control:latest + environment: + OPENAI_API_KEY: ${OPENAI_API_KEY} + MINIO_ENDPOINT: minio1:9000 + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + ports: + - "8000:8000" + networks: + - app_network + depends_on: + - weaviate + - minio1 + deploy: + replicas: 3 + update_config: + parallelism: 2 + delay: 10s + restart_policy: + condition: on-failure + + minio1: + image: minio/minio:latest + environment: + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + command: server http://minio{1...4}/data + volumes: + - minio_data1:/data + ports: + - "9000:9000" + - "9001:9001" + networks: + - app_network + - minio_net + deploy: + placement: + constraints: + - node.labels.storage == nvme + + minio2: + image: minio/minio:latest + environment: + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + command: server http://minio{1...4}/data + volumes: + - minio_data2:/data + networks: + - app_network + - minio_net + deploy: + placement: + constraints: + - node.labels.storage == expanded + + minio3: + image: minio/minio:latest + environment: + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + command: server http://minio{1...4}/data + volumes: + - minio_data3:/data + networks: + - app_network + - minio_net + deploy: + 
placement: + constraints: + - node.labels.storage == default + + minio4: + image: minio/minio:latest + environment: + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + command: server http://minio{1...4}/data + volumes: + - minio_data4:/data + networks: + - app_network + - minio_net + deploy: + placement: + constraints: + - node.labels.storage == default + + weaviate: + image: cdaprod/cda-weaviate:latest + environment: + OPENAI_API_KEY: ${OPENAI_API_KEY} + AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true' + PERSISTENCE_DATA_PATH: '/var/lib/weaviate' + QUERY_DEFAULTS_LIMIT: 25 + DEFAULT_VECTORIZER_MODULE: 'text2vec-openai' + ENABLE_MODULES: 'backup-s3, text2vec-openai' + BACKUP_S3_BUCKET: 'weaviate-backups' + BACKUP_S3_ENDPOINT: 'minio1:9000' + BACKUP_S3_ACCESS_KEY_ID: ${MINIO_ROOT_USER} + BACKUP_S3_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD} + BACKUP_S3_USE_SSL: 'false' + CLUSTER_HOSTNAME: 'node1' + volumes: + - weaviate_data:/var/lib/weaviate + networks: + - app_network + depends_on: + - minio1 + + nginx: + image: cdaprod/cda-nginx:latest + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + networks: + - app_network + + jupyterlab: + build: + context: ./jupyter + image: cdaprod/cda-jupyterlab:latest + ports: + - "8888:8888" + volumes: + - ../usb/001/002:/dev/bus/usb/001/002 + - jupyter_data:/home/jovyan/work + networks: + - app_network + environment: + - JUPYTER_ENABLE_LAB=yes + privileged: true + deploy: + replicas: 1 + restart_policy: + condition: on-failure + command: ["jupyter", "lab", "--ip=0.0.0.0", "--allow-root", "--NotebookApp.token=''", "--NotebookApp.password=''"] + + tailscale: + image: tailscale/tailscale + container_name: tailscale + cap_add: + - NET_ADMIN + - SYS_MODULE + security_opt: + - apparmor:unconfined + volumes: + - /dev/net/tun:/dev/net/tun + - tailscale_data:/var/lib/tailscale + environment: + - TS_AUTH_KEY=${TS_AUTH_KEY} + networks: + - app_network + command: tailscaled 
+
+networks:
+  app_network:
+    driver: overlay
+  minio_net:
+    driver: overlay
+
+volumes:
+  tailscale_data:
+    driver: local
+  minio_data1:
+    driver: local
+    driver_opts:
+      type: none
+      device: /mnt/nvme/data
+      o: bind
+  minio_data2:
+    driver: local
+    driver_opts:
+      type: none
+      device: /mnt/expanded/data
+      o: bind
+  minio_data3:
+    driver: local
+  minio_data4:
+    driver: local
+  weaviate_data:
+    driver: local
+  jupyter_data:
+    driver: local
+    driver_opts:
+      type: none
+      device: /opt/jupyter_data
+      o: bind
+
+secrets:
+  minio_root_user:
+    external: true
+  minio_root_password:
+    external: true
+```
+
+### Explanation and Optimization:
+
+1. **MinIO Distributed Setup**:
+   - The MinIO service is configured to run in a distributed mode across four nodes (`minio1`, `minio2`, `minio3`, `minio4`).
+   - Each MinIO instance is given specific placement constraints to ensure they run on the appropriate nodes.
+
+2. **Storage Placement**:
+   - `minio1` is configured to use the NVMe storage on the leader node.
+   - `minio2` is set to use the expanded storage on another Raspberry Pi 5 node.
+   - `minio3` and `minio4` use default local storage on the Raspberry Pi 4 nodes.
+
+3. **Service Placement Constraints**:
+   - Using `deploy.placement.constraints`, we control where each MinIO instance runs based on node labels.
+   - Ensure you label your nodes accordingly (`docker node update` requires the target node name as its final argument):
+     ```sh
+     docker node update --label-add storage=nvme <nvme-node-name>
+     docker node update --label-add storage=expanded <expanded-node-name>
+     docker node update --label-add storage=default <default-node-name>
+     ```
+
+4. **Volumes**:
+   - Specific volumes are configured to use the NVMe storage (`/mnt/nvme/data`) and the expanded storage (`/mnt/expanded/data`).
+
+5. **Networks**:
+   - `app_network` is used for general service communication.
+   - `minio_net` is specifically for MinIO services to communicate in a distributed setup.
+
+### Summary
+This optimized `docker-compose.yaml` setup ensures efficient storage management across your Docker Swarm, leveraging the NVMe SSD and any additional storage nodes you have. This configuration also maintains your current services and adds robustness to your swarm infrastructure.
\ No newline at end of file
diff --git a/docs/01-PROVISON-KUBERNETES-STORAGE.md b/docs/01-PROVISON-KUBERNETES-STORAGE.md
new file mode 100644
index 0000000..9f5436c
--- /dev/null
+++ b/docs/01-PROVISON-KUBERNETES-STORAGE.md
@@ -0,0 +1,232 @@
+To provision your Raspberry Pi Docker Swarm with extra NVMe storage for compatibility with Kubernetes development, you'll need to follow a few steps. This includes setting up your storage with Docker, configuring your Raspberry Pi nodes, and preparing for Kubernetes deployment. Here's a detailed guide:
+
+### 1. Set Up NVMe Storage on Raspberry Pi
+
+#### A. Prepare the NVMe Storage
+
+1. **Connect the NVMe SSD to your Raspberry Pi**:
+   - Ensure the NVMe SSD is properly connected and recognized by the Raspberry Pi.
+
+2. **Format the NVMe SSD**:
+   ```sh
+   sudo mkfs.ext4 /dev/nvme0n1
+   ```
+
+3. **Mount the NVMe SSD**:
+   ```sh
+   sudo mkdir -p /mnt/nvme
+   sudo mount /dev/nvme0n1 /mnt/nvme
+   ```
+
+4. **Update `/etc/fstab` for Persistent Mounting**:
+   ```sh
+   echo '/dev/nvme0n1 /mnt/nvme ext4 defaults 0 0' | sudo tee -a /etc/fstab
+   ```
+
+### 2. Configure Docker Swarm with NVMe Storage
+
+#### A. Label Nodes with Storage Capabilities
+
+1. **Label the Nodes** (replace `<node-name>` with the node to label, e.g. from `docker node ls`):
+   ```sh
+   docker node update --label-add storage=nvme <node-name>
+   ```
+
+2.
**Update Docker Compose File to Use the NVMe Storage** + +```yaml +version: '3.8' + +x-defaults: &defaults + restart: unless-stopped + env_file: .env + +x-labels: &labels + com.example.project: "MyApp" + com.example.version: "1.0.0" + com.example.description: "A description of MyApp" + com.example.maintainer: "me@example.com" + +x-driver-opts-nvme: &driver_opts_nvme + type: none + device: /mnt/nvme/data + o: bind + +services: + web: + <<: *defaults + image: cdaprod/cda-minio-control:latest + labels: + <<: *labels + environment: + OPENAI_API_KEY: ${OPENAI_API_KEY} + MINIO_ENDPOINT: minio1:9000 + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + ports: + - "8000:8000" + networks: + - app_network + depends_on: + - weaviate + - minio1 + deploy: + replicas: 3 + update_config: + parallelism: 2 + delay: 10s + restart_policy: + condition: on-failure + resources: + limits: + cpus: "1.0" + memory: "512M" + + minio1: + <<: *defaults + image: minio/minio:latest + labels: + <<: *labels + environment: + MINIO_ROOT_USER: ${MINIO_ROOT_USER} + MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD} + command: server http://minio{1...4}/data + volumes: + - minio_data1:/data + ports: + - "9000:9000" + - "9001:9001" + networks: + - app_network + - minio_net + deploy: + placement: + constraints: + - node.labels.storage == nvme + +volumes: + minio_data1: + driver: local + driver_opts: + <<: *driver_opts_nvme + +networks: + app_network: + driver: overlay + minio_net: + driver: overlay +``` + +### 3. Transition to Kubernetes + +#### A. Install Kubernetes on Raspberry Pi + +1. **Set Up Kubernetes**: + - Follow the [Kubernetes documentation](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) to install Kubernetes using `kubeadm` on your Raspberry Pi. + +2. **Initialize the Kubernetes Cluster**: + ```sh + sudo kubeadm init --pod-network-cidr=10.244.0.0/16 + ``` + +3. 
**Set Up `kubectl` for Your User**: + ```sh + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + ``` + +4. **Deploy a Pod Network**: + ```sh + kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + ``` + +#### B. Configure Storage for Kubernetes + +1. **Create a StorageClass for NVMe Storage**: + +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nvme-storage +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +``` + +2. **Create a PersistentVolume**: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nvme-pv +spec: + capacity: + storage: 100Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: nvme-storage + local: + path: /mnt/nvme + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: storage + operator: In + values: + - nvme +``` + +3. **Create a PersistentVolumeClaim**: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nvme-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: nvme-storage +``` + +#### C. Deploy Applications in Kubernetes + +1. **Use the PersistentVolumeClaim in a Pod**: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: myapp-pod +spec: + containers: + - name: myapp-container + image: cdaprod/cda-minio-control:latest + volumeMounts: + - mountPath: "/data" + name: nvme-storage + volumes: + - name: nvme-storage + persistentVolumeClaim: + claimName: nvme-pvc +``` + +### Summary + +1. **Set Up NVMe Storage on Raspberry Pi**: + - Format, mount, and configure NVMe SSD. + +2. **Configure Docker Swarm**: + - Label nodes, update Docker Compose to use NVMe storage. + +3. **Transition to Kubernetes**: + - Install Kubernetes, configure StorageClass, PersistentVolume, and PersistentVolumeClaim. 
+ +By following these steps, you can effectively provision your Raspberry Pi Docker Swarm with NVMe storage and transition to a Kubernetes environment for development and staging. This setup ensures your storage is configured properly for application development and deployment. \ No newline at end of file