27 changes: 26 additions & 1 deletion .gitignore
@@ -43,5 +43,30 @@ aws/studio/values.yaml
aws/studio/certificate.yaml
secret_db.json

azure/deploy/_tf/*
azure/deploy/.terraform/*
azure/deploy/.terraform.lock.hcl
azure/deploy/terraform.tfstate
azure/deploy/terraform.tfstate.backup
azure/configure/cert-manager-certificate-issuer.yaml
azure/configure/cert-manager-values.yaml
azure/configure/external-dns-secret.yaml
azure/configure/external-dns-values.yaml
azure/configure/test-ingress-certificate.yaml
azure/configure/test-ingress-httpbin-values.yaml
azure/configure/db-init.yaml
azure/rasa/kafka/kafka.yaml
azure/rasa/assistant/repos/*
azure/rasa/assistant/values.yaml
azure/rasa/ingress/ingress.yaml
azure/rasa/ingress/certificate.yaml
azure/studio/repos/*
azure/studio/values.yaml
azure/studio/certificate.yaml



# Dev
aws/setup/environment-variables-dev.sh
gcp/setup/environment-variables-dev.sh
azure/setup/environment-variables-dev.sh
17 changes: 13 additions & 4 deletions aws/configure/get-infra-values.sh
@@ -2,17 +2,26 @@ echo "Fetching some infrastructure values..."

# Authenticate with AWS Cluster
echo "Generating kubeconfig to authenticate with AWS EKS cluster..."
# To be able to interact with the EKS cluster we deployed earlier, we need to obtain the credentials for it. These credentials are saved in a file called kubeconfig which the AWS CLI can generate for us and kubectl can use.
# To be able to interact with the EKS cluster we deployed earlier, we need to obtain the credentials for it.
# These credentials are saved in a file called kubeconfig which the AWS CLI can generate for us and kubectl can use.
# Ensure we've got a path set up for the kubeconfig file:
export KUBECONFIG=$(pwd)/kubeconfig
echo "Kubeconfig path: $KUBECONFIG"
rm -f $KUBECONFIG
#Retrieve the credentials for the cluster using the AWS CLI:
# Retrieve the credentials for the cluster using the AWS CLI:
aws eks update-kubeconfig --region $REGION --name $NAME

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TARGET_DIR_RELATIVE="$SCRIPT_DIR/aws/deploy/_tf"
# It also works when sourced from zsh
if [[ -n "${BASH_SOURCE[0]}" ]]; then
SOURCE_PATH="${BASH_SOURCE[0]}"
else
# For zsh compatibility
SOURCE_PATH="${(%):-%x}"
fi

SCRIPT_DIR="$(cd "$(dirname "$SOURCE_PATH")" && pwd)"
TARGET_DIR_RELATIVE="$SCRIPT_DIR/../deploy/_tf"
TARGET_DIR_ABSOLUTE=$(realpath "$TARGET_DIR_RELATIVE")

export DB_SECRET_ID=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw secret_id_db)
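The bash/zsh branch above exists because this script is intended to be sourced rather than executed, so the exported values persist in the caller's shell. A minimal usage sketch (an assumption of the intended flow: aws/setup/environment-variables.sh has been sourced first to provide $REGION, $NAME and $TF_CMD):

# Works from bash or zsh; ${(%):-%x} resolves the script path when BASH_SOURCE is unset.
source aws/setup/environment-variables.sh
source aws/configure/get-infra-values.sh
echo "$DB_SECRET_ID"   # exported by the script for use in later steps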
2 changes: 1 addition & 1 deletion aws/rasa/assistant/values.template.yaml
@@ -62,7 +62,7 @@ rasa:
sasl_username: ${KAFKA_USER}
sasl_password: ${KAFKA_PASSWORD}
ssl_check_hostname: false
# We'll configure Rasa to use Google Cloud Storage for remote storage, so we can load models from the buckets we created earlier.
# We'll configure Rasa to use AWS for remote storage, so we can load models from the buckets we created earlier.
additionalArgs:
- --remote-storage
- aws
2 changes: 0 additions & 2 deletions aws/setup/environment-variables.sh
@@ -35,8 +35,6 @@ export BUCKET_NAME_ENTROPY="xbuc"
export MODEL_BUCKET="${MY_COMPANY_NAME}-${BUCKET_NAME_ENTROPY}-${NAME}-model"
# The name of the bucket used to store models for Rasa Studio.
export STUDIO_BUCKET="${MY_COMPANY_NAME}-${BUCKET_NAME_ENTROPY}-${NAME}-studio"
# Process your domain name to create a DNS zone name for Amazon Route 53.
export DNS_ZONE=$(echo "$DOMAIN" | sed -e 's/\./-/g')
# The Kubernetes namespace that will be used for the deployment.
export NAMESPACE=rasa
# The database name for Rasa Pro.
5 changes: 5 additions & 0 deletions azure/README.md
@@ -0,0 +1,5 @@
# Microsoft Azure Playbook
This playbook outlines an opinionated, best-practice way to install Rasa Pro and Rasa Studio on Microsoft Azure. You may wish to adapt the steps and configuration to meet your needs or organisational policies. The files here support you as you work through the Microsoft Azure Playbook, which you can find [here](https://rasa.com/docs/learn/deployment/azure/azure-playbook-intro).



28 changes: 28 additions & 0 deletions azure/cleanup/cleanup.sh
@@ -0,0 +1,28 @@
set -e

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Source common utilities
source "$SCRIPT_DIR/../../utils/common.sh"
source "$SCRIPT_DIR/../utils/common.sh"

auth_to_k8s

print_info "Starting cleanup of Azure infrastructure..."

print_info "Uninstalling Istio..."

export ISTIO_DIR=$(ls | grep -v istio-operator.yaml | grep istio- | sort --version-sort | tail -1)
print_info "Istio dir: $ISTIO_DIR"
export ISTIO="$ISTIO_DIR/bin/istioctl"
$ISTIO version

# This makes sure the resources created by Istio but not managed by Terraform are cleaned up properly.
$ISTIO uninstall --purge -y

TARGET_DIR_RELATIVE="$SCRIPT_DIR/../deploy/_tf"
TARGET_DIR_ABSOLUTE=$(realpath "$TARGET_DIR_RELATIVE")
$TF_CMD -chdir=$TARGET_DIR_ABSOLUTE destroy -auto-approve

print_info "Cleanup completed! Check the output above for any errors."
19 changes: 19 additions & 0 deletions azure/configure/cert-manager-certificate-issuer.template.yaml
@@ -0,0 +1,19 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
email: $MY_EMAIL
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-issuer-account-key
solvers:
- dns01:
azureDNS:
hostedZoneName: ${DOMAIN}
resourceGroupName: ${NAME}
subscriptionID: ${ARM_SUBSCRIPTION_ID}
environment: AzurePublicCloud
managedIdentity:
clientID: ${SERVICE_ACCOUNT_DNS}
12 changes: 12 additions & 0 deletions azure/configure/cert-manager-values.template.yaml
@@ -0,0 +1,12 @@
crds:
enabled: true

serviceAccount:
labels:
azure.workload.identity/use: "true"

podLabels:
azure.workload.identity/use: "true"

prometheus:
enabled: true
38 changes: 38 additions & 0 deletions azure/configure/configure-cluster.sh
@@ -0,0 +1,38 @@
set -e

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Source common utilities
source "$SCRIPT_DIR/../../utils/common.sh"
source "$SCRIPT_DIR/../utils/common.sh"

auth_to_k8s

print_info "Setting up Istio..."
# Download and install istioctl, the CLI for managing Istio, the service mesh that ensures communication between the Rasa product components on your cluster is encrypted in transit:
curl -L https://istio.io/downloadIstio | sh -
# Configure required environment variables:
export ISTIO_DIR=$(ls | grep -v istio-operator.yaml | grep istio- | sort --version-sort | tail -1)
print_info "Istio dir: $ISTIO_DIR"
export ISTIO="$ISTIO_DIR/bin/istioctl"
$ISTIO version

# Install Istio onto Your Cluster
# Use our preconfigured YAML files to install Istio onto your cluster.
print_info "Installing Istio on your cluster..."
$ISTIO install --set profile=demo --skip-confirmation -f "$SCRIPT_DIR/istio-operator.yaml"

# Here we'll create an Ingress Class that will help us handle network traffic coming inbound to the Rasa products.
print_info "Creating the Istio Ingress Class on your cluster..."
kubectl apply -f "$SCRIPT_DIR/istio-ingress-class.yaml"

# You will now need to update the DNS records for your domain. Find where DNS is managed for your domain - this may be a cloud provider like AWS or a registrar like GoDaddy or Cloudflare.
print_info "Retrieving the nameservers of the zone you have just created in Azure..."
print_info "You must now create an NS record for your domain $DOMAIN with the following values:"
TARGET_DIR_RELATIVE="$SCRIPT_DIR/../deploy/_tf"
TARGET_DIR_ABSOLUTE=$(realpath "$TARGET_DIR_RELATIVE")
$TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output dns_name_servers
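Once the NS record exists at your DNS provider, delegation can be checked before continuing. A small verification sketch (the domain shown is a placeholder):

# The output should match the name servers printed by the terraform output above.
dig +short NS assistant.example.com
# Alternatively, if dig is not installed:
nslookup -type=NS assistant.example.com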
52 changes: 52 additions & 0 deletions azure/configure/db-init.template.yaml
@@ -0,0 +1,52 @@
apiVersion: v1
kind: Pod
metadata:
name: db-init
spec:
containers:
- name: db-init
image: postgres:$PG_VERSION
imagePullPolicy: IfNotPresent
command:
- "/bin/bash"
- "-c"
- "--"
args:
- |
echo "create assistant database and user"
echo "CREATE USER $DB_ASSISTANT_USERNAME WITH PASSWORD '$DB_ASSISTANT_PASSWORD';" | psql -qtAX
echo "CREATE DATABASE $DB_ASSISTANT_DATABASE WITH ENCODING = 'UTF8';" | psql -qtAX
echo "GRANT ALL PRIVILEGES ON DATABASE $DB_ASSISTANT_DATABASE TO $DB_ASSISTANT_USERNAME;" | psql -qtAX
echo "GRANT azure_pg_admin TO $DB_ASSISTANT_USERNAME;" | psql -qtAX
echo "ALTER DATABASE $DB_ASSISTANT_DATABASE OWNER TO $DB_ASSISTANT_USERNAME;" | psql -qtAX

echo "create studio database and user"
echo "CREATE USER $DB_STUDIO_USERNAME WITH PASSWORD '$DB_STUDIO_PASSWORD';" | psql -qtAX
echo "CREATE DATABASE $DB_STUDIO_DATABASE WITH ENCODING = 'UTF8';" | psql -qtAX
echo "GRANT ALL PRIVILEGES ON DATABASE $DB_STUDIO_DATABASE TO $DB_STUDIO_USERNAME;" | psql -qtAX
echo "GRANT azure_pg_admin TO $DB_STUDIO_USERNAME;" | psql -qtAX
echo "ALTER DATABASE $DB_STUDIO_DATABASE OWNER TO $DB_STUDIO_USERNAME;" | psql -qtAX

echo "create keycloak database and user"
echo "CREATE USER $DB_KEYCLOAK_USERNAME WITH PASSWORD '$DB_KEYCLOAK_PASSWORD';" | psql -qtAX
echo "CREATE DATABASE $DB_KEYCLOAK_DATABASE WITH ENCODING = 'UTF8';" | psql -qtAX
echo "GRANT ALL PRIVILEGES ON DATABASE $DB_KEYCLOAK_DATABASE TO $DB_KEYCLOAK_USERNAME;" | psql -qtAX
echo "GRANT azure_pg_admin TO $DB_KEYCLOAK_USERNAME;" | psql -qtAX
echo "ALTER DATABASE $DB_KEYCLOAK_DATABASE OWNER TO $DB_KEYCLOAK_USERNAME;" | psql -qtAX

echo "granting schema public to keycloak user role..."
echo "GRANT ALL PRIVILEGES ON SCHEMA public TO $DB_KEYCLOAK_USERNAME;" | psql -qtAX
echo "granted schema public to keycloak"

sleep 10
echo "done"
env:
- name: PGHOST
value: $DB_HOST
- name: PGUSER
value: $DB_ROOT_UN
- name: PGPASSWORD
value: "$DB_ROOT_PW"
- name: PGPORT
value: "$DB_PORT"
restartPolicy: Never
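A sketch of how this template is presumably rendered and applied (assumptions: PG_VERSION and the DB_* variables are already exported, and the rendered azure/configure/db-init.yaml is the file ignored in the .gitignore change above):

envsubst < azure/configure/db-init.template.yaml > azure/configure/db-init.yaml
kubectl apply -f azure/configure/db-init.yaml
kubectl logs -f pod/db-init    # runs once; restartPolicy: Never means it is not restarted
kubectl delete pod db-init     # optional clean-up once the databases and users exist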
14 changes: 14 additions & 0 deletions azure/configure/external-dns-secret.template.yaml
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: external-dns-azure
namespace: external-dns
type: Opaque
stringData:
azure.json: |
{
"tenantId": "${ARM_TENANT_ID}",
"subscriptionId": "${ARM_SUBSCRIPTION_ID}",
"resourceGroup": "${NAME}",
"useWorkloadIdentityExtension": true
}
37 changes: 37 additions & 0 deletions azure/configure/external-dns-values.template.yaml
@@ -0,0 +1,37 @@
serviceAccount:
create: true
name: external-dns
labels:
azure.workload.identity/use: "true"
annotations:
azure.workload.identity/client-id: $SERVICE_ACCOUNT_DNS

podLabels:
azure.workload.identity/use: "true"

extraVolumes:
- name: azure-config-file
secret:
secretName: external-dns-azure

extraVolumeMounts:
- name: azure-config-file
mountPath: /etc/kubernetes
readOnly: true

rbac:
create: true

policy: sync

sources:
- istio-gateway
- ingress

txtOwnerId: $NAME

provider:
name: azure

domainFilters:
- $DOMAIN
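A hedged install sketch combining the secret and values above (assumptions: the upstream kubernetes-sigs external-dns chart is used and the release lands in the external-dns namespace referenced by the Secret; the actual install script is not part of this diff):

helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/ --force-update
kubectl create ns external-dns
envsubst < external-dns-secret.template.yaml > external-dns-secret.yaml
envsubst < external-dns-values.template.yaml > external-dns-values.yaml
kubectl apply -f external-dns-secret.yaml
helm upgrade --install external-dns external-dns/external-dns -n external-dns -f external-dns-values.yaml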
46 changes: 46 additions & 0 deletions azure/configure/get-infra-values.sh
@@ -0,0 +1,46 @@
echo "Fetching some infrastructure values..."

# Authenticate with Kubernetes Cluster
echo "Generating kubeconfig to authenticate with Azure Kubernetes cluster..."
# To be able to interact with the Kubernetes cluster we deployed earlier, we need to obtain the credentials for it.
# These credentials are saved in a file called kubeconfig which the cloud provider CLI tool can generate for us and kubectl can use.
# Ensure we've got a path set up for the kubeconfig file:
export KUBECONFIG=$(pwd)/kubeconfig
echo "Kubeconfig path: $KUBECONFIG"
rm -f $KUBECONFIG
# Retrieve the credentials for the cluster:
az aks get-credentials --resource-group "$NAME" --name "$NAME"

# Get the directory where this script is located
# It also works when sourced from zsh
if [[ -n "${BASH_SOURCE[0]}" ]]; then
SOURCE_PATH="${BASH_SOURCE[0]}"
else
# For zsh compatibility
SOURCE_PATH="${(%):-%x}"
fi

SCRIPT_DIR="$(cd "$(dirname "$SOURCE_PATH")" && pwd)"
TARGET_DIR_RELATIVE="$SCRIPT_DIR/../deploy/_tf"
TARGET_DIR_ABSOLUTE=$(realpath "$TARGET_DIR_RELATIVE")

export DB_ROOT_UN=postgres
export DB_ROOT_PW=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw pg_main_pw)
export DB_PORT=5432
export DB_HOST=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw db_host)

export REDIS_HOST=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw redis_host)
export REDIS_AUTH=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw redis_pw)

export SERVICE_ACCOUNT_DNS=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw client_id_dns)
export SERVICE_ACCOUNT_STUDIO=$($TF_CMD -chdir=$TARGET_DIR_ABSOLUTE output -raw client_id_studio)

echo "Infrastructure values fetched successfully:"
echo "DB_ROOT_UN=$DB_ROOT_UN"
echo "DB_ROOT_PW=$DB_ROOT_PW"
echo "DB_PORT=$DB_PORT"
echo "DB_HOST=$DB_HOST"
echo "REDIS_HOST=$REDIS_HOST"
echo "REDIS_AUTH=$REDIS_AUTH"
echo "SERVICE_ACCOUNT_DNS=$SERVICE_ACCOUNT_DNS"
echo "SERVICE_ACCOUNT_STUDIO=$SERVICE_ACCOUNT_STUDIO"
37 changes: 37 additions & 0 deletions azure/configure/install-cert-manager.sh
@@ -0,0 +1,37 @@
set -e

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Source common utilities
source "$SCRIPT_DIR/../../utils/common.sh"

# Add the Jetstack Helm repo to your local machine so Helm can find the cert-manager chart used for installation:
print_info "Adding the Helm repo for cert-manager..."
helm repo add jetstack https://charts.jetstack.io --force-update

# Substitute the values in the template file with the actual values:
envsubst < $SCRIPT_DIR/cert-manager-values.template.yaml > $SCRIPT_DIR/cert-manager-values.yaml

# We'll create a new Kubernetes namespace for cert-manager so it can be isolated from the rest of our deployments. This is a good practice for organisation and security:
print_info "Creating a new Kubernetes namespace for cert-manager..."
kubectl create ns cert-manager
kubectl label namespace cert-manager istio-injection=enabled
print_info "Cert-manager namespace created and labeled for Istio injection!"

# Install cert-manager into its new namespace using the configuration we've just created:
print_info "Installing cert-manager into its new namespace using the configuration we've just created..."
helm upgrade --install -n cert-manager cert-manager jetstack/cert-manager -f $SCRIPT_DIR/cert-manager-values.yaml

# Configure cert-manager to issue LetsEncrypt certificates:
print_info "Configuring cert-manager to issue LetsEncrypt certificates..."
envsubst < $SCRIPT_DIR/cert-manager-certificate-issuer.template.yaml > $SCRIPT_DIR/cert-manager-certificate-issuer.yaml
kubectl apply -f $SCRIPT_DIR/cert-manager-certificate-issuer.yaml
print_info "Cert-manager configured to issue LetsEncrypt certificates!"

# Wait for the ClusterIssuer to be created successfully:
print_info "Validating that the ClusterIssuer was created successfully..."
print_info "Waiting for ClusterIssuer letsencrypt to become Ready..."
kubectl wait --for=condition=Ready clusterissuer/letsencrypt --timeout=180s
print_info "ClusterIssuer letsencrypt is Ready:"
kubectl get clusterissuer letsencrypt
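If the wait times out, or simply to inspect the result, a short verification sketch (read-only commands; any Certificate resources that consume this issuer are created later in the playbook):

kubectl describe clusterissuer letsencrypt       # shows the ACME registration status and any errors
kubectl get pods -n cert-manager                 # the cert-manager pods should all be Running
kubectl get certificate,certificaterequest -A    # populated once later steps request certificates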