diff --git a/.gitignore b/.gitignore
index e8e450b..9028023 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
gen/
+**/.terraform
+**/.terraform.lock.hcl
diff --git a/README.md b/README.md
index 8c80fbf..aa94862 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
This repository complements [Bufstream's documentation](https://buf.build/docs/bufstream/).
Within, you'll find a collection of modules that assist with deploying a Kubernetes cluster in
-either Amazon Web Services (AWS) or Google Cloud Platform (GCP). These modules generate necessary resources
+Amazon Web Services (AWS), Google Cloud Platform (GCP), or Microsoft Azure. These modules generate the necessary resources
and then deploy Bufstream. The cluster created is meant to be used as a demo environment for those who would like
to test Bufstream but don't have an existing Kubernetes cluster.
@@ -15,6 +15,8 @@ To run it, you need to create a `tfvars` file that includes the required Terrafo
See below for the required variables. There is a README for each module with more details on the
variables that can be set.
+Note that you'll also need to include a provider configuration in the folder of the desired cloud, as in the example below.
+
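+For example, a minimal provider configuration for Azure might look like the following (the subscription ID is a
+placeholder):
+
+```hcl
+provider "azurerm" {
+  features {}
+
+  # Placeholder; replace with your subscription ID or set ARM_SUBSCRIPTION_ID.
+  subscription_id = "00000000-0000-0000-0000-000000000000"
+}
+```
+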
Required environment variables:
| Variable | Description |
@@ -84,3 +86,24 @@ Recommended variables in `tfvars`:
| Variable | Description |
|--------------|--------------------------|
| cluster_name | Name for the GKE cluster |
+
+## Azure
+
+By default, the module creates all resources necessary to deploy a Kubernetes cluster to the desired subscription.
+It also creates some specific resources required for Bufstream: a storage account and container, a virtual network
+and required subnets, and the bufstream identity with its required role assignment to access storage.
+
+Required variables in `tfvars`:
+
+| Variable | Description |
+|-------------|---------------------------------------------------------------------------------------|
+| location | Where to deploy the resources. A region that supports availability zones is required. |
+
+Recommended variables in `tfvars`:
+
+| Variable | Description |
+|--------------|--------------------------|
+| cluster_name | Name for the AKS cluster |
+
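+For example, a minimal `tfvars` file for Azure might look like this (values are illustrative):
+
+```hcl
+location     = "centralus"
+cluster_name = "bufstream-demo"
+```
+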
+Note that due to Azure limitations, the plan will always show a diff, because the module includes resources that look
+up the current tenant_id.
diff --git a/aws/main.tf b/aws/main.tf
index 740ceed..0206633 100644
--- a/aws/main.tf
+++ b/aws/main.tf
@@ -65,8 +65,6 @@ resource "aws_lb" "bufstream" {
}
locals {
- context = module.kubernetes.cluster_arn
-
bufstream_values = templatefile("${path.module}/bufstream.yaml.tpl", {
region = var.region
bucket_name = module.storage.bucket_ref
@@ -84,14 +82,6 @@ locals {
})
}
-resource "local_file" "context" {
- count = var.generate_config_files_path != null ? 1 : 0
- content = local.context
- filename = "${var.generate_config_files_path}/context"
-
- file_permission = "0600"
-}
-
resource "local_file" "bufstream_values" {
count = var.generate_config_files_path != null ? 1 : 0
content = local.bufstream_values
diff --git a/azure/README.md b/azure/README.md
new file mode 100644
index 0000000..22996e2
--- /dev/null
+++ b/azure/README.md
@@ -0,0 +1,73 @@
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [azuread](#requirement\_azuread) | ~> 3.0 |
+| [azurerm](#requirement\_azurerm) | ~> 4.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [azurerm](#provider\_azurerm) | 4.27.0 |
+| [local](#provider\_local) | 2.5.2 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [kubernetes](#module\_kubernetes) | ./kubernetes | n/a |
+| [network](#module\_network) | ./network | n/a |
+| [storage](#module\_storage) | ./storage | n/a |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [azurerm_resource_group.rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_group) | resource |
+| [azurerm_resource_provider_registration.registrations](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/resource_provider_registration) | resource |
+| [local_file.bufstream_values](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.kubeconfig](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [azurerm_resource_group.rg](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/resource_group) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [bufstream\_identity\_create](#input\_bufstream\_identity\_create) | Whether to create a new Azure bufstream identity. | `bool` | `true` | no |
+| [bufstream\_identity\_name](#input\_bufstream\_identity\_name) | Name of Azure bufstream identity. | `string` | `"bufstream"` | no |
+| [bufstream\_k8s\_namespace](#input\_bufstream\_k8s\_namespace) | Bufstream Kubernetes Service Account Namespace to use if enabling workload identity federation. | `string` | `"bufstream"` | no |
+| [cluster\_create](#input\_cluster\_create) | Whether to create a new AKS cluster. | `bool` | `true` | no |
+| [cluster\_dns\_service\_ip](#input\_cluster\_dns\_service\_ip) | DNS Service IP. Must be within services\_subnet\_cidr. | `string` | `"10.192.4.10"` | no |
+| [cluster\_grant\_actor](#input\_cluster\_grant\_actor) | If cluster\_grant\_admin and this are set, grant the cluster admin role to the user with this email. | `string` | `null` | no |
+| [cluster\_grant\_admin](#input\_cluster\_grant\_admin) | Grant admin role permission to the actor running Terraform. If cluster\_grant\_actor is set, use that; otherwise use the current caller. | `bool` | `true` | no |
+| [cluster\_name](#input\_cluster\_name) | Name of AKS cluster to create or use. | `string` | `"bufstream"` | no |
+| [cluster\_subnet\_cidr](#input\_cluster\_subnet\_cidr) | CIDR of cluster subnet in the VPC. | `string` | `"10.192.0.0/23"` | no |
+| [cluster\_subnet\_create](#input\_cluster\_subnet\_create) | Whether to create a cluster subnet in the VPC. | `bool` | `true` | no |
+| [cluster\_subnet\_name](#input\_cluster\_subnet\_name) | Name of cluster subnet in the VPC. | `string` | `"bufstream-cluster"` | no |
+| [cluster\_vm\_size](#input\_cluster\_vm\_size) | Cluster VM size. | `string` | `"Standard_D4as_v5"` | no |
+| [generate\_config\_files\_path](#input\_generate\_config\_files\_path) | If present, generate config files for bufstream values and kubeconfig at the selected path. | `string` | `null` | no |
+| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use. | `string` | `"1.32"` | no |
+| [location](#input\_location) | Where to deploy the resources. A region that supports availability zones is required. | `string` | `"centralus"` | no |
+| [pods\_subnet\_cidr](#input\_pods\_subnet\_cidr) | CIDR of the pods subnet in the VPC. | `string` | `"10.192.2.0/23"` | no |
+| [pods\_subnet\_create](#input\_pods\_subnet\_create) | Whether to create a pods subnet in the VPC. | `bool` | `true` | no |
+| [pods\_subnet\_name](#input\_pods\_subnet\_name) | Name of pods subnet in the VPC. | `string` | `"bufstream-pods"` | no |
+| [resource\_group\_create](#input\_resource\_group\_create) | Whether to create a new resource group. | `bool` | `true` | no |
+| [resource\_group\_name](#input\_resource\_group\_name) | Name of new resource group to create or use. | `string` | `"bufstream"` | no |
+| [services\_subnet\_cidr](#input\_services\_subnet\_cidr) | Services CIDR. The service network is created automatically with the cluster if cluster\_create is true. | `string` | `"10.192.4.0/23"` | no |
+| [storage\_account\_create](#input\_storage\_account\_create) | Whether to create a new storage account. | `bool` | `true` | no |
+| [storage\_account\_name](#input\_storage\_account\_name) | Name of the storage account. | `string` | `"bufstream"` | no |
+| [storage\_container\_create](#input\_storage\_container\_create) | Whether to create a new storage container. | `bool` | `true` | no |
+| [storage\_container\_name](#input\_storage\_container\_name) | Name of the storage container. | `string` | `"bufstream"` | no |
+| [storage\_grant\_permissions](#input\_storage\_grant\_permissions) | Whether to grant necessary permissions on the storage account for the bufstream identity. | `bool` | `true` | no |
+| [storage\_kind](#input\_storage\_kind) | Storage account kind. | `string` | `"StorageV2"` | no |
+| [storage\_large\_file\_share\_enabled](#input\_storage\_large\_file\_share\_enabled) | Storage Large file share enabled. | `bool` | `false` | no |
+| [storage\_replication\_type](#input\_storage\_replication\_type) | Storage account replication type. | `string` | `"RAGRS"` | no |
+| [storage\_tier](#input\_storage\_tier) | Storage account tier. | `string` | `"Standard"` | no |
+| [vpc\_cidr](#input\_vpc\_cidr) | CIDR of new VPC to create or use. | `string` | `"10.192.0.0/16"` | no |
+| [vpc\_create](#input\_vpc\_create) | Whether to create a new VPC. | `bool` | `true` | no |
+| [vpc\_name](#input\_vpc\_name) | Name of new VPC to create or use. | `string` | `"bufstream"` | no |
+| [wif\_bufstream\_k8s\_service\_account](#input\_wif\_bufstream\_k8s\_service\_account) | Bufstream Kubernetes Service Account Name to use if enabling workload identity federation. | `string` | `"bufstream-service-account"` | no |
+| [wif\_create](#input\_wif\_create) | Whether to enable workload identity federation. | `bool` | `true` | no |
+
+## Outputs
+
+No outputs.
diff --git a/azure/bufstream.yaml.tpl b/azure/bufstream.yaml.tpl
new file mode 100644
index 0000000..b13d82d
--- /dev/null
+++ b/azure/bufstream.yaml.tpl
@@ -0,0 +1,18 @@
+storage:
+ use: azure
+ azure:
+ bucket: ${container_name}
+ endpoint: https://${account_name}.blob.core.windows.net
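+# The pod label and service account annotation below opt bufstream into AKS
+# workload identity, tying its Kubernetes service account to the managed
+# identity's client ID.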
+bufstream:
+ deployment:
+ podLabels:
+ azure.workload.identity/use: "true"
+ serviceAccount:
+ annotations:
+ azure.workload.identity/client-id: ${bufstream_identity}
+metadata:
+ use: etcd
+ etcd:
+ addresses:
+ - host: "bufstream-etcd.bufstream.svc.cluster.local"
+ port: 2379
diff --git a/azure/kubeconfig.yaml.tpl b/azure/kubeconfig.yaml.tpl
new file mode 100644
index 0000000..23bf53e
--- /dev/null
+++ b/azure/kubeconfig.yaml.tpl
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Config
+clusters:
+ - cluster:
+ certificate-authority-data: ${cluster_certificate}
+ server: ${cluster_host}
+ name: aks_${resource_group_name}_${cluster_name}
+contexts:
+ - context:
+ cluster: aks_${resource_group_name}_${cluster_name}
+ user: clusterAdmin_${resource_group_name}_${cluster_name}
+ name: aks_${resource_group_name}_${cluster_name}
+current-context: aks_${resource_group_name}_${cluster_name}
+users:
+ - name: clusterAdmin_${resource_group_name}_${cluster_name}
+ user:
+ exec:
+ apiVersion: client.authentication.k8s.io/v1beta1
+ args:
+ - get-token
+ - --login
+ - azurecli
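+          # 6dae42f8-4368-4678-94ff-3960e28e3630 is the well-known Azure AD
+          # server application ID for AKS, not a placeholder to replace.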
+ - --server-id
+ - 6dae42f8-4368-4678-94ff-3960e28e3630
+ command: kubelogin
+ env: null
+ installHint: |
+          kubelogin is not installed, which is required to connect to an AAD-enabled cluster.
+ To learn more, go to https://aka.ms/aks/kubelogin
+ provideClusterInfo: false
diff --git a/azure/kubernetes/main.tf b/azure/kubernetes/main.tf
new file mode 100644
index 0000000..6f97e56
--- /dev/null
+++ b/azure/kubernetes/main.tf
@@ -0,0 +1,140 @@
+locals {
+ cluster_ref = var.cluster_create ? azurerm_kubernetes_cluster.cluster[0] : data.azurerm_kubernetes_cluster.cluster[0]
+ bufstream_id_ref = var.bufstream_identity_create ? azurerm_user_assigned_identity.bufstream[0] : data.azurerm_user_assigned_identity.bufstream[0]
+}
+
+data "azurerm_client_config" "current" {}
+
+resource "azurerm_kubernetes_cluster" "cluster" {
+ count = var.cluster_create ? 1 : 0
+
+ name = var.cluster_name
+ resource_group_name = var.resource_group_name
+ location = var.location
+
+ dns_prefix = var.resource_group_name
+
+ kubernetes_version = var.kubernetes_version
+
+ sku_tier = "Standard"
+
+ network_profile {
+ network_plugin = "azure"
+ network_policy = "cilium"
+ network_data_plane = "cilium"
+
+ service_cidrs = var.cluster_service_cidrs
+ dns_service_ip = var.cluster_dns_service_ip
+ }
+
+ default_node_pool {
+ name = "default"
+ temporary_name_for_rotation = "defaulttmp"
+
+ vm_size = var.cluster_vm_size
+
+ auto_scaling_enabled = true
+ min_count = 1
+ max_count = 3
+
+ vnet_subnet_id = var.cluster_vnet_subnet_id
+ pod_subnet_id = var.cluster_pod_subnet_id
+
+ os_sku = "AzureLinux"
+
+ zones = ["1", "2", "3"]
+
+    // These defaults, if not set, cause spurious diffs after the initial creation.
+ upgrade_settings {
+ drain_timeout_in_minutes = 0
+ max_surge = "10%"
+ node_soak_duration_in_minutes = 0
+ }
+ }
+
+ automatic_upgrade_channel = "stable"
+ node_os_upgrade_channel = "NodeImage"
+
+ # Enable AKS Managed Entra-ID authentication, with Azure RBAC
+ azure_active_directory_role_based_access_control {
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ azure_rbac_enabled = true
+ }
+
+ oidc_issuer_enabled = true
+ role_based_access_control_enabled = true
+ local_account_disabled = true
+
+ workload_identity_enabled = true
+
+ # Disable legacy http application routing
+ http_application_routing_enabled = false
+
+ identity {
+ type = "SystemAssigned"
+ }
+
+ run_command_enabled = true
+
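+  # kubernetes_version drifts as the stable upgrade channel patches the cluster,
+  # and the AAD tenant_id is only resolved at apply time, so ignore both.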
+ lifecycle {
+ ignore_changes = [
+ kubernetes_version,
+ azure_active_directory_role_based_access_control[0].tenant_id,
+ ]
+ }
+}
+
+data "azuread_user" "actor" {
+  count = var.cluster_grant_admin && var.cluster_grant_actor != null ? 1 : 0
+
+ user_principal_name = var.cluster_grant_actor
+}
+
+resource "azurerm_role_assignment" "bufstream" {
+ count = var.cluster_grant_admin ? 1 : 0
+
+ scope = local.cluster_ref.id
+ role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin"
+ principal_id = var.cluster_grant_actor != null ? data.azuread_user.actor[0].object_id : data.azurerm_client_config.current.object_id
+
+ lifecycle {
+    # The plan will continuously show this as a change otherwise.
+ ignore_changes = [
+ principal_id,
+ ]
+ }
+}
+
+data "azurerm_kubernetes_cluster" "cluster" {
+ count = var.cluster_create ? 0 : 1
+
+ name = var.cluster_name
+ resource_group_name = var.resource_group_name
+}
+
+resource "azurerm_user_assigned_identity" "bufstream" {
+ count = var.bufstream_identity_create ? 1 : 0
+
+ name = var.bufstream_identity_name
+ resource_group_name = var.resource_group_name
+ location = var.location
+}
+
+data "azurerm_user_assigned_identity" "bufstream" {
+ count = var.bufstream_identity_create ? 0 : 1
+
+ name = var.bufstream_identity_name
+ resource_group_name = var.resource_group_name
+}
+
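+# Federate the cluster's OIDC issuer with the bufstream managed identity so that
+# tokens issued to the configured Kubernetes service account can be exchanged
+# for Azure AD tokens belonging to that identity.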
+resource "azurerm_federated_identity_credential" "federated_credential" {
+ count = var.wif_create ? 1 : 0
+
+ name = "bufstream"
+ resource_group_name = var.resource_group_name
+
+ parent_id = local.bufstream_id_ref.id
+ audience = ["api://AzureADTokenExchange"]
+ issuer = local.cluster_ref.oidc_issuer_url
+ subject = "system:serviceaccount:${var.wif_bufstream_k8s_namespace}:${var.wif_bufstream_k8s_service_account}"
+}
diff --git a/azure/kubernetes/outputs.tf b/azure/kubernetes/outputs.tf
new file mode 100644
index 0000000..74b7338
--- /dev/null
+++ b/azure/kubernetes/outputs.tf
@@ -0,0 +1,39 @@
+output "bufstream_identity" {
+ description = "Bufstream Identity ID"
+ value = local.bufstream_id_ref
+}
+
+output "cluster_name" {
+ description = "Container Cluster Endpoint"
+ value = local.cluster_ref.name
+}
+
+output "cert" {
+ description = "Container Cluster Certificate"
+ value = local.cluster_ref.kube_config[0].cluster_ca_certificate
+}
+
+output "endpoint" {
+ description = "Container Cluster Endpoint"
+ value = local.cluster_ref.kube_config[0].host
+}
+
+output "client_cert" {
+ description = "Container Cluster Client Certificate"
+ value = local.cluster_ref.kube_config[0].client_certificate
+}
+
+output "client_key" {
+ description = "Container Cluster Client Key"
+ value = local.cluster_ref.kube_config[0].client_key
+}
+
+output "admin_user" {
+ description = "Container Cluster Admin User"
+ value = local.cluster_ref.kube_config[0].username
+}
+
+output "admin_password" {
+ description = "Container Cluster Admin Password"
+ value = local.cluster_ref.kube_config[0].password
+}
diff --git a/azure/kubernetes/variables.tf b/azure/kubernetes/variables.tf
new file mode 100644
index 0000000..5f53945
--- /dev/null
+++ b/azure/kubernetes/variables.tf
@@ -0,0 +1,97 @@
+variable "resource_group_name" {
+ description = "Resource group name to create the aks cluster within."
+ type = string
+}
+
+variable "location" {
+ description = "Where to deploy the resources. A region that supports availability zones is required."
+ type = string
+}
+
+variable "cluster_vm_size" {
+ description = "Cluster VM size."
+ type = string
+ default = "Standard_D4as_v5"
+}
+
+variable "cluster_vnet_subnet_id" {
+ description = "ID of subnet to use for nodes/cluster."
+ type = string
+}
+
+variable "cluster_pod_subnet_id" {
+ description = "ID of subnet to use for pods."
+ type = string
+}
+
+variable "cluster_service_cidrs" {
+ description = "Service CIDRs."
+ type = list(string)
+ default = ["10.192.8.0/23"]
+}
+
+variable "cluster_dns_service_ip" {
+ description = "DNS Service IP."
+ type = string
+ default = "10.192.8.10"
+}
+
+variable "kubernetes_version" {
+ description = "Kubernetes version to use."
+ type = string
+ default = "1.32"
+}
+
+variable "cluster_create" {
+ description = "Whether to create a new AKS cluster."
+ type = bool
+ default = true
+}
+
+variable "cluster_name" {
+ description = "Name of AKS cluster to create or use."
+ type = string
+ default = "bufstream"
+}
+
+variable "cluster_grant_admin" {
+ description = "Grant admin role permission to the TF running actor. If cluster_admin_actor is set, use that, otherwise use the current caller."
+ type = bool
+ default = true
+}
+
+variable "cluster_grant_actor" {
+ description = "If cluster_grant_admin and this are set, grant cluster admin role to user with this email."
+ type = string
+ default = null
+}
+
+variable "bufstream_identity_create" {
+ description = "Whether to create a new Azure bufstream identity."
+ type = bool
+ default = true
+}
+
+variable "bufstream_identity_name" {
+ description = "Name of Azure bufstream identity."
+ type = string
+ default = "bufstream"
+}
+
+variable "wif_create" {
+ description = "Whether to enable workload identity federation."
+  type        = bool
+ default = true
+}
+
+variable "wif_bufstream_k8s_namespace" {
+ description = "Bufstream Kubernetes Service Account Namespace to use if enabling workload identity federation."
+ type = string
+ default = "bufstream"
+}
+
+variable "wif_bufstream_k8s_service_account" {
+ description = "Bufstream Kubernetes Service Account Name to use if enabling workload identity federation."
+ type = string
+ default = "bufstream-service-account"
+}
diff --git a/azure/main.tf b/azure/main.tf
new file mode 100644
index 0000000..07c3d88
--- /dev/null
+++ b/azure/main.tf
@@ -0,0 +1,154 @@
+locals {
+ rg_ref = var.resource_group_create ? azurerm_resource_group.rg[0].id : data.azurerm_resource_group.rg[0].id
+}
+
+resource "azurerm_resource_provider_registration" "registrations" {
+ for_each = {
+ "Microsoft.Network" = {
+ // Allow deploying V2 application gateways with only a private IP
+ // https://learn.microsoft.com/en-us/azure/application-gateway/application-gateway-private-deployment
+ features = {
+ "EnableApplicationGatewayNetworkIsolation" : true,
+ }
+ },
+ "Microsoft.ContainerService" = {features = {}},
+ "Microsoft.Storage" = {features = {}},
+ }
+
+ name = each.key
+
+ dynamic "feature" {
+ for_each = each.value.features
+
+ content {
+ name = feature.key
+ registered = feature.value
+ }
+ }
+}
+
+resource "azurerm_resource_group" "rg" {
+ count = var.resource_group_create ? 1 : 0
+
+ name = var.resource_group_name
+ location = var.location
+}
+
+data "azurerm_resource_group" "rg" {
+ count = var.resource_group_create ? 0 : 1
+
+ name = var.resource_group_name
+}
+
+module "network" {
+ source = "./network"
+
+ vpc_create = var.vpc_create
+ vpc_name = var.vpc_name
+ resource_group_name = var.resource_group_name
+ location = var.location
+
+ address_space = [var.vpc_cidr]
+
+ cluster_subnet_create = var.cluster_subnet_create
+ cluster_subnet_name = var.cluster_subnet_name
+ cluster_subnet_cidr = var.cluster_subnet_cidr
+ pods_subnet_create = var.pods_subnet_create
+ pods_subnet_name = var.pods_subnet_name
+ pods_subnet_cidr = var.pods_subnet_cidr
+
+ depends_on = [
+ azurerm_resource_provider_registration.registrations,
+ ]
+}
+
+module "kubernetes" {
+ source = "./kubernetes"
+
+ resource_group_name = var.resource_group_name
+ location = var.location
+
+ kubernetes_version = var.kubernetes_version
+
+ cluster_create = var.cluster_create
+ cluster_name = var.cluster_name
+
+ cluster_grant_admin = var.cluster_grant_admin
+
+ cluster_vm_size = var.cluster_vm_size
+ cluster_service_cidrs = [var.services_subnet_cidr]
+ cluster_dns_service_ip = var.cluster_dns_service_ip
+ cluster_vnet_subnet_id = module.network.cluster_subnet.id
+ cluster_pod_subnet_id = module.network.pods_subnet.id
+
+ bufstream_identity_create = var.bufstream_identity_create
+ bufstream_identity_name = var.bufstream_identity_name
+
+ wif_create = var.wif_create
+ wif_bufstream_k8s_namespace = var.bufstream_k8s_namespace
+ wif_bufstream_k8s_service_account = var.wif_bufstream_k8s_service_account
+
+ depends_on = [
+ azurerm_resource_provider_registration.registrations,
+ ]
+}
+
+module "storage" {
+ source = "./storage"
+
+ storage_account_create = var.storage_account_create
+ storage_container_create = var.storage_container_create
+
+ storage_account_name = var.storage_account_name
+ storage_container_name = var.storage_container_name
+ resource_group_name = var.resource_group_name
+ location = var.location
+
+ storage_kind = var.storage_kind
+ storage_tier = var.storage_tier
+ storage_replication_type = var.storage_replication_type
+
+ storage_large_file_share_enabled = var.storage_large_file_share_enabled
+
+ bufstream_identity = module.kubernetes.bufstream_identity.principal_id
+
+ depends_on = [
+ azurerm_resource_provider_registration.registrations,
+ ]
+}
+
+locals {
+ bufstream_values = templatefile("${path.module}/bufstream.yaml.tpl", {
+ account_name = module.storage.storage_account_name
+ container_name = module.storage.storage_container_name
+ bufstream_identity = module.kubernetes.bufstream_identity.client_id
+ })
+
+ kubeconfig = templatefile("${path.module}/kubeconfig.yaml.tpl", {
+ resource_group_name = var.resource_group_name
+ cluster_name = module.kubernetes.cluster_name
+ cluster_host = module.kubernetes.endpoint
+ cluster_certificate = module.kubernetes.cert
+
+ admin_user = module.kubernetes.admin_user
+ admin_password = module.kubernetes.admin_password
+ client_cert = module.kubernetes.client_cert
+ client_key = module.kubernetes.client_key
+ })
+}
+
+resource "local_file" "bufstream_values" {
+ count = var.generate_config_files_path != null ? 1 : 0
+ content = local.bufstream_values
+ filename = "${var.generate_config_files_path}/bufstream.yaml"
+
+ file_permission = "0600"
+}
+
+resource "local_file" "kubeconfig" {
+ count = var.generate_config_files_path != null ? 1 : 0
+ content = local.kubeconfig
+ filename = "${var.generate_config_files_path}/kubeconfig.yaml"
+
+ file_permission = "0600"
+}
diff --git a/azure/network/main.tf b/azure/network/main.tf
new file mode 100644
index 0000000..3a1518f
--- /dev/null
+++ b/azure/network/main.tf
@@ -0,0 +1,66 @@
+locals {
+ vpc_ref = var.vpc_create ? azurerm_virtual_network.network[0] : data.azurerm_virtual_network.network[0]
+ cluster_subnet_ref = var.cluster_subnet_create ? azurerm_subnet.cluster[0] : data.azurerm_subnet.cluster[0]
+ pods_subnet_ref = var.pods_subnet_create ? azurerm_subnet.pods[0] : data.azurerm_subnet.pods[0]
+}
+
+resource "azurerm_virtual_network" "network" {
+ count = var.vpc_create ? 1 : 0
+
+ name = var.vpc_name
+ resource_group_name = var.resource_group_name
+ location = var.location
+ address_space = var.address_space
+}
+
+data "azurerm_virtual_network" "network" {
+ count = var.vpc_create ? 0 : 1
+
+ name = var.vpc_name
+ resource_group_name = var.resource_group_name
+}
+
+resource "azurerm_subnet" "cluster" {
+ count = var.cluster_subnet_create ? 1 : 0
+
+ name = var.cluster_subnet_name
+ address_prefixes = [var.cluster_subnet_cidr]
+
+ virtual_network_name = local.vpc_ref.name
+ resource_group_name = var.resource_group_name
+}
+
+data "azurerm_subnet" "cluster" {
+ count = var.cluster_subnet_create ? 0 : 1
+
+ name = var.cluster_subnet_name
+ virtual_network_name = local.vpc_ref.name
+ resource_group_name = var.resource_group_name
+}
+
+resource "azurerm_subnet" "pods" {
+ count = var.pods_subnet_create ? 1 : 0
+
+ name = var.pods_subnet_name
+ address_prefixes = [var.pods_subnet_cidr]
+
+ virtual_network_name = local.vpc_ref.name
+ resource_group_name = var.resource_group_name
+
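+  # AKS dynamic pod IP allocation requires the pod subnet to be delegated to
+  # Microsoft.ContainerService/managedClusters.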
+ delegation {
+ name = "aks-delegation"
+
+ service_delegation {
+ name = "Microsoft.ContainerService/managedClusters"
+ actions = ["Microsoft.Network/virtualNetworks/subnets/join/action"]
+ }
+ }
+}
+
+data "azurerm_subnet" "pods" {
+ count = var.pods_subnet_create ? 0 : 1
+
+ name = var.pods_subnet_name
+ virtual_network_name = local.vpc_ref.name
+ resource_group_name = var.resource_group_name
+}
diff --git a/azure/network/outputs.tf b/azure/network/outputs.tf
new file mode 100644
index 0000000..7ec75ac
--- /dev/null
+++ b/azure/network/outputs.tf
@@ -0,0 +1,9 @@
+output "cluster_subnet" {
+ description = "Cluster subnet"
+ value = local.cluster_subnet_ref
+}
+
+output "pods_subnet" {
+ description = "Pods subnet"
+ value = local.pods_subnet_ref
+}
diff --git a/azure/network/variables.tf b/azure/network/variables.tf
new file mode 100644
index 0000000..22c5d65
--- /dev/null
+++ b/azure/network/variables.tf
@@ -0,0 +1,62 @@
+variable "vpc_create" {
+ description = "Whether to create a new VPC in GCP."
+ type = bool
+ default = true
+}
+
+variable "vpc_name" {
+ description = "Name of new VPC to create or use."
+ type = string
+ default = "bufstream"
+}
+
+variable "resource_group_name" {
+ description = "Name of new Resource Group to create."
+ type = string
+}
+
+variable "location" {
+ description = "Where to deploy the resources. A region that supports availability zones is required."
+ type = string
+}
+
+variable "address_space" {
+ description = "Virtual network address space"
+ type = list(string)
+}
+
+variable "cluster_subnet_create" {
+ description = "Whether to create a cluster subnet in the VPC."
+ type = bool
+ default = true
+}
+
+variable "cluster_subnet_name" {
+ description = "Name of cluster subnet in the VPC."
+ type = string
+ default = "bufstream-cluster"
+}
+
+variable "cluster_subnet_cidr" {
+ description = "CIDR of cluster subnet in the VPC."
+ type = string
+ default = "10.192.0.0/23"
+}
+
+variable "pods_subnet_create" {
+ description = "Whether to create a pods subnet in the VPC."
+ type = bool
+ default = true
+}
+
+variable "pods_subnet_name" {
+ description = "Name of pods subnet in the VPC."
+ type = string
+ default = "bufstream-pods"
+}
+
+variable "pods_subnet_cidr" {
+ description = "CIDR of the pods subnet in the VPC."
+ type = string
+ default = "10.192.2.0/23"
+}
diff --git a/azure/provider.tf b/azure/provider.tf
new file mode 100644
index 0000000..0a0b57c
--- /dev/null
+++ b/azure/provider.tf
@@ -0,0 +1,12 @@
+terraform {
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = "~> 4.0"
+ }
+ azuread = {
+ source = "hashicorp/azuread"
+ version = "~> 3.0"
+ }
+ }
+}
diff --git a/azure/storage/main.tf b/azure/storage/main.tf
new file mode 100644
index 0000000..28fed43
--- /dev/null
+++ b/azure/storage/main.tf
@@ -0,0 +1,47 @@
+locals {
+ storage_account_ref = var.storage_account_create ? azurerm_storage_account.bufstream[0] : data.azurerm_storage_account.bufstream[0]
+ storage_container_ref = var.storage_container_create ? azurerm_storage_container.bufstream[0] : data.azurerm_storage_container.bufstream[0]
+}
+
+resource "azurerm_storage_account" "bufstream" {
+ count = var.storage_account_create ? 1 : 0
+
+ name = var.storage_account_name
+ resource_group_name = var.resource_group_name
+ location = var.location
+ account_kind = var.storage_kind
+ account_tier = var.storage_tier
+ account_replication_type = var.storage_replication_type
+ large_file_share_enabled = var.storage_large_file_share_enabled
+
+ allow_nested_items_to_be_public = false
+}
+
+data "azurerm_storage_account" "bufstream" {
+ count = var.storage_account_create ? 0 : 1
+
+ name = var.storage_account_name
+ resource_group_name = var.resource_group_name
+}
+
+resource "azurerm_storage_container" "bufstream" {
+ count = var.storage_container_create ? 1 : 0
+
+ name = var.storage_container_name
+ storage_account_id = local.storage_account_ref.id
+}
+
+data "azurerm_storage_container" "bufstream" {
+ count = var.storage_container_create ? 0 : 1
+
+ name = var.storage_container_name
+ storage_account_id = local.storage_account_ref.id
+}
+
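+# Scope the role assignment to the storage container rather than the whole
+# account, so the bufstream identity only receives blob access where needed.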
+resource "azurerm_role_assignment" "bufstream" {
+ count = var.storage_grant_permissions ? 1 : 0
+
+ scope = local.storage_container_ref.id
+ role_definition_name = "Storage Blob Data Contributor"
+ principal_id = var.bufstream_identity
+}
diff --git a/azure/storage/outputs.tf b/azure/storage/outputs.tf
new file mode 100644
index 0000000..4ca0430
--- /dev/null
+++ b/azure/storage/outputs.tf
@@ -0,0 +1,7 @@
+output "storage_account_name" {
+ value = local.storage_account_ref.name
+}
+
+output "storage_container_name" {
+ value = local.storage_container_ref.name
+}
diff --git a/azure/storage/variables.tf b/azure/storage/variables.tf
new file mode 100644
index 0000000..61f0434
--- /dev/null
+++ b/azure/storage/variables.tf
@@ -0,0 +1,66 @@
+variable "resource_group_name" {
+ description = "Name of new Resource Group to create."
+ type = string
+}
+
+variable "location" {
+ description = "Where to deploy the resources. A region that supports availability zones is required."
+ type = string
+}
+
+variable "storage_account_create" {
+ description = "Whether to create a new storage account."
+  type        = bool
+ default = true
+}
+
+variable "storage_account_name" {
+ description = "Name of the storage account."
+ type = string
+}
+
+variable "storage_container_create" {
+ description = "Whether to create the storage account."
+ type = bool
+ default = true
+}
+
+variable "storage_container_name" {
+ description = "Name of the storage container."
+ type = string
+}
+
+variable "storage_kind" {
+ description = "Storage account kind"
+ type = string
+ default = "StorageV2"
+}
+
+variable "storage_tier" {
+ description = "Storage account tier"
+ type = string
+ default = "Standard"
+}
+
+variable "storage_replication_type" {
+ description = "Storage account replication type"
+ type = string
+ default = "RAGRS"
+}
+
+variable "storage_large_file_share_enabled" {
+ description = "Large file share enabled"
+ type = bool
+ default = false
+}
+
+variable "storage_grant_permissions" {
+ description = "Grant necessary permissions on the storage account for the bufstream identity."
+  type        = bool
+ default = true
+}
+
+variable "bufstream_identity" {
+ description = "Bufstream Identity."
+ type = string
+}
diff --git a/azure/variables.tf b/azure/variables.tf
new file mode 100644
index 0000000..188df68
--- /dev/null
+++ b/azure/variables.tf
@@ -0,0 +1,216 @@
+variable "resource_group_create" {
+ description = "Whether to create a new resource group."
+ type = bool
+ default = true
+}
+
+variable "resource_group_name" {
+ description = "Name of new resource group to create or use."
+ type = string
+ default = "bufstream"
+}
+
+variable "location" {
+ description = "Where to deploy the resources. A region that supports availability zones is required."
+ type = string
+ default = "centralus"
+}
+
+# Network
+variable "vpc_create" {
+ description = "Whether to create a new VPC."
+ type = bool
+ default = true
+}
+
+variable "vpc_name" {
+ description = "Name of new VPC to create or use."
+ type = string
+ default = "bufstream"
+}
+
+variable "vpc_cidr" {
+ description = "CIDR of new VPC to create or use."
+ type = string
+ default = "10.192.0.0/16"
+}
+
+variable "cluster_subnet_create" {
+ description = "Whether to create a cluster subnet in the VPC."
+ type = bool
+ default = true
+}
+
+variable "cluster_subnet_name" {
+ description = "Name of cluster subnet in the VPC."
+ type = string
+ default = "bufstream-cluster"
+}
+
+variable "cluster_subnet_cidr" {
+ description = "CIDR of cluster subnet in the VPC."
+ type = string
+ default = "10.192.0.0/23"
+}
+
+variable "pods_subnet_create" {
+ description = "Whether to create a pods subnet in the VPC."
+ type = bool
+ default = true
+}
+
+variable "pods_subnet_name" {
+ description = "Name of pods subnet in the VPC."
+ type = string
+ default = "bufstream-pods"
+}
+
+variable "pods_subnet_cidr" {
+ description = "CIDR of the pods subnet in the VPC."
+ type = string
+ default = "10.192.2.0/23"
+}
+
+variable "services_subnet_cidr" {
+ description = "Services CIDR. It is auto-created with the cluster if cluster_create is true."
+ type = string
+ default = "10.192.4.0/23"
+}
+
+# Kubernetes Cluster
+
+variable "kubernetes_version" {
+ description = "Kubernetes version to use."
+ type = string
+ default = "1.32"
+}
+
+variable "cluster_vm_size" {
+ description = "Cluster VM size."
+ type = string
+ default = "Standard_D4as_v5"
+}
+
+variable "cluster_dns_service_ip" {
+ description = "DNS Service IP. Must be within services_subnet_cidr."
+ type = string
+ default = "10.192.4.10"
+}
+
+variable "cluster_create" {
+ description = "Whether to create a new AKS cluster."
+ type = bool
+ default = true
+}
+
+variable "cluster_name" {
+ description = "Name of AKS cluster to create or use."
+ type = string
+ default = "bufstream"
+}
+
+variable "cluster_grant_admin" {
+ description = "Grant admin role permission to the TF running actor. If cluster_admin_actor is set, use that, otherwise use the current caller."
+ type = bool
+ default = true
+}
+
+variable "cluster_grant_actor" {
+ description = "If cluster_grant_admin and this are set, grant cluster admin role to user with this email."
+ type = string
+ default = null
+}
+
+variable "bufstream_identity_create" {
+ description = "Whether to create a new Azure bufstream identity."
+ type = bool
+ default = true
+}
+
+variable "bufstream_identity_name" {
+ description = "Name of Azure bufstream identity."
+ type = string
+ default = "bufstream"
+}
+
+variable "wif_create" {
+ description = "Whether to enable workload identity federation."
+  type        = bool
+ default = true
+}
+
+variable "bufstream_k8s_namespace" {
+ description = "Bufstream Kubernetes Service Account Namespace to use if enabling workload identity federation."
+ type = string
+ default = "bufstream"
+}
+
+variable "wif_bufstream_k8s_service_account" {
+ description = "Bufstream Kubernetes Service Account Name to use if enabling workload identity federation."
+ type = string
+ default = "bufstream-service-account"
+}
+
+# Storage
+
+variable "storage_account_create" {
+ description = "Whether to create a new storage account."
+  type        = bool
+ default = true
+}
+
+variable "storage_account_name" {
+ description = "Name of the storage account."
+ type = string
+ default = "bufstream"
+}
+
+variable "storage_container_create" {
+ description = "Whether to create a new storage container."
+  type        = bool
+ default = true
+}
+
+variable "storage_container_name" {
+ description = "Name of the storage container."
+ type = string
+ default = "bufstream"
+}
+
+variable "storage_kind" {
+ description = "Storage account kind."
+ type = string
+ default = "StorageV2"
+}
+
+variable "storage_tier" {
+ description = "Storage account tier."
+ type = string
+ default = "Standard"
+}
+
+variable "storage_replication_type" {
+ description = "Storage account replication type."
+ type = string
+ default = "RAGRS"
+}
+
+variable "storage_large_file_share_enabled" {
+ description = "Storage Large file share enabled."
+ type = bool
+ default = false
+}
+
+variable "storage_grant_permissions" {
+ description = "Whether to grant necessary permissions on the storage account for the bufstream identity."
+  type        = bool
+ default = true
+}
+
+# Config Gen
+
+variable "generate_config_files_path" {
+ description = "If present, generate config files for bufstream values, kubeconfig and the context name at the selected path."
+ type = string
+ default = null
+}
diff --git a/config/azure-storage-class.yaml b/config/azure-storage-class.yaml
new file mode 100644
index 0000000..49c38e4
--- /dev/null
+++ b/config/azure-storage-class.yaml
@@ -0,0 +1,13 @@
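+# Storage class applied by install.sh for Azure clusters. PremiumV2_LRS disks
+# are zonal and take explicit IOPS/throughput settings rather than deriving
+# them from disk size.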
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: premium-rwo
+provisioner: disk.csi.azure.com
+reclaimPolicy: Delete
+volumeBindingMode: WaitForFirstConsumer
+parameters:
+ cachingMode: None
+ skuName: PremiumV2_LRS
+ DiskIOPSReadWrite: "5000"
+ DiskMBpsReadWrite: "1200"
+allowVolumeExpansion: true
diff --git a/gcp/kubeconfig.yaml.tpl b/gcp/kubeconfig.yaml.tpl
index e0916cd..cf35892 100644
--- a/gcp/kubeconfig.yaml.tpl
+++ b/gcp/kubeconfig.yaml.tpl
@@ -1,5 +1,17 @@
apiVersion: v1
kind: Config
+clusters:
+ - name: gke_${project}_${region}_${cluster_name}
+ cluster:
+ certificate-authority-data: ${cluster_certificate}
+ server: https://${cluster_host}
+ tls-server-name: kubernetes.default
+contexts:
+ - name: gke_${project}_${region}_${cluster_name}
+ context:
+ cluster: gke_${project}_${region}_${cluster_name}
+ user: gke
+current-context: gke_${project}_${region}_${cluster_name}
users:
- name: gke
user:
@@ -13,14 +25,3 @@ users:
%{~ endif ~}
installHint: Install gke-gcloud-auth-plugin for use with kubectl by following https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
provideClusterInfo: true
-clusters:
- - name: gke_${project}_${region}_${cluster_name}
- cluster:
- certificate-authority-data: ${cluster_certificate}
- server: https://${cluster_host}
- tls-server-name: kubernetes.default
-contexts:
- - name: gke_${project}_${region}_${cluster_name}
- context:
- cluster: gke_${project}_${region}_${cluster_name}
- user: gke
diff --git a/gcp/main.tf b/gcp/main.tf
index bb32826..81aae02 100644
--- a/gcp/main.tf
+++ b/gcp/main.tf
@@ -116,8 +116,6 @@ resource "google_compute_address" "ip" {
data "google_client_openid_userinfo" "user" {}
locals {
- context = "gke_${var.project_id}_${module.kubernetes.cluster_region}_${module.kubernetes.cluster_name}"
-
bufstream_values = templatefile("${path.module}/bufstream.yaml.tpl", {
bucket_name = module.storage.bucket_ref
bufstream_service_account_email = module.kubernetes.bufstream_service_account
@@ -135,14 +133,6 @@ locals {
})
}
-resource "local_file" "context" {
- count = var.generate_config_files_path != null ? 1 : 0
- content = local.context
- filename = "${var.generate_config_files_path}/context"
-
- file_permission = "0600"
-}
-
resource "local_file" "bufstream_values" {
count = var.generate_config_files_path != null ? 1 : 0
content = local.bufstream_values
diff --git a/install.sh b/install.sh
index 85fa1d3..ef854ea 100755
--- a/install.sh
+++ b/install.sh
@@ -42,29 +42,25 @@ TF_VAR_generate_config_files_path="${CONFIG_GEN_PATH}" \
--var "bufstream_k8s_namespace=${BUFSTREAM_NAMESPACE:-bufstream}"
# Neither AWS (even in Auto Mode) nor Azure comes with a working default storage class.
-if [[ "${BUFSTREAM_CLOUD}" == "aws" ]] ; then
- echo "Creating storage class..."
+if [[ "${BUFSTREAM_CLOUD}" == "aws" || "${BUFSTREAM_CLOUD}" == "azure" ]] ; then
+ echo "Creating ${BUFSTREAM_CLOUD} storage class..."
kubectl \
--kubeconfig "${CONFIG_GEN_PATH}/kubeconfig.yaml" \
- --context $(cat "${CONFIG_GEN_PATH}/context") \
- apply -f ../config/aws-storage-class.yaml
+ apply -f "../config/${BUFSTREAM_CLOUD}-storage-class.yaml"
fi
echo "Create namespace..."
# Use dry run + apply to ignore existing namespace.
kubectl \
--kubeconfig "${CONFIG_GEN_PATH}/kubeconfig.yaml" \
- --context $(cat "${CONFIG_GEN_PATH}/context") \
create namespace "${BUFSTREAM_NAMESPACE:-bufstream}" --dry-run=client -o yaml \
| kubectl \
--kubeconfig "${CONFIG_GEN_PATH}/kubeconfig.yaml" \
- --context $(cat "${CONFIG_GEN_PATH}/context") \
apply -f -
echo "Installing ETCD..."
helm \
--kubeconfig "${CONFIG_GEN_PATH}/kubeconfig.yaml" \
- --kube-context $(cat "${CONFIG_GEN_PATH}/context") \
upgrade bufstream-etcd --install \
oci://registry-1.docker.io/bitnamicharts/etcd \
--namespace "${BUFSTREAM_NAMESPACE:-bufstream}" \
@@ -74,7 +70,6 @@ helm \
echo "Installing Bufstream..."
helm \
--kubeconfig "${CONFIG_GEN_PATH}/kubeconfig.yaml" \
- --kube-context $(cat "${CONFIG_GEN_PATH}/context") \
upgrade bufstream --install \
oci://us-docker.pkg.dev/buf-images-1/bufstream/charts/bufstream \
--version "${BUFSTREAM_VERSION}" \