diff --git a/README.md b/README.md
index a137fe37..7d85f4bb 100644
--- a/README.md
+++ b/README.md
@@ -6,8 +6,6 @@ This will deploy one or more clusters in the cloud, with optional post-install t
 
 ## Container
  * Kubernetes
- * K3s
- * Docker EE
  * Openshift 4 (only on AWS at this time)
  * EKS (only makes sense on AWS)
  * AKS (only makes sense on Azure)
@@ -156,7 +154,7 @@ The `defaults.yml` file sets a number of deployment variables:
  * `post_script` - script to run on each master after deployment, output will go to stdout
  * `nodes` - the number of worker nodes on each cluster
  * `lock` - prevents deployment from being deleted by `px-deploy destroy ...`. use `px-deploy unlock -n ...` to unlock
- * `platform` - can be set to either k8s, k3s, none, dockeree, ocp4, eks, gke or nomad
+ * `platform` - can be set to either k8s, none, ocp4, eks or gke
  * `azure_type` - the Azure machine type for each node
  * `azure_disks` - similar to aws_ebs, for example: `"Standard_LRS:49 Premium_LRS:50"`
  * `azure_client_id` - Azure Client ID
diff --git a/px-deploy.go b/px-deploy.go
index 9e12b8c9..421c076d 100644
--- a/px-deploy.go
+++ b/px-deploy.go
@@ -413,7 +413,7 @@ func main() {
 	defaults := parse_yaml("defaults.yml")
 
 	cmdCreate.Flags().StringVarP(&createName, "name", "n", "", "name of deployment to be created (if blank, generate UUID)")
-	cmdCreate.Flags().StringVarP(&flags.Platform, "platform", "p", "", "k8s | dockeree | none | k3s | ocp4 | rancher | eks | gke | aks | nomad (default "+defaults.Platform+")")
+	cmdCreate.Flags().StringVarP(&flags.Platform, "platform", "p", "", "k8s | none | ocp4 | rancher | eks | gke | aks (default "+defaults.Platform+")")
 	cmdCreate.Flags().StringVarP(&flags.Clusters, "clusters", "c", "", "number of clusters to be deployed (default "+defaults.Clusters+")")
 	cmdCreate.Flags().StringVarP(&flags.Nodes, "nodes", "N", "", "number of nodes to be deployed in each cluster (default "+defaults.Nodes+")")
 	cmdCreate.Flags().StringVarP(&flags.K8s_Version, "k8s_version", "k", "", "Kubernetes version to be deployed (default "+defaults.K8s_Version+")")
@@ -580,7 +580,7 @@ func validate_config(config *Config) []string {
 		config.Vsphere_Folder = strings.TrimRight(config.Vsphere_Folder, "/")
 	}
 
-	if config.Platform != "k8s" && config.Platform != "k3s" && config.Platform != "none" && config.Platform != "dockeree" && config.Platform != "ocp4" && config.Platform != "rancher" && config.Platform != "eks" && config.Platform != "gke" && config.Platform != "aks" && config.Platform != "nomad" {
+	if config.Platform != "k8s" && config.Platform != "none" && config.Platform != "ocp4" && config.Platform != "rancher" && config.Platform != "eks" && config.Platform != "gke" && config.Platform != "aks" {
 		errormsg = append(errormsg, "Invalid platform '"+config.Platform+"'")
 	}
 
diff --git a/scripts/px-nomad b/scripts/px-nomad
deleted file mode 100644
index 2f6f7014..00000000
--- a/scripts/px-nomad
+++ /dev/null
@@ -1,128 +0,0 @@
-cat <<EOF >/tmp/px.nomad
-job "portworx" {
-  type = "service"
-  datacenters = ["px-deploy-$cluster"]
-
-  group "portworx" {
-    count = 3
-
-    constraint {
-      operator = "distinct_hosts"
-      value = "true"
-    }
-
-    # restart policy for failed portworx tasks
-    restart {
-      attempts = 3
-      delay = "30s"
-      interval = "5m"
-      mode = "fail"
-    }
-
-    # how to handle upgrades of portworx instances
-    update {
-      max_parallel = 1
-      health_check = "checks"
-      min_healthy_time = "10s"
-      healthy_deadline = "5m"
-      auto_revert = true
-      canary = 0
-      stagger = "30s"
-    }
-
-    network {
-      port "portworx" {
-        static = "9015"
-      }
-    }
-
-    task "px-node" {
-      driver = "docker"
-      kill_timeout = "120s"   # allow portworx 2 min to gracefully shut down
-      kill_signal = "SIGTERM" # use SIGTERM to shut down the nodes
-
-      # consul service check for portworx instances
-      service {
-        name = "portworx"
-        check {
-          port = "portworx"
-          type = "http"
-          path = "/health"
-          interval = "10s"
-          timeout = "2s"
-        }
-      }
-
-      # setup environment variables for px-nodes
-      env {
-        AUTO_NODE_RECOVERY_TIMEOUT_IN_SECS = "1500"
-        PX_TEMPLATE_VERSION = "V4"
-        CSI_ENDPOINT = "unix://var/lib/csi/csi.sock"
-      }
-
-      # CSI Driver config
-      csi_plugin {
-        id = "portworx"
-        type = "monolith"
-        mount_dir = "/var/lib/csi"
-      }
-
-      # container config
-      config {
-        image = "portworx/oci-monitor:$px_version"
-        network_mode = "host"
-        ipc_mode = "host"
-        privileged = true
-
-        # configure your parameters below
-        # do not remove the last parameter (needed for health check)
-        args = [
-          "-c", "px-deploy-$cluster",
-          "-a",
-          "-b",
-          "-k", "consul://192.168.$[$cluster+100].90:8500",
-          "--endpoint", "0.0.0.0:9015"
-        ]
-
-        volumes = [
-          "/var/cores:/var/cores",
-          "/var/run/docker.sock:/var/run/docker.sock",
-          "/run/containerd:/run/containerd",
-          "/etc/pwx:/etc/pwx",
-          "/opt/pwx:/opt/pwx",
-          "/proc:/host_proc",
-          "/etc/systemd/system:/etc/systemd/system",
-          "/var/run/log:/var/run/log",
-          "/var/log:/var/log",
-          "/var/run/dbus:/var/run/dbus"
-        ]
-
-      }
-
-      # resource config
-      resources {
-        cpu = 1024
-        memory = 2048
-      }
-
-    }
-  }
-}
-EOF
-
-dnf install -y consul
-cat <<EOF >/etc/consul.d/consul.hcl
-data_dir = "/opt/consul"
-server = true
-bootstrap_expect = 1
-advertise_addr = "$(hostname -i)"
-client_addr = "0.0.0.0"
-ui = true
-datacenter = "px-deploy-$cluster"
-retry_join = ["$(hostname -i)"]
-EOF
-
-systemctl enable consul
-systemctl start consul
-
-nomad job run /tmp/px.nomad
diff --git a/templates/px-nomad.yml b/templates/px-nomad.yml
deleted file mode 100644
index f64b172d..00000000
--- a/templates/px-nomad.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-description: Install and run Portworx on each cluster
-scripts: ["px-nomad"]
-platform: nomad
diff --git a/vagrant/dockeree-common b/vagrant/dockeree-common
deleted file mode 100644
index 1161a177..00000000
--- a/vagrant/dockeree-common
+++ /dev/null
@@ -1,3 +0,0 @@
-yum -y install container-selinux
-rpm -i https://storebits.docker.com/ee/trial/sub-b9e7ba43-59d6-4821-813d-8a735b5d9e8b/centos/7/x86_64/stable-19.03/Packages/docker-ee-19.03.5-3.el7.x86_64.rpm https://storebits.docker.com/ee/trial/sub-b9e7ba43-59d6-4821-813d-8a735b5d9e8b/centos/7/x86_64/stable-19.03/Packages/docker-ee-cli-19.03.5-3.el7.x86_64.rpm https://storebits.docker.com/ee/trial/sub-b9e7ba43-59d6-4821-813d-8a735b5d9e8b/centos/7/x86_64/stable-19.03/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm
-systemctl enable --now docker
diff --git a/vagrant/dockeree-master b/vagrant/dockeree-master
deleted file mode 100644
index 734821e2..00000000
--- a/vagrant/dockeree-master
+++ /dev/null
@@ -1,17 +0,0 @@
-docker container run --rm --name ucp -v /var/run/docker.sock:/var/run/docker.sock docker/ucp:3.2.6 install --pod-cidr 10.0.0.0/16 --admin-username=admin --admin-password=portworx
-cat <<EOF > /etc/yum.repos.d/kubernetes.repo
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-EOF
-yum install -y unzip kubectl
-AUTHTOKEN=$(curl -sk -d '{"username":"admin","password":"portworx"}' https://master-$cluster/auth/login | jq -r .auth_token)
-cd /root
-curl -sk -H "Authorization: Bearer $AUTHTOKEN" https://master-$cluster/api/clientbundle -o bundle.zip
-unzip bundle.zip
-mkdir /root/.kube
-cp kube.yml /root/.kube/config
diff --git a/vagrant/dockeree-node b/vagrant/dockeree-node
deleted file mode 100644
index 10cc360b..00000000
--- a/vagrant/dockeree-node
+++ /dev/null
@@ -1,6 +0,0 @@
-while : ; do
-  command=$(ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-$cluster docker swarm join-token worker | grep token)
-  [ $? -eq 0 ] && break
-  sleep 5
-done
-$command
diff --git a/vagrant/k3s-common b/vagrant/k3s-common
deleted file mode 100644
index 0f710787..00000000
--- a/vagrant/k3s-common
+++ /dev/null
@@ -1,2 +0,0 @@
-yum install -y container-selinux selinux-policy-base
-rpm -i https://rpm.rancher.io/k3s-selinux-0.1.1-rc1.el7.noarch.rpm
diff --git a/vagrant/k3s-master b/vagrant/k3s-master
deleted file mode 100644
index f3ac6d40..00000000
--- a/vagrant/k3s-master
+++ /dev/null
@@ -1,8 +0,0 @@
-curl -sfL https://get.k3s.io | sh -
-mkdir /root/.kube
-cp /etc/rancher/k3s/k3s.yaml /root/.kube/config
-ln -s /usr/local/bin/kubectl /usr/bin/
-while ! kubectl get nodes; do
-  echo waiting for k3s
-  sleep 1
-done
diff --git a/vagrant/k3s-node b/vagrant/k3s-node
deleted file mode 100644
index 9e75fe64..00000000
--- a/vagrant/k3s-node
+++ /dev/null
@@ -1,6 +0,0 @@
-while : ; do
-  token=$(ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-$cluster cat /var/lib/rancher/k3s/server/node-token)
-  [ $? -eq 0 ] && break
-  sleep 5
-done
-curl -sfL https://get.k3s.io | K3S_URL=https://master-$cluster:6443 K3S_TOKEN=$token sh -
diff --git a/vagrant/nomad-common b/vagrant/nomad-common
deleted file mode 100644
index 22b4796f..00000000
--- a/vagrant/nomad-common
+++ /dev/null
@@ -1,18 +0,0 @@
-cat <<\EOF >/etc/yum.repos.d/hashicorp.repo
-[hashicorp]
-name=Hashicorp Stable - $basearch
-baseurl=https://rpm.releases.hashicorp.com/RHEL/$releasever/$basearch/stable
-enabled=1
-gpgcheck=1
-gpgkey=https://rpm.releases.hashicorp.com/gpg
-
-[hashicorp-test]
-name=Hashicorp Test - $basearch
-baseurl=https://rpm.releases.hashicorp.com/RHEL/$releasever/$basearch/test
-enabled=0
-gpgcheck=1
-gpgkey=https://rpm.releases.hashicorp.com/gpg
-EOF
-
-while ! dnf install -y nomad-1.1.6-1.x86_64 docker; do sleep 1; done
-systemctl enable --now docker
diff --git a/vagrant/nomad-master b/vagrant/nomad-master
deleted file mode 100644
index 8a3f038f..00000000
--- a/vagrant/nomad-master
+++ /dev/null
@@ -1,11 +0,0 @@
-cat <<EOF >/etc/nomad.d/nomad.hcl
-data_dir = "/opt/nomad/data"
-datacenter = "px-deploy-$cluster"
-
-server {
-  enabled = true
-  bootstrap_expect = 1
-}
-EOF
-
-systemctl enable --now nomad
diff --git a/vagrant/nomad-node b/vagrant/nomad-node
deleted file mode 100644
index 7eefcfde..00000000
--- a/vagrant/nomad-node
+++ /dev/null
@@ -1,23 +0,0 @@
-cat <<EOF >/etc/nomad.d/nomad.hcl
-# Full configuration options can be found at https://www.nomadproject.io/docs/configuration
-
-data_dir = "/opt/nomad/data"
-datacenter = "px-deploy-$cluster"
-
-client {
-  enabled = true
-  servers = ["192.168.$[$cluster+100].90"]
-}
-
-plugin "docker" {
-  config {
-    allow_privileged = true
-    volumes {
-      enabled = true
-    }
-  }
-}
-EOF
-
-#while ! curl -s http://master-$cluster:4646 ; do sleep 1; echo waiting for nomad server; done
-systemctl enable --now nomad