diff --git a/scripts/helm-backup b/scripts/helm-backup
index 154823c9..07adce45 100644
--- a/scripts/helm-backup
+++ b/scripts/helm-backup
@@ -1,5 +1,24 @@
 NAMESPACE=central
-VERSION=2.9.1
+VERSION=2.10.0
+
+kubectl apply -f - </dev/null)
-kubectl cp -n $NAMESPACE $BACKUP_POD_NAME:pxbackupctl/linux/pxbackupctl /usr/bin/pxbackupctl
-chmod +x /usr/bin/pxbackupctl
+#BACKUP_POD_NAME=$(kubectl get pods -n $NAMESPACE -l app=px-backup -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+#kubectl cp -n $NAMESPACE $BACKUP_POD_NAME:pxbackupctl/linux/pxbackupctl /usr/bin/pxbackupctl
+#chmod +x /usr/bin/pxbackupctl
diff --git a/scripts/helm-backup-apps b/scripts/helm-backup-apps
index ad5ec5fd..65f06f16 100644
--- a/scripts/helm-backup-apps
+++ b/scripts/helm-backup-apps
@@ -1,41 +1,128 @@
-BACKUP_POD_IP=$(kubectl get pods -n central -l app=px-backup -o jsonpath='{.items[*].status.podIP}' 2>/dev/null)
 AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null)
 AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null)
-IMDSTOKEN=$(curl -s -X PUT 'http://169.254.169.254/latest/api/token' -H 'X-aws-ec2-metadata-token-ttl-seconds: 120')
-pubIP=$(curl -H "X-aws-ec2-metadata-token: $IMDSTOKEN" -s http://169.254.169.254/latest/meta-data/public-ipv4)
-backupPort=$(kubectl get svc px-backup-ui -n central -o=jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
-client_secret=$(kubectl get secret --namespace central pxc-backup-secret -o jsonpath={.data.OIDC_CLIENT_SECRET} | base64 --decode)
-
-# Configures backup with clusters and locations
-pxbackupctl login -s http://$pubIP:$backupPort -u admin -p admin
-pxbackupctl create cloudcredential --aws-access-key $AWS_ACCESS_KEY --aws-secret-key $AWS_SECRET_KEY -e $BACKUP_POD_IP:10002 --orgID default -n s3 -p aws
-sleep 5
-cloud_credential_uid=$(pxbackupctl get cloudcredential -e $BACKUP_POD_IP:10002 --orgID default -o json | jq -cr '.[0].metadata.uid')
-pxbackupctl create backuplocation --cloud-credential-name s3 --cloud-credential-Uid $cloud_credential_uid -n aws -p s3 --s3-endpoint https://s3.$aws_region.amazonaws.com --path $BACKUP_BUCKET --s3-region $aws_region -e $BACKUP_POD_IP:10002 --orgID default
-pxbackupctl create schedulepolicy --interval-minutes 15 --interval-retain 12 --name 15min-schedule -e $BACKUP_POD_IP:10002 --orgID default
-sleep 5
-ssh master-2 cat /root/.kube/config > /cluster-2-kube-config
-ssh master-3 cat /root/.kube/config > /cluster-3-kube-config
-pxbackupctl create cluster --name cluster-1 -k /root/.kube/.config -e $BACKUP_POD_IP:10002 --orgID default
-pxbackupctl create cluster --name cluster-2 -k /cluster-2-kube-config -e $BACKUP_POD_IP:10002 --orgID default
-pxbackupctl create cluster --name cluster-3 -k /cluster-3-kube-config -e $BACKUP_POD_IP:10002 --orgID default
-
-# Patches Prometheus operator to allow multiple instances to run
-kubectl patch deployment prometheus-operator -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "-namespaces=kube-system" }]'
+ADMIN_PW=$(kubectl get secret pxcentral-keycloak-http -n central -o jsonpath="{.data.password}" | base64 --decode)
+
+if [ "$platform" = ocp4 ]; then
+
+# create ocp route for backup UI
+cat < /tmp/cluster-$i-kube-config
+ px pxb connect cluster --name cluster-$i --kubeconfig /tmp/cluster-$i-kube-config
+done
+
+if [ "$platform" != ocp4 ]; then
+ # Patches Prometheus operator to allow multiple instances to run
+ kubectl patch deployment prometheus-operator -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "-namespaces=kube-system" }]'
+ ssh master-2 <> /etc/motd
 +================================================+
 SAVE THE FOLLOWING DETAILS FOR FUTURE REFERENCES
 +================================================+
-PX-Central User Interface Access URL : http://$pubIP:$backupPort
+PX-Central User Interface Access URL : http://$PXB_URL
 PX-Central admin user name: admin
-PX-Central admin user password: admin
+PX-Central admin user password: $ADMIN_PW
 +================================================+
 EOF
diff --git a/scripts/helm-backup-ocp4-kubevirt b/scripts/helm-backup-ocp4-kubevirt
deleted file mode 100644
index 0153eab0..00000000
--- a/scripts/helm-backup-ocp4-kubevirt
+++ /dev/null
@@ -1,72 +0,0 @@
-# create ocp route for backup UI
-cat </dev/null
-#res=$?
-while [ "$res" != "23" ]; do
- echo "Waiting for grpc to accept connections. Ret: $res "
- sleep 2
- curl --connect-timeout 2 $BACKUP_POD_IP:10002 2>/dev/null
- res=$?
-done
-sleep 20
-# TODO: find a reliable way to detect if grpc is responding
-
-# get external px-backup route hostname
-pubIP=$(kubectl get route px-backup-ui -n central -o json |jq -r ".status.ingress[0].host")
-AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null)
-AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null)
-backupPort=80
-client_secret=$(kubectl get secret --namespace central pxc-backup-secret -o jsonpath={.data.OIDC_CLIENT_SECRET} | base64 --decode)
-
-# Configures backup with clusters and locations
-pxbackupctl login -s http://$pubIP:$backupPort -u admin -p admin
-pxbackupctl version -e $BACKUP_POD_IP:10002
-pxbackupctl create cloudcredential --aws-access-key $AWS_ACCESS_KEY --aws-secret-key $AWS_SECRET_KEY -e $BACKUP_POD_IP:10002 --orgID default -n s3 -p aws
-sleep 5
-cloud_credential_uid=$(pxbackupctl get cloudcredential -e $BACKUP_POD_IP:10002 --orgID default -o json | jq -cr '.[0].metadata.uid')
-pxbackupctl create backuplocation --cloud-credential-name s3 --cloud-credential-Uid $cloud_credential_uid -n $BACKUP_BUCKET -p s3 --s3-endpoint https://s3.$aws_region.amazonaws.com --path $BACKUP_BUCKET --s3-region $aws_region -e $BACKUP_POD_IP:10002 --orgID default
-pxbackupctl create schedulepolicy --interval-minutes 15 --interval-retain 12 --name 15min-schedule -e $BACKUP_POD_IP:10002 --orgID default
-sleep 5
-
-pxbackupctl create cluster --name cluster-1 -k /root/.kube/config -e $BACKUP_POD_IP:10002 --orgID default
-
-cat <> /etc/motd
-+================================================+
-How to access PX-BACKUP UI
-+================================================+
-PX-Central User Interface Access URL : http://$pubIP:$backupPort
-PX-Central admin user name: admin
-PX-Central admin user password: admin
-+================================================+
-EOF
\ No newline at end of file
diff --git a/scripts/install-px b/scripts/install-px
index e2bc2fc3..bfda55d4 100644
--- a/scripts/install-px
+++ b/scripts/install-px
@@ -143,6 +143,12 @@ done
 kubectl apply -f /tmp/px.yml
+# Install px cli
+curl -L -o /tmp/pxcli.tgz https://mirrors.portworx.com/packages/px-cli/latest/px-v1.0.0.linux.amd64.tar.gz
+tar -xzf /tmp/pxcli.tgz -C /tmp/
+chmod +x /tmp/px/bin/px*
+cp /tmp/px/bin/* /usr/local/bin/
+
 # Install pxctl.sh
 cp /assets/pxctl.sh /usr/bin/pxctl
 chmod 755 /usr/bin/pxctl
diff --git a/templates/ocp-backup.yml b/templates/ocp-backup.yml
new file mode 100644
index 00000000..fb875fb7
--- /dev/null
+++ b/templates/ocp-backup.yml
@@ -0,0 +1,14 @@
+description: Two Cluster OCP with Backup & AsyncDR on an AWS VM
+scripts: ["install-awscli","install-px", "licenses"]
+aws_type: "m6i.xlarge"
+platform: "ocp4"
+cloud: "aws"
+clusters: 2
+nodes: 5
+cluster:
+  - id: 1
+    scripts: ["helm-backup", "helm-backup-apps"]
+  - id: 2
+    scripts: ["clusterpair"]
+env:
+  cloud_drive: "type%3Dgp2%2Csize%3D150"
diff --git a/templates/ocp-kubevirt.yml b/templates/ocp-kubevirt.yml
index 623ddcff..e476c01f 100644
--- a/templates/ocp-kubevirt.yml
+++ b/templates/ocp-kubevirt.yml
@@ -9,7 +9,7 @@ cloud: "aws"
 clusters: 2
 cluster:
   - id: 1
-    scripts: [ "kubevirt-apps", "helm-backup", "helm-backup-ocp4-kubevirt"]
+    scripts: [ "kubevirt-apps", "helm-backup", "helm-backup-apps"]
   - id: 2
     scripts: ["clusterpair"]
 env:
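# Sketch (not part of the patch): the helm-backup-apps hunk above lost its heredoc
# bodies and loop header during extraction. Based on the surviving added lines
# ("> /tmp/cluster-$i-kube-config", "px pxb connect cluster ...", "done") and the
# pxbackupctl flow being removed, the per-cluster connect step plausibly expands to
# the loop below. The "seq 2 $clusters" bounds, the $clusters variable, and the
# ssh-based kubeconfig fetch are assumptions; only the "px pxb connect cluster"
# invocation is taken verbatim from the diff, and how cluster-1 itself is
# registered is not visible in the flattened hunk.
for i in $(seq 2 $clusters); do
  # assumption: each remote master exposes its admin kubeconfig over ssh,
  # as the old script did with "ssh master-2 cat /root/.kube/config"
  ssh master-$i cat /root/.kube/config > /tmp/cluster-$i-kube-config
  # verbatim from the diff: register the cluster with PX-Backup via the new px CLI
  px pxb connect cluster --name cluster-$i --kubeconfig /tmp/cluster-$i-kube-config
done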