Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Kubevirt #468

Merged
merged 5 commits into from
Jan 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions assets/kubevirt/ocp/async-dr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Async-DR setup for the pxbbq demo app.
# SchedulePolicy: trigger a migration every 5 minutes.
apiVersion: stork.libopenstorage.org/v1alpha1
kind: SchedulePolicy
metadata:
  name: 5-min
policy:
  interval:
    intervalMinutes: 5
---
# MigrationSchedule: periodically migrate the pxbbq namespace to the
# paired DR cluster (remotecluster-2) using the 5-min policy above.
apiVersion: stork.libopenstorage.org/v1alpha1
kind: MigrationSchedule
metadata:
  name: pxbbq
  namespace: kube-system
spec:
  template:
    spec:
      clusterPair: remotecluster-2
      includeResources: true
      # cold standby: resources are migrated but the app stays scaled down
      startApplications: false
      # objects labelled px-dr: 'false' (e.g. the OCP route in pxbbq.yml)
      # are excluded from the migration
      excludeSelectors:
        px-dr: 'false'
      namespaces:
      - pxbbq
  schedulePolicyName: 5-min
3 changes: 2 additions & 1 deletion assets/kubevirt/ocp/pxbbq.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ spec:
spec:
containers:
- name: pxbbq-web
image: eshanks16/pxbbq:v3.2
image: eshanks16/pxbbq:4.0
env:
- name: MONGO_INIT_USER
value: "porxie" #Mongo User with permissions to create additional databases and users. Typically "porxie" or "pds"
Expand Down Expand Up @@ -71,6 +71,7 @@ metadata:
namespace: pxbbq
labels:
app: pxbbq-web
px-dr: 'false'
spec:
to:
kind: Service
Expand Down
2 changes: 1 addition & 1 deletion scripts/clusterpair-dr
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ while : ; do
sleep 1
done

if [ "$platform" = eks ]; then
if [ "$platform" = eks ] || [ "$platform" = ocp4 ]; then
kubectl patch stc $(kubectl get stc -n portworx -o jsonpath='{.items[].metadata.name}') -n portworx --type='json' -p '[{"op":"add","path":"/metadata/annotations/portworx.io~1service-type","value":"LoadBalancer"}]'
while : ;do
host=$(kubectl get svc -n portworx portworx-service -o jsonpath='{.status.loadBalancer.ingress[].hostname}')
Expand Down
72 changes: 72 additions & 0 deletions scripts/helm-backup-ocp4-kubevirt
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# Expose the PX-Backup UI via an OpenShift Route and configure PX-Backup
# (cloud credential, backup location, schedule policy, managed cluster)
# through pxbackupctl.
# Expects env vars: BACKUP_BUCKET, aws_region — TODO confirm they are
# exported by the calling deployment framework.
# Expects AWS credentials in /root/.aws/credentials.

# create ocp route for backup UI
cat <<EOF | kubectl apply -f -
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: px-backup-ui
  namespace: central
spec:
  to:
    kind: Service
    name: px-backup-ui
    weight: 100
  port:
    targetPort: http
  wildcardPolicy: None
EOF

# expose the px-backup grpc service through an AWS LoadBalancer so
# pxbackupctl on this host can reach it
kubectl patch svc px-backup -n central -p '{"spec":{"type":"LoadBalancer"}}'

# wait for the LB hostname: jq prints "null" while the ingress is pending;
# also guard against an empty result if the kubectl/jq query itself fails
BACKUP_POD_IP=$(kubectl get svc px-backup -n central -o json | jq -r ".status.loadBalancer.ingress[0].hostname")

while [ "$BACKUP_POD_IP" = "null" ] || [ -z "$BACKUP_POD_IP" ]; do
  sleep 2
  echo "PX Backup grpc LB not ready"
  BACKUP_POD_IP=$(kubectl get svc px-backup -n central -o json | jq -r ".status.loadBalancer.ingress[0].hostname")
done

echo "PX Backup grpc LB ready: $BACKUP_POD_IP"

# the LB hostname resolves before grpc actually accepts connections;
# give the endpoint time to come up
# TODO: find a reliable way to detect if grpc is responding
sleep 20

# external px-backup-ui route hostname used for the UI / API login
pubIP=$(kubectl get route px-backup-ui -n central -o json | jq -r ".status.ingress[0].host")
AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null)
AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null)
backupPort=80
# NOTE(review): client_secret is captured but not used below — kept in case
# a later step or interactive session relies on it; verify and drop if not.
client_secret=$(kubectl get secret --namespace central pxc-backup-secret -o jsonpath='{.data.OIDC_CLIENT_SECRET}' | base64 --decode)

# Configure backup with clusters and locations
pxbackupctl login -s "http://$pubIP:$backupPort" -u admin -p admin
pxbackupctl version -e "$BACKUP_POD_IP:10002"
pxbackupctl create cloudcredential --aws-access-key "$AWS_ACCESS_KEY" --aws-secret-key "$AWS_SECRET_KEY" -e "$BACKUP_POD_IP:10002" --orgID default -n s3 -p aws
sleep 5
cloud_credential_uid=$(pxbackupctl get cloudcredential -e "$BACKUP_POD_IP:10002" --orgID default -o json | jq -cr '.[0].metadata.uid')
pxbackupctl create backuplocation --cloud-credential-name s3 --cloud-credential-Uid "$cloud_credential_uid" -n "$BACKUP_BUCKET" -p s3 --s3-endpoint "https://s3.$aws_region.amazonaws.com" --path "$BACKUP_BUCKET" --s3-region "$aws_region" -e "$BACKUP_POD_IP:10002" --orgID default
pxbackupctl create schedulepolicy --interval-minutes 15 --interval-retain 12 --name example-schedule -e "$BACKUP_POD_IP:10002" --orgID default
sleep 5

pxbackupctl create cluster --name cluster-1 -k /root/.kube/config -e "$BACKUP_POD_IP:10002" --orgID default

# print access details at login time
cat <<EOF >> /etc/motd
+================================================+
How to access PX-BACKUP UI
+================================================+
PX-Central User Interface Access URL : http://$pubIP:$backupPort
PX-Central admin user name: admin
PX-Central admin user password: admin
+================================================+
EOF
11 changes: 7 additions & 4 deletions scripts/ocp-kubevirt
Original file line number Diff line number Diff line change
@@ -1,3 +1,8 @@

# restart PX to prevent CSI bug
echo "restart PX to prevent CSI bug"
kubectl delete pods -n portworx -l name=portworx

OPVERSION=$(kubectl get packagemanifests.packages.operators.coreos.com kubevirt-hyperconverged -o json | jq -r ".status.channels[0].currentCSV")

echo "current kubevirt-hyperconverged operator version is $OPVERSION"
Expand Down Expand Up @@ -45,6 +50,8 @@ metadata:
spec:
EOF

sleep 2

# check if hyperconverged CRD has status condition containing ReconcileCompleted/True/Available
# TODO: maybe there is a better way to detect if this CRD is ready?
HC=$(kubectl get hyperconvergeds.hco.kubevirt.io kubevirt-hyperconverged -n openshift-cnv -ojson | jq -r '.status.conditions[] | select((.reason=="ReconcileCompleted") and (.status=="True") and (.type=="Available")) | .status')
Expand All @@ -54,7 +61,3 @@ while [ ! "$HC" == "True" ]; do
sleep 5
HC=$(kubectl get hyperconvergeds.hco.kubevirt.io kubevirt-hyperconverged -n openshift-cnv -ojson | jq -r '.status.conditions[] | select((.reason=="ReconcileCompleted") and (.status=="True") and (.type=="Available")) | .status')
done

kubectl create ns pxbbq
kubectl apply -f /assets/kubevirt/dv-ubuntu.yml
kubectl apply -f /assets/kubevirt/ocp/pxbbq.yml
6 changes: 6 additions & 0 deletions scripts/ocp-kubevirt-apps
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Deploy the pxbbq demo application for the kubevirt demo.
# Creates the pxbbq namespace, labels it (app selector plus backup=true,
# presumably consumed by the PX-Backup/DR tooling — verify against the
# backup scripts), then applies the Ubuntu DataVolume and the pxbbq app
# manifests baked into the image under /assets.
kubectl create ns pxbbq
kubectl label ns pxbbq app=pxbbq
kubectl label ns pxbbq backup=true

kubectl apply -f /assets/kubevirt/dv-ubuntu.yml
kubectl apply -f /assets/kubevirt/ocp/pxbbq.yml
10 changes: 8 additions & 2 deletions templates/ocp-kubevirt.yml
Original file line number Diff line number Diff line change
@@ -1,8 +1,14 @@
description: OpenShift Virtualization Demo on a bare-metal aws cluster
scripts: ["install-px", "ocp-kubevirt"]
description: Two Cluster OCP Virtualization Demo with Backup & AsyncDR on a aws bare-metal
scripts: ["install-awscli","install-px", "licenses", "ocp-kubevirt"]
aws_type: "c5n.metal"
platform: "ocp4"
cloud: "aws"
clusters: 2
cluster:
- id: 1
scripts: [ "ocp-kubevirt-apps", "helm-backup", "helm-backup-ocp4-kubevirt"]
- id: 2
scripts: ["clusterpair-dr"]
env:
cloud_drive: "type%3Dgp2%2Csize%3D150"

8 changes: 8 additions & 0 deletions vagrant/ocp4-master
Original file line number Diff line number Diff line change
Expand Up @@ -47,3 +47,11 @@ echo "url $URL" >> /var/log/px-deploy/completed/tracking

CRED=$(grep 'Login to the console' /root/ocp4/.openshift_install.log | cut -d\\ -f4 | cut -d\" -f2)
echo "cred $CRED" >> /var/log/px-deploy/completed/tracking

cat <<EOF >> /etc/motd
+================================================+
OCP4 Web UI: $URL
Admin User Name: kubeadmin
Password: $CRED
+================================================+
EOF