diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go
index 31f6e557f..a56a77650 100644
--- a/pkg/controller/plan/kubevirt.go
+++ b/pkg/controller/plan/kubevirt.go
@@ -3,6 +3,7 @@ package plan
 import (
 	"context"
 	"encoding/xml"
+	"errors"
 	"fmt"
 	"io"
 	"math/rand"
@@ -15,6 +16,9 @@ import (
 	"time"
 
 	planbase "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base"
+	"github.com/konveyor/forklift-controller/pkg/controller/provider/web"
+	model "github.com/konveyor/forklift-controller/pkg/controller/provider/web/vsphere"
+	libref "github.com/konveyor/forklift-controller/pkg/lib/ref"
 	template "github.com/openshift/api/template/v1"
 	"github.com/openshift/library-go/pkg/template/generator"
 	"github.com/openshift/library-go/pkg/template/templateprocessing"
@@ -2409,3 +2413,50 @@ func (r *KubeVirt) EnsurePersistentVolumeClaim(vmRef ref.Ref, persistentVolumeCl
 	}
 	return
 }
+
+// Load host CRs.
+func (r *KubeVirt) loadHosts() (hosts map[string]*api.Host, err error) {
+	list := &api.HostList{}
+	err = r.List(
+		context.TODO(),
+		list,
+		&client.ListOptions{
+			Namespace: r.Source.Provider.Namespace,
+		},
+	)
+	if err != nil {
+		err = liberr.Wrap(err)
+		return
+	}
+	hostMap := map[string]*api.Host{}
+	for i := range list.Items {
+		host := &list.Items[i]
+		ref := host.Spec.Ref
+		if !libref.Equals(&host.Spec.Provider, &r.Plan.Spec.Provider.Source) {
+			continue
+		}
+
+		if !host.Status.HasCondition(libcnd.Ready) {
+			continue
+		}
+		// it's not that great to have a vSphere-specific entity here but as we don't
+		// intend to do the same for other providers, doing it here for simplicity
+		m := &model.Host{}
+		pErr := r.Source.Inventory.Find(m, ref)
+		if pErr != nil {
+			if errors.As(pErr, &web.NotFoundError{}) {
+				continue
+			} else {
+				err = pErr
+				return
+			}
+		}
+		ref.ID = m.ID
+		ref.Name = m.Name
+		hostMap[ref.ID] = host
+	}
+
+	hosts = hostMap
+
+	return
+}
diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go
index c633ae80e..5aa0a63e1 100644
--- a/pkg/controller/plan/migration.go
+++ b/pkg/controller/plan/migration.go
@@ -19,11 +19,11 @@ import (
 	plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context"
 	"github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler"
 	"github.com/konveyor/forklift-controller/pkg/controller/provider/web"
-	model "github.com/konveyor/forklift-controller/pkg/controller/provider/web/vsphere"
+
 	libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
 	liberr "github.com/konveyor/forklift-controller/pkg/lib/error"
 	libitr "github.com/konveyor/forklift-controller/pkg/lib/itinerary"
-	libref "github.com/konveyor/forklift-controller/pkg/lib/ref"
+
 	"github.com/konveyor/forklift-controller/pkg/settings"
 	batchv1 "k8s.io/api/batch/v1"
 	core "k8s.io/api/core/v1"
@@ -492,7 +492,7 @@ func (r *Migration) removeWarmSnapshots(vm *plan.VMStatus) {
 	if vm.Warm == nil {
 		return
 	}
-	if err := r.provider.RemoveSnapshots(vm.Ref, vm.Warm.Precopies, r.loadHosts); err != nil {
+	if err := r.provider.RemoveSnapshots(vm.Ref, vm.Warm.Precopies, r.kubevirt.loadHosts); err != nil {
 		r.Log.Error(
 			err,
 			"Failed to clean up warm migration snapshots.",
@@ -791,7 +791,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 			}
 		}
 		if vm.Warm != nil {
-			err = r.provider.SetCheckpoints(vm.Ref, vm.Warm.Precopies, dataVolumes, false, r.loadHosts)
+			err = r.provider.SetCheckpoints(vm.Ref, vm.Warm.Precopies, dataVolumes, false, r.kubevirt.loadHosts)
 			if err != nil {
 				step.AddError(err.Error())
 				err = nil
@@ -963,7 +963,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 			break
 		}
 		var snapshot string
-		if snapshot, err = r.provider.CreateSnapshot(vm.Ref, r.loadHosts); err != nil {
+		if snapshot, err = r.provider.CreateSnapshot(vm.Ref, r.kubevirt.loadHosts); err != nil {
 			if errors.As(err, &web.ProviderNotReadyError{}) || errors.As(err, &web.ConflictError{}) {
 				return
 			}
@@ -1079,7 +1079,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 			return
 		}
 		if step.MarkedCompleted() {
-			err = r.provider.RemoveSnapshots(vm.Ref, vm.Warm.Precopies, r.loadHosts)
+			err = r.provider.RemoveSnapshots(vm.Ref, vm.Warm.Precopies, r.kubevirt.loadHosts)
 			if err != nil {
 				r.Log.Info(
 					"Failed to clean up warm migration snapshots.",
@@ -1202,53 +1202,6 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 	return
 }
 
-// Load host CRs.
-func (r *Migration) loadHosts() (hosts map[string]*v1beta1.Host, err error) {
-	list := &v1beta1.HostList{}
-	err = r.List(
-		context.TODO(),
-		list,
-		&client.ListOptions{
-			Namespace: r.Source.Provider.Namespace,
-		},
-	)
-	if err != nil {
-		err = liberr.Wrap(err)
-		return
-	}
-	hostMap := map[string]*v1beta1.Host{}
-	for i := range list.Items {
-		host := &list.Items[i]
-		ref := host.Spec.Ref
-		if !libref.Equals(&host.Spec.Provider, &r.Plan.Spec.Provider.Source) {
-			continue
-		}
-
-		if !host.Status.HasCondition(libcnd.Ready) {
-			continue
-		}
-		// it's not that great to have a vSphere-specific entity here but as we don't
-		// intend to do the same for other providers, doing it here for simplicity
-		m := &model.Host{}
-		pErr := r.Source.Inventory.Find(m, ref)
-		if pErr != nil {
-			if errors.As(pErr, &web.NotFoundError{}) {
-				continue
-			} else {
-				err = pErr
-				return
-			}
-		}
-		ref.ID = m.ID
-		ref.Name = m.Name
-		hostMap[ref.ID] = host
-	}
-
-	hosts = hostMap
-
-	return
-}
-
 func (r *Migration) resetPrecopyTasks(vm *plan.VMStatus, step *plan.Step) {
 	step.Completed = nil
 	for _, task := range step.Tasks {
@@ -1764,7 +1717,7 @@ func (r *Migration) setDataVolumeCheckpoints(vm *plan.VMStatus) (err error) {
 	for _, disk := range disks {
 		dvs = append(dvs, *disk.DataVolume)
 	}
-	err = r.provider.SetCheckpoints(vm.Ref, vm.Warm.Precopies, dvs, vm.Phase == AddFinalCheckpoint, r.loadHosts)
+	err = r.provider.SetCheckpoints(vm.Ref, vm.Warm.Precopies, dvs, vm.Phase == AddFinalCheckpoint, r.kubevirt.loadHosts)
 	if err != nil {
 		return
 	}
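Taken together, the hunks are a pure move: loadHosts keeps its body, but its receiver changes from Migration to KubeVirt and its return type from map[string]*v1beta1.Host to map[string]*api.Host (the two aliases appear to resolve to the same Host CRD package). Because Go method values close over their receiver, every call site can pass r.kubevirt.loadHosts exactly where it previously passed r.loadHosts. Below is a minimal sketch of the callback plumbing this implies; HostsFunc, Host, and snapshotSource are illustrative stand-ins assumed for the sketch, not the adapter's actual types.

package sketch

import "fmt"

// Host stands in for the api/v1beta1 Host CRD; only the field used
// below is modeled.
type Host struct {
	Name string
}

// HostsFunc matches the shape of (*KubeVirt).loadHosts: Ready host CRs
// for the plan's source provider, keyed by inventory host ID.
type HostsFunc func() (map[string]*Host, error)

// snapshotSource stands in for the provider adapter that receives the
// callback in CreateSnapshot/RemoveSnapshots/SetCheckpoints.
type snapshotSource struct{}

// CreateSnapshot resolves host CRs lazily: the List/Find work inside
// loadHosts runs only when a snapshot is actually taken.
func (s *snapshotSource) CreateSnapshot(hostID string, loadHosts HostsFunc) (string, error) {
	hosts, err := loadHosts()
	if err != nil {
		return "", err
	}
	if host, found := hosts[hostID]; found {
		// A real adapter would contact the ESXi host named by the CR;
		// here we only report which CR would be used.
		return fmt.Sprintf("snapshot-via-%s", host.Name), nil
	}
	return "snapshot-via-vcenter", nil
}

Handing the adapter the method value rather than a pre-resolved map keeps the lookup on demand, which is presumably why the callback signature was kept through the move: VMs that never reach a snapshot path never trigger the host CR list or the inventory round trip.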