diff --git a/cmd/ova-provider-server/BUILD.bazel b/cmd/ova-provider-server/BUILD.bazel index 85e5c90de..6db2597ee 100644 --- a/cmd/ova-provider-server/BUILD.bazel +++ b/cmd/ova-provider-server/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = ["ova-provider-server.go"], importpath = "github.com/konveyor/forklift-controller/cmd/ova-provider-server", visibility = ["//visibility:private"], + deps = ["//vendor/github.com/google/uuid"], ) go_binary( diff --git a/cmd/ova-provider-server/ova-provider-server.go b/cmd/ova-provider-server/ova-provider-server.go index 50391e187..40c3f4e0f 100644 --- a/cmd/ova-provider-server/ova-provider-server.go +++ b/cmd/ova-provider-server/ova-provider-server.go @@ -2,16 +2,23 @@ package main import ( "archive/tar" + "bytes" + "crypto/sha256" + "encoding/gob" + "encoding/hex" "encoding/json" "encoding/xml" "fmt" "io" "io/ioutil" + "log" "net/http" "os" "path/filepath" "strconv" "strings" + + "github.com/google/uuid" ) // xml struct @@ -28,6 +35,7 @@ type Item struct { HostResource string `xml:"HostResource,omitempty"` Connection string `xml:"Connection,omitempty"` Configs []VirtualConfig `xml:"Config"` + CoresPerSocket string `xml:"CoresPerSocket"` } type VirtualConfig struct { @@ -43,6 +51,12 @@ type VirtualHardwareSection struct { Configs []VirtualConfig `xml:"Config"` } +type References struct { + File []struct { + Href string `xml:"href,attr"` + } `xml:"File"` +} + type DiskSection struct { XMLName xml.Name `xml:"DiskSection"` Info string `xml:"Info"` @@ -51,12 +65,12 @@ type DiskSection struct { type Disk struct { XMLName xml.Name `xml:"Disk"` - Capacity string `xml:"capacity,attr"` + Capacity int64 `xml:"capacity,attr"` CapacityAllocationUnits string `xml:"capacityAllocationUnits,attr"` DiskId string `xml:"diskId,attr"` FileRef string `xml:"fileRef,attr"` Format string `xml:"format,attr"` - PopulatedSize string `xml:"populatedSize,attr"` + PopulatedSize int64 `xml:"populatedSize,attr"` } type NetworkSection struct { @@ -77,6 +91,7 @@ 
type VirtualSystem struct { OperatingSystemSection struct { Info string `xml:"Info"` Description string `xml:"Description"` + OsType string `xml:"osType,attr"` } `xml:"OperatingSystemSection"` HardwareSection VirtualHardwareSection `xml:"VirtualHardwareSection"` } @@ -86,12 +101,14 @@ type Envelope struct { VirtualSystem []VirtualSystem `xml:"VirtualSystem"` DiskSection DiskSection `xml:"DiskSection"` NetworkSection NetworkSection `xml:"NetworkSection"` + References References `xml:"References"` } // vm struct type VM struct { Name string OvaPath string + OsType string RevisionValidated int64 PolicyVersion int UUID string @@ -104,6 +121,8 @@ type VM struct { CpuCount int32 CoresPerSocket int32 MemoryMB int32 + MemoryUnits string + CpuUnits string BalloonedMemory int32 IpAddress string NumaNodeAffinity []string @@ -117,13 +136,15 @@ type VM struct { // Virtual Disk. type VmDisk struct { + ID string + Name string FilePath string - Capacity string + Capacity int64 CapacityAllocationUnits string DiskId string FileRef string Format string - PopulatedSize string + PopulatedSize int64 } // Virtual Device. @@ -138,18 +159,28 @@ type Conf struct { // Virtual ethernet card. 
type NIC struct { - Name string `json:"name"` - MAC string `json:"mac"` - Config []Conf + Name string `json:"name"` + MAC string `json:"mac"` + Network string + Config []Conf } type VmNetwork struct { Name string Description string + ID string } +var vmIDMap *UUIDMap +var diskIDMap *UUIDMap +var networkIDMap *UUIDMap + func main() { + vmIDMap = NewUUIDMap() + diskIDMap = NewUUIDMap() + networkIDMap = NewUUIDMap() + http.HandleFunc("/vms", vmHandler) http.HandleFunc("/disks", diskHandler) http.HandleFunc("/networks", networkHandler) @@ -346,20 +377,31 @@ func convertToVmStruct(envelope []Envelope, ovaPath []string) ([]VM, error) { newVM := VM{ OvaPath: ovaPath[i], Name: virtualSystem.Name, + OsType: virtualSystem.OperatingSystemSection.OsType, } for _, item := range virtualSystem.HardwareSection.Items { if strings.Contains(item.ElementName, "Network adapter") { newVM.NICs = append(newVM.NICs, NIC{ - Name: item.ElementName, - MAC: item.Address, + Name: item.ElementName, + MAC: item.Address, + Network: item.Connection, }) //for _conf := range item. 
} else if strings.Contains(item.Description, "Number of Virtual CPUs") { newVM.CpuCount = item.VirtualQuantity - + newVM.CpuUnits = item.AllocationUnits + if item.CoresPerSocket != "" { + num, err := strconv.ParseInt(item.CoresPerSocket, 10, 32) + if err != nil { + newVM.CoresPerSocket = 1 + } else { + newVM.CoresPerSocket = int32(num) + } + } } else if strings.Contains(item.Description, "Memory Size") { newVM.MemoryMB = item.VirtualQuantity + newVM.MemoryUnits = item.AllocationUnits } else { newVM.Devices = append(newVM.Devices, Device{ @@ -369,7 +411,8 @@ func convertToVmStruct(envelope []Envelope, ovaPath []string) ([]VM, error) { } - for _, disk := range vmXml.DiskSection.Disks { + for j, disk := range vmXml.DiskSection.Disks { + name := envelope[i].References.File[j].Href newVM.Disks = append(newVM.Disks, VmDisk{ FilePath: getDiskPath(ovaPath[i]), Capacity: disk.Capacity, @@ -378,13 +421,17 @@ func convertToVmStruct(envelope []Envelope, ovaPath []string) ([]VM, error) { FileRef: disk.FileRef, Format: disk.Format, PopulatedSize: disk.PopulatedSize, + Name: name, }) + newVM.Disks[j].ID = diskIDMap.GetUUID(newVM.Disks[j], ovaPath[i]+"/"+name) + } for _, network := range vmXml.NetworkSection.Networks { newVM.Networks = append(newVM.Networks, VmNetwork{ Name: network.Name, Description: network.Description, + ID: networkIDMap.GetUUID(network.Name, network.Name), }) } @@ -399,6 +446,15 @@ func convertToVmStruct(envelope []Envelope, ovaPath []string) ([]VM, error) { newVM.CpuHotRemoveEnabled, _ = strconv.ParseBool(conf.Value) } } + + var id string + if isValidUUID(virtualSystem.ID) { + id = virtualSystem.ID + } else { + id = vmIDMap.GetUUID(newVM, ovaPath[i]) + } + newVM.UUID = id + vms = append(vms, newVM) } } @@ -412,6 +468,7 @@ func convertToNetworkStruct(envelope []Envelope) ([]VmNetwork, error) { newNetwork := VmNetwork{ Name: network.Name, Description: network.Description, + ID: networkIDMap.GetUUID(network.Name, network.Name), } networks = append(networks, 
newNetwork) } @@ -422,9 +479,9 @@ func convertToNetworkStruct(envelope []Envelope) ([]VmNetwork, error) { func convertToDiskStruct(envelope []Envelope, ovaPath []string) ([]VmDisk, error) { var disks []VmDisk - for i := 0; i < len(envelope); i++ { - ova := envelope[i] - for _, disk := range ova.DiskSection.Disks { + for i, ova := range envelope { + for j, disk := range ova.DiskSection.Disks { + name := ova.References.File[j].Href newDisk := VmDisk{ FilePath: getDiskPath(ovaPath[i]), Capacity: disk.Capacity, @@ -433,8 +490,9 @@ func convertToDiskStruct(envelope []Envelope, ovaPath []string) ([]VmDisk, error FileRef: disk.FileRef, Format: disk.Format, PopulatedSize: disk.PopulatedSize, + Name: name, } - + newDisk.ID = diskIDMap.GetUUID(newDisk, ovaPath[i]+"/"+name) disks = append(disks, newDisk) } } @@ -453,3 +511,37 @@ func getDiskPath(path string) string { } return path } + +type UUIDMap struct { + m map[string]string +} + +func NewUUIDMap() *UUIDMap { + return &UUIDMap{ + m: make(map[string]string), + } +} + +func (um *UUIDMap) GetUUID(object interface{}, key string) string { + var id string + id, ok := um.m[key] + + if !ok { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + + if err := enc.Encode(object); err != nil { + log.Fatal(err) + } + + hash := sha256.Sum256(buf.Bytes()) + um.m[key] = hex.EncodeToString(hash[:]) + id = um.m[key] + } + return id +} + +func isValidUUID(id string) bool { + _, err := uuid.Parse(id) + return err == nil +} diff --git a/hack/virt-v2v-cold-rpm-deps.sh b/hack/virt-v2v-cold-rpm-deps.sh index 2c2f6fff9..f4b4e696d 100755 --- a/hack/virt-v2v-cold-rpm-deps.sh +++ b/hack/virt-v2v-cold-rpm-deps.sh @@ -14,6 +14,7 @@ bazel run \ # These are the packages we really depend on. 
virt_v2v=" + tar virt-v2v virtio-win " diff --git a/pkg/apis/forklift/v1beta1/plan.go b/pkg/apis/forklift/v1beta1/plan.go index d751e46d8..4a9dd9c50 100644 --- a/pkg/apis/forklift/v1beta1/plan.go +++ b/pkg/apis/forklift/v1beta1/plan.go @@ -104,6 +104,8 @@ func (p *Plan) VSphereUsesEl9VirtV2v() (bool, error) { switch source.Type() { case VSphere: return !p.Spec.Warm && destination.IsHost(), nil + case Ova: + return true, nil default: return false, nil } diff --git a/pkg/controller/map/network/handler/BUILD.bazel b/pkg/controller/map/network/handler/BUILD.bazel index 5d0ec6d96..a3f94bb28 100644 --- a/pkg/controller/map/network/handler/BUILD.bazel +++ b/pkg/controller/map/network/handler/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "//pkg/apis/forklift/v1beta1", "//pkg/controller/map/network/handler/ocp", "//pkg/controller/map/network/handler/openstack", + "//pkg/controller/map/network/handler/ova", "//pkg/controller/map/network/handler/ovirt", "//pkg/controller/map/network/handler/vsphere", "//pkg/controller/watch/handler", diff --git a/pkg/controller/map/network/handler/doc.go b/pkg/controller/map/network/handler/doc.go index cce114d8c..da53e49aa 100644 --- a/pkg/controller/map/network/handler/doc.go +++ b/pkg/controller/map/network/handler/doc.go @@ -4,6 +4,7 @@ import ( api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" "github.com/konveyor/forklift-controller/pkg/controller/map/network/handler/ocp" "github.com/konveyor/forklift-controller/pkg/controller/map/network/handler/openstack" + "github.com/konveyor/forklift-controller/pkg/controller/map/network/handler/ova" "github.com/konveyor/forklift-controller/pkg/controller/map/network/handler/ovirt" "github.com/konveyor/forklift-controller/pkg/controller/map/network/handler/vsphere" "github.com/konveyor/forklift-controller/pkg/controller/watch/handler" @@ -47,6 +48,11 @@ func New( client, channel, provider) + case api.Ova: + h, err = ova.New( + client, + channel, + provider) default: err = 
liberr.New("provider not supported.") } diff --git a/pkg/controller/map/network/handler/ova/BUILD.bazel b/pkg/controller/map/network/handler/ova/BUILD.bazel new file mode 100644 index 000000000..63cd1b0e1 --- /dev/null +++ b/pkg/controller/map/network/handler/ova/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ova", + srcs = [ + "doc.go", + "handler.go", + ], + importpath = "github.com/konveyor/forklift-controller/pkg/controller/map/network/handler/ova", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/forklift/v1beta1", + "//pkg/controller/provider/web/ova", + "//pkg/controller/watch/handler", + "//pkg/lib/error", + "//pkg/lib/inventory/web", + "//pkg/lib/logging", + "//vendor/golang.org/x/net/context", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client", + "//vendor/sigs.k8s.io/controller-runtime/pkg/event", + ], +) diff --git a/pkg/controller/map/network/handler/ova/doc.go b/pkg/controller/map/network/handler/ova/doc.go new file mode 100644 index 000000000..72bdc1b13 --- /dev/null +++ b/pkg/controller/map/network/handler/ova/doc.go @@ -0,0 +1,22 @@ +package ova + +import ( + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + "github.com/konveyor/forklift-controller/pkg/controller/watch/handler" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// Handler factory. 
+func New( + client client.Client, + channel chan event.GenericEvent, + provider *api.Provider) (h *Handler, err error) { + // + b, err := handler.New(client, channel, provider) + if err != nil { + return + } + h = &Handler{Handler: b} + return +} diff --git a/pkg/controller/map/network/handler/ova/handler.go b/pkg/controller/map/network/handler/ova/handler.go new file mode 100644 index 000000000..ab1f1f6b7 --- /dev/null +++ b/pkg/controller/map/network/handler/ova/handler.go @@ -0,0 +1,116 @@ +package ova + +import ( + "path" + "strings" + + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + "github.com/konveyor/forklift-controller/pkg/controller/provider/web/ova" + "github.com/konveyor/forklift-controller/pkg/controller/watch/handler" + liberr "github.com/konveyor/forklift-controller/pkg/lib/error" + libweb "github.com/konveyor/forklift-controller/pkg/lib/inventory/web" + "github.com/konveyor/forklift-controller/pkg/lib/logging" + "golang.org/x/net/context" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// Package logger. +var log = logging.WithName("networkMap|ova") + +// Provider watch event handler. +type Handler struct { + *handler.Handler +} + +// Ensure watch on networks. +func (r *Handler) Watch(watch *handler.WatchManager) (err error) { + w, err := watch.Ensure( + r.Provider(), + &ova.Network{}, + r) + if err != nil { + return + } + + log.Info( + "Inventory watch ensured.", + "provider", + path.Join( + r.Provider().Namespace, + r.Provider().Name), + "watch", + w.ID()) + + return +} + +// Resource created. +func (r *Handler) Created(e libweb.Event) { + if network, cast := e.Resource.(*ova.Network); cast { + r.changed(network) + } +} + +// Resource created. +func (r *Handler) Updated(e libweb.Event) { + if network, cast := e.Resource.(*ova.Network); cast { + updated := e.Updated.(*ova.Network) + if updated.Path != network.Path { + r.changed(network, updated) + } + } +} + +// Resource deleted. 
+func (r *Handler) Deleted(e libweb.Event) { + if network, cast := e.Resource.(*ova.Network); cast { + r.changed(network) + } +} + +// Network changed. +// Find all of the NetworkMap CRs the reference both the +// provider and the changed network and enqueue reconcile events. +func (r *Handler) changed(models ...*ova.Network) { + log.V(3).Info( + "Network changed.", + "id", + models[0].ID) + list := api.NetworkMapList{} + err := r.List(context.TODO(), &list) + if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + mp := &list.Items[i] + ref := mp.Spec.Provider.Source + if !r.MatchProvider(ref) { + continue + } + referenced := false + for _, pair := range mp.Spec.Map { + ref := pair.Source + for _, network := range models { + if ref.ID == network.ID || strings.HasSuffix(network.Path, ref.Name) { + referenced = true + break + } + } + if referenced { + break + } + } + if referenced { + log.V(3).Info( + "Queue reconcile event.", + "map", + path.Join( + mp.Namespace, + mp.Name)) + r.Enqueue(event.GenericEvent{ + Object: mp, + }) + } + } +} diff --git a/pkg/controller/map/storage/handler/BUILD.bazel b/pkg/controller/map/storage/handler/BUILD.bazel index c2e11dbcb..29a4c4bd9 100644 --- a/pkg/controller/map/storage/handler/BUILD.bazel +++ b/pkg/controller/map/storage/handler/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "//pkg/apis/forklift/v1beta1", "//pkg/controller/map/storage/handler/ocp", "//pkg/controller/map/storage/handler/openstack", + "//pkg/controller/map/storage/handler/ova", "//pkg/controller/map/storage/handler/ovirt", "//pkg/controller/map/storage/handler/vsphere", "//pkg/controller/watch/handler", diff --git a/pkg/controller/map/storage/handler/doc.go b/pkg/controller/map/storage/handler/doc.go index c5875a708..27d4a1fc2 100644 --- a/pkg/controller/map/storage/handler/doc.go +++ b/pkg/controller/map/storage/handler/doc.go @@ -4,6 +4,7 @@ import ( api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" 
"github.com/konveyor/forklift-controller/pkg/controller/map/storage/handler/ocp" "github.com/konveyor/forklift-controller/pkg/controller/map/storage/handler/openstack" + "github.com/konveyor/forklift-controller/pkg/controller/map/storage/handler/ova" "github.com/konveyor/forklift-controller/pkg/controller/map/storage/handler/ovirt" "github.com/konveyor/forklift-controller/pkg/controller/map/storage/handler/vsphere" "github.com/konveyor/forklift-controller/pkg/controller/watch/handler" @@ -47,6 +48,11 @@ func New( client, channel, provider) + case api.Ova: + h, err = ova.New( + client, + channel, + provider) default: err = liberr.New("provider not supported.") } diff --git a/pkg/controller/map/storage/handler/ova/BUILD.bazel b/pkg/controller/map/storage/handler/ova/BUILD.bazel index 61b00207c..99e087e4c 100644 --- a/pkg/controller/map/storage/handler/ova/BUILD.bazel +++ b/pkg/controller/map/storage/handler/ova/BUILD.bazel @@ -10,8 +10,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/apis/forklift/v1beta1", + "//pkg/controller/provider/web/ova", "//pkg/controller/watch/handler", + "//pkg/lib/error", + "//pkg/lib/inventory/web", "//pkg/lib/logging", + "//vendor/golang.org/x/net/context", "//vendor/sigs.k8s.io/controller-runtime/pkg/client", "//vendor/sigs.k8s.io/controller-runtime/pkg/event", ], diff --git a/pkg/controller/map/storage/handler/ova/handler.go b/pkg/controller/map/storage/handler/ova/handler.go index ff512cbd0..b3358130b 100644 --- a/pkg/controller/map/storage/handler/ova/handler.go +++ b/pkg/controller/map/storage/handler/ova/handler.go @@ -1,8 +1,17 @@ package ova import ( + "path" + "strings" + + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + "github.com/konveyor/forklift-controller/pkg/controller/provider/web/ova" "github.com/konveyor/forklift-controller/pkg/controller/watch/handler" + liberr "github.com/konveyor/forklift-controller/pkg/lib/error" + libweb 
"github.com/konveyor/forklift-controller/pkg/lib/inventory/web" "github.com/konveyor/forklift-controller/pkg/lib/logging" + "golang.org/x/net/context" + "sigs.k8s.io/controller-runtime/pkg/event" ) // Package logger. @@ -12,3 +21,96 @@ var log = logging.WithName("storageMap|ova") type Handler struct { *handler.Handler } + +// Ensure watch on Disk. +func (r *Handler) Watch(watch *handler.WatchManager) (err error) { + w, err := watch.Ensure( + r.Provider(), + &ova.Disk{}, + r) + if err != nil { + return + } + + log.Info( + "Inventory watch ensured.", + "provider", + path.Join( + r.Provider().Namespace, + r.Provider().Name), + "watch", + w.ID()) + + return +} + +// Resource created. +func (r *Handler) Created(e libweb.Event) { + if ds, cast := e.Resource.(*ova.Disk); cast { + r.changed(ds) + } +} + +// Resource created. +func (r *Handler) Updated(e libweb.Event) { + if ds, cast := e.Resource.(*ova.Disk); cast { + updated := e.Updated.(*ova.Disk) + if updated.Path != ds.Path { + r.changed(ds, updated) + } + } +} + +// Resource deleted. +func (r *Handler) Deleted(e libweb.Event) { + if ds, cast := e.Resource.(*ova.Disk); cast { + r.changed(ds) + } +} + +// Storage changed. +// Find all of the StorageMap CRs the reference both the +// provider and the changed storage domain and enqueue reconcile events. 
+func (r *Handler) changed(models ...*ova.Disk) { + log.V(3).Info( + "Disk changed.", + "id", + models[0].ID) + list := api.StorageMapList{} + err := r.List(context.TODO(), &list) + if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + mp := &list.Items[i] + ref := mp.Spec.Provider.Source + if !r.MatchProvider(ref) { + continue + } + referenced := false + for _, pair := range mp.Spec.Map { + ref := pair.Source + for _, ds := range models { + if ref.ID == ds.ID || strings.HasSuffix(ds.Path, ref.Name) { + referenced = true + break + } + } + if referenced { + break + } + } + if referenced { + log.V(3).Info( + "Queue reconcile event.", + "map", + path.Join( + mp.Namespace, + mp.Name)) + r.Enqueue(event.GenericEvent{ + Object: mp, + }) + } + } +} diff --git a/pkg/controller/plan/adapter/BUILD.bazel b/pkg/controller/plan/adapter/BUILD.bazel index 08eec4639..012105cc6 100644 --- a/pkg/controller/plan/adapter/BUILD.bazel +++ b/pkg/controller/plan/adapter/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "//pkg/controller/plan/adapter/base", "//pkg/controller/plan/adapter/ocp", "//pkg/controller/plan/adapter/openstack", + "//pkg/controller/plan/adapter/ova", "//pkg/controller/plan/adapter/ovirt", "//pkg/controller/plan/adapter/vsphere", "//pkg/lib/error", diff --git a/pkg/controller/plan/adapter/doc.go b/pkg/controller/plan/adapter/doc.go index 8558f3144..dbc882034 100644 --- a/pkg/controller/plan/adapter/doc.go +++ b/pkg/controller/plan/adapter/doc.go @@ -5,6 +5,7 @@ import ( "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base" "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/ocp" "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/openstack" + "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/ova" "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/ovirt" "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/vsphere" liberr 
"github.com/konveyor/forklift-controller/pkg/lib/error" @@ -28,6 +29,8 @@ func New(provider *api.Provider) (adapter Adapter, err error) { adapter = &openstack.Adapter{} case api.OpenShift: adapter = &ocp.Adapter{} + case api.Ova: + adapter = &ova.Adapter{} default: err = liberr.New("provider not supported.") } diff --git a/pkg/controller/plan/adapter/ova/BUILD.bazel b/pkg/controller/plan/adapter/ova/BUILD.bazel new file mode 100644 index 000000000..1225db038 --- /dev/null +++ b/pkg/controller/plan/adapter/ova/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ova", + srcs = [ + "adapter.go", + "builder.go", + "client.go", + "destinationclient.go", + "validator.go", + ], + importpath = "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/ova", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/forklift/v1beta1", + "//pkg/apis/forklift/v1beta1/plan", + "//pkg/apis/forklift/v1beta1/ref", + "//pkg/controller/plan/adapter/base", + "//pkg/controller/plan/context", + "//pkg/controller/provider/model/ova", + "//pkg/controller/provider/web", + "//pkg/controller/provider/web/base", + "//pkg/controller/provider/web/ocp", + "//pkg/controller/provider/web/ova", + "//pkg/lib/error", + "//pkg/lib/inventory/web", + "//pkg/lib/itinerary", + "//vendor/github.com/go-logr/logr", + "//vendor/k8s.io/api/core/v1:core", + "//vendor/k8s.io/apimachinery/pkg/api/resource", + "//vendor/kubevirt.io/api/core/v1:core", + "//vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1", + ], +) diff --git a/pkg/controller/plan/adapter/ova/adapter.go b/pkg/controller/plan/adapter/ova/adapter.go new file mode 100644 index 000000000..55343b909 --- /dev/null +++ b/pkg/controller/plan/adapter/ova/adapter.go @@ -0,0 +1,45 @@ +package ova + +import ( + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base" + 
plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" +) + +// OVA adapter. +type Adapter struct{} + +// Constructs a OVA builder. +func (r *Adapter) Builder(ctx *plancontext.Context) (builder base.Builder, err error) { + b := &Builder{Context: ctx} + builder = b + return +} + +// Constructs a OVA validator. +func (r *Adapter) Validator(plan *api.Plan) (validator base.Validator, err error) { + v := &Validator{plan: plan} + err = v.Load() + if err != nil { + return + } + validator = v + return +} + +// Constructs a OVA client. +func (r *Adapter) Client(ctx *plancontext.Context) (client base.Client, err error) { + c := &Client{Context: ctx} + err = c.connect() + if err != nil { + return + } + client = c + return +} + +// Constucts a destination client. +func (r *Adapter) DestinationClient(ctx *plancontext.Context) (destinationClient base.DestinationClient, err error) { + destinationClient = &DestinationClient{Context: ctx} + return +} diff --git a/pkg/controller/plan/adapter/ova/builder.go b/pkg/controller/plan/adapter/ova/builder.go new file mode 100644 index 000000000..b597a5de2 --- /dev/null +++ b/pkg/controller/plan/adapter/ova/builder.go @@ -0,0 +1,554 @@ +package ova + +import ( + "fmt" + "math" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref" + planbase "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base" + plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" + "github.com/konveyor/forklift-controller/pkg/controller/provider/model/ova" + "github.com/konveyor/forklift-controller/pkg/controller/provider/web/base" + "github.com/konveyor/forklift-controller/pkg/controller/provider/web/ocp" + model "github.com/konveyor/forklift-controller/pkg/controller/provider/web/ova" + liberr 
"github.com/konveyor/forklift-controller/pkg/lib/error" + libitr "github.com/konveyor/forklift-controller/pkg/lib/itinerary" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + cnv "kubevirt.io/api/core/v1" + cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" +) + +// BIOS types +const ( + Efi = "efi" +) + +// Bus types +const ( + Virtio = "virtio" +) + +// Input types +const ( + Tablet = "tablet" +) + +// Network types +const ( + Pod = "pod" + Multus = "multus" +) + +// Template labels +const ( + TemplateOSLabel = "os.template.kubevirt.io/%s" + TemplateWorkloadLabel = "workload.template.kubevirt.io/server" + TemplateFlavorLabel = "flavor.template.kubevirt.io/medium" +) + +// Operating Systems +const ( + Unknown = "unknown" +) + +// Annotations +const ( + // CDI import backing file annotation on PVC + AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile" +) + +// Error messages +const ( + ErrVMLookupFailed = "VM lookup failed." +) + +// Regex which matches the snapshot identifier suffix of a +// OVA disk backing file. +var backingFilePattern = regexp.MustCompile("-\\d\\d\\d\\d\\d\\d.vmdk") + +// OVA builder. +type Builder struct { + *plancontext.Context + // MAC addresses already in use on the destination cluster. k=mac, v=vmName + macConflictsMap map[string]string +} + +// Get list of destination VMs with mac addresses that would +// conflict with this VM, if any exist. 
+func (r *Builder) macConflicts(vm *model.VM) (conflictingVMs []string, err error) { + if r.macConflictsMap == nil { + list := []ocp.VM{} + err = r.Destination.Inventory.List(&list, base.Param{ + Key: base.DetailParam, + Value: "all", + }) + if err != nil { + return + } + + r.macConflictsMap = make(map[string]string) + for _, kVM := range list { + for _, iface := range kVM.Object.Spec.Template.Spec.Domain.Devices.Interfaces { + r.macConflictsMap[iface.MacAddress] = path.Join(kVM.Namespace, kVM.Name) + } + } + } + + for _, nic := range vm.NICs { + if conflictingVm, found := r.macConflictsMap[nic.MAC]; found { + for i := range conflictingVMs { + // ignore duplicates + if conflictingVMs[i] == conflictingVm { + continue + } + } + conflictingVMs = append(conflictingVMs, conflictingVm) + } + } + + return +} + +// Create DataVolume certificate configmap. +// No-op for OVA. +func (r *Builder) ConfigMap(_ ref.Ref, _ *core.Secret, _ *core.ConfigMap) (err error) { + return +} + +func (r *Builder) PodEnvironment(vmRef ref.Ref, sourceSecret *core.Secret) (env []core.EnvVar, err error) { + vm := &model.VM{} + err = r.Source.Inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMLookupFailed, + "vm", + vmRef.String()) + return + } + + env = append( + env, + core.EnvVar{ + Name: "V2V_vmName", + Value: vm.Name, + }, + core.EnvVar{ + Name: "V2V_diskPath", + Value: getDiskSourcePath(vm.OvaPath), + }, + core.EnvVar{ + Name: "V2V_source", + Value: "ova", + }) + + return +} + +// Build the DataVolume credential secret. +func (r *Builder) Secret(vmRef ref.Ref, in, object *core.Secret) (err error) { + return +} + +// Create DataVolume specs for the VM. 
+func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.ConfigMap, dvTemplate *cdi.DataVolume) (dvs []cdi.DataVolume, err error) { + vm := &model.VM{} + err = r.Source.Inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMLookupFailed, + "vm", + vmRef.String()) + return + } + + dsMapIn := r.Context.Map.Storage.Spec.Map + for i := range dsMapIn { + mapped := &dsMapIn[i] + ref := mapped.Source + ds := &model.Disk{} + fErr := r.Source.Inventory.Find(ds, ref) + if fErr != nil { + err = fErr + return + } + for _, disk := range vm.Disks { + if disk.ID == ds.ID { + diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits) + if err != nil { + return nil, err + } + storageClass := mapped.Destination.StorageClass + var dvSource cdi.DataVolumeSource + // Let virt-v2v do the copying + dvSource = cdi.DataVolumeSource{ + Blank: &cdi.DataVolumeBlankImage{}, + } + dvSpec := cdi.DataVolumeSpec{ + Source: &dvSource, + Storage: &cdi.StorageSpec{ + Resources: core.ResourceRequirements{ + Requests: core.ResourceList{ + core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI), + }, + }, + StorageClassName: &storageClass, + }, + } + // set the access mode and volume mode if they were specified in the storage map. + // otherwise, let the storage profile decide the default values. + if mapped.Destination.AccessMode != "" { + dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{mapped.Destination.AccessMode} + } + if mapped.Destination.VolumeMode != "" { + dvSpec.Storage.VolumeMode = &mapped.Destination.VolumeMode + } + + dv := dvTemplate.DeepCopy() + dv.Spec = dvSpec + if dv.ObjectMeta.Annotations == nil { + dv.ObjectMeta.Annotations = make(map[string]string) + } + dv.ObjectMeta.Annotations[planbase.AnnDiskSource] = getDiskSourcePath(disk.FilePath) + dvs = append(dvs, *dv) + } + } + } + + return +} + +// Create the destination Kubevirt VM. 
+func (r *Builder) VirtualMachine(vmRef ref.Ref, object *cnv.VirtualMachineSpec, persistentVolumeClaims []core.PersistentVolumeClaim) (err error) { + vm := &model.VM{} + err = r.Source.Inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMLookupFailed, + "vm", + vmRef.String()) + return + } + + var conflicts []string + conflicts, err = r.macConflicts(vm) + if err != nil { + return + } + if len(conflicts) > 0 { + err = liberr.New( + fmt.Sprintf("Source VM has a mac address conflict with one or more destination VMs: %s", conflicts)) + return + } + + if object.Template == nil { + object.Template = &cnv.VirtualMachineInstanceTemplateSpec{} + } + r.mapDisks(vm, persistentVolumeClaims, object) + r.mapFirmware(vm, object) + r.mapCPU(vm, object) + r.mapMemory(vm, object) + r.mapClock(object) + r.mapInput(object) + err = r.mapNetworks(vm, object) + if err != nil { + return + } + + return +} + +func (r *Builder) mapNetworks(vm *model.VM, object *cnv.VirtualMachineSpec) (err error) { + var kNetworks []cnv.Network + var kInterfaces []cnv.Interface + + numNetworks := 0 + netMapIn := r.Context.Map.Network.Spec.Map + for i := range netMapIn { + mapped := &netMapIn[i] + ref := mapped.Source + network := &model.Network{} + fErr := r.Source.Inventory.Find(network, ref) + if fErr != nil { + err = fErr + return + } + + needed := []ova.NIC{} + for _, nic := range vm.NICs { + if nic.Network == network.Name { + needed = append(needed, nic) + } + } + if len(needed) == 0 { + continue + } + for _, nic := range needed { + networkName := fmt.Sprintf("net-%v", numNetworks) + numNetworks++ + kNetwork := cnv.Network{ + Name: networkName, + } + kInterface := cnv.Interface{ + Name: networkName, + Model: Virtio, + MacAddress: nic.MAC, + } + switch mapped.Destination.Type { + case Pod: + kNetwork.Pod = &cnv.PodNetwork{} + kInterface.Masquerade = &cnv.InterfaceMasquerade{} + case Multus: + kNetwork.Multus = &cnv.MultusNetwork{ + NetworkName: 
path.Join(mapped.Destination.Namespace, mapped.Destination.Name), + } + kInterface.Bridge = &cnv.InterfaceBridge{} + } + kNetworks = append(kNetworks, kNetwork) + kInterfaces = append(kInterfaces, kInterface) + } + } + object.Template.Spec.Networks = kNetworks + object.Template.Spec.Domain.Devices.Interfaces = kInterfaces + return +} + +func (r *Builder) mapInput(object *cnv.VirtualMachineSpec) { + tablet := cnv.Input{ + Type: Tablet, + Name: Tablet, + Bus: Virtio, + } + object.Template.Spec.Domain.Devices.Inputs = []cnv.Input{tablet} +} + +func (r *Builder) mapClock(object *cnv.VirtualMachineSpec) { + clock := &cnv.Clock{ + Timer: &cnv.Timer{}, + } + object.Template.Spec.Domain.Clock = clock +} + +func (r *Builder) mapMemory(vm *model.VM, object *cnv.VirtualMachineSpec) { + var memoryBytes int64 + memoryBytes, _ = getResourceCapacity(int64(vm.MemoryMB), vm.MemoryUnits) + reservation := resource.NewQuantity(memoryBytes, resource.BinarySI) + object.Template.Spec.Domain.Resources = cnv.ResourceRequirements{ + Requests: map[core.ResourceName]resource.Quantity{ + core.ResourceMemory: *reservation, + }, + } +} + +func (r *Builder) mapCPU(vm *model.VM, object *cnv.VirtualMachineSpec) { + if vm.CoresPerSocket == 0 { + vm.CoresPerSocket = 1 + } + object.Template.Spec.Domain.Machine = &cnv.Machine{Type: "q35"} + object.Template.Spec.Domain.CPU = &cnv.CPU{ + Sockets: uint32(vm.CpuCount / vm.CoresPerSocket), + Cores: uint32(vm.CoresPerSocket), + } +} + +func (r *Builder) mapFirmware(vm *model.VM, object *cnv.VirtualMachineSpec) { + features := &cnv.Features{} + firmware := &cnv.Firmware{ + Serial: vm.UUID, + } + switch vm.Firmware { + case Efi: + // We don't distinguish between UEFI and UEFI with secure boot but we anyway would have + // disabled secure boot, even if we knew it was enabled on the source, because the guest + // OS won't be able to boot without getting the NVRAM data. 
By starting the VM without + // secure boot we ease the procedure users need to do in order to make a guest OS that + // was previously configured with secure boot to boot. + secureBootEnabled := false + firmware.Bootloader = &cnv.Bootloader{ + EFI: &cnv.EFI{ + SecureBoot: &secureBootEnabled, + }} + default: + firmware.Bootloader = &cnv.Bootloader{BIOS: &cnv.BIOS{}} + } + object.Template.Spec.Domain.Features = features + object.Template.Spec.Domain.Firmware = firmware +} + +func (r *Builder) mapDisks(vm *model.VM, persistentVolumeClaims []core.PersistentVolumeClaim, object *cnv.VirtualMachineSpec) { + var kVolumes []cnv.Volume + var kDisks []cnv.Disk + + disks := vm.Disks + // TODO might need sort by the disk id (incremental name) + /*sort.Slice(disks, func(i, j int) bool { + return disks[i].Key < disks[j].Key + })*/ + pvcMap := make(map[string]*core.PersistentVolumeClaim) + for i := range persistentVolumeClaims { + pvc := &persistentVolumeClaims[i] + // the PVC BackingFile value has already been trimmed. + if source, ok := pvc.Annotations[planbase.AnnDiskSource]; ok { + pvcMap[source] = pvc + } else { + pvcMap[pvc.Annotations[AnnImportBackingFile]] = pvc + } + } + for i, disk := range disks { + pvc := pvcMap[getDiskSourcePath(disk.FilePath)] + volumeName := fmt.Sprintf("vol-%v", i) + volume := cnv.Volume{ + Name: volumeName, + VolumeSource: cnv.VolumeSource{ + PersistentVolumeClaim: &cnv.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: core.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + } + kubevirtDisk := cnv.Disk{ + Name: volumeName, + DiskDevice: cnv.DiskDevice{ + Disk: &cnv.DiskTarget{ + Bus: Virtio, + }, + }, + } + kVolumes = append(kVolumes, volume) + kDisks = append(kDisks, kubevirtDisk) + } + object.Template.Spec.Volumes = kVolumes + object.Template.Spec.Domain.Devices.Disks = kDisks +} + +// Build tasks. 
+func (r *Builder) Tasks(vmRef ref.Ref) (list []*plan.Task, err error) { + vm := &model.VM{} + err = r.Source.Inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMLookupFailed, + "vm", + vmRef.String()) + return + } + for _, disk := range vm.Disks { + mB := disk.Capacity / 0x100000 + list = append( + list, + &plan.Task{ + Name: getDiskSourcePath(disk.FilePath), + Progress: libitr.Progress{ + Total: mB, + }, + Annotations: map[string]string{ + "unit": "MB", + }, + }) + } + + return +} + +func (r *Builder) TemplateLabels(vmRef ref.Ref) (labels map[string]string, err error) { + vm := &model.VM{} + err = r.Source.Inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMLookupFailed, + "vm", + vmRef.String()) + return + } + + os := Unknown + + labels = make(map[string]string) + labels[fmt.Sprintf(TemplateOSLabel, os)] = "true" + labels[TemplateWorkloadLabel] = "true" + labels[TemplateFlavorLabel] = "true" + + return +} + +// Return a stable identifier for a VDDK DataVolume. +func (r *Builder) ResolveDataVolumeIdentifier(dv *cdi.DataVolume) string { + return trimBackingFileName(dv.ObjectMeta.Annotations[planbase.AnnDiskSource]) +} + +// Return a stable identifier for a PersistentDataVolume. +func (r *Builder) ResolvePersistentVolumeClaimIdentifier(pvc *core.PersistentVolumeClaim) string { + return trimBackingFileName(pvc.Annotations[AnnImportBackingFile]) +} + +// Trims the snapshot suffix from a disk backing file name if there is one. 
+// +// Example: +// Input: [datastore13] my-vm/disk-name-000015.vmdk +// Output: [datastore13] my-vm/disk-name.vmdk +func trimBackingFileName(fileName string) string { + return backingFilePattern.ReplaceAllString(fileName, ".vmdk") +} + +func getDiskSourcePath(filePath string) string { + if strings.HasSuffix(filePath, ".ova") { + return filePath + } + return filepath.Dir(filePath) +} + +func getResourceCapacity(capacity int64, units string) (int64, error) { + if units == "" { + return 0, nil + } + + re := regexp.MustCompile("[0-9]+") + + numbers := re.FindAllString(units, -1) + if len(numbers) != 2 { + return 0, nil + } + base, err := strconv.Atoi(numbers[0]) + if err != nil { + return 0, err + } + pow, err := strconv.Atoi(numbers[1]) + if err != nil { + return 0, err + } + + return int64(capacity) * int64(math.Pow(float64(base), float64(pow))), nil + +} + +func (r *Builder) PersistentVolumeClaimWithSourceRef(da interface{}, storageName *string, populatorName string, accessModes []core.PersistentVolumeAccessMode, volumeMode *core.PersistentVolumeMode) *core.PersistentVolumeClaim { + return nil +} + +func (r *Builder) PreTransferActions(c planbase.Client, vmRef ref.Ref) (ready bool, err error) { + return true, nil +} + +// Build LUN PVs. +func (r *Builder) LunPersistentVolumes(vmRef ref.Ref) (pvs []core.PersistentVolume, err error) { + // do nothing + return +} + +// Build LUN PVCs. 
+func (r *Builder) LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.PersistentVolumeClaim, err error) { + // do nothing + return +} diff --git a/pkg/controller/plan/adapter/ova/client.go b/pkg/controller/plan/adapter/ova/client.go new file mode 100644 index 000000000..eeade1a9a --- /dev/null +++ b/pkg/controller/plan/adapter/ova/client.go @@ -0,0 +1,100 @@ +package ova + +import ( + "net" + "net/http" + "time" + + "github.com/go-logr/logr" + planapi "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref" + plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" + libweb "github.com/konveyor/forklift-controller/pkg/lib/inventory/web" + core "k8s.io/api/core/v1" + cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" +) + +// OVA VM Client +type Client struct { + *plancontext.Context + URL string + client *libweb.Client + Secret *core.Secret + Log logr.Logger +} + +// Connect to the OVA provider server. +func (r *Client) connect() (err error) { + if r.client != nil { + return + } + URL := r.Source.Provider.Spec.URL + client := &libweb.Client{ + Transport: &http.Transport{ + DialContext: (&net.Dialer{ + Timeout: 15 * time.Second, + KeepAlive: 15 * time.Second, + }).DialContext, + MaxIdleConns: 10, + }, + } + r.URL = URL + r.client = client + + return +} + +// Create a VM snapshot and return its ID. +func (r *Client) CreateSnapshot(vmRef ref.Ref) (snapshot string, err error) { + return +} + +// Remove all warm migration snapshots. +func (r *Client) RemoveSnapshots(vmRef ref.Ref, precopies []planapi.Precopy) (err error) { + return +} + +// Check if a snapshot is ready to transfer, to avoid importer restarts. +func (r *Client) CheckSnapshotReady(vmRef ref.Ref, snapshot string) (ready bool, err error) { + return +} + +// Set DataVolume checkpoints. 
+func (r *Client) SetCheckpoints(vmRef ref.Ref, precopies []planapi.Precopy, datavolumes []cdi.DataVolume, final bool) (err error) { + return +} + +// Get the power state of the VM. +func (r *Client) PowerState(vmRef ref.Ref) (state string, err error) { + return +} + +// Power on the VM. +func (r *Client) PowerOn(vmRef ref.Ref) (err error) { + return +} + +// Power off the VM. +func (r *Client) PowerOff(vmRef ref.Ref) (err error) { + return +} + +// Determine whether the VM has been powered off. +func (r *Client) PoweredOff(vmRef ref.Ref) (poweredOff bool, err error) { + return true, nil +} + +// Close the connection to the OVA provider server. +func (r *Client) Close() { + if r.client != nil { + r.client = nil + } +} + +func (r *Client) DetachDisks(vmRef ref.Ref) (err error) { + return +} + +func (r Client) Finalize(vms []*planapi.VMStatus, planName string) { + return +} diff --git a/pkg/controller/plan/adapter/ova/destinationclient.go b/pkg/controller/plan/adapter/ova/destinationclient.go new file mode 100644 index 000000000..05d7fbc90 --- /dev/null +++ b/pkg/controller/plan/adapter/ova/destinationclient.go @@ -0,0 +1,20 @@ +package ova + +import ( + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" + plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" +) + +type DestinationClient struct { + *plancontext.Context +} + +func (d *DestinationClient) DeletePopulatorDataSource(vm *plan.VMStatus) error { + // not supported - do nothing + return nil +} + +func (r *DestinationClient) SetPopulatorCrOwnership() (err error) { + // not supported - do nothing + return +} diff --git a/pkg/controller/plan/adapter/ova/validator.go b/pkg/controller/plan/adapter/ova/validator.go new file mode 100644 index 000000000..28df4970c --- /dev/null +++ b/pkg/controller/plan/adapter/ova/validator.go @@ -0,0 +1,127 @@ +package ova + +import ( + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + 
"github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref" + "github.com/konveyor/forklift-controller/pkg/controller/provider/web" + model "github.com/konveyor/forklift-controller/pkg/controller/provider/web/ova" + liberr "github.com/konveyor/forklift-controller/pkg/lib/error" +) + +// OVA validator. +type Validator struct { + plan *api.Plan + inventory web.Client +} + +// Error messages +const ( + ErrVMNotFound = "VM not found in inventory." +) + +// Load. +func (r *Validator) Load() (err error) { + r.inventory, err = web.NewClient(r.plan.Referenced.Provider.Source) + return +} + +// Validate whether warm migration is supported from this provider type. +func (r *Validator) WarmMigration() (ok bool) { + ok = false + return +} + +// Validate that a VM's networks have been mapped. +func (r *Validator) NetworksMapped(vmRef ref.Ref) (ok bool, err error) { + if r.plan.Referenced.Map.Network == nil { + return + } + vm := &model.VM{} + err = r.inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMNotFound, + "vm", + vmRef.String()) + return + } + + for _, net := range vm.Networks { + if !r.plan.Referenced.Map.Network.Status.Refs.Find(ref.Ref{ID: net.ID}) { + return + } + } + ok = true + return +} + +// Validate that no more than one of a VM's networks is mapped to the pod network. +func (r *Validator) PodNetwork(vmRef ref.Ref) (ok bool, err error) { + if r.plan.Referenced.Map.Network == nil { + return + } + vm := &model.Workload{} + err = r.inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMNotFound, + "vm", + vmRef.String()) + return + } + + mapping := r.plan.Referenced.Map.Network.Spec.Map + podMapped := 0 + for i := range mapping { + mapped := &mapping[i] + ref := mapped.Source + network := &model.Network{} + fErr := r.inventory.Find(network, ref) + if fErr != nil { + err = fErr + return + } + for _, nic := range vm.NICs { + // TODO move from NIC name to NIC ID? 
ID should be unique + if nic.Name == network.Name && mapped.Destination.Type == Pod { + podMapped++ + } + } + } + + ok = podMapped <= 1 + return +} + +// Validate that a VM's disk backing storage has been mapped. +func (r *Validator) StorageMapped(vmRef ref.Ref) (ok bool, err error) { + if r.plan.Referenced.Map.Storage == nil { + return + } + vm := &model.VM{} + err = r.inventory.Find(vm, vmRef) + if err != nil { + err = liberr.Wrap( + err, + ErrVMNotFound, + "vm", + vmRef.String()) + return + } + + for _, disk := range vm.Disks { + if !r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: disk.ID}) { + return + } + } + ok = true + return +} + +// Validate that a VM's Host isn't in maintenance mode. +func (r *Validator) MaintenanceMode(vmRef ref.Ref) (ok bool, err error) { + ok = true + return +} diff --git a/pkg/controller/plan/adapter/vsphere/builder.go b/pkg/controller/plan/adapter/vsphere/builder.go index ebb142848..a0d038485 100644 --- a/pkg/controller/plan/adapter/vsphere/builder.go +++ b/pkg/controller/plan/adapter/vsphere/builder.go @@ -257,6 +257,10 @@ func (r *Builder) PodEnvironment(vmRef ref.Ref, sourceSecret *core.Secret) (env Name: "V2V_libvirtURL", Value: libvirtURL.String(), }, + core.EnvVar{ + Name: "V2V_source", + Value: "vCenter", + }, ) return } diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go index 1225ff213..9284d7e29 100644 --- a/pkg/controller/plan/kubevirt.go +++ b/pkg/controller/plan/kubevirt.go @@ -12,6 +12,7 @@ import ( "strings" "time" + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" template "github.com/openshift/api/template/v1" "github.com/openshift/library-go/pkg/template/generator" "github.com/openshift/library-go/pkg/template/templateprocessing" @@ -1452,16 +1453,43 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi }, }, }) - mounts = append(mounts, - core.VolumeMount{ - Name: "libvirt-domain-xml", - MountPath: "/mnt/v2v", - }, - 
core.VolumeMount{ - Name: "vddk-vol-mount", - MountPath: "/opt", - }, - ) + + mounts = append(mounts, + core.VolumeMount{ + Name: "libvirt-domain-xml", + MountPath: "/mnt/v2v", + }, + core.VolumeMount{ + Name: "vddk-vol-mount", + MountPath: "/opt", + }, + ) + if r.Source.Provider.Type() == api.Ova { + server := r.Source.Provider.Spec.URL + splitted := strings.Split(server, ":") + + if len(splitted) != 2 { + r.Log.Info("The NFS server path format is wrong") + return + } + nfsServer := splitted[0] + nfsPath := splitted[1] + + //path from disk + volumes = append(volumes, core.Volume{ + Name: "nfs", + VolumeSource: core.VolumeSource{ + NFS: &core.NFSVolumeSource{ + Server: nfsServer, + Path: nfsPath, + }, + }, + }) + mounts = append(mounts, core.VolumeMount{ + Name: "nfs", + MountPath: "/ova", + }) + } // Temporary space for VDDK library volumes = append(volumes, core.Volume{ diff --git a/pkg/controller/plan/scheduler/BUILD.bazel b/pkg/controller/plan/scheduler/BUILD.bazel index 169fa320c..334051db2 100644 --- a/pkg/controller/plan/scheduler/BUILD.bazel +++ b/pkg/controller/plan/scheduler/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "//pkg/controller/plan/context", "//pkg/controller/plan/scheduler/ocp", "//pkg/controller/plan/scheduler/openstack", + "//pkg/controller/plan/scheduler/ova", "//pkg/controller/plan/scheduler/ovirt", "//pkg/controller/plan/scheduler/vsphere", "//pkg/lib/error", diff --git a/pkg/controller/plan/scheduler/doc.go b/pkg/controller/plan/scheduler/doc.go index 42a14c7d5..737b422de 100644 --- a/pkg/controller/plan/scheduler/doc.go +++ b/pkg/controller/plan/scheduler/doc.go @@ -6,6 +6,7 @@ import ( plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" "github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler/ocp" "github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler/openstack" + "github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler/ova" 
"github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler/ovirt" "github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler/vsphere" liberr "github.com/konveyor/forklift-controller/pkg/lib/error" @@ -42,6 +43,11 @@ func New(ctx *plancontext.Context) (scheduler Scheduler, err error) { Context: ctx, MaxInFlight: settings.Settings.MaxInFlight, } + case api.Ova: + scheduler = &ova.Scheduler{ + Context: ctx, + MaxInFlight: settings.Settings.MaxInFlight, + } default: liberr.New("provider not supported.") } diff --git a/pkg/controller/plan/scheduler/ova/BUILD.bazel b/pkg/controller/plan/scheduler/ova/BUILD.bazel new file mode 100644 index 000000000..05a9adca4 --- /dev/null +++ b/pkg/controller/plan/scheduler/ova/BUILD.bazel @@ -0,0 +1,14 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "ova", + srcs = ["scheduler.go"], + importpath = "github.com/konveyor/forklift-controller/pkg/controller/plan/scheduler/ova", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/forklift/v1beta1", + "//pkg/apis/forklift/v1beta1/plan", + "//pkg/controller/plan/context", + "//pkg/lib/error", + ], +) diff --git a/pkg/controller/plan/scheduler/ova/scheduler.go b/pkg/controller/plan/scheduler/ova/scheduler.go new file mode 100644 index 000000000..085b5bd16 --- /dev/null +++ b/pkg/controller/plan/scheduler/ova/scheduler.go @@ -0,0 +1,71 @@ +package ova + +import ( + "context" + "sync" + + api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1" + "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan" + plancontext "github.com/konveyor/forklift-controller/pkg/controller/plan/context" + liberr "github.com/konveyor/forklift-controller/pkg/lib/error" +) + +// Package level mutex to ensure that +// multiple concurrent reconciles don't +// attempt to schedule VMs into the same +// slots. +var mutex sync.Mutex + +// Scheduler for migrations from OVA. 
+type Scheduler struct { + *plancontext.Context + // Maximum number of VMs that can be + // migrated at once per provider. + MaxInFlight int +} + +func (r *Scheduler) Next() (vm *plan.VMStatus, hasNext bool, err error) { + mutex.Lock() + defer mutex.Unlock() + + planList := &api.PlanList{} + err = r.List(context.TODO(), planList) + if err != nil { + err = liberr.Wrap(err) + return + } + + inFlight := 0 + for _, p := range planList.Items { + // ignore plans that aren't using the same source provider + if p.Spec.Provider.Source != r.Plan.Spec.Provider.Source { + continue + } + + // skip plans that aren't being executed + snapshot := p.Status.Migration.ActiveSnapshot() + if !snapshot.HasCondition("Executing") { + continue + } + + for _, vmStatus := range p.Status.Migration.VMs { + if vmStatus.Running() { + inFlight++ + } + } + } + + if inFlight >= r.MaxInFlight { + return + } + + for _, vmStatus := range r.Plan.Status.Migration.VMs { + if !vmStatus.MarkedStarted() && !vmStatus.MarkedCompleted() { + vm = vmStatus + hasNext = true + return + } + } + + return +} diff --git a/pkg/controller/provider/container/ova/collector.go b/pkg/controller/provider/container/ova/collector.go index 661cd1d50..a07e8102c 100644 --- a/pkg/controller/provider/container/ova/collector.go +++ b/pkg/controller/provider/container/ova/collector.go @@ -21,7 +21,7 @@ const ( // Retry interval. RetryInterval = 5 * time.Second // Refresh interval. 
- RefreshInterval = 10 * time.Second + RefreshInterval = 5 * time.Minute ) // Phases diff --git a/pkg/controller/provider/container/ova/resource.go b/pkg/controller/provider/container/ova/resource.go index 133715f56..27c3b3493 100644 --- a/pkg/controller/provider/container/ova/resource.go +++ b/pkg/controller/provider/container/ova/resource.go @@ -45,6 +45,8 @@ type VM struct { CoresPerSocket int32 `json:"CoresPerSocket"` MemoryMB int32 `json:"MemoryMB"` BalloonedMemory int32 `json:"BalloonedMemory"` + MemoryUnits string `json:"MemoryUnits"` + CpuUnits string `json:"CpuUnits"` IpAddress string `json:"IpAddress"` NumaNodeAffinity []string `json:"NumaNodeAffinity"` StorageUsed int64 `json:"StorageUsed"` @@ -53,23 +55,27 @@ type VM struct { Kind string `json:"Kind"` } `json:"Devices"` NICs []struct { - Name string `json:"Name"` - MAC string `json:"MAC"` - Config []struct { + Name string `json:"Name"` + MAC string `json:"MAC"` + Network string `json:"Network"` + Config []struct { Key string `json:"Key"` Value string `json:"Value"` } `json:"Config"` } `json:"Nics"` Disks []struct { + ID string `json:"ID"` + Name string `json:"Name"` FilePath string `json:"FilePath"` - Capacity string `json:"Capacity"` + Capacity int64 `json:"Capacity"` CapacityAllocationUnits string `json:"CapacityAllocationUnits"` DiskId string `json:"DiskId"` FileRef string `json:"FileRef"` Format string `json:"Format"` - PopulatedSize string `json:"PopulatedSize"` + PopulatedSize int64 `json:"PopulatedSize"` } `json:"Disks"` Networks []struct { + ID string `json:"ID"` Name string `json:"Name"` Description string `json:"Description"` } `json:"Networks"` @@ -78,6 +84,7 @@ type VM struct { // Apply to (update) the model. 
func (r *VM) ApplyTo(m *model.VM) { m.Name = r.Name + m.ID = r.UUID m.OvaPath = r.OvaPath m.RevisionValidated = r.RevisionValidated m.PolicyVersion = r.PolicyVersion @@ -91,6 +98,8 @@ func (r *VM) ApplyTo(m *model.VM) { m.CpuCount = r.CpuCount m.CoresPerSocket = r.CoresPerSocket m.MemoryMB = r.MemoryMB + m.MemoryUnits = r.MemoryUnits + m.CpuUnits = r.CpuUnits m.BalloonedMemory = r.BalloonedMemory m.IpAddress = r.IpAddress m.NumaNodeAffinity = r.NumaNodeAffinity @@ -116,9 +125,10 @@ func (r *VM) addNICs(m *model.VM) { } m.NICs = append( m.NICs, model.NIC{ - Name: n.Name, - MAC: n.MAC, - Config: configs, + Name: n.Name, + MAC: n.MAC, + Config: configs, + Network: n.Network, }) } } @@ -129,6 +139,10 @@ func (r *VM) addDisks(m *model.VM) { m.Disks = append( m.Disks, model.Disk{ + Base: model.Base{ + Name: disk.Name, + ID: disk.ID, + }, FilePath: disk.FilePath, Capacity: disk.Capacity, CapacityAllocationUnits: disk.CapacityAllocationUnits, @@ -158,37 +172,45 @@ func (r *VM) addNetworks(m *model.VM) { m.Networks, model.Network{ Description: network.Description, + Base: model.Base{ + Name: network.Name, + ID: network.ID, + }, }) } } // Network. type Network struct { + ID string `json:"ID"` Name string `json:"Name"` Description string `json:"Description"` } // Apply to (update) the model. func (r *Network) ApplyTo(m *model.Network) { + m.ID = r.ID m.Description = r.Description + m.Base.Name = r.Name } -// Network (list). -//type NetworkList []Network `json:"network"` - // Disk. type Disk struct { + ID string `json:"ID"` + Name string `json:"Name"` FilePath string `json:"FilePath"` - Capacity string `json:"Capacity"` + Capacity int64 `json:"Capacity"` CapacityAllocationUnits string `json:"Capacity_allocation_units"` DiskId string `json:"DiskId"` FileRef string `json:"FileRef"` Format string `json:"Format"` - PopulatedSize string `json:"PopulatedSize"` + PopulatedSize int64 `json:"PopulatedSize"` } // Apply to (update) the model. 
func (r *Disk) ApplyTo(m *model.Disk) { + m.Base.Name = r.Name + m.Base.ID = r.ID m.FilePath = r.FilePath m.Capacity = r.Capacity m.CapacityAllocationUnits = r.CapacityAllocationUnits @@ -197,8 +219,3 @@ func (r *Disk) ApplyTo(m *model.Disk) { m.Format = r.Format m.PopulatedSize = r.PopulatedSize } - -// Disk (list). -type DiskList struct { - Items []Disk `json:"Disk"` -} diff --git a/pkg/controller/provider/model/ova/model.go b/pkg/controller/provider/model/ova/model.go index e89026d52..3bd8548a9 100644 --- a/pkg/controller/provider/model/ova/model.go +++ b/pkg/controller/provider/model/ova/model.go @@ -81,6 +81,8 @@ type VM struct { CpuCount int32 `sql:""` CoresPerSocket int32 `sql:""` MemoryMB int32 `sql:""` + MemoryUnits string `sql:""` + CpuUnits string `sql:""` BalloonedMemory int32 `sql:""` IpAddress string `sql:""` NumaNodeAffinity []string `sql:""` @@ -97,12 +99,12 @@ type VM struct { type Disk struct { Base FilePath string `sql:""` - Capacity string `sql:""` + Capacity int64 `sql:""` CapacityAllocationUnits string `sql:""` DiskId string `sql:""` FileRef string `sql:""` Format string `sql:""` - PopulatedSize string `sql:""` + PopulatedSize int64 `sql:""` } // Virtual Device. @@ -117,7 +119,8 @@ type Conf struct { // Virtual ethernet card. 
type NIC struct { - Name string `sql:""` - MAC string `sql:""` - Config []Conf `sql:""` + Name string `sql:""` + MAC string `sql:""` + Network string `sql:""` + Config []Conf `sql:""` } diff --git a/pkg/controller/provider/validation.go b/pkg/controller/provider/validation.go index 193985ae1..655658513 100644 --- a/pkg/controller/provider/validation.go +++ b/pkg/controller/provider/validation.go @@ -247,6 +247,7 @@ func (r *Reconciler) validateSecret(provider *api.Provider) (secret *core.Secret "user", "password", } + } else { keyList = []string{ "user", @@ -254,6 +255,11 @@ func (r *Reconciler) validateSecret(provider *api.Provider) (secret *core.Secret "cacert", } } + case api.Ova: + keyList = []string{ + "url", + "insecureSkipVerify", + } } for _, key := range keyList { if _, found := secret.Data[key]; !found { diff --git a/pkg/controller/provider/web/ova/client.go b/pkg/controller/provider/web/ova/client.go index b39b13e4f..afae24800 100644 --- a/pkg/controller/provider/web/ova/client.go +++ b/pkg/controller/provider/web/ova/client.go @@ -262,7 +262,7 @@ func (r *Finder) Network(ref *base.Ref) (object interface{}, err error) { // ProviderNotReadyErr // NotFoundErr // RefNotUniqueErr -func (r *Finder) Disk(ref *base.Ref) (object interface{}, err error) { +func (r *Finder) Storage(ref *base.Ref) (object interface{}, err error) { disk := &Disk{} err = r.ByRef(disk, *ref) if err == nil { diff --git a/pkg/controller/provider/web/ova/disk.go b/pkg/controller/provider/web/ova/disk.go index 5ce6de3e1..ef1c48c01 100644 --- a/pkg/controller/provider/web/ova/disk.go +++ b/pkg/controller/provider/web/ova/disk.go @@ -132,12 +132,12 @@ func (h *DiskHandler) watch(ctx *gin.Context) { type Disk struct { Resource FilePath string - Capacity string + Capacity int64 CapacityAllocationUnits string DiskId string FileRef string Format string - PopulatedSize string + PopulatedSize int64 } // Build the resource using the model. 
diff --git a/pkg/controller/provider/web/ova/provider.go b/pkg/controller/provider/web/ova/provider.go index dbfdfe3ab..52388425a 100644 --- a/pkg/controller/provider/web/ova/provider.go +++ b/pkg/controller/provider/web/ova/provider.go @@ -160,7 +160,7 @@ type Provider struct { Product string `json:"product"` VMCount int64 `json:"vmCount"` NetworkCount int64 `json:"networkCount"` - DiskCount int64 + DiskCount int64 `json:"diskCount"` } // Set fields with the specified object. diff --git a/pkg/controller/provider/web/ova/vm.go b/pkg/controller/provider/web/ova/vm.go index 9b6110852..12d626853 100644 --- a/pkg/controller/provider/web/ova/vm.go +++ b/pkg/controller/provider/web/ova/vm.go @@ -216,6 +216,8 @@ type VM struct { CpuCount int32 CoresPerSocket int32 MemoryMB int32 + MemoryUnits string + CpuUnits string BalloonedMemory int32 IpAddress string NumaNodeAffinity []string @@ -242,6 +244,8 @@ func (r *VM) With(m *model.VM) { r.CoresPerSocket = m.CoresPerSocket r.MemoryMB = m.MemoryMB r.BalloonedMemory = m.BalloonedMemory + r.MemoryUnits = m.MemoryUnits + r.CpuUnits = m.CpuUnits r.IpAddress = m.IpAddress r.StorageUsed = m.StorageUsed r.FaultToleranceEnabled = m.FaultToleranceEnabled diff --git a/virt-v2v/cold/entrypoint b/virt-v2v/cold/entrypoint index 14e4872d1..7ee0f3008 100755 --- a/virt-v2v/cold/entrypoint +++ b/virt-v2v/cold/entrypoint @@ -2,15 +2,34 @@ set -o pipefail shopt -s nullglob -if [ -z "$V2V_libvirtURL" ] || \ - [ -z "$V2V_secretKey" ] || \ - [ -z "$V2V_vmName" ] ; then +if [ -z "$V2V_source" ] ; then echo "Following environment needs to be defined:" echo - echo " V2V_libvirtURL, V2V_secretKey, V2V_vmName" + echo " V2V_source" exit 1 fi +if [ "$V2V_source" == "vCenter" ] ; then + if [ -z "$V2V_libvirtURL" ] || \ + [ -z "$V2V_secretKey" ] || \ + [ -z "$V2V_vmName" ] ; then + echo "Following environment needs to be defined:" + echo + echo " V2V_libvirtURL, V2V_secretKey, V2V_vmName" + exit 1 + fi +fi + +if [ "$V2V_source" == "ova" ] ; then + if [ -z 
"$V2V_diskPath" ] || \ + [ -z "$V2V_vmName" ] ; then + echo "Following environment needs to be defined:" + echo + echo " V2V_diskPath, V2V_vmName" + exit 1 + fi +fi + export LIBGUESTFS_PATH=/usr/lib64/guestfs/appliance echo "Preparing virt-v2v" @@ -53,26 +72,37 @@ for disk in /dev/block[0-9]* ; do ln -s "$disk" "$DIR/$V2V_vmName-sd$(gen_name "$((num+1))")" done -# Store password to file -echo -n "$V2V_secretKey" > "$DIR/vmware.pw" -args=("${args[@]}" - -ip "$DIR/vmware.pw" -) +if [ "$V2V_source" == "vCenter" ] ; then + # Store password to file + echo -n "$V2V_secretKey" > "$DIR/vmware.pw" + args=("${args[@]}" + -ip "$DIR/vmware.pw" + ) -# Use VDDK if present -if [ -d "/opt/vmware-vix-disklib-distrib" ]; then - args=("${args[@]}" - -it vddk - -io vddk-libdir=/opt/vmware-vix-disklib-distrib - -io "vddk-thumbprint=$V2V_thumbprint" - ) + # Use VDDK if present + if [ -d "/opt/vmware-vix-disklib-distrib" ]; then + args=("${args[@]}" + -it vddk + -io vddk-libdir=/opt/vmware-vix-disklib-distrib + -io "vddk-thumbprint=$V2V_thumbprint" + ) + fi fi echo "Starting virt-v2v" set -x ls -l "$DIR" -exec virt-v2v -v -x \ - -i libvirt \ - -ic "$V2V_libvirtURL" \ - "${args[@]}" \ - -- "$V2V_vmName" |& /usr/local/bin/virt-v2v-monitor +if [ "$V2V_source" == "vCenter" ] ; then + exec virt-v2v -v -x \ + -i libvirt \ + -ic "$V2V_libvirtURL" \ + "${args[@]}" \ + -- "$V2V_vmName" |& /usr/local/bin/virt-v2v-monitor +fi + +if [ "$V2V_source" == "ova" ] ; then + exec virt-v2v -v -x \ + -i ova "$V2V_diskPath" \ + "${args[@]}"\ + |& /usr/local/bin/virt-v2v-monitor +fi \ No newline at end of file