diff --git a/api/metadata/annotations.go b/api/metadata/annotations.go
index a0980ab94..a8f560913 100644
--- a/api/metadata/annotations.go
+++ b/api/metadata/annotations.go
@@ -73,3 +73,29 @@ const (
 	// Ideally used in production use cases
 	GitOpsProfile ProfileType = "gitops"
 )
+
+const (
+	DefaultProfile = PreviewProfile
+)
+
+// The prod profile is deprecated and not supported; use the preview profile instead.
+var supportedProfiles = map[ProfileType]ProfileType{DevProfile: DevProfile, PreviewProfile: PreviewProfile, GitOpsProfile: GitOpsProfile}
+
+func GetProfileOrDefault(annotation map[string]string) ProfileType {
+	if annotation == nil {
+		return DefaultProfile
+	}
+	if profile, ok := supportedProfiles[ProfileType(annotation[Profile])]; ok {
+		return profile
+	}
+	return DefaultProfile
+}
+
+func IsDevProfile(annotation map[string]string) bool {
+	if annotation == nil {
+		return false
+	}
+	if len(annotation[Profile]) == 0 {
+		return false
+	}
+	return ProfileType(annotation[Profile]) == DevProfile
+}
diff --git a/api/metadata/annotations_test.go b/api/metadata/annotations_test.go
new file mode 100644
index 000000000..b0437bb38
--- /dev/null
+++ b/api/metadata/annotations_test.go
@@ -0,0 +1,43 @@
+// Copyright 2024 Apache Software Foundation (ASF)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+	"testing"
+)
+
+func TestGetProfile(t *testing.T) {
+	type args struct {
+		annotation map[string]string
+	}
+	tests := []struct {
+		name string
+		args args
+		want ProfileType
+	}{
+		{"Empty Annotations", args{annotation: nil}, DefaultProfile},
+		{"Non-existent Profile", args{annotation: map[string]string{Profile: "IDontExist"}}, DefaultProfile},
+		{"Regular Annotation", args{annotation: map[string]string{Profile: GitOpsProfile.String()}}, GitOpsProfile},
+		{"Deprecated Annotation", args{annotation: map[string]string{Profile: ProdProfile.String()}}, DefaultProfile},
+		{"Dev Annotation", args{annotation: map[string]string{Profile: DevProfile.String()}}, DevProfile},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetProfileOrDefault(tt.args.annotation); got != tt.want {
+				t.Errorf("GetProfileOrDefault() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/api/v1alpha08/podtemplate_types.go b/api/v1alpha08/podtemplate_types.go
new file mode 100644
index 000000000..3811a1bb8
--- /dev/null
+++ b/api/v1alpha08/podtemplate_types.go
@@ -0,0 +1,545 @@
+// Copyright 2024 Apache Software Foundation (ASF)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha08 + +import corev1 "k8s.io/api/core/v1" + +// ContainerSpec is the container for the internal deployments based on the default Kubernetes Container API +type ContainerSpec struct { + // Container image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The container image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The container image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // List of ports to expose from the container. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Modifying this array with strategic merge patch may corrupt the data. + // For more information See https://github.com/kubernetes/kubernetes/issues/108255. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. 
+ // +optional + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []corev1.ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. 
The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// ToContainer converts to Kubernetes Container API. 
+func (f *ContainerSpec) ToContainer() corev1.Container { + return corev1.Container{ + Name: DefaultContainerName, + Image: f.Image, + Command: f.Command, + Args: f.Args, + Ports: f.Ports, + EnvFrom: f.EnvFrom, + Env: f.Env, + Resources: f.Resources, + ResizePolicy: f.ResizePolicy, + VolumeMounts: f.VolumeMounts, + VolumeDevices: f.VolumeDevices, + LivenessProbe: f.LivenessProbe, + ReadinessProbe: f.ReadinessProbe, + StartupProbe: f.StartupProbe, + Lifecycle: f.Lifecycle, + TerminationMessagePath: f.TerminationMessagePath, + TerminationMessagePolicy: f.TerminationMessagePolicy, + ImagePullPolicy: f.ImagePullPolicy, + SecurityContext: f.SecurityContext, + Stdin: f.Stdin, + StdinOnce: f.StdinOnce, + TTY: f.TTY, + } +} + +// PodSpec describes the PodSpec for the internal deployments based on the default Kubernetes PodSpec API +type PodSpec struct { + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +optional + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge + InitContainers []corev1.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Containers []corev1.Container `json:"containers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. + // Default to Always. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + // +optional + RestartPolicy corev1.RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates stop immediately via + // the kill signal (no opportunity to shut down). + // If this value is nil, the default grace period will be used instead. 
+ // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // +optional + DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + // +mapType=atomic + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` + // Share a single process namespace between all of the containers in a pod. 
+ // When this is set containers will be able to view and signal processes from other containers + // in the same pod, and the first process in each container will not be assigned PID 1. + // HostPID and ShareProcessNamespace cannot both be set. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the pod's scheduling constraints + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + // If specified, the pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=ip + // +patchStrategy=merge + HostAliases []corev1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` + // If specified, indicates the pod's priority. "system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"` + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. 
The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` + // If specified, all readiness gates will be evaluated for pod readiness. + // A pod is ready when all its containers are ready AND + // all conditions specified in the readiness gates have status equal to "True" + // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates + // +optional + ReadinessGates []corev1.PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` + // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + // empty definition that uses the default runtime handler. + // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + // +optional + RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // +optional + PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md + // +optional + Overhead corev1.ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // All topologySpreadConstraints are ANDed. 
+ // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` + // If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). + // In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). + // In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. + // If a pod does not have FQDN, this has no effect. + // Default to false. + // +optional + SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"` + // Specifies the OS of the containers in the pod. + // Some pod and container fields are restricted if this is set. + // + // If the OS field is set to linux, the following fields must be unset: + // -securityContext.windowsOptions + // + // If the OS field is set to windows, following fields must be unset: + // - spec.hostPID + // - spec.hostIPC + // - spec.hostUsers + // - spec.securityContext.seLinuxOptions + // - spec.securityContext.seccompProfile + // - spec.securityContext.fsGroup + // - spec.securityContext.fsGroupChangePolicy + // - spec.securityContext.sysctls + // - spec.shareProcessNamespace + // - spec.securityContext.runAsUser + // - spec.securityContext.runAsGroup + // - spec.securityContext.supplementalGroups + // - spec.containers[*].securityContext.seLinuxOptions + // - spec.containers[*].securityContext.seccompProfile + // - spec.containers[*].securityContext.capabilities + // - spec.containers[*].securityContext.readOnlyRootFilesystem + // - spec.containers[*].securityContext.privileged + // - spec.containers[*].securityContext.allowPrivilegeEscalation + // - spec.containers[*].securityContext.procMount + // - spec.containers[*].securityContext.runAsUser + // - spec.containers[*].securityContext.runAsGroup + // +optional + OS *corev1.PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"` + + // Use the host's user namespace. + // Optional: Default to true. + // If set to true or not present, the pod will be run in the host user namespace, useful + // for when the pod needs a feature only available to the host user namespace, such as + // loading a kernel module with CAP_SYS_MODULE. + // When set to false, a new userns is created for the pod. Setting false is useful for + // mitigating container breakout vulnerabilities even allowing users to run their + // containers as root without actually having root privileges on the host. + // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + // +k8s:conversion-gen=false + // +optional + HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` + + // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. 
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional + SchedulingGates []corev1.PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` + // ResourceClaims defines which ResourceClaims must be allocated + // and reserved before the Pod is allowed to start. The resources + // will be made available to those containers which consume them + // by name. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. + // + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + ResourceClaims []corev1.PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` +} + +func (f *PodSpec) ToPodSpec() corev1.PodSpec { + return corev1.PodSpec{ + Volumes: f.Volumes, + InitContainers: f.InitContainers, + Containers: f.Containers, + RestartPolicy: f.RestartPolicy, + TerminationGracePeriodSeconds: f.TerminationGracePeriodSeconds, + ActiveDeadlineSeconds: f.ActiveDeadlineSeconds, + DNSPolicy: f.DNSPolicy, + NodeSelector: f.NodeSelector, + ServiceAccountName: f.ServiceAccountName, + AutomountServiceAccountToken: f.AutomountServiceAccountToken, + NodeName: f.NodeName, + HostNetwork: f.HostNetwork, + HostPID: f.HostPID, + HostIPC: f.HostIPC, + ShareProcessNamespace: f.ShareProcessNamespace, + SecurityContext: f.SecurityContext, + ImagePullSecrets: f.ImagePullSecrets, + Hostname: f.Hostname, + Subdomain: f.Subdomain, + Affinity: f.Affinity, + SchedulerName: f.SchedulerName, + Tolerations: f.Tolerations, + HostAliases: f.HostAliases, + PriorityClassName: f.PriorityClassName, + Priority: f.Priority, + DNSConfig: f.DNSConfig, + ReadinessGates: f.ReadinessGates, + RuntimeClassName: f.RuntimeClassName, + EnableServiceLinks: f.EnableServiceLinks, + PreemptionPolicy: f.PreemptionPolicy, + Overhead: f.Overhead, + TopologySpreadConstraints: f.TopologySpreadConstraints, + SetHostnameAsFQDN: f.SetHostnameAsFQDN, + OS: f.OS, + HostUsers: f.HostUsers, + SchedulingGates: f.SchedulingGates, + ResourceClaims: f.ResourceClaims, + } +} + +// PodTemplateSpec describes the desired custom Kubernetes PodTemplate definition for the deployed flow or service. +// +// The ContainerSpec describes the container where the actual flow or service is running. It will override any default definitions. +// For example, to override the image one can use `.spec.podTemplate.container.image = my/image:tag`. +type PodTemplateSpec struct { + // Container is the Kubernetes container where the application should run. + // One can change this attribute in order to override the defaults provided by the operator. 
+ // +optional + Container ContainerSpec `json:"container,omitempty"` + // +optional + PodSpec `json:",inline"` + // +optional + Replicas *int32 `json:"replicas,omitempty"` +} diff --git a/api/v1alpha08/sonataflow_types.go b/api/v1alpha08/sonataflow_types.go index 3237fc1f5..7763dc23d 100644 --- a/api/v1alpha08/sonataflow_types.go +++ b/api/v1alpha08/sonataflow_types.go @@ -31,524 +31,19 @@ import ( const DefaultContainerName = "workflow" -// ContainerSpec is the container for the internal deployments based on the default Kubernetes Container API -type ContainerSpec struct { - // Container image name. - // More info: https://kubernetes.io/docs/concepts/containers/images - // This field is optional to allow higher level config management to default or override - // container images in workload controllers like Deployments and StatefulSets. - // +optional - Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Entrypoint array. Not executed within a shell. - // The container image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - // of whether the variable exists or not. Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` - // Arguments to the entrypoint. - // The container image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - // of whether the variable exists or not. Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` - // List of ports to expose from the container. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Modifying this array with strategic merge patch may corrupt the data. - // For more information See https://github.com/kubernetes/kubernetes/issues/108255. - // Cannot be updated. - // +optional - // +patchMergeKey=containerPort - // +patchStrategy=merge - // +listType=map - // +listMapKey=containerPort - // +listMapKey=protocol - Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. 
When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` - // List of environment variables to set in the container. - // Cannot be updated. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` - // Compute Resources required by this container. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Resources resize policy for the container. - // +featureGate=InPlacePodVerticalScaling - // +optional - // +listType=atomic - ResizePolicy []corev1.ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - // +optional - // +patchMergeKey=mountPath - // +patchStrategy=merge - VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` - // volumeDevices is the list of block devices to be used by the container. - // +patchMergeKey=devicePath - // +patchStrategy=merge - // +optional - VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // StartupProbe indicates that the Pod has successfully initialized. - // If specified, no other probes are executed until this completes successfully. - // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - // when it might take a long time to load data or warm a cache, than during steady-state operation. - // This cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - // +optional - Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. 
- // Message written is intended to be brief final status, such as an assertion failure message. - // Will be truncated by the node if greater than 4096 bytes. The total message length across - // all containers will be limited to 12kb. - // Defaults to /dev/termination-log. - // Cannot be updated. - // +optional - TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` - // Indicate how the termination message should be populated. File will use the contents of - // terminationMessagePath to populate the container status message on both success and failure. - // FallbackToLogsOnError will use the last chunk of container log output if the termination - // message file is empty and the container exited with an error. - // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - // Defaults to File. - // Cannot be updated. - // +optional - TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - // +optional - ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` - // SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - // +optional - Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - // +optional - StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - // +optional - TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` -} - -// ToContainer converts to Kubernetes Container API. 
-func (f *ContainerSpec) ToContainer() corev1.Container { - return corev1.Container{ - Name: DefaultContainerName, - Image: f.Image, - Command: f.Command, - Args: f.Args, - Ports: f.Ports, - EnvFrom: f.EnvFrom, - Env: f.Env, - Resources: f.Resources, - ResizePolicy: f.ResizePolicy, - VolumeMounts: f.VolumeMounts, - VolumeDevices: f.VolumeDevices, - LivenessProbe: f.LivenessProbe, - ReadinessProbe: f.ReadinessProbe, - StartupProbe: f.StartupProbe, - Lifecycle: f.Lifecycle, - TerminationMessagePath: f.TerminationMessagePath, - TerminationMessagePolicy: f.TerminationMessagePolicy, - ImagePullPolicy: f.ImagePullPolicy, - SecurityContext: f.SecurityContext, - Stdin: f.Stdin, - StdinOnce: f.StdinOnce, - TTY: f.TTY, - } -} - -// PodSpec describes the PodSpec for the internal deployments based on the default Kubernetes PodSpec API -type PodSpec struct { - // List of volumes that can be mounted by containers belonging to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes - // +optional - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - Volumes []corev1.Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` - // List of initialization containers belonging to the pod. - // Init containers are executed in order prior to containers being started. If any - // init container fails, the pod is considered to have failed and is handled according - // to its restartPolicy. The name for an init container or normal container must be - // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. - // The resourceRequirements of an init container are taken into account during scheduling - // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers - // in a similar fashion. - // Init containers cannot currently be added or removed. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // +patchMergeKey=name - // +patchStrategy=merge - InitContainers []corev1.Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Containers []corev1.Container `json:"containers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. - // Default to Always. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - // +optional - RestartPolicy corev1.RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates stop immediately via - // the kill signal (no opportunity to shut down). - // If this value is nil, the default grace period will be used instead. 
- // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - // +optional - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. - // +optional - DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // +optional - // +mapType=atomic - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - // +k8s:conversion-gen=false - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` - // Share a single process namespace between all of the containers in a pod. 
- // When this is set containers will be able to view and signal processes from other containers - // in the same pod, and the first process in each container will not be assigned PID 1. - // HostPID and ShareProcessNamespace cannot both be set. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. - // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` - // If specified, the pod's scheduling constraints - // +optional - Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` - // If specified, the pod's tolerations. - // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` - // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. - // +optional - // +patchMergeKey=ip - // +patchStrategy=merge - HostAliases []corev1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` - // If specified, indicates the pod's priority. "system-node-critical" and - // "system-cluster-critical" are two special keywords which indicate the - // highest priorities with the former being the highest priority. Any other - // name must be defined by creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. - // +optional - PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"` - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. 
The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. - // +optional - Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` - // Specifies the DNS parameters of a pod. - // Parameters specified here will be merged to the generated DNS - // configuration based on DNSPolicy. - // +optional - DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` - // If specified, all readiness gates will be evaluated for pod readiness. - // A pod is ready when all its containers are ready AND - // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - // +optional - ReadinessGates []corev1.PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` - // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - // +optional - RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` - // EnableServiceLinks indicates whether information about services should be injected into pod's - // environment variables, matching the syntax of Docker links. - // Optional: Defaults to true. - // +optional - EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // +optional - PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` - // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - // This field will be autopopulated at admission time by the RuntimeClass admission controller. If - // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - // The RuntimeClass admission controller will reject Pod create requests which have the overhead already - // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // +optional - Overhead corev1.ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` - // TopologySpreadConstraints describes how a group of pods ought to spread across topology - // domains. Scheduler will schedule pods in a way which abides by the constraints. - // All topologySpreadConstraints are ANDed. 
- // +optional - // +patchMergeKey=topologyKey - // +patchStrategy=merge - // +listType=map - // +listMapKey=topologyKey - // +listMapKey=whenUnsatisfiable - TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` - // If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). - // In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). - // In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. - // If a pod does not have FQDN, this has no effect. - // Default to false. - // +optional - SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"` - // Specifies the OS of the containers in the pod. - // Some pod and container fields are restricted if this is set. - // - // If the OS field is set to linux, the following fields must be unset: - // -securityContext.windowsOptions - // - // If the OS field is set to windows, following fields must be unset: - // - spec.hostPID - // - spec.hostIPC - // - spec.hostUsers - // - spec.securityContext.seLinuxOptions - // - spec.securityContext.seccompProfile - // - spec.securityContext.fsGroup - // - spec.securityContext.fsGroupChangePolicy - // - spec.securityContext.sysctls - // - spec.shareProcessNamespace - // - spec.securityContext.runAsUser - // - spec.securityContext.runAsGroup - // - spec.securityContext.supplementalGroups - // - spec.containers[*].securityContext.seLinuxOptions - // - spec.containers[*].securityContext.seccompProfile - // - spec.containers[*].securityContext.capabilities - // - spec.containers[*].securityContext.readOnlyRootFilesystem - // - spec.containers[*].securityContext.privileged - // - spec.containers[*].securityContext.allowPrivilegeEscalation - // - spec.containers[*].securityContext.procMount - // - spec.containers[*].securityContext.runAsUser - // - spec.containers[*].securityContext.runAsGroup - // +optional - OS *corev1.PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"` - - // Use the host's user namespace. - // Optional: Default to true. - // If set to true or not present, the pod will be run in the host user namespace, useful - // for when the pod needs a feature only available to the host user namespace, such as - // loading a kernel module with CAP_SYS_MODULE. - // When set to false, a new userns is created for the pod. Setting false is useful for - // mitigating container breakout vulnerabilities even allowing users to run their - // containers as root without actually having root privileges on the host. - // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. - // +k8s:conversion-gen=false - // +optional - HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` - - // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - // scheduler will not attempt to schedule the pod. - // - // SchedulingGates can only be set at pod creation time, and be removed only afterwards. - // - // This is a beta feature enabled by the PodSchedulingReadiness feature gate. 
- // - // +patchMergeKey=name - // +patchStrategy=merge - // +listType=map - // +listMapKey=name - // +featureGate=PodSchedulingReadiness - // +optional - SchedulingGates []corev1.PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` - // ResourceClaims defines which ResourceClaims must be allocated - // and reserved before the Pod is allowed to start. The resources - // will be made available to those containers which consume them - // by name. - // - // This is an alpha field and requires enabling the - // DynamicResourceAllocation feature gate. - // - // This field is immutable. - // - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - // +listType=map - // +listMapKey=name - // +featureGate=DynamicResourceAllocation - // +optional - ResourceClaims []corev1.PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` -} - -func (f *PodSpec) ToPodSpec() corev1.PodSpec { - return corev1.PodSpec{ - Volumes: f.Volumes, - InitContainers: f.InitContainers, - Containers: f.Containers, - RestartPolicy: f.RestartPolicy, - TerminationGracePeriodSeconds: f.TerminationGracePeriodSeconds, - ActiveDeadlineSeconds: f.ActiveDeadlineSeconds, - DNSPolicy: f.DNSPolicy, - NodeSelector: f.NodeSelector, - ServiceAccountName: f.ServiceAccountName, - AutomountServiceAccountToken: f.AutomountServiceAccountToken, - NodeName: f.NodeName, - HostNetwork: f.HostNetwork, - HostPID: f.HostPID, - HostIPC: f.HostIPC, - ShareProcessNamespace: f.ShareProcessNamespace, - SecurityContext: f.SecurityContext, - ImagePullSecrets: f.ImagePullSecrets, - Hostname: f.Hostname, - Subdomain: f.Subdomain, - Affinity: f.Affinity, - SchedulerName: f.SchedulerName, - Tolerations: f.Tolerations, - HostAliases: f.HostAliases, - PriorityClassName: f.PriorityClassName, - Priority: f.Priority, - DNSConfig: f.DNSConfig, - ReadinessGates: f.ReadinessGates, - RuntimeClassName: f.RuntimeClassName, - EnableServiceLinks: f.EnableServiceLinks, - PreemptionPolicy: f.PreemptionPolicy, - Overhead: f.Overhead, - TopologySpreadConstraints: f.TopologySpreadConstraints, - SetHostnameAsFQDN: f.SetHostnameAsFQDN, - OS: f.OS, - HostUsers: f.HostUsers, - SchedulingGates: f.SchedulingGates, - ResourceClaims: f.ResourceClaims, - } -} +// DeploymentModel defines how a given pod will be deployed +// +kubebuilder:validation:Enum=kubernetes;knative +type DeploymentModel string + +const ( + // KubernetesDeploymentModel defines a PodSpec to be deployed as a regular Kubernetes Deployment + KubernetesDeploymentModel DeploymentModel = "kubernetes" + // KnativeDeploymentModel defines a PodSpec to be deployed as a Knative Serving Service + KnativeDeploymentModel DeploymentModel = "knative" +) -// PodTemplateSpec describes the desired custom Kubernetes PodTemplate definition for the deployed flow or service. -// -// The ContainerSpec describes the container where the actual flow or service is running. It will override any default definitions. -// For example, to override the image one can use `.spec.podTemplate.container.image = my/image:tag`. -type PodTemplateSpec struct { +// FlowPodTemplateSpec is a special PodTemplateSpec designed for SonataFlow deployments +type FlowPodTemplateSpec struct { // Container is the Kubernetes container where the application should run. // One can change this attribute in order to override the defaults provided by the operator. 
// +optional @@ -556,7 +51,11 @@ type PodTemplateSpec struct { // +optional PodSpec `json:",inline"` // +optional + // Replicas define the number of pods to start by default for this deployment model. Ignored in "knative" deployment model. Replicas *int32 `json:"replicas,omitempty"` + // Defines the kind of deployment model for this pod spec. In dev profile, only "kubernetes" is valid. + // +optional + DeploymentModel DeploymentModel `json:"deploymentModel,omitempty"` } // Flow describes the contents of the Workflow definition following the CNCF Serverless Workflow Specification. @@ -656,7 +155,7 @@ type SonataFlowSpec struct { Resources WorkflowResources `json:"resources,omitempty"` // PodTemplate describes the deployment details of this SonataFlow instance. //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="podTemplate" - PodTemplate PodTemplateSpec `json:"podTemplate,omitempty"` + PodTemplate FlowPodTemplateSpec `json:"podTemplate,omitempty"` // Persistence defines the database persistence configuration for the workflow Persistence *PersistenceOptionsSpec `json:"persistence,omitempty"` // Sink describes the sinkBinding details of this SonataFlow instance. @@ -752,6 +251,7 @@ func (s *SonataFlowStatus) IsBuildFailed() bool { // +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=='Running')].reason` // +operator-sdk:csv:customresourcedefinitions:resources={{SonataFlowBuild,sonataflow.org/v1alpha08,"A SonataFlow Build"}} // +operator-sdk:csv:customresourcedefinitions:resources={{Deployment,apps/v1,"A Deployment for the Flow"}} +// +operator-sdk:csv:customresourcedefinitions:resources={{Service,serving.knative.dev/v1,"A Knative Serving Service for the Flow"}} // +operator-sdk:csv:customresourcedefinitions:resources={{Service,v1,"A Service for the Flow"}} // +operator-sdk:csv:customresourcedefinitions:resources={{Route,route.openshift.io/v1,"An OpenShift Route for the Flow"}} // +operator-sdk:csv:customresourcedefinitions:resources={{ConfigMap,v1,"The ConfigMaps with Flow definition and additional configuration files"}} @@ -763,6 +263,10 @@ type SonataFlow struct { Status SonataFlowStatus `json:"status,omitempty"` } +func (s *SonataFlow) IsKnativeDeployment() bool { + return s.Spec.PodTemplate.DeploymentModel == KnativeDeploymentModel +} + func (s *SonataFlow) HasContainerSpecImage() bool { return len(s.Spec.PodTemplate.Container.Image) > 0 } diff --git a/api/v1alpha08/zz_generated.deepcopy.go b/api/v1alpha08/zz_generated.deepcopy.go index c970b1081..645d2b60a 100644 --- a/api/v1alpha08/zz_generated.deepcopy.go +++ b/api/v1alpha08/zz_generated.deepcopy.go @@ -320,6 +320,28 @@ func (in *Flow) DeepCopy() *Flow { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowPodTemplateSpec) DeepCopyInto(out *FlowPodTemplateSpec) { + *out = *in + in.Container.DeepCopyInto(&out.Container) + in.PodSpec.DeepCopyInto(&out.PodSpec) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowPodTemplateSpec. +func (in *FlowPodTemplateSpec) DeepCopy() *FlowPodTemplateSpec { + if in == nil { + return nil + } + out := new(FlowPodTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PersistenceOptionsSpec) DeepCopyInto(out *PersistenceOptionsSpec) { *out = *in diff --git a/bundle/manifests/sonataflow-operator-controllers-config_v1_configmap.yaml b/bundle/manifests/sonataflow-operator-controllers-config_v1_configmap.yaml index a16e6be86..27e29749a 100644 --- a/bundle/manifests/sonataflow-operator-controllers-config_v1_configmap.yaml +++ b/bundle/manifests/sonataflow-operator-controllers-config_v1_configmap.yaml @@ -29,6 +29,18 @@ data: sonataFlowDevModeImageTag: "" # The default name of the builder configMap in the operator's namespace builderConfigMapName: "sonataflow-operator-builder-config" + # Quarkus extensions required for workflows persistence. These extensions are used by the SonataFlow build system, + # in cases where the workflow being built has configured postgresql persistence. + postgreSQLPersistenceExtensions: + - groupId: io.quarkus + artifactId: quarkus-jdbc-postgresql + version: 3.2.10.Final + - groupId: io.quarkus + artifactId: quarkus-agroal + version: 3.2.10.Final + - groupId: org.kie + artifactId: kie-addons-quarkus-persistence-jdbc + version: 999-SNAPSHOT kind: ConfigMap metadata: name: sonataflow-operator-controllers-config diff --git a/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml b/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml index b71c508b1..7e78264fa 100644 --- a/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml +++ b/bundle/manifests/sonataflow-operator.clusterserviceversion.yaml @@ -288,6 +288,9 @@ spec: - kind: Deployment name: A Deployment for the Flow version: apps/v1 + - kind: Service + name: A Knative Serving Service for the Flow + version: serving.knative.dev/v1 - kind: Service name: A Service for the Flow version: v1 @@ -437,6 +440,22 @@ spec: - patch - update - watch + - apiGroups: + - serving.knative.dev + resources: + - service + - services + - services/status + - services/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch - apiGroups: - coordination.k8s.io resources: diff --git a/bundle/manifests/sonataflow.org_sonataflows.yaml b/bundle/manifests/sonataflow.org_sonataflows.yaml index eaf80c918..6f2cb2e75 100644 --- a/bundle/manifests/sonataflow.org_sonataflows.yaml +++ b/bundle/manifests/sonataflow.org_sonataflows.yaml @@ -5626,6 +5626,13 @@ spec: - name type: object type: array + deploymentModel: + description: Defines the kind of deployment model for this pod + spec. In dev profile, only "kubernetes" is valid. + enum: + - kubernetes + - knative + type: string dnsConfig: description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration @@ -7153,6 +7160,8 @@ spec: type: object type: array replicas: + description: Replicas define the number of pods to start by default + for this deployment model. Ignored in "knative" deployment model. format: int32 type: integer resourceClaims: diff --git a/config/crd/bases/sonataflow.org_sonataflows.yaml b/config/crd/bases/sonataflow.org_sonataflows.yaml index 1557c3b86..61002c32f 100644 --- a/config/crd/bases/sonataflow.org_sonataflows.yaml +++ b/config/crd/bases/sonataflow.org_sonataflows.yaml @@ -5627,6 +5627,13 @@ spec: - name type: object type: array + deploymentModel: + description: Defines the kind of deployment model for this pod + spec. In dev profile, only "kubernetes" is valid. + enum: + - kubernetes + - knative + type: string dnsConfig: description: Specifies the DNS parameters of a pod. 
Parameters specified here will be merged to the generated DNS configuration @@ -7154,6 +7161,8 @@ spec: type: object type: array replicas: + description: Replicas define the number of pods to start by default + for this deployment model. Ignored in "knative" deployment model. format: int32 type: integer resourceClaims: diff --git a/config/manager/controllers_cfg.yaml b/config/manager/controllers_cfg.yaml index 41f2f860e..08f2a4062 100644 --- a/config/manager/controllers_cfg.yaml +++ b/config/manager/controllers_cfg.yaml @@ -26,3 +26,15 @@ sonataFlowBaseBuilderImageTag: "" sonataFlowDevModeImageTag: "" # The default name of the builder configMap in the operator's namespace builderConfigMapName: "sonataflow-operator-builder-config" +# Quarkus extensions required for workflows persistence. These extensions are used by the SonataFlow build system, +# in cases where the workflow being built has configured postgresql persistence. +postgreSQLPersistenceExtensions: + - groupId: io.quarkus + artifactId: quarkus-jdbc-postgresql + version: 3.2.10.Final + - groupId: io.quarkus + artifactId: quarkus-agroal + version: 3.2.10.Final + - groupId: org.kie + artifactId: kie-addons-quarkus-persistence-jdbc + version: 999-SNAPSHOT diff --git a/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml b/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml index c61183d84..c6fdf4e6a 100644 --- a/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/sonataflow-operator.clusterserviceversion.yaml @@ -172,6 +172,9 @@ spec: - kind: Deployment name: A Deployment for the Flow version: apps/v1 + - kind: Service + name: A Knative Serving Service for the Flow + version: serving.knative.dev/v1 - kind: Service name: A Service for the Flow version: v1 diff --git a/config/rbac/builder_role.yaml b/config/rbac/builder_role.yaml index 70b2ab54e..ffced490e 100644 --- a/config/rbac/builder_role.yaml +++ b/config/rbac/builder_role.yaml @@ -66,33 +66,3 @@ rules: - patch - update - watch -- apiGroups: - - eventing.knative.dev - resources: - - triggers - - triggers/status - - triggers/finalizers - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - sources.knative.dev - resources: - - sinkbindings - - sinkbindings/status - - sinkbindings/finalizers - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch \ No newline at end of file diff --git a/config/rbac/knative_role.yaml b/config/rbac/knative_role.yaml new file mode 100644 index 000000000..8dad9419d --- /dev/null +++ b/config/rbac/knative_role.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: knative-manager-role +rules: + - apiGroups: + - eventing.knative.dev + resources: + - triggers + - triggers/status + - triggers/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - sources.knative.dev + resources: + - sinkbindings + - sinkbindings/status + - sinkbindings/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - serving.knative.dev + resources: + - service + - services + - services/status + - services/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/rbac/knative_role_binding.yaml 
b/config/rbac/knative_role_binding.yaml new file mode 100644 index 000000000..cbab613cf --- /dev/null +++ b/config/rbac/knative_role_binding.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: knative-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: knative-manager-role +subjects: + - kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 2ffd5efa4..9a097cc26 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -17,6 +17,8 @@ resources: - operator_role_binding_leases.yaml - service_discovery_role.yaml - service_discovery_role_binding.yaml +- knative_role.yaml +- knative_role_binding.yaml # Comment the following 4 lines if you want to disable # the auth proxy (https://github.com/brancz/kube-rbac-proxy) # which protects your /metrics endpoint. diff --git a/controllers/builder/containerbuilder.go b/controllers/builder/containerbuilder.go index c945f02dd..d23c8388c 100644 --- a/controllers/builder/containerbuilder.go +++ b/controllers/builder/containerbuilder.go @@ -22,6 +22,10 @@ package builder import ( "time" + "github.com/apache/incubator-kie-kogito-serverless-operator/workflowproj" + + corev1 "k8s.io/api/core/v1" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg" "k8s.io/klog/v2" @@ -52,6 +56,7 @@ type kanikoBuildInput struct { task *api.KanikoTask workflowDefinition []byte workflow *operatorapi.SonataFlow + workflowProperties []operatorapi.ConfigMapWorkflowResource dockerfile string imageTag string } @@ -139,6 +144,7 @@ func (c *containerBuilderManager) scheduleNewKanikoBuildWithContainerFile(build task: task, workflowDefinition: workflowDef, workflow: workflow, + workflowProperties: buildWorkflowPropertyResources(workflow), dockerfile: platform.GetCustomizedBuilderDockerfile(c.builderConfigMap.Data[defaultBuilderResourceName], *c.platform), imageTag: buildNamespacedImageTag(workflow), } @@ -200,6 +206,11 @@ func newBuild(buildInput kanikoBuildInput, platform api.PlatformContainerBuild, newBuilder.AddConfigMapResource(res.ConfigMap, res.WorkflowPath) } + //make the workflow properties available to the kaniko build. + for _, props := range buildInput.workflowProperties { + newBuilder.AddConfigMapResource(props.ConfigMap, props.WorkflowPath) + } + return newBuilder.Scheduler(). WithAdditionalArgs(buildInput.task.AdditionalFlags). WithResourceRequirements(buildInput.task.Resources). 
@@ -213,3 +224,10 @@ func newBuild(buildInput kanikoBuildInput, platform api.PlatformContainerBuild, func buildNamespacedImageTag(workflow *operatorapi.SonataFlow) string { return workflow.Namespace + "/" + workflowdef.GetWorkflowAppImageNameTag(workflow) } + +func buildWorkflowPropertyResources(workflow *operatorapi.SonataFlow) []operatorapi.ConfigMapWorkflowResource { + return []operatorapi.ConfigMapWorkflowResource{ + {ConfigMap: corev1.LocalObjectReference{Name: workflowproj.GetWorkflowUserPropertiesConfigMapName(workflow)}, WorkflowPath: ""}, + {ConfigMap: corev1.LocalObjectReference{Name: workflowproj.GetWorkflowManagedPropertiesConfigMapName(workflow)}, WorkflowPath: ""}, + } +} diff --git a/controllers/builder/kogitoserverlessbuild_manager.go b/controllers/builder/kogitoserverlessbuild_manager.go index b6dfa7baf..044710d44 100644 --- a/controllers/builder/kogitoserverlessbuild_manager.go +++ b/controllers/builder/kogitoserverlessbuild_manager.go @@ -21,7 +21,12 @@ package builder import ( "context" + "strings" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg" + + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/common/persistence" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -31,6 +36,8 @@ import ( operatorapi "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" ) +const QuarkusExtensionsBuildArg = "QUARKUS_EXTENSIONS" + var _ SonataFlowBuildManager = &sonataFlowBuildManager{} type sonataFlowBuildManager struct { @@ -54,7 +61,11 @@ func (k *sonataFlowBuildManager) GetOrCreateBuild(workflow *operatorapi.SonataFl if plat, err = platform.GetActivePlatform(k.ctx, k.client, workflow.Namespace); err != nil { return nil, err } - buildInstance.Spec.BuildTemplate = plat.Spec.Build.Template + workflowBuildTemplate := plat.Spec.Build.Template.DeepCopy() + if persistence.UsesPostgreSQLPersistence(workflow, plat) { + addPersistenceExtensions(workflowBuildTemplate) + } + buildInstance.Spec.BuildTemplate = *workflowBuildTemplate if err = controllerutil.SetControllerReference(workflow, buildInstance, k.client.Scheme()); err != nil { return nil, err } @@ -86,3 +97,44 @@ func NewSonataFlowBuildManager(ctx context.Context, client client.Client) Sonata ctx: ctx, } } + +// addPersistenceExtensions adds the persistence-related extensions to the current BuildTemplate if none of them is +// already provided. If any of them is detected, it's assumed that users have already provided them in the +// SonataFlowPlatform, so the provided configuration is kept as is.
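+// For example, with the default extensions configured in controllers_cfg.yaml and no QUARKUS_EXTENSIONS build arg already set,
+// the resulting build arg is QUARKUS_EXTENSIONS=io.quarkus:quarkus-jdbc-postgresql:3.2.10.Final,io.quarkus:quarkus-agroal:3.2.10.Final,org.kie:kie-addons-quarkus-persistence-jdbc:999-SNAPSHOT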
+func addPersistenceExtensions(template *operatorapi.BuildTemplate) { + quarkusExtensions := getBuildArg(template.BuildArgs, QuarkusExtensionsBuildArg) + if quarkusExtensions == nil { + template.BuildArgs = append(template.BuildArgs, v1.EnvVar{Name: QuarkusExtensionsBuildArg}) + quarkusExtensions = &template.BuildArgs[len(template.BuildArgs)-1] + } + if !hasAnyExtensionPresent(quarkusExtensions, persistence.GetPostgreSQLExtensions()) { + for _, extension := range persistence.GetPostgreSQLExtensions() { + if len(quarkusExtensions.Value) > 0 { + quarkusExtensions.Value = quarkusExtensions.Value + "," + } + quarkusExtensions.Value = quarkusExtensions.Value + extension.String() + } + } +} + +func getBuildArg(buildArgs []v1.EnvVar, name string) *v1.EnvVar { + for i := 0; i < len(buildArgs); i++ { + if buildArgs[i].Name == name { + return &buildArgs[i] + } + } + return nil +} + +func hasAnyExtensionPresent(buildArg *v1.EnvVar, extensions []cfg.GAV) bool { + for _, extension := range extensions { + if isExtensionPresent(buildArg, extension) { + return true + } + } + return false +} + +func isExtensionPresent(buildArg *v1.EnvVar, extension cfg.GAV) bool { + return strings.Contains(buildArg.Value, extension.GroupAndArtifact()) +} diff --git a/controllers/builder/kogitoserverlessbuild_manager_test.go b/controllers/builder/kogitoserverlessbuild_manager_test.go new file mode 100644 index 000000000..ac865e63d --- /dev/null +++ b/controllers/builder/kogitoserverlessbuild_manager_test.go @@ -0,0 +1,178 @@ +// Copyright 2024 Apache Software Foundation (ASF) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package builder + +import ( + "testing" + + operatorapi "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/common/persistence" + "github.com/apache/incubator-kie-kogito-serverless-operator/test" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSonataFlowBuildManager_GetOrCreateBuildWithWorkflowPersistence(t *testing.T) { + // Current platform with no persistence + currentPlatform := operatorapi.SonataFlowPlatform{ + ObjectMeta: metav1.ObjectMeta{Name: "current-platform"}, + Spec: operatorapi.SonataFlowPlatformSpec{}, + Status: operatorapi.SonataFlowPlatformStatus{}, + } + // Persistence is configured in the workflow + workflow := operatorapi.SonataFlow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-workflow", + }, + Spec: operatorapi.SonataFlowSpec{ + Persistence: &operatorapi.PersistenceOptionsSpec{ + PostgreSQL: &operatorapi.PersistencePostgreSQL{}, + }, + }, + Status: operatorapi.SonataFlowStatus{}, + } + testGetOrCreateBuildWithPersistence(t, ¤tPlatform, &workflow) +} + +func TestSonataFlowBuildManager_GetOrCreateBuildWithPlatformPersistence(t *testing.T) { + // Persistence is configured in the platform + currentPlatform := operatorapi.SonataFlowPlatform{ + ObjectMeta: metav1.ObjectMeta{Name: "current-platform"}, + Spec: operatorapi.SonataFlowPlatformSpec{ + Persistence: &operatorapi.PlatformPersistenceOptionsSpec{ + PostgreSQL: &operatorapi.PlatformPersistencePostgreSQL{}, + }, + }, + Status: operatorapi.SonataFlowPlatformStatus{}, + } + // Workflow with no persistence + workflow := operatorapi.SonataFlow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-workflow", + }, + Status: operatorapi.SonataFlowStatus{}, + } + testGetOrCreateBuildWithPersistence(t, ¤tPlatform, &workflow) +} + +func TestSonataFlowBuildManager_GetOrCreateBuildWithNoPersistence(t *testing.T) { + // Platform has no persistence + currentPlatform := operatorapi.SonataFlowPlatform{ + ObjectMeta: metav1.ObjectMeta{Name: "current-platform"}, + Spec: operatorapi.SonataFlowPlatformSpec{}, + Status: operatorapi.SonataFlowPlatformStatus{}, + } + // Workflow has no persistence + workflow := operatorapi.SonataFlow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-workflow", + }, + Status: operatorapi.SonataFlowStatus{}, + } + buildManager := prepareGetOrCreateBuildTest(t, ¤tPlatform) + build, _ := buildManager.GetOrCreateBuild(&workflow) + assert.Equal(t, 0, len(build.Spec.BuildArgs)) + test.RestoreControllersConfig(t) +} + +func testGetOrCreateBuildWithPersistence(t *testing.T, currentPlatform *operatorapi.SonataFlowPlatform, workflow *operatorapi.SonataFlow) { + buildManager := prepareGetOrCreateBuildTest(t, currentPlatform) + build, _ := buildManager.GetOrCreateBuild(workflow) + assert.NotNil(t, build) + assert.Equal(t, 1, len(build.Spec.BuildArgs)) + assertContainsPersistence(t, build.Spec.BuildArgs, 0) + test.RestoreControllersConfig(t) +} + +func prepareGetOrCreateBuildTest(t *testing.T, currentPlatform *operatorapi.SonataFlowPlatform) sonataFlowBuildManager { + initializeControllersConfig(t) + platforms := operatorapi.NewSonataFlowPlatformList() + platforms.Items = []operatorapi.SonataFlowPlatform{*currentPlatform} + cli := test.NewSonataFlowClientBuilder().WithRuntimeObjects(&platforms).Build() + buildManager := sonataFlowBuildManager{ + client: cli, + } + return 
buildManager +} + +func Test_addPersistenceExtensionsWithEmptyArgs(t *testing.T) { + initializeControllersConfig(t) + buildTemplate := &operatorapi.BuildTemplate{} + addPersistenceExtensions(buildTemplate) + assert.Equal(t, 1, len(buildTemplate.BuildArgs)) + assertContainsPersistence(t, buildTemplate.BuildArgs, 0) + test.RestoreControllersConfig(t) +} + +func Test_addPersistenceExtensionsWithNoQuarkusExtensionsArg(t *testing.T) { + initializeControllersConfig(t) + buildTemplate := &operatorapi.BuildTemplate{ + BuildArgs: []v1.EnvVar{ + {Name: "VAR1"}, + }, + } + addPersistenceExtensions(buildTemplate) + assert.Equal(t, 2, len(buildTemplate.BuildArgs)) + assertContainsPersistence(t, buildTemplate.BuildArgs, 1) + test.RestoreControllersConfig(t) +} + +func Test_addPersistenceExtensionsWithQuarkusExtensionsArgAndNoPersistenceExtensions(t *testing.T) { + initializeControllersConfig(t) + buildTemplate := &operatorapi.BuildTemplate{ + BuildArgs: []v1.EnvVar{ + {Name: "VAR1"}, + {Name: "QUARKUS_EXTENSIONS", Value: "org.acme:org.acme.library:1.0.0"}, + }, + } + addPersistenceExtensions(buildTemplate) + assert.Equal(t, 2, len(buildTemplate.BuildArgs)) + assertContainsPersistence(t, buildTemplate.BuildArgs, 1) + test.RestoreControllersConfig(t) +} + +func Test_addPersistenceExtensionsWithQuarkusExtensionsArgAndPersistenceExtensions(t *testing.T) { + initializeControllersConfig(t) + buildTemplate := &operatorapi.BuildTemplate{ + BuildArgs: []v1.EnvVar{ + {Name: "VAR1", Value: "VALUE1"}, + {Name: "QUARKUS_EXTENSIONS", Value: "org.acme:org.acme.library:1.0.0,io.quarkus:quarkus-jdbc-postgresql:8.8.0.Final"}, + }, + } + addPersistenceExtensions(buildTemplate) + assert.Equal(t, 2, len(buildTemplate.BuildArgs)) + assert.Equal(t, v1.EnvVar{Name: "VAR1", Value: "VALUE1"}, buildTemplate.BuildArgs[0]) + assert.Equal(t, v1.EnvVar{Name: "QUARKUS_EXTENSIONS", Value: "org.acme:org.acme.library:1.0.0,io.quarkus:quarkus-jdbc-postgresql:8.8.0.Final"}, buildTemplate.BuildArgs[1]) + test.RestoreControllersConfig(t) +} + +func initializeControllersConfig(t *testing.T) { + // emulate the controllers config initialization + cfg, err := cfg.InitializeControllersCfgAt("../cfg/testdata/controllers-cfg-test.yaml") + assert.NoError(t, err) + assert.NotNil(t, cfg) + assert.Equal(t, 3, len(cfg.PostgreSQLPersistenceExtensions)) +} + +func assertContainsPersistence(t *testing.T, buildArgs []v1.EnvVar, position int) { + assert.GreaterOrEqual(t, len(buildArgs), position) + envVar := buildArgs[position] + assert.Equal(t, QuarkusExtensionsBuildArg, envVar.Name) + for _, extension := range persistence.GetPostgreSQLExtensions() { + assert.Contains(t, envVar.Value, extension.String()) + } +} diff --git a/controllers/builder/openshiftbuilder.go b/controllers/builder/openshiftbuilder.go index bb84b2c53..09ab8f986 100644 --- a/controllers/builder/openshiftbuilder.go +++ b/controllers/builder/openshiftbuilder.go @@ -186,9 +186,6 @@ func (o *openshiftBuilderManager) newDefaultBuildConfig(build *operatorapi.Sonat } func (o *openshiftBuilderManager) addExternalResources(config *buildv1.BuildConfig, workflow *operatorapi.SonataFlow) error { - if len(workflow.Spec.Resources.ConfigMaps) == 0 { - return nil - } var configMapSources []buildv1.ConfigMapBuildSource for _, workflowRes := range workflow.Spec.Resources.ConfigMaps { configMapSources = append(configMapSources, buildv1.ConfigMapBuildSource{ @@ -196,6 +193,14 @@ func (o *openshiftBuilderManager) addExternalResources(config *buildv1.BuildConf DestinationDir: workflowRes.WorkflowPath, }) } + 
//make the workflow properties available to the OpenShift build config. + configMapSources = append(configMapSources, buildv1.ConfigMapBuildSource{ + ConfigMap: corev1.LocalObjectReference{Name: workflowproj.GetWorkflowUserPropertiesConfigMapName(workflow)}, + DestinationDir: ""}) + configMapSources = append(configMapSources, buildv1.ConfigMapBuildSource{ + ConfigMap: corev1.LocalObjectReference{Name: workflowproj.GetWorkflowManagedPropertiesConfigMapName(workflow)}, + DestinationDir: ""}) + config.Spec.Source.ConfigMaps = configMapSources return nil } diff --git a/controllers/builder/openshiftbuilder_test.go b/controllers/builder/openshiftbuilder_test.go index ea8c8b748..48b6ef3ec 100644 --- a/controllers/builder/openshiftbuilder_test.go +++ b/controllers/builder/openshiftbuilder_test.go @@ -103,7 +103,7 @@ func Test_openshiftbuilder_externalCMs(t *testing.T) { }, } workflow.Spec.Resources.ConfigMaps = append(workflow.Spec.Resources.ConfigMaps, - operatorapi.ConfigMapWorkflowResource{ConfigMap: v1.LocalObjectReference{Name: externalCm.Name}}) + operatorapi.ConfigMapWorkflowResource{ConfigMap: v1.LocalObjectReference{Name: externalCm.Name}, WorkflowPath: "specs"}) namespacedName := types.NamespacedName{Namespace: workflow.Namespace, Name: workflow.Name} client := test.NewKogitoClientBuilderWithOpenShift().WithRuntimeObjects(workflow, platform, config, externalCm).Build() @@ -129,7 +129,13 @@ func Test_openshiftbuilder_externalCMs(t *testing.T) { bc := &buildv1.BuildConfig{} assert.NoError(t, client.Get(context.TODO(), namespacedName, bc)) - assert.Len(t, bc.Spec.Source.ConfigMaps, 1) + assert.Len(t, bc.Spec.Source.ConfigMaps, 3) + assert.Equal(t, "myopenapis", bc.Spec.Source.ConfigMaps[0].ConfigMap.Name) + assert.Equal(t, "specs", bc.Spec.Source.ConfigMaps[0].DestinationDir) + assert.Equal(t, "greeting-props", bc.Spec.Source.ConfigMaps[1].ConfigMap.Name) + assert.Equal(t, "", bc.Spec.Source.ConfigMaps[1].DestinationDir) + assert.Equal(t, "greeting-managed-props", bc.Spec.Source.ConfigMaps[2].ConfigMap.Name) + assert.Equal(t, "", bc.Spec.Source.ConfigMaps[2].DestinationDir) } func Test_openshiftbuilder_forcePull(t *testing.T) { diff --git a/controllers/cfg/controllers_cfg.go b/controllers/cfg/controllers_cfg.go index 4f2990923..965737e44 100644 --- a/controllers/cfg/controllers_cfg.go +++ b/controllers/cfg/controllers_cfg.go @@ -18,6 +18,7 @@ package cfg import ( "bytes" + "fmt" "os" "github.com/apache/incubator-kie-kogito-serverless-operator/log" @@ -40,18 +41,33 @@ var defaultControllersCfg = &ControllersCfg{ BuilderConfigMapName: "sonataflow-operator-builder-config", } +type GAV struct { + GroupId string `yaml:"groupId,omitempty"` + ArtifactId string `yaml:"artifactId,omitempty"` + Version string `yaml:"version,omitempty"` +} + +func (g *GAV) GroupAndArtifact() string { + return fmt.Sprintf("%s:%s", g.GroupId, g.ArtifactId) +} + +func (g *GAV) String() string { + return fmt.Sprintf("%s:%s:%s", g.GroupId, g.ArtifactId, g.Version) +} + type ControllersCfg struct { - DefaultPvcKanikoSize string `yaml:"defaultPvcKanikoSize,omitempty"` - HealthFailureThresholdDevMode int32 `yaml:"healthFailureThresholdDevMode,omitempty"` - KanikoDefaultWarmerImageTag string `yaml:"kanikoDefaultWarmerImageTag,omitempty"` - KanikoExecutorImageTag string `yaml:"kanikoExecutorImageTag,omitempty"` - JobsServicePostgreSQLImageTag string `yaml:"jobsServicePostgreSQLImageTag,omitempty"` - JobsServiceEphemeralImageTag string `yaml:"jobsServiceEphemeralImageTag,omitempty"` - DataIndexPostgreSQLImageTag string 
`yaml:"dataIndexPostgreSQLImageTag,omitempty"` - DataIndexEphemeralImageTag string `yaml:"dataIndexEphemeralImageTag,omitempty"` - SonataFlowBaseBuilderImageTag string `yaml:"sonataFlowBaseBuilderImageTag,omitempty"` - SonataFlowDevModeImageTag string `yaml:"sonataFlowDevModeImageTag,omitempty"` - BuilderConfigMapName string `yaml:"builderConfigMapName,omitempty"` + DefaultPvcKanikoSize string `yaml:"defaultPvcKanikoSize,omitempty"` + HealthFailureThresholdDevMode int32 `yaml:"healthFailureThresholdDevMode,omitempty"` + KanikoDefaultWarmerImageTag string `yaml:"kanikoDefaultWarmerImageTag,omitempty"` + KanikoExecutorImageTag string `yaml:"kanikoExecutorImageTag,omitempty"` + JobsServicePostgreSQLImageTag string `yaml:"jobsServicePostgreSQLImageTag,omitempty"` + JobsServiceEphemeralImageTag string `yaml:"jobsServiceEphemeralImageTag,omitempty"` + DataIndexPostgreSQLImageTag string `yaml:"dataIndexPostgreSQLImageTag,omitempty"` + DataIndexEphemeralImageTag string `yaml:"dataIndexEphemeralImageTag,omitempty"` + SonataFlowBaseBuilderImageTag string `yaml:"sonataFlowBaseBuilderImageTag,omitempty"` + SonataFlowDevModeImageTag string `yaml:"sonataFlowDevModeImageTag,omitempty"` + BuilderConfigMapName string `yaml:"builderConfigMapName,omitempty"` + PostgreSQLPersistenceExtensions []GAV `yaml:"postgreSQLPersistenceExtensions,omitempty"` } // InitializeControllersCfg initializes the platform configuration for this instance. diff --git a/controllers/cfg/controllers_cfg_test.go b/controllers/cfg/controllers_cfg_test.go index d2d092dc8..51c25ae4c 100644 --- a/controllers/cfg/controllers_cfg_test.go +++ b/controllers/cfg/controllers_cfg_test.go @@ -32,6 +32,25 @@ func TestInitializeControllersCfgAt_ValidFile(t *testing.T) { assert.Equal(t, "local/data-index:1.0.0", cfg.DataIndexPostgreSQLImageTag) assert.Equal(t, "local/sonataflow-builder:1.0.0", cfg.SonataFlowBaseBuilderImageTag) assert.Equal(t, "local/sonataflow-devmode:1.0.0", cfg.SonataFlowDevModeImageTag) + assert.Equal(t, 3, len(cfg.PostgreSQLPersistenceExtensions)) + postgresExtensions := cfg.PostgreSQLPersistenceExtensions + assert.Equal(t, GAV{ + GroupId: "io.quarkus", + ArtifactId: "quarkus-jdbc-postgresql", + Version: "3.2.10.Final", + }, postgresExtensions[0]) + + assert.Equal(t, GAV{ + GroupId: "io.quarkus", + ArtifactId: "quarkus-agroal", + Version: "3.2.10.Final", + }, postgresExtensions[1]) + + assert.Equal(t, GAV{ + GroupId: "org.kie", + ArtifactId: "kie-addons-quarkus-persistence-jdbc", + Version: "999-SNAPSHOT", + }, postgresExtensions[2]) } func TestInitializeControllersCfgAt_FileNotFound(t *testing.T) { diff --git a/controllers/cfg/testdata/controllers-cfg-test.yaml b/controllers/cfg/testdata/controllers-cfg-test.yaml index 66ec30d8e..0903f51ad 100644 --- a/controllers/cfg/testdata/controllers-cfg-test.yaml +++ b/controllers/cfg/testdata/controllers-cfg-test.yaml @@ -21,3 +21,13 @@ jobsServicePostgreSQLImageTag: "local/jobs-service:1.0.0" dataIndexPostgreSQLImageTag: "local/data-index:1.0.0" sonataFlowBaseBuilderImageTag: "local/sonataflow-builder:1.0.0" sonataFlowDevModeImageTag: "local/sonataflow-devmode:1.0.0" +postgreSQLPersistenceExtensions: + - groupId: io.quarkus + artifactId: quarkus-jdbc-postgresql + version: 3.2.10.Final + - groupId: io.quarkus + artifactId: quarkus-agroal + version: 3.2.10.Final + - groupId: org.kie + artifactId: kie-addons-quarkus-persistence-jdbc + version: 999-SNAPSHOT \ No newline at end of file diff --git a/controllers/platform/k8s.go b/controllers/platform/k8s.go index e4310de87..62d8b3699 100644 
--- a/controllers/platform/k8s.go +++ b/controllers/platform/k8s.go @@ -115,7 +115,7 @@ func createOrUpdateDeployment(ctx context.Context, client client.Client, platfor LivenessProbe: liveProbe, Ports: []corev1.ContainerPort{ { - Name: utils.HttpScheme, + Name: utils.DefaultServicePortName, ContainerPort: int32(constants.DefaultHTTPWorkflowPortInt), Protocol: corev1.ProtocolTCP, }, @@ -199,7 +199,7 @@ func createOrUpdateService(ctx context.Context, client client.Client, platform * dataSvcSpec := corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { - Name: utils.HttpScheme, + Name: utils.DefaultServicePortName, Protocol: corev1.ProtocolTCP, Port: 80, TargetPort: variables.DefaultHTTPWorkflowPortIntStr, diff --git a/controllers/platform/services/services.go b/controllers/platform/services/services.go index 856ad90a6..4cd9c02ba 100644 --- a/controllers/platform/services/services.go +++ b/controllers/platform/services/services.go @@ -405,6 +405,7 @@ func (j JobServiceHandler) ConfigurePersistence(containerSpec *corev1.Container) c.Env = append(c.Env, persistence.ConfigurePostgreSQLEnv(p.PostgreSQL, j.GetServiceName(), j.platform.Namespace)...) // Specific to Job Service c.Env = append(c.Env, corev1.EnvVar{Name: "QUARKUS_FLYWAY_MIGRATE_AT_START", Value: "true"}) + c.Env = append(c.Env, corev1.EnvVar{Name: "KOGITO_JOBS_SERVICE_LOADJOBERRORSTRATEGY", Value: "FAIL_SERVICE"}) return c } return containerSpec diff --git a/controllers/profiles/common/deployment.go b/controllers/profiles/common/deployment_status_manager.go similarity index 83% rename from controllers/profiles/common/deployment.go rename to controllers/profiles/common/deployment_status_manager.go index 64b677678..6272d3f31 100644 --- a/controllers/profiles/common/deployment.go +++ b/controllers/profiles/common/deployment_status_manager.go @@ -26,7 +26,9 @@ import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,6 +41,8 @@ import ( var _ WorkflowDeploymentManager = &deploymentHandler{} +const knativeDeploymentSuffix = "-deployment" + // WorkflowDeploymentManager interface to handle workflow deployment features. type WorkflowDeploymentManager interface { // SyncDeploymentStatus updates the workflow status aligned with the deployment counterpart. @@ -58,24 +62,42 @@ type deploymentHandler struct { c client.Client } -func (d *deploymentHandler) RolloutDeployment(ctx context.Context, workflow *operatorapi.SonataFlow) error { +func (d *deploymentHandler) getDeployment(ctx context.Context, workflow *operatorapi.SonataFlow) (*appsv1.Deployment, error) { + deploymentName := workflow.Name + if workflow.IsKnativeDeployment() { + ksvc := &servingv1.Service{} + if err := d.c.Get(ctx, client.ObjectKeyFromObject(workflow), ksvc); err != nil { + if errors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + deploymentName = ksvc.Status.LatestCreatedRevisionName + knativeDeploymentSuffix + } deployment := &appsv1.Deployment{} - if err := d.c.Get(ctx, client.ObjectKeyFromObject(workflow), deployment); err != nil { - // Deployment not found, nothing to do. 
+ if err := d.c.Get(ctx, types.NamespacedName{Namespace: workflow.Namespace, Name: deploymentName}, deployment); err != nil { if errors.IsNotFound(err) { - return nil + return nil, nil } + return nil, err + } + return deployment, nil +} + +func (d *deploymentHandler) RolloutDeployment(ctx context.Context, workflow *operatorapi.SonataFlow) error { + deployment, err := d.getDeployment(ctx, workflow) + if err != nil || deployment == nil { return err } - if err := kubeutil.MarkDeploymentToRollout(deployment); err != nil { + if err = kubeutil.MarkDeploymentToRollout(deployment); err != nil { return err } return d.c.Update(ctx, deployment) } func (d *deploymentHandler) SyncDeploymentStatus(ctx context.Context, workflow *operatorapi.SonataFlow) (ctrl.Result, error) { - deployment := &appsv1.Deployment{} - if err := d.c.Get(ctx, client.ObjectKeyFromObject(workflow), deployment); err != nil { + deployment, err := d.getDeployment(ctx, workflow) + if err != nil || deployment == nil { // we should have the deployment by this time, so even if the error above is not found, we should halt. workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Couldn't find the workflow deployment") return ctrl.Result{RequeueAfter: constants.RequeueAfterFailure}, err diff --git a/controllers/profiles/common/knative.go b/controllers/profiles/common/knative_eventing.go similarity index 100% rename from controllers/profiles/common/knative.go rename to controllers/profiles/common/knative_eventing.go diff --git a/controllers/profiles/common/mutate_visitors.go b/controllers/profiles/common/mutate_visitors.go index eab1944bd..426154ee0 100644 --- a/controllers/profiles/common/mutate_visitors.go +++ b/controllers/profiles/common/mutate_visitors.go @@ -27,6 +27,7 @@ import ( "github.com/imdario/mergo" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -55,6 +56,25 @@ func ImageDeploymentMutateVisitor(workflow *operatorapi.SonataFlow, image string } } +// ImageKServiceMutateVisitor same as ImageDeploymentMutateVisitor for Knative Serving +func ImageKServiceMutateVisitor(workflow *operatorapi.SonataFlow, image string) MutateVisitor { + return func(object client.Object) controllerutil.MutateFn { + // noop since we already have an image in the flow container defined by the user. 
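+ // (an image explicitly set in spec.podTemplate.container always takes precedence over the image resolved by the operator)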
+ if workflow.HasContainerSpecImage() { + return func() error { + return nil + } + } + return func() error { + ksvc := object.(*servingv1.Service) + _, idx := kubeutil.GetContainerByName(operatorapi.DefaultContainerName, &ksvc.Spec.Template.Spec.PodSpec) + ksvc.Spec.Template.Spec.Containers[idx].Image = image + ksvc.Spec.Template.Spec.Containers[idx].ImagePullPolicy = kubeutil.GetImagePullPolicy(image) + return nil + } + } +} + // DeploymentMutateVisitor guarantees the state of the default Deployment object func DeploymentMutateVisitor(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) MutateVisitor { return func(object client.Object) controllerutil.MutateFn { @@ -87,6 +107,36 @@ func EnsureDeployment(original *appsv1.Deployment, object *appsv1.Deployment) er return mergo.Merge(&object.Spec.Template.Spec, original.Spec.Template.Spec, mergo.WithOverride) } +// KServiceMutateVisitor guarantees the state of the default Knative Service object +func KServiceMutateVisitor(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) MutateVisitor { + return func(object client.Object) controllerutil.MutateFn { + return func() error { + if kubeutil.IsObjectNew(object) { + return nil + } + original, err := KServiceCreator(workflow, plf) + if err != nil { + return err + } + return EnsureKService(original.(*servingv1.Service), object.(*servingv1.Service)) + } + } +} + +// EnsureKService Ensure that the original Knative Service fields are immutable. +func EnsureKService(original *servingv1.Service, object *servingv1.Service) error { + object.Labels = original.GetLabels() + + // Clean up the volumes, they are inherited from original, additional are added by other visitors + object.Spec.Template.Spec.Volumes = nil + for i := range object.Spec.Template.Spec.Containers { + object.Spec.Template.Spec.Containers[i].VolumeMounts = nil + } + + // we do a merge to not keep changing the spec since k8s will set default values to the podSpec + return mergo.Merge(&object.Spec.Template.Spec.PodSpec, original.Spec.Template.Spec.PodSpec, mergo.WithOverride) +} + func ServiceMutateVisitor(workflow *operatorapi.SonataFlow) MutateVisitor { return func(object client.Object) controllerutil.MutateFn { return func() error { diff --git a/controllers/profiles/common/object_creators.go b/controllers/profiles/common/object_creators.go index 8da5956d7..c1ac67030 100644 --- a/controllers/profiles/common/object_creators.go +++ b/controllers/profiles/common/object_creators.go @@ -24,6 +24,7 @@ import ( "strings" "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/workflowdef" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" cncfmodel "github.com/serverlessworkflow/sdk-go/v2/model" @@ -108,6 +109,41 @@ func DeploymentCreator(workflow *operatorapi.SonataFlow, plf *operatorapi.Sonata return deployment, nil } +// KServiceCreator creates the default Knative Service object for SonataFlow instances. It's based on the default DeploymentCreator. 
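+// The workflow's spec.podTemplate podSpec is merged into the Knative RevisionSpec and the default flow container is
+// added or replaced, mirroring what DeploymentCreator does for the Kubernetes Deployment.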
+func KServiceCreator(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) (client.Object, error) { + lbl := workflowproj.GetMergedLabels(workflow) + ksvc := &servingv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: workflow.Name, + Namespace: workflow.Namespace, + Labels: lbl, + }, + Spec: servingv1.ServiceSpec{ + ConfigurationSpec: servingv1.ConfigurationSpec{ + Template: servingv1.RevisionTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: lbl, + }, + Spec: servingv1.RevisionSpec{ + PodSpec: corev1.PodSpec{}, + }, + }, + }, + }, + } + + if err := mergo.Merge(&ksvc.Spec.Template.Spec.PodSpec, workflow.Spec.PodTemplate.PodSpec.ToPodSpec(), mergo.WithOverride); err != nil { + return nil, err + } + flowContainer, err := defaultContainer(workflow, plf) + if err != nil { + return nil, err + } + kubeutil.AddOrReplaceContainer(operatorapi.DefaultContainerName, *flowContainer, &ksvc.Spec.Template.Spec.PodSpec) + + return ksvc, nil +} + func getReplicasOrDefault(workflow *operatorapi.SonataFlow) *int32 { var dReplicas int32 = 1 if workflow.Spec.PodTemplate.Replicas == nil { @@ -119,7 +155,7 @@ func getReplicasOrDefault(workflow *operatorapi.SonataFlow) *int32 { func defaultContainer(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform) (*corev1.Container, error) { defaultContainerPort := corev1.ContainerPort{ ContainerPort: variables.DefaultHTTPWorkflowPortIntStr.IntVal, - Name: utils.HttpScheme, + Name: utils.DefaultServicePortName, Protocol: corev1.ProtocolTCP, } defaultFlowContainer := &corev1.Container{ @@ -134,6 +170,7 @@ func defaultContainer(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataF }, }, TimeoutSeconds: healthTimeoutSeconds, + PeriodSeconds: healthStartedPeriodSeconds, }, ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -143,6 +180,7 @@ func defaultContainer(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataF }, }, TimeoutSeconds: healthTimeoutSeconds, + PeriodSeconds: healthStartedPeriodSeconds, }, StartupProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -173,7 +211,7 @@ func defaultContainer(workflow *operatorapi.SonataFlow, plf *operatorapi.SonataF defaultFlowContainer.Name = operatorapi.DefaultContainerName portIdx := -1 for i := range defaultFlowContainer.Ports { - if defaultFlowContainer.Ports[i].Name == utils.HttpScheme || + if defaultFlowContainer.Ports[i].Name == utils.DefaultServicePortName || defaultFlowContainer.Ports[i].ContainerPort == variables.DefaultHTTPWorkflowPortIntStr.IntVal { portIdx = i break diff --git a/controllers/profiles/common/object_creators_test.go b/controllers/profiles/common/object_creators_test.go index 26b549997..5057b2180 100644 --- a/controllers/profiles/common/object_creators_test.go +++ b/controllers/profiles/common/object_creators_test.go @@ -94,13 +94,13 @@ func Test_ensureWorkflowPropertiesConfigMapMutator_DollarReplacement(t *testing. 
func TestMergePodSpec(t *testing.T) { workflow := test.GetBaseSonataFlow(t.Name()) - workflow.Spec.PodTemplate = v1alpha08.PodTemplateSpec{ + workflow.Spec.PodTemplate = v1alpha08.FlowPodTemplateSpec{ Container: v1alpha08.ContainerSpec{ // this one we can override Image: "quay.io/example/my-workflow:1.0.0", Ports: []corev1.ContainerPort{ // let's override a immutable attribute - {Name: utils.HttpScheme, ContainerPort: 9090}, + {Name: utils.DefaultServicePortName, ContainerPort: 9090}, }, Env: []corev1.EnvVar{ // We should be able to override this too @@ -147,7 +147,7 @@ func TestMergePodSpec(t *testing.T) { func TestMergePodSpec_OverrideContainers(t *testing.T) { workflow := test.GetBaseSonataFlow(t.Name()) - workflow.Spec.PodTemplate = v1alpha08.PodTemplateSpec{ + workflow.Spec.PodTemplate = v1alpha08.FlowPodTemplateSpec{ PodSpec: v1alpha08.PodSpec{ // Try to override the workflow container via the podspec Containers: []corev1.Container{ @@ -155,7 +155,7 @@ func TestMergePodSpec_OverrideContainers(t *testing.T) { Name: v1alpha08.DefaultContainerName, Image: "quay.io/example/my-workflow:1.0.0", Ports: []corev1.ContainerPort{ - {Name: utils.HttpScheme, ContainerPort: 9090}, + {Name: utils.DefaultServicePortName, ContainerPort: 9090}, }, Env: []corev1.EnvVar{ {Name: "ENV1", Value: "VALUE_CUSTOM"}, @@ -213,13 +213,13 @@ func Test_ensureWorkflowTriggersAreCreated(t *testing.T) { func TestMergePodSpec_WithPostgreSQL_and_JDBC_URL_field(t *testing.T) { workflow := test.GetBaseSonataFlow(t.Name()) workflow.Spec = v1alpha08.SonataFlowSpec{ - PodTemplate: v1alpha08.PodTemplateSpec{ + PodTemplate: v1alpha08.FlowPodTemplateSpec{ Container: v1alpha08.ContainerSpec{ // this one we can override Image: "quay.io/example/my-workflow:1.0.0", Ports: []corev1.ContainerPort{ // let's override a immutable attribute - {Name: utils.HttpScheme, ContainerPort: 9090}, + {Name: utils.DefaultServicePortName, ContainerPort: 9090}, }, Env: []corev1.EnvVar{ // We should be able to override this too @@ -295,14 +295,6 @@ func TestMergePodSpec_WithPostgreSQL_and_JDBC_URL_field(t *testing.T) { Name: "KOGITO_PERSISTENCE_TYPE", Value: "jdbc", }, - { - Name: "KOGITO_PERSISTENCE_PROTO_MARSHALLER", - Value: "false", - }, - { - Name: "KOGITO_PERSISTENCE_QUERY_TIMEOUT_MILLIS", - Value: "10000", - }, } assert.Len(t, deployment.Spec.Template.Spec.Containers, 2) assert.Equal(t, "superuser", deployment.Spec.Template.Spec.ServiceAccountName) @@ -321,7 +313,7 @@ var ( func TestMergePodSpec_OverrideContainers_WithPostgreSQL_In_Workflow_CR(t *testing.T) { workflow := test.GetBaseSonataFlow(t.Name()) workflow.Spec = v1alpha08.SonataFlowSpec{ - PodTemplate: v1alpha08.PodTemplateSpec{ + PodTemplate: v1alpha08.FlowPodTemplateSpec{ PodSpec: v1alpha08.PodSpec{ // Try to override the workflow container via the podspec Containers: []corev1.Container{ @@ -329,7 +321,7 @@ func TestMergePodSpec_OverrideContainers_WithPostgreSQL_In_Workflow_CR(t *testin Name: v1alpha08.DefaultContainerName, Image: "quay.io/example/my-workflow:1.0.0", Ports: []corev1.ContainerPort{ - {Name: utils.HttpScheme, ContainerPort: 9090}, + {Name: utils.DefaultServicePortName, ContainerPort: 9090}, }, Env: []corev1.EnvVar{ {Name: "ENV1", Value: "VALUE_CUSTOM"}, @@ -387,14 +379,6 @@ func TestMergePodSpec_OverrideContainers_WithPostgreSQL_In_Workflow_CR(t *testin Name: "KOGITO_PERSISTENCE_TYPE", Value: "jdbc", }, - { - Name: "KOGITO_PERSISTENCE_PROTO_MARSHALLER", - Value: "false", - }, - { - Name: "KOGITO_PERSISTENCE_QUERY_TIMEOUT_MILLIS", - Value: "10000", - }, } assert.Len(t, 
deployment.Spec.Template.Spec.Containers, 1) flowContainer, _ := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, &deployment.Spec.Template.Spec) @@ -467,14 +451,6 @@ func TestMergePodSpec_WithServicedPostgreSQL_In_Platform_CR_And_Worflow_Requesti Name: "KOGITO_PERSISTENCE_TYPE", Value: "jdbc", }, - { - Name: "KOGITO_PERSISTENCE_PROTO_MARSHALLER", - Value: "false", - }, - { - Name: "KOGITO_PERSISTENCE_QUERY_TIMEOUT_MILLIS", - Value: "10000", - }, } assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) flowContainer, _ := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, &deployment.Spec.Template.Spec) @@ -510,7 +486,7 @@ func TestMergePodSpec_WithServicedPostgreSQL_In_Platform_And_In_Workflow_CR(t *t } workflow := test.GetBaseSonataFlow(t.Name()) workflow.Spec = v1alpha08.SonataFlowSpec{ - PodTemplate: v1alpha08.PodTemplateSpec{ + PodTemplate: v1alpha08.FlowPodTemplateSpec{ PodSpec: v1alpha08.PodSpec{ // Try to override the workflow container via the podspec Containers: []corev1.Container{ @@ -518,7 +494,7 @@ func TestMergePodSpec_WithServicedPostgreSQL_In_Platform_And_In_Workflow_CR(t *t Name: v1alpha08.DefaultContainerName, Image: "quay.io/example/my-workflow:1.0.0", Ports: []corev1.ContainerPort{ - {Name: utils.HttpScheme, ContainerPort: 9090}, + {Name: utils.DefaultServicePortName, ContainerPort: 9090}, }, Env: []corev1.EnvVar{ {Name: "ENV1", Value: "VALUE_CUSTOM"}, @@ -575,14 +551,6 @@ func TestMergePodSpec_WithServicedPostgreSQL_In_Platform_And_In_Workflow_CR(t *t Name: "KOGITO_PERSISTENCE_TYPE", Value: "jdbc", }, - { - Name: "KOGITO_PERSISTENCE_PROTO_MARSHALLER", - Value: "false", - }, - { - Name: "KOGITO_PERSISTENCE_QUERY_TIMEOUT_MILLIS", - Value: "10000", - }, } assert.Len(t, deployment.Spec.Template.Spec.Containers, 1) flowContainer, _ := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, &deployment.Spec.Template.Spec) diff --git a/controllers/workflows/constants.go b/controllers/profiles/common/persistence/persistence.go similarity index 59% rename from controllers/workflows/constants.go rename to controllers/profiles/common/persistence/persistence.go index afcf157a1..226714059 100644 --- a/controllers/workflows/constants.go +++ b/controllers/profiles/common/persistence/persistence.go @@ -12,13 +12,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -package workflows +package persistence + +import ( + operatorapi "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" + "github.com/magiconair/properties" +) const ( QuarkusFlywayMigrateAtStart string = "quarkus.flyway.migrate-at-start" + QuarkusDatasourceDBKind string = "quarkus.datasource.db-kind" QuarkusDatasourceJDBCURL string = "quarkus.datasource.jdbc.url" KogitoPersistenceType string = "kogito.persistence.type" JDBCPersistenceType string = "jdbc" KogitoPersistenceQueryTimeoutMillis string = "kogito.persistence.query.timeout.millis" KogitoPersistenceProtoMarshaller string = "kogito.persistence.proto.marshaller" + PostgreSQLDBKind string = "postgresql" ) + +// ResolveWorkflowPersistenceProperties returns the set of application properties required for the workflow persistence. +// Never nil. 
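+// The workflow-level persistence configuration takes precedence: the platform-level PostgreSQL configuration is only
+// applied when the workflow does not declare a persistence spec at all (see UsesPostgreSQLPersistence).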
+func ResolveWorkflowPersistenceProperties(workflow *operatorapi.SonataFlow, platform *operatorapi.SonataFlowPlatform) (*properties.Properties, error) { + if UsesPostgreSQLPersistence(workflow, platform) { + return GetPostgreSQLWorkflowProperties(workflow), nil + } + return properties.NewProperties(), nil +} diff --git a/controllers/profiles/common/persistence/persistence_test.go b/controllers/profiles/common/persistence/persistence_test.go new file mode 100644 index 000000000..6af8bb13e --- /dev/null +++ b/controllers/profiles/common/persistence/persistence_test.go @@ -0,0 +1,84 @@ +// Copyright 2024 Apache Software Foundation (ASF) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persistence + +import ( + "testing" + + operatorapi "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" + "github.com/stretchr/testify/assert" +) + +func TestResolveWorkflowPersistenceProperties_WithWorkflowPersistence(t *testing.T) { + workflow := operatorapi.SonataFlow{ + Spec: operatorapi.SonataFlowSpec{ + Persistence: &operatorapi.PersistenceOptionsSpec{ + PostgreSQL: &operatorapi.PersistencePostgreSQL{}, + }, + }, + } + platform := operatorapi.SonataFlowPlatform{} + testResolveWorkflowPersistencePropertiesWithPersistence(t, &workflow, &platform) +} + +func TestResolveWorkflowPersistenceProperties_WithPlatformPersistence(t *testing.T) { + workflow := operatorapi.SonataFlow{} + platform := operatorapi.SonataFlowPlatform{ + Spec: operatorapi.SonataFlowPlatformSpec{ + Persistence: &operatorapi.PlatformPersistenceOptionsSpec{ + PostgreSQL: &operatorapi.PlatformPersistencePostgreSQL{}, + }, + }, + } + testResolveWorkflowPersistencePropertiesWithPersistence(t, &workflow, &platform) +} + +func TestResolveWorkflowPersistenceProperties_WithPlatformPersistenceButBannedInWorkflow(t *testing.T) { + workflow := operatorapi.SonataFlow{} + workflow.Spec.Persistence = &operatorapi.PersistenceOptionsSpec{} + platform := operatorapi.SonataFlowPlatform{ + Spec: operatorapi.SonataFlowPlatformSpec{ + Persistence: &operatorapi.PlatformPersistenceOptionsSpec{ + PostgreSQL: &operatorapi.PlatformPersistencePostgreSQL{}, + }, + }, + } + props, err := ResolveWorkflowPersistenceProperties(&workflow, &platform) + assert.NotNil(t, props) + assert.Nil(t, err) + assert.Equal(t, 0, props.Len()) +} + +func TestResolveWorkflowPersistenceProperties_WithNoPersistence(t *testing.T) { + workflow := operatorapi.SonataFlow{} + platform := operatorapi.SonataFlowPlatform{} + props, err := ResolveWorkflowPersistenceProperties(&workflow, &platform) + assert.NotNil(t, props) + assert.Nil(t, err) + assert.Equal(t, 0, props.Len()) +} + +func testResolveWorkflowPersistencePropertiesWithPersistence(t *testing.T, workflow *operatorapi.SonataFlow, platform *operatorapi.SonataFlowPlatform) { + props, err := ResolveWorkflowPersistenceProperties(workflow, platform) + assert.Nil(t, err) + assert.NotNil(t, props) + assert.Equal(t, 3, props.Len()) + value, _ := props.Get("kogito.persistence.type") + assert.Equal(t, "jdbc", 
value) + value, _ = props.Get("quarkus.datasource.db-kind") + assert.Equal(t, "postgresql", value) + value, _ = props.Get("kogito.persistence.proto.marshaller") + assert.Equal(t, "false", value) +} diff --git a/controllers/profiles/common/persistence/postgresql.go b/controllers/profiles/common/persistence/postgresql.go index 4bb5f38d8..068d85ca9 100644 --- a/controllers/profiles/common/persistence/postgresql.go +++ b/controllers/profiles/common/persistence/postgresql.go @@ -17,30 +17,21 @@ package persistence import ( "fmt" - corev1 "k8s.io/api/core/v1" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles" + "github.com/magiconair/properties" "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" operatorapi "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/common/constants" + corev1 "k8s.io/api/core/v1" ) const ( - defaultDatabaseName = "sonataflow" - timeoutSeconds = 3 - failureThreshold = 5 - initialPeriodSeconds = 15 - initialDelaySeconds = 10 - successThreshold = 1 - - postgreSQLCPULimit = "500m" - postgreSQLMemoryLimit = "256Mi" - postgreSQLMemoryRequest = "256Mi" - postgreSQLCPURequest = "100m" - - defaultPostgreSQLUsername = "sonataflow" - defaultPostgresSQLPassword = "sonataflow" + defaultDatabaseName = "sonataflow" ) +// ConfigurePostgreSQLEnv returns the common env variables required for the DataIndex or JobsService when postresql persistence is used. func ConfigurePostgreSQLEnv(postgresql *operatorapi.PersistencePostgreSQL, databaseSchema, databaseNamespace string) []corev1.EnvVar { dataSourcePort := constants.DefaultPostgreSQLPort databaseName := defaultDatabaseName @@ -102,14 +93,6 @@ func ConfigurePostgreSQLEnv(postgresql *operatorapi.PersistencePostgreSQL, datab Name: "KOGITO_PERSISTENCE_TYPE", Value: "jdbc", }, - { - Name: "KOGITO_PERSISTENCE_PROTO_MARSHALLER", - Value: "false", - }, - { - Name: "KOGITO_PERSISTENCE_QUERY_TIMEOUT_MILLIS", - Value: "10000", - }, } } @@ -145,3 +128,27 @@ func RetrieveConfiguration(primary *v1alpha08.PersistenceOptionsSpec, platformPe } return c } + +func UsesPostgreSQLPersistence(workflow *operatorapi.SonataFlow, platform *operatorapi.SonataFlowPlatform) bool { + return (workflow.Spec.Persistence != nil && workflow.Spec.Persistence.PostgreSQL != nil) || + (workflow.Spec.Persistence == nil && platform.Spec.Persistence != nil && platform.Spec.Persistence.PostgreSQL != nil) +} + +// GetPostgreSQLExtensions returns the Quarkus extensions required for postgresql persistence. +func GetPostgreSQLExtensions() []cfg.GAV { + return cfg.GetCfg().PostgreSQLPersistenceExtensions +} + +// GetPostgreSQLWorkflowProperties returns the set of application properties required for postgresql persistence. +// Never nil. +func GetPostgreSQLWorkflowProperties(workflow *operatorapi.SonataFlow) *properties.Properties { + props := properties.NewProperties() + if !profiles.IsDevProfile(workflow) && !profiles.IsGitOpsProfile(workflow) { + // build-time property required by kogito-runtimes to feed flyway build-time settings and package the necessary .sql files. 
+ props.Set(QuarkusDatasourceDBKind, PostgreSQLDBKind) + // build-time properties for kogito-runtimes to use jdbc + props.Set(KogitoPersistenceType, JDBCPersistenceType) + props.Set(KogitoPersistenceProtoMarshaller, "false") + } + return props +} diff --git a/controllers/profiles/common/properties/managed.go b/controllers/profiles/common/properties/managed.go index a9da9edf4..98a73ad33 100644 --- a/controllers/profiles/common/properties/managed.go +++ b/controllers/profiles/common/properties/managed.go @@ -23,6 +23,8 @@ import ( "context" "fmt" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/common/persistence" + "github.com/apache/incubator-kie-kogito-serverless-operator/utils" "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/discovery" @@ -153,6 +155,11 @@ func NewManagedPropertyHandler(workflow *operatorapi.SonataFlow, platform *opera return nil, err } props.Merge(p) + p, err = persistence.ResolveWorkflowPersistenceProperties(workflow, platform) + if err != nil { + return nil, err + } + props.Merge(p) p, err = services.GenerateDataIndexWorkflowProperties(workflow, platform) if err != nil { return nil, err diff --git a/controllers/profiles/dev/object_creators_dev.go b/controllers/profiles/dev/object_creators_dev.go index 45f6926ca..0f6069f18 100644 --- a/controllers/profiles/dev/object_creators_dev.go +++ b/controllers/profiles/dev/object_creators_dev.go @@ -90,8 +90,7 @@ func deploymentMutateVisitor(workflow *operatorapi.SonataFlow, plf *operatorapi. if err != nil { return err } - common.EnsureDeployment(original.(*appsv1.Deployment), object.(*appsv1.Deployment)) - return nil + return common.EnsureDeployment(original.(*appsv1.Deployment), object.(*appsv1.Deployment)) } } } diff --git a/controllers/profiles/factory/factory.go b/controllers/profiles/factory/factory.go index d54b0df33..511c6ca36 100644 --- a/controllers/profiles/factory/factory.go +++ b/controllers/profiles/factory/factory.go @@ -34,10 +34,6 @@ import ( "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/dev" ) -const ( - defaultProfile = metadata.PreviewProfile -) - type reconcilerBuilder func(client client.Client, cfg *rest.Config, recorder record.EventRecorder) profiles.ProfileReconciler var profileBuilders = map[metadata.ProfileType]reconcilerBuilder{ @@ -47,25 +43,22 @@ var profileBuilders = map[metadata.ProfileType]reconcilerBuilder{ } func profileBuilder(workflow *operatorapi.SonataFlow) reconcilerBuilder { - profile := workflow.Annotations[metadata.Profile] - if len(profile) == 0 { - profile = defaultProfile.String() - } + profile := metadata.GetProfileOrDefault(workflow.Annotations) // keep backward compatibility - if profile == metadata.ProdProfile.String() { + if profile == metadata.ProdProfile { klog.V(log.W).Infof("Profile %s is deprecated, please use '%s' instead.", metadata.ProdProfile, metadata.PreviewProfile) - profile = metadata.PreviewProfile.String() + profile = metadata.PreviewProfile } // Enforce GitOps profile if the .spec.podTemplate.container.image is set in the Preview profile. 
- if (profile == metadata.PreviewProfile.String() || profile == metadata.ProdProfile.String()) && workflow.HasContainerSpecImage() { + if (profile == metadata.PreviewProfile || profile == metadata.ProdProfile) && workflow.HasContainerSpecImage() { workflow.Annotations[metadata.Profile] = metadata.GitOpsProfile.String() return profileBuilders[metadata.GitOpsProfile] } - if _, ok := profileBuilders[metadata.ProfileType(profile)]; !ok { - klog.V(log.W).Infof("Profile %s not supported, please use '%s' or '%s'. Falling back to %s", profile, metadata.PreviewProfile, metadata.DevProfile, defaultProfile) - return profileBuilders[defaultProfile] + if _, ok := profileBuilders[profile]; !ok { + klog.V(log.W).Infof("Profile %s not supported, please use '%s' or '%s'. Falling back to %s", profile, metadata.PreviewProfile, metadata.DevProfile, metadata.DefaultProfile) + return profileBuilders[metadata.DefaultProfile] } - return profileBuilders[metadata.ProfileType(profile)] + return profileBuilders[profile] } // NewReconciler creates a new ProfileReconciler based on the given workflow and context. diff --git a/controllers/profiles/gitops/profile_gitops_test.go b/controllers/profiles/gitops/profile_gitops_test.go index 9287cbc2b..051ea5772 100644 --- a/controllers/profiles/gitops/profile_gitops_test.go +++ b/controllers/profiles/gitops/profile_gitops_test.go @@ -29,7 +29,7 @@ import ( ) func Test_Reconciler_ProdOps(t *testing.T) { - workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name()) + workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name()) workflow.Spec.PodTemplate.PodSpec.InitContainers = append(workflow.Spec.PodTemplate.PodSpec.InitContainers, corev1.Container{ Name: "check-postgres", Image: "registry.access.redhat.com/ubi9/ubi-micro:latest", diff --git a/controllers/profiles/preview/deployment_handler.go b/controllers/profiles/preview/deployment_handler.go index 5fce26885..dfbac2538 100644 --- a/controllers/profiles/preview/deployment_handler.go +++ b/controllers/profiles/preview/deployment_handler.go @@ -17,8 +17,8 @@ package preview import ( "context" + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/knative" v1 "k8s.io/api/core/v1" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -44,81 +44,117 @@ func NewDeploymentReconciler(stateSupport *common.StateSupport, ensurer *ObjectE } func (d *DeploymentReconciler) Reconcile(ctx context.Context, workflow *operatorapi.SonataFlow) (reconcile.Result, []client.Object, error) { - return d.reconcileWithBuiltImage(ctx, workflow, "") + return d.reconcileWithImage(ctx, workflow, "") } -func (d *DeploymentReconciler) reconcileWithBuiltImage(ctx context.Context, workflow *operatorapi.SonataFlow, image string) (reconcile.Result, []client.Object, error) { +func (d *DeploymentReconciler) reconcileWithImage(ctx context.Context, workflow *operatorapi.SonataFlow, image string) (reconcile.Result, []client.Object, error) { + // Checks if we need Knative installed and is not present. 
+ if requires, err := d.ensureKnativeServingRequired(workflow); requires || err != nil { + return reconcile.Result{Requeue: false}, nil, err + } + + // Ensure objects + result, objs, err := d.ensureObjects(ctx, workflow, image) + if err != nil || result.Requeue { + return result, objs, err + } + + // Follow deployment status + result, err = common.DeploymentManager(d.C).SyncDeploymentStatus(ctx, workflow) + if err != nil { + return reconcile.Result{Requeue: false}, nil, err + } + + if _, err := d.PerformStatusUpdate(ctx, workflow); err != nil { + return reconcile.Result{Requeue: false}, nil, err + } + return result, objs, nil +} + +// ensureKnativeServingRequired returns true if the SonataFlow instance requires Knative deployment and Knative Serving is not available. +func (d *DeploymentReconciler) ensureKnativeServingRequired(workflow *operatorapi.SonataFlow) (bool, error) { + if workflow.IsKnativeDeployment() { + avail, err := knative.GetKnativeAvailability(d.Cfg) + if err != nil { + return true, err + } + if !avail.Serving { + d.Recorder.Eventf(workflow, v1.EventTypeWarning, + "KnativeServingNotAvailable", + "Knative Serving is not available in this cluster, can't deploy workflow. Please update the deployment model to %s", + operatorapi.KubernetesDeploymentModel) + return true, nil + } + } + return false, nil +} + +func (d *DeploymentReconciler) ensureObjects(ctx context.Context, workflow *operatorapi.SonataFlow, image string) (reconcile.Result, []client.Object, error) { pl, _ := platform.GetActivePlatform(ctx, d.C, workflow.Namespace) userPropsCM, _, err := d.ensurers.userPropsConfigMap.Ensure(ctx, workflow) if err != nil { workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the user properties config map") - _, err = d.PerformStatusUpdate(ctx, workflow) - return ctrl.Result{}, nil, err + _, _ = d.PerformStatusUpdate(ctx, workflow) + return reconcile.Result{}, nil, err } managedPropsCM, _, err := d.ensurers.managedPropsConfigMap.Ensure(ctx, workflow, pl, common.ManagedPropertiesMutateVisitor(ctx, d.StateSupport.Catalog, workflow, pl, userPropsCM.(*v1.ConfigMap))) if err != nil { workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the managed properties config map") - _, err = d.PerformStatusUpdate(ctx, workflow) - return ctrl.Result{}, nil, err + _, _ = d.PerformStatusUpdate(ctx, workflow) + return reconcile.Result{}, nil, err } deployment, deploymentOp, err := - d.ensurers.deployment.Ensure( - ctx, - workflow, - pl, - d.getDeploymentMutateVisitors(workflow, pl, image, userPropsCM.(*v1.ConfigMap), managedPropsCM.(*v1.ConfigMap))..., - ) + d.ensurers.DeploymentByDeploymentModel(workflow).Ensure(ctx, workflow, pl, + d.deploymentModelMutateVisitors(workflow, pl, image, userPropsCM.(*v1.ConfigMap), managedPropsCM.(*v1.ConfigMap))...) 
if err != nil { workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Unable to perform the deploy due to ", err) - _, err = d.PerformStatusUpdate(ctx, workflow) + _, _ = d.PerformStatusUpdate(ctx, workflow) return reconcile.Result{}, nil, err } - service, _, err := d.ensurers.service.Ensure(ctx, workflow, common.ServiceMutateVisitor(workflow)) + service, _, err := d.ensurers.ServiceByDeploymentModel(workflow).Ensure(ctx, workflow, common.ServiceMutateVisitor(workflow)) if err != nil { workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.DeploymentUnavailableReason, "Unable to make the service available due to ", err) - _, err = d.PerformStatusUpdate(ctx, workflow) + _, _ = d.PerformStatusUpdate(ctx, workflow) return reconcile.Result{}, nil, err } - knativeObjs, err := common.NewKnativeEventingHandler(d.StateSupport).Ensure(ctx, workflow) + eventingObjs, err := common.NewKnativeEventingHandler(d.StateSupport).Ensure(ctx, workflow) if err != nil { - return ctrl.Result{RequeueAfter: constants.RequeueAfterFailure}, nil, err + return reconcile.Result{}, nil, err } - objs := []client.Object{deployment, service, managedPropsCM} - objs = append(objs, knativeObjs...) + objs := []client.Object{deployment, managedPropsCM, service} if deploymentOp == controllerutil.OperationResultCreated { workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.WaitingForDeploymentReason, "") if _, err := d.PerformStatusUpdate(ctx, workflow); err != nil { - return reconcile.Result{Requeue: false}, nil, err + return reconcile.Result{}, nil, err } return reconcile.Result{RequeueAfter: constants.RequeueAfterFollowDeployment, Requeue: true}, objs, nil } + objs = append(objs, eventingObjs...) - // Follow deployment status - result, err := common.DeploymentManager(d.C).SyncDeploymentStatus(ctx, workflow) - if err != nil { - return reconcile.Result{Requeue: false}, nil, err - } - - if _, err := d.PerformStatusUpdate(ctx, workflow); err != nil { - return reconcile.Result{Requeue: false}, nil, err - } - return result, objs, nil + return reconcile.Result{}, objs, nil } -func (d *DeploymentReconciler) getDeploymentMutateVisitors( +func (d *DeploymentReconciler) deploymentModelMutateVisitors( workflow *operatorapi.SonataFlow, plf *operatorapi.SonataFlowPlatform, image string, userPropsCM *v1.ConfigMap, managedPropsCM *v1.ConfigMap) []common.MutateVisitor { + + if workflow.IsKnativeDeployment() { + return []common.MutateVisitor{common.KServiceMutateVisitor(workflow, plf), + common.ImageKServiceMutateVisitor(workflow, image), + mountConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM)} + } + if utils.IsOpenShift() { return []common.MutateVisitor{common.DeploymentMutateVisitor(workflow, plf), - mountProdConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM), + mountConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM), addOpenShiftImageTriggerDeploymentMutateVisitor(workflow, image), common.ImageDeploymentMutateVisitor(workflow, image), common.RolloutDeploymentIfCMChangedMutateVisitor(workflow, userPropsCM, managedPropsCM), @@ -126,6 +162,6 @@ func (d *DeploymentReconciler) getDeploymentMutateVisitors( } return []common.MutateVisitor{common.DeploymentMutateVisitor(workflow, plf), common.ImageDeploymentMutateVisitor(workflow, image), - mountProdConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM), + mountConfigMapsMutateVisitor(workflow, userPropsCM, managedPropsCM), common.RolloutDeploymentIfCMChangedMutateVisitor(workflow, 
userPropsCM, managedPropsCM)} } diff --git a/controllers/profiles/preview/deployment_handler_test.go b/controllers/profiles/preview/deployment_handler_test.go index d70436d7b..5faf98bb9 100644 --- a/controllers/profiles/preview/deployment_handler_test.go +++ b/controllers/profiles/preview/deployment_handler_test.go @@ -28,10 +28,42 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" ) +type fakeDeploymentReconciler struct { + DeploymentReconciler +} + +func Test_CheckDeploymentModelIsKnative(t *testing.T) { + workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name()) + workflow.Spec.PodTemplate.DeploymentModel = v1alpha08.KnativeDeploymentModel + + cli := test.NewSonataFlowClientBuilderWithKnative(). + WithRuntimeObjects(workflow). + WithStatusSubresource(workflow). + Build() + stateSupport := fakeReconcilerSupport(cli) + handler := NewDeploymentReconciler(stateSupport, NewObjectEnsurers(stateSupport)) + + result, objects, err := handler.ensureObjects(context.TODO(), workflow, "") + assert.NoError(t, err) + assert.NotEmpty(t, objects) + assert.True(t, result.Requeue) + + var ksvc *servingv1.Service + for _, o := range objects { + if _, ok := o.(*servingv1.Service); ok { + ksvc = o.(*servingv1.Service) + assert.Equal(t, v1alpha08.DefaultContainerName, ksvc.Spec.Template.Spec.Containers[0].Name) + break + } + } + assert.NotNil(t, ksvc) +} + func Test_CheckPodTemplateChangesReflectDeployment(t *testing.T) { - workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name()) + workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name()) client := test.NewSonataFlowClientBuilder(). WithRuntimeObjects(workflow). @@ -53,18 +85,20 @@ func Test_CheckPodTemplateChangesReflectDeployment(t *testing.T) { assert.NoError(t, err) assert.NotEmpty(t, objects) assert.True(t, result.Requeue) + var deployment *v1.Deployment for _, o := range objects { if _, ok := o.(*v1.Deployment); ok { - deployment := o.(*v1.Deployment) + deployment = o.(*v1.Deployment) assert.Equal(t, expectedImg, deployment.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, v1alpha08.DefaultContainerName, deployment.Spec.Template.Spec.Containers[0].Name) break } } + assert.NotNil(t, deployment) } func Test_CheckDeploymentRolloutAfterCMChange(t *testing.T) { - workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name()) + workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name()) client := test.NewSonataFlowClientBuilder(). WithRuntimeObjects(workflow). @@ -126,7 +160,7 @@ func Test_CheckDeploymentRolloutAfterCMChange(t *testing.T) { } func Test_CheckDeploymentUnchangedAfterCMChangeOtherKeys(t *testing.T) { - workflow := test.GetBaseSonataFlowWithProdOpsProfile(t.Name()) + workflow := test.GetBaseSonataFlowWithPreviewProfile(t.Name()) client := test.NewSonataFlowClientBuilder(). WithRuntimeObjects(workflow). 
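Editor's note: the deployment handler changes above all hinge on a single branch, the workflow's deployment model. When spec.podTemplate.deploymentModel is "knative", the preview reconciler ensures a Knative Service and skips the plain Kubernetes Service; otherwise it keeps the usual Deployment plus Service pair. The following is a minimal, self-contained sketch of that selection under simplified stand-in types; only IsKnativeDeployment, the enum values, and the resulting object kinds come from this diff, everything else is illustrative.

package main

import "fmt"

// DeploymentModel is a stand-in for the new podTemplate field; the two values
// mirror the enum published in operator.yaml ("kubernetes" and "knative").
type DeploymentModel string

const (
	KubernetesDeploymentModel DeploymentModel = "kubernetes"
	KnativeDeploymentModel    DeploymentModel = "knative"
)

// Workflow is a simplified stand-in for the SonataFlow custom resource.
type Workflow struct {
	DeploymentModel DeploymentModel
}

// IsKnativeDeployment mimics the helper the reconciler calls on the CR.
func (w *Workflow) IsKnativeDeployment() bool {
	return w.DeploymentModel == KnativeDeploymentModel
}

// objectsFor illustrates the selection performed by
// DeploymentByDeploymentModel/ServiceByDeploymentModel: the Knative model gets
// a serving.knative.dev Service only, the default model a Deployment + Service.
func objectsFor(w *Workflow) []string {
	if w.IsKnativeDeployment() {
		return []string{"serving.knative.dev/v1 Service"}
	}
	return []string{"apps/v1 Deployment", "v1 Service"}
}

func main() {
	for _, w := range []*Workflow{
		{DeploymentModel: KnativeDeploymentModel},
		{DeploymentModel: KubernetesDeploymentModel},
	} {
		fmt.Printf("%-10s -> %v\n", w.DeploymentModel, objectsFor(w))
	}
}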
diff --git a/controllers/profiles/preview/object_creators_preview.go b/controllers/profiles/preview/object_creators_preview.go index 903cd7418..a48a4e5db 100644 --- a/controllers/profiles/preview/object_creators_preview.go +++ b/controllers/profiles/preview/object_creators_preview.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -64,28 +65,40 @@ func addOpenShiftImageTriggerDeploymentMutateVisitor(workflow *v1alpha08.SonataF } } -// mountDevConfigMapsMutateVisitor mounts the required configMaps in the Workflow Dev Deployment -func mountProdConfigMapsMutateVisitor(workflow *operatorapi.SonataFlow, userPropsCM *v1.ConfigMap, managedPropsCM *v1.ConfigMap) common.MutateVisitor { +// mountConfigMapsMutateVisitor mounts the required configMaps in the SonataFlow instance +func mountConfigMapsMutateVisitor(workflow *operatorapi.SonataFlow, userPropsCM *v1.ConfigMap, managedPropsCM *v1.ConfigMap) common.MutateVisitor { return func(object client.Object) controllerutil.MutateFn { return func() error { - deployment := object.(*appsv1.Deployment) - _, idx := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, &deployment.Spec.Template.Spec) + var podTemplateSpec *v1.PodSpec - if len(deployment.Spec.Template.Spec.Volumes) == 0 { - deployment.Spec.Template.Spec.Volumes = make([]v1.Volume, 0, 1) + if workflow.IsKnativeDeployment() { + ksvc := object.(*servingv1.Service) + podTemplateSpec = &ksvc.Spec.Template.Spec.PodSpec + } else { + deployment := object.(*appsv1.Deployment) + podTemplateSpec = &deployment.Spec.Template.Spec + if err := kubeutil.AnnotateDeploymentConfigChecksum(workflow, deployment, userPropsCM, managedPropsCM); err != nil { + return err + } } - if len(deployment.Spec.Template.Spec.Containers[idx].VolumeMounts) == 0 { - deployment.Spec.Template.Spec.Containers[idx].VolumeMounts = make([]v1.VolumeMount, 0, 1) + + _, idx := kubeutil.GetContainerByName(v1alpha08.DefaultContainerName, podTemplateSpec) + + if len(podTemplateSpec.Volumes) == 0 { + podTemplateSpec.Volumes = make([]v1.Volume, 0, 1) + } + if len(podTemplateSpec.Containers[idx].VolumeMounts) == 0 { + podTemplateSpec.Containers[idx].VolumeMounts = make([]v1.VolumeMount, 0, 1) } defaultResourcesVolume := v1.Volume{Name: constants.ConfigMapWorkflowPropsVolumeName, VolumeSource: v1.VolumeSource{Projected: &v1.ProjectedVolumeSource{}}} kubeutil.VolumeProjectionAddConfigMap(defaultResourcesVolume.Projected, userPropsCM.Name, v1.KeyToPath{Key: workflowproj.ApplicationPropertiesFileName, Path: workflowproj.ApplicationPropertiesFileName}) kubeutil.VolumeProjectionAddConfigMap(defaultResourcesVolume.Projected, managedPropsCM.Name, v1.KeyToPath{Key: workflowproj.GetManagedPropertiesFileName(workflow), Path: workflowproj.GetManagedPropertiesFileName(workflow)}) - kubeutil.AddOrReplaceVolume(&deployment.Spec.Template.Spec, defaultResourcesVolume) - kubeutil.AddOrReplaceVolumeMount(idx, &deployment.Spec.Template.Spec, + kubeutil.AddOrReplaceVolume(podTemplateSpec, defaultResourcesVolume) + kubeutil.AddOrReplaceVolumeMount(idx, podTemplateSpec, kubeutil.VolumeMount(constants.ConfigMapWorkflowPropsVolumeName, true, quarkusProdConfigMountPath)) - return kubeutil.AnnotateDeploymentConfigChecksum(workflow, deployment, userPropsCM, managedPropsCM) + return nil } } } diff --git a/controllers/profiles/preview/profile_preview.go 
b/controllers/profiles/preview/profile_preview.go index a4afea9b1..d891da343 100644 --- a/controllers/profiles/preview/profile_preview.go +++ b/controllers/profiles/preview/profile_preview.go @@ -23,6 +23,7 @@ import ( "time" "github.com/apache/incubator-kie-kogito-serverless-operator/api/metadata" + "github.com/apache/incubator-kie-kogito-serverless-operator/api/v1alpha08" "k8s.io/client-go/rest" "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/discovery" @@ -48,20 +49,42 @@ const ( quarkusProdConfigMountPath = "/deployments/config" ) -// ObjectEnsurers is a struct for the objects that ReconciliationState needs to create in the platform for the Production profile. +// ObjectEnsurers is a struct for the objects that ReconciliationState needs to create in the platform for the preview profile. // ReconciliationState that needs access to it must include this struct as an attribute and initialize it in the profile builder. // Use NewObjectEnsurers to facilitate building this struct type ObjectEnsurers struct { - deployment common.ObjectEnsurerWithPlatform + // deployment for this ensurer. Don't call it directly, use DeploymentByDeploymentModel instead + deployment common.ObjectEnsurerWithPlatform + // kservice Knative Serving deployment for this ensurer. Don't call it directly, use DeploymentByDeploymentModel instead + kservice common.ObjectEnsurerWithPlatform + // service for this ensurer. Don't call it directly, use ServiceByDeploymentModel instead service common.ObjectEnsurer userPropsConfigMap common.ObjectEnsurer managedPropsConfigMap common.ObjectEnsurerWithPlatform } +// DeploymentByDeploymentModel gets the deployment ensurer based on the SonataFlow deployment model +func (o *ObjectEnsurers) DeploymentByDeploymentModel(workflow *v1alpha08.SonataFlow) common.ObjectEnsurerWithPlatform { + if workflow.IsKnativeDeployment() { + return o.kservice + } + return o.deployment +} + +// ServiceByDeploymentModel gets the service ensurer based on the SonataFlow deployment model +func (o *ObjectEnsurers) ServiceByDeploymentModel(workflow *v1alpha08.SonataFlow) common.ObjectEnsurer { + if workflow.IsKnativeDeployment() { + // Knative Serving handles the service + return common.NewNoopObjectEnsurer() + } + return o.service +} + // NewObjectEnsurers common.ObjectEnsurer(s) for the preview profile. 
func NewObjectEnsurers(support *common.StateSupport) *ObjectEnsurers { return &ObjectEnsurers{ deployment: common.NewObjectEnsurerWithPlatform(support.C, common.DeploymentCreator), + kservice: common.NewObjectEnsurerWithPlatform(support.C, common.KServiceCreator), service: common.NewObjectEnsurer(support.C, common.ServiceCreator), userPropsConfigMap: common.NewObjectEnsurer(support.C, common.UserPropsConfigMapCreator), managedPropsConfigMap: common.NewObjectEnsurerWithPlatform(support.C, common.ManagedPropsConfigMapCreator), @@ -79,7 +102,7 @@ func NewProfileReconciler(client client.Client, cfg *rest.Config, recorder recor } // the reconciliation state machine stateMachine := common.NewReconciliationStateMachine( - &newBuilderState{StateSupport: support}, + &newBuilderState{StateSupport: support, ensurers: NewObjectEnsurers(support)}, &followBuildStatusState{StateSupport: support}, &deployWithBuildWorkflowState{StateSupport: support, ensurers: NewObjectEnsurers(support)}, ) diff --git a/controllers/profiles/preview/profile_preview_test.go b/controllers/profiles/preview/profile_preview_test.go index cc518db03..22146ccf6 100644 --- a/controllers/profiles/preview/profile_preview_test.go +++ b/controllers/profiles/preview/profile_preview_test.go @@ -198,5 +198,6 @@ func fakeReconcilerSupport(client clientruntime.Client) *common.StateSupport { return &common.StateSupport{ C: client, Recorder: test.NewFakeRecorder(), + Cfg: &rest.Config{}, } } diff --git a/controllers/profiles/preview/states_preview.go b/controllers/profiles/preview/states_preview.go index 933820e8d..333c33976 100644 --- a/controllers/profiles/preview/states_preview.go +++ b/controllers/profiles/preview/states_preview.go @@ -40,6 +40,7 @@ import ( type newBuilderState struct { *common.StateSupport + ensurers *ObjectEnsurers } func (h *newBuilderState) CanReconcile(workflow *operatorapi.SonataFlow) bool { @@ -49,7 +50,7 @@ func (h *newBuilderState) CanReconcile(workflow *operatorapi.SonataFlow) bool { } func (h *newBuilderState) Do(ctx context.Context, workflow *operatorapi.SonataFlow) (ctrl.Result, []client.Object, error) { - _, err := platform.GetActivePlatform(ctx, h.C, workflow.Namespace) + pl, err := platform.GetActivePlatform(ctx, h.C, workflow.Namespace) if err != nil { if errors.IsNotFound(err) { workflow.Status.Manager().MarkFalse(api.BuiltConditionType, api.WaitingForPlatformReason, @@ -62,6 +63,26 @@ func (h *newBuilderState) Do(ctx context.Context, workflow *operatorapi.SonataFl klog.V(log.E).ErrorS(err, "Failed to get active platform") return ctrl.Result{RequeueAfter: requeueWhileWaitForPlatform}, nil, err } + + // Perform a status update to ensure workflow.Status.Services references are set before properties calculation. + _, err = h.PerformStatusUpdate(ctx, workflow) + // Ensure the user and managed properties are prepared before starting the build process, so that they are + // available at build time.
+ userPropsCM, _, err := h.ensurers.userPropsConfigMap.Ensure(ctx, workflow) + if err != nil { + workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the user properties config map") + _, err = h.PerformStatusUpdate(ctx, workflow) + return ctrl.Result{}, nil, err + } + + _, _, err = h.ensurers.managedPropsConfigMap.Ensure(ctx, workflow, pl, + common.ManagedPropertiesMutateVisitor(ctx, h.StateSupport.Catalog, workflow, pl, userPropsCM.(*corev1.ConfigMap))) + if err != nil { + workflow.Status.Manager().MarkFalse(api.RunningConditionType, api.ExternalResourcesNotFoundReason, "Unable to retrieve the managed properties config map") + _, err = h.PerformStatusUpdate(ctx, workflow) + return ctrl.Result{}, nil, err + } + // If there is an active platform we have got all the information to build but... // ...let's check before if we have got already a build! buildManager := builder.NewSonataFlowBuildManager(ctx, h.C) @@ -189,7 +210,7 @@ func (h *deployWithBuildWorkflowState) Do(ctx context.Context, workflow *operato } // didn't change, business as usual - return NewDeploymentReconciler(h.StateSupport, h.ensurers).reconcileWithBuiltImage(ctx, workflow, build.Status.ImageTag) + return NewDeploymentReconciler(h.StateSupport, h.ensurers).reconcileWithImage(ctx, workflow, build.Status.ImageTag) } func (h *deployWithBuildWorkflowState) PostReconcile(ctx context.Context, workflow *operatorapi.SonataFlow) error { diff --git a/controllers/profiles/profile.go b/controllers/profiles/profile.go index 794ca7a70..7e05b4f15 100644 --- a/controllers/profiles/profile.go +++ b/controllers/profiles/profile.go @@ -76,3 +76,6 @@ type ReconciliationState interface { // IsDevProfile is an alias for workflowproj.IsDevProfile var IsDevProfile = workflowproj.IsDevProfile + +// IsGitOpsProfile is an alias for workflowproj.IsGitOpsProfile +var IsGitOpsProfile = workflowproj.IsGitOpsProfile diff --git a/controllers/profiles/profile_test.go b/controllers/profiles/profile_test.go index f2bc01390..63a35cf0f 100644 --- a/controllers/profiles/profile_test.go +++ b/controllers/profiles/profile_test.go @@ -37,3 +37,17 @@ func Test_workflowIsDevProfile(t *testing.T) { workflowWithProdProfile := test.GetBaseSonataFlowWithProdProfile(t.Name()) assert.False(t, IsDevProfile(workflowWithProdProfile)) } + +func Test_workflowGitOpsProfile(t *testing.T) { + workflowWithDevProfile := test.GetBaseSonataFlowWithDevProfile(t.Name()) + assert.False(t, IsGitOpsProfile(workflowWithDevProfile)) + + workflowWithNoProfile := test.GetBaseSonataFlow(t.Name()) + assert.False(t, IsGitOpsProfile(workflowWithNoProfile)) + + workflowWithProdProfile := test.GetBaseSonataFlowWithProdProfile(t.Name()) + assert.False(t, IsGitOpsProfile(workflowWithProdProfile)) + + workflowWithGitopsProfile := test.GetBaseSonataFlowWithGitopsProfile(t.Name()) + assert.True(t, IsGitOpsProfile(workflowWithGitopsProfile)) +} diff --git a/controllers/sonataflow_controller.go b/controllers/sonataflow_controller.go index 447e386f6..7c1d28abb 100644 --- a/controllers/sonataflow_controller.go +++ b/controllers/sonataflow_controller.go @@ -23,6 +23,7 @@ import ( "context" "fmt" + "github.com/apache/incubator-kie-kogito-serverless-operator/api/metadata" "k8s.io/klog/v2" profiles "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/profiles/factory" @@ -89,6 +90,8 @@ func (r *SonataFlowReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } + r.setDefaults(workflow) + // Only 
process resources assigned to the operator if !platform.IsOperatorHandlerConsideringLock(ctx, r.Client, req.Namespace, workflow) { klog.V(log.I).InfoS("Ignoring request because resource is not assigned to current operator") @@ -97,6 +100,18 @@ func (r *SonataFlowReconciler) Reconcile(ctx context.Context, req ctrl.Request) return profiles.NewReconciler(r.Client, r.Config, r.Recorder, workflow).Reconcile(ctx, workflow) } +// TODO: move to webhook see https://github.com/apache/incubator-kie-kogito-serverless-operator/pull/239 +func (r *SonataFlowReconciler) setDefaults(workflow *operatorapi.SonataFlow) { + if workflow.Annotations == nil { + workflow.Annotations = map[string]string{} + } + profile := metadata.GetProfileOrDefault(workflow.Annotations) + workflow.Annotations[metadata.Profile] = string(profile) + if profile == metadata.DevProfile { + workflow.Spec.PodTemplate.DeploymentModel = operatorapi.KubernetesDeploymentModel + } +} + func platformEnqueueRequestsFromMapFunc(c client.Client, p *operatorapi.SonataFlowPlatform) []reconcile.Request { var requests []reconcile.Request diff --git a/hack/local/run-operator.sh b/hack/local/run-operator.sh index 5cb8a73c9..8b57477c0 100755 --- a/hack/local/run-operator.sh +++ b/hack/local/run-operator.sh @@ -17,6 +17,9 @@ # under the License. # Runs the operator locally via go main +POD_NAMESPACE=$(kubectl config view --minify | grep namespace | cut -d" " -f6) + +export POD_NAMESPACE kubectl delete --ignore-not-found=true -f ./bundle/manifests/sonataflow.org_sonataflowclusterplatforms.yaml kubectl delete --ignore-not-found=true -f ./bundle/manifests/sonataflow.org_sonataflowplatforms.yaml diff --git a/main.go b/main.go index 535a3ed0f..e0265ed60 100644 --- a/main.go +++ b/main.go @@ -26,6 +26,7 @@ import ( "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg" eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1" sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" "k8s.io/klog/v2/klogr" @@ -60,6 +61,7 @@ func init() { utilruntime.Must(operatorapi.AddToScheme(scheme)) utilruntime.Must(sourcesv1.AddToScheme(scheme)) utilruntime.Must(eventingv1.AddToScheme(scheme)) + utilruntime.Must(servingv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/operator.yaml b/operator.yaml index 14dfec145..1575b9eb2 100644 --- a/operator.yaml +++ b/operator.yaml @@ -22523,6 +22523,13 @@ spec: - name type: object type: array + deploymentModel: + description: Defines the kind of deployment model for this pod + spec. In dev profile, only "kubernetes" is valid. + enum: + - kubernetes + - knative + type: string dnsConfig: description: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration @@ -24050,6 +24057,8 @@ spec: type: object type: array replicas: + description: Replicas define the number of pods to start by default + for this deployment model. Ignored in "knative" deployment model. 
format: int32 type: integer resourceClaims: @@ -26492,6 +26501,12 @@ rules: - patch - update - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: sonataflow-operator-knative-manager-role +rules: - apiGroups: - eventing.knative.dev resources: @@ -26522,6 +26537,22 @@ rules: - patch - update - watch +- apiGroups: + - serving.knative.dev + resources: + - service + - services + - services/status + - services/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -26864,6 +26895,19 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + name: sonataflow-operator-knative-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: sonataflow-operator-knative-manager-role +subjects: +- kind: ServiceAccount + name: sonataflow-operator-controller-manager + namespace: sonataflow-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: name: sonataflow-operator-leases-binding roleRef: @@ -26981,6 +27025,18 @@ data: sonataFlowDevModeImageTag: "" # The default name of the builder configMap in the operator's namespace builderConfigMapName: "sonataflow-operator-builder-config" + # Quarkus extensions required for workflows persistence. These extensions are used by the SonataFlow build system, + # in cases where the workflow being built has configured postgresql persistence. + postgreSQLPersistenceExtensions: + - groupId: io.quarkus + artifactId: quarkus-jdbc-postgresql + version: 3.2.10.Final + - groupId: io.quarkus + artifactId: quarkus-agroal + version: 3.2.10.Final + - groupId: org.kie + artifactId: kie-addons-quarkus-persistence-jdbc + version: 999-SNAPSHOT kind: ConfigMap metadata: name: sonataflow-operator-controllers-config diff --git a/test/cfg.go b/test/cfg.go new file mode 100644 index 000000000..c581ed8ba --- /dev/null +++ b/test/cfg.go @@ -0,0 +1,30 @@ +// Copyright 2024 Apache Software Foundation (ASF) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "testing" + + "github.com/apache/incubator-kie-kogito-serverless-operator/controllers/cfg" + "github.com/stretchr/testify/assert" +) + +// RestoreControllersConfig Utility function to restore the controllers global configuration in situations where +// a particular test must populate it with values form a given file. As part of the given test finalization we can +// invoke this function to restore the global configuration. 
+func RestoreControllersConfig(t *testing.T) { + _, err := cfg.InitializeControllersCfgAt(getProjectDir() + "/config/manager/controllers_cfg.yaml") + assert.NoError(t, err) +} diff --git a/test/e2e/workflow_test.go b/test/e2e/workflow_test.go index 0750cd996..ed8f95c4e 100644 --- a/test/e2e/workflow_test.go +++ b/test/e2e/workflow_test.go @@ -201,21 +201,53 @@ var _ = Describe("Validate the persistence ", Ordered, func() { continue } Expect(h.Status).To(Equal(upStatus), "Pod health is not UP") - for _, c := range h.Checks { - if c.Name == dbConnectionName { - Expect(c.Status).To(Equal(upStatus), "Pod's database connection is not UP") - if withPersistence { + if withPersistence { + connectionCheckFound := false + for _, c := range h.Checks { + if c.Name == dbConnectionName { + Expect(c.Status).To(Equal(upStatus), "Pod's database connection is not UP") Expect(c.Data[defaultDataCheck]).To(Equal(upStatus), "Pod's 'default' database data is not UP") - return true - } else { - Expect(defaultDataCheck).NotTo(BeElementOf(c.Data), "Pod's 'default' database data check exists in health manifest") - return true + connectionCheckFound = true } } + Expect(connectionCheckFound).To(Equal(true), "Connection health check not found, but the workflow has persistence") + return true + } else { + connectionCheckFound := false + for _, c := range h.Checks { + if c.Name == dbConnectionName { + connectionCheckFound = true + } + } + Expect(connectionCheckFound).To(Equal(false), "Connection health check was found, but the workflow doesn't have persistence") + return true } } return false }, 1*time.Minute).Should(BeTrue()) + // Persistence initialization checks + cmd = exec.Command("kubectl", "get", "pod", "-l", "sonataflow.org/workflow-app", "-n", ns, "-ojsonpath={.items[*].metadata.name}") + output, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + podName := string(output) + cmd = exec.Command("kubectl", "logs", podName, "-n", ns) + output, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + logs := string(output) + if withPersistence { + By("Validate that the workflow persistence was properly initialized") + Expect(logs).Should(ContainSubstring("Flyway Community Edition")) + Expect(logs).Should(ContainSubstring("Database: jdbc:postgresql://postgres.%s:5432", ns)) + Expect(logs).Should(ContainSubstring("Creating schema \"callbackstatetimeouts\"")) + Expect(logs).Should(ContainSubstring("Migrating schema \"callbackstatetimeouts\" to version")) + Expect(logs).Should(MatchRegexp("Successfully applied \\d migrations to schema \"callbackstatetimeouts\"")) + Expect(logs).Should(ContainSubstring("Profile prod activated")) + } else { + By("Validate that the workflow has no persistence") + Expect(logs).ShouldNot(ContainSubstring("Flyway Community Edition")) + Expect(logs).ShouldNot(ContainSubstring("Creating schema \"callbackstatetimeouts\"")) + Expect(logs).Should(ContainSubstring("Profile prod activated")) + } }, Entry("defined in the workflow from an existing kubernetes service as a reference", test.GetSonataFlowE2EWorkflowPersistenceSampleDataDirectory("by_service"), true), Entry("defined in the workflow and from the sonataflow platform", test.GetSonataFlowE2EWorkflowPersistenceSampleDataDirectory("from_platform_overwritten_by_service"), true), diff --git a/test/kubernetes_cli.go b/test/kubernetes_cli.go index c4d7021f8..72029fd1c 100644 --- a/test/kubernetes_cli.go +++ b/test/kubernetes_cli.go @@ -33,6 +33,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + servingv1 "knative.dev/serving/pkg/apis/serving/v1" ctrl "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -50,6 +51,13 @@ func NewSonataFlowClientBuilder() *fake.ClientBuilder { return fake.NewClientBuilder().WithScheme(s) } +func NewSonataFlowClientBuilderWithKnative() *fake.ClientBuilder { + s := scheme.Scheme + utilruntime.Must(operatorapi.AddToScheme(s)) + utilruntime.Must(servingv1.AddToScheme(s)) + return fake.NewClientBuilder().WithScheme(s) +} + // NewKogitoClientBuilderWithOpenShift creates a new fake client with OpenShift schemas. // If your object is not present, just add in the list below. func NewKogitoClientBuilderWithOpenShift() *fake.ClientBuilder { diff --git a/test/testdata/workflow/persistence/by_service/02-sonataflow_platform.yaml b/test/testdata/workflow/persistence/by_service/02-sonataflow_platform.yaml index f61974cbd..082b2ef97 100644 --- a/test/testdata/workflow/persistence/by_service/02-sonataflow_platform.yaml +++ b/test/testdata/workflow/persistence/by_service/02-sonataflow_platform.yaml @@ -18,10 +18,6 @@ metadata: name: sonataflow-platform spec: build: - template: - buildArgs: - - name: QUARKUS_EXTENSIONS - value: org.kie:kie-addons-quarkus-persistence-jdbc:999-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:3.2.9.Final,io.quarkus:quarkus-agroal:3.2.9.Final config: strategyOptions: KanikoBuildCacheEnabled: "true" diff --git a/test/testdata/workflow/persistence/by_service/03-configmap_callbackstatetimeouts-props.yaml b/test/testdata/workflow/persistence/by_service/03-configmap_callbackstatetimeouts-props.yaml new file mode 100644 index 000000000..85d3ac6ac --- /dev/null +++ b/test/testdata/workflow/persistence/by_service/03-configmap_callbackstatetimeouts-props.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Apache Software Foundation (ASF) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +data: + application.properties: | + # set the flyway initialization in the WF ConfigMap + quarkus.flyway.migrate-at-start=true +kind: ConfigMap +metadata: + labels: + app: callbackstatetimeouts + name: callbackstatetimeouts-props diff --git a/test/testdata/workflow/persistence/by_service/03-sonataflow_callbackstatetimeouts.sw.yaml b/test/testdata/workflow/persistence/by_service/04-sonataflow_callbackstatetimeouts.sw.yaml similarity index 97% rename from test/testdata/workflow/persistence/by_service/03-sonataflow_callbackstatetimeouts.sw.yaml rename to test/testdata/workflow/persistence/by_service/04-sonataflow_callbackstatetimeouts.sw.yaml index bb64d2753..ec738e946 100644 --- a/test/testdata/workflow/persistence/by_service/03-sonataflow_callbackstatetimeouts.sw.yaml +++ b/test/testdata/workflow/persistence/by_service/04-sonataflow_callbackstatetimeouts.sw.yaml @@ -32,10 +32,6 @@ spec: databaseName: sonataflow databaseSchema: callbackstatetimeouts podTemplate: - container: - env: - - name: QUARKUS_FLYWAY_MIGRATE_AT_START - value: "true" initContainers: - name: init-postgres image: registry.access.redhat.com/ubi9/ubi-micro:latest diff --git a/test/testdata/workflow/persistence/by_service/kustomization.yaml b/test/testdata/workflow/persistence/by_service/kustomization.yaml index b7f587bcc..2aaac5b14 100644 --- a/test/testdata/workflow/persistence/by_service/kustomization.yaml +++ b/test/testdata/workflow/persistence/by_service/kustomization.yaml @@ -15,7 +15,8 @@ resources: - 01-postgres.yaml - 02-sonataflow_platform.yaml -- 03-sonataflow_callbackstatetimeouts.sw.yaml +- 03-configmap_callbackstatetimeouts-props.yaml +- 04-sonataflow_callbackstatetimeouts.sw.yaml generatorOptions: disableNameSuffixHash: true diff --git a/test/testdata/workflow/persistence/from_platform_overwritten_by_service/02-sonataflow_platform.yaml b/test/testdata/workflow/persistence/from_platform_overwritten_by_service/02-sonataflow_platform.yaml index ddae61a8f..733b0b952 100644 --- a/test/testdata/workflow/persistence/from_platform_overwritten_by_service/02-sonataflow_platform.yaml +++ b/test/testdata/workflow/persistence/from_platform_overwritten_by_service/02-sonataflow_platform.yaml @@ -28,10 +28,6 @@ spec: port: 3456 databaseName: db_name build: - template: - buildArgs: - - name: QUARKUS_EXTENSIONS - value: org.kie:kie-addons-quarkus-persistence-jdbc:999-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:3.2.9.Final,io.quarkus:quarkus-agroal:3.2.9.Final config: strategyOptions: KanikoBuildCacheEnabled: "true" diff --git a/test/testdata/workflow/persistence/from_platform_overwritten_by_service/03-sonataflow_callbackstatetimeouts.sw.yaml b/test/testdata/workflow/persistence/from_platform_overwritten_by_service/03-sonataflow_callbackstatetimeouts.sw.yaml index bb64d2753..fb696bff4 100644 --- a/test/testdata/workflow/persistence/from_platform_overwritten_by_service/03-sonataflow_callbackstatetimeouts.sw.yaml +++ b/test/testdata/workflow/persistence/from_platform_overwritten_by_service/03-sonataflow_callbackstatetimeouts.sw.yaml @@ -34,6 +34,7 @@ spec: podTemplate: container: env: + # set the flyway initialization in the WF container env - name: QUARKUS_FLYWAY_MIGRATE_AT_START value: "true" initContainers: diff --git a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/02-sonataflow_platform.yaml b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/02-sonataflow_platform.yaml index 0d1088fd5..2a7db22a6 100644 --- 
a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/02-sonataflow_platform.yaml
+++ b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/02-sonataflow_platform.yaml
@@ -28,10 +28,6 @@ spec:
         port: 5432
         databaseName: sonataflow
   build:
-    template:
-      buildArgs:
-        - name: QUARKUS_EXTENSIONS
-          value: org.kie:kie-addons-quarkus-persistence-jdbc:999-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:3.2.9.Final,io.quarkus:quarkus-agroal:3.2.9.Final
     config:
       strategyOptions:
         KanikoBuildCacheEnabled: "true"
diff --git a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/03-configmap_callbackstatetimeouts-props.yaml b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/03-configmap_callbackstatetimeouts-props.yaml
new file mode 100644
index 000000000..d866ff7c9
--- /dev/null
+++ b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/03-configmap_callbackstatetimeouts-props.yaml
@@ -0,0 +1,24 @@
+# Copyright 2024 Apache Software Foundation (ASF)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+data:
+  application.properties: |
+    # set the flyway initialization in the WF ConfigMap
+    quarkus.flyway.migrate-at-start=true
+kind: ConfigMap
+metadata:
+  labels:
+    app: callbackstatetimeouts
+  name: callbackstatetimeouts-props
diff --git a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/04-sonataflow_callbackstatetimeouts.sw.yaml
similarity index 96%
rename from test/testdata/workflow/persistence/from_platform_with_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml
rename to test/testdata/workflow/persistence/from_platform_with_di_and_js_services/04-sonataflow_callbackstatetimeouts.sw.yaml
index 1d4bdbb96..b1d3f5ef0 100644
--- a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml
+++ b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/04-sonataflow_callbackstatetimeouts.sw.yaml
@@ -20,11 +20,6 @@ metadata:
     sonataflow.org/description: Callback State Timeouts Example k8s
     sonataflow.org/version: 0.0.1
 spec:
-  podTemplate:
-    container:
-      env:
-        - name: QUARKUS_FLYWAY_MIGRATE_AT_START
-          value: "true"
   flow:
     start: PrintStartMessage
     events:
diff --git a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/kustomization.yaml b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/kustomization.yaml
index b7f587bcc..2aaac5b14 100644
--- a/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/kustomization.yaml
+++ b/test/testdata/workflow/persistence/from_platform_with_di_and_js_services/kustomization.yaml
@@ -15,7 +15,8 @@
 resources:
 - 01-postgres.yaml
 - 02-sonataflow_platform.yaml
-- 03-sonataflow_callbackstatetimeouts.sw.yaml
+- 03-configmap_callbackstatetimeouts-props.yaml
+- 04-sonataflow_callbackstatetimeouts.sw.yaml
 
 generatorOptions:
   disableNameSuffixHash: true
diff --git a/test/testdata/workflow/persistence/from_platform_with_no_persistence_required/02-sonataflow_platform.yaml b/test/testdata/workflow/persistence/from_platform_with_no_persistence_required/02-sonataflow_platform.yaml
index 0b076a86f..817ce0c31 100644
--- a/test/testdata/workflow/persistence/from_platform_with_no_persistence_required/02-sonataflow_platform.yaml
+++ b/test/testdata/workflow/persistence/from_platform_with_no_persistence_required/02-sonataflow_platform.yaml
@@ -28,10 +28,6 @@ spec:
         port: 5432
         databaseName: sonataflow
   build:
-    template:
-      buildArgs:
-        - name: QUARKUS_EXTENSIONS
-          value: org.kie:kie-addons-quarkus-persistence-jdbc:999-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:3.2.9.Final,io.quarkus:quarkus-agroal:3.2.9.Final
     config:
       strategyOptions:
         KanikoBuildCacheEnabled: "true"
diff --git a/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/02-sonataflow_platform.yaml b/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/02-sonataflow_platform.yaml
index 3e24fe666..b5ac4e161 100644
--- a/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/02-sonataflow_platform.yaml
+++ b/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/02-sonataflow_platform.yaml
@@ -28,10 +28,6 @@ spec:
         port: 5432
         databaseName: sonataflow
   build:
-    template:
-      buildArgs:
-        - name: QUARKUS_EXTENSIONS
-          value: org.kie:kie-addons-quarkus-persistence-jdbc:999-SNAPSHOT,io.quarkus:quarkus-jdbc-postgresql:3.2.9.Final,io.quarkus:quarkus-agroal:3.2.9.Final
     config:
       strategyOptions:
         KanikoBuildCacheEnabled: "true"
diff --git a/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml b/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml
index 1d4bdbb96..307f85b61 100644
--- a/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml
+++ b/test/testdata/workflow/persistence/from_platform_without_di_and_js_services/03-sonataflow_callbackstatetimeouts.sw.yaml
@@ -23,6 +23,7 @@ spec:
   podTemplate:
     container:
       env:
+        # set the flyway initialization in the WF container env
         - name: QUARKUS_FLYWAY_MIGRATE_AT_START
          value: "true"
   flow:
diff --git a/test/yaml.go b/test/yaml.go
index 84ac1b7d8..368ac6b79 100644
--- a/test/yaml.go
+++ b/test/yaml.go
@@ -199,6 +199,10 @@ func SetPreviewProfile(workflow *operatorapi.SonataFlow) {
 	workflow.Annotations["sonataflow.org/profile"] = "preview"
 }
 
+func SetGitopsProfile(workflow *operatorapi.SonataFlow) {
+	workflow.Annotations["sonataflow.org/profile"] = "gitops"
+}
+
 func GetBaseSonataFlow(namespace string) *operatorapi.SonataFlow {
 	return NewSonataFlow(sonataFlowSampleYamlCR, namespace)
 }
@@ -215,11 +219,15 @@ func GetBaseSonataFlowWithProdProfile(namespace string) *operatorapi.SonataFlow
 	return NewSonataFlow(sonataFlowSampleYamlCR, namespace, SetPreviewProfile)
 }
 
-// GetBaseSonataFlowWithProdOpsProfile gets a base workflow that has a pre-built image set in podTemplate.
-func GetBaseSonataFlowWithProdOpsProfile(namespace string) *operatorapi.SonataFlow {
+// GetBaseSonataFlowWithPreviewProfile gets a base workflow that has a pre-built image set in podTemplate.
+func GetBaseSonataFlowWithPreviewProfile(namespace string) *operatorapi.SonataFlow {
 	return NewSonataFlow(SonataFlowSimpleOpsYamlCR, namespace)
 }
 
+func GetBaseSonataFlowWithGitopsProfile(namespace string) *operatorapi.SonataFlow {
+	return NewSonataFlow(sonataFlowSampleYamlCR, namespace, SetGitopsProfile)
+}
+
 func GetBaseClusterPlatformInReadyPhase(namespace string) *operatorapi.SonataFlowClusterPlatform {
 	return GetSonataFlowClusterPlatformInReadyPhase(sonataFlowClusterPlatformYamlCR, namespace)
 }
diff --git a/testbdd/go.mod b/testbdd/go.mod
index 5f0c0ed65..bea1546df 100644
--- a/testbdd/go.mod
+++ b/testbdd/go.mod
@@ -66,6 +66,7 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/go-containerregistry v0.13.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
 	github.com/google/uuid v1.3.1 // indirect
@@ -89,6 +90,7 @@
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/onsi/ginkgo/v2 v2.13.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -137,7 +139,9 @@
 	k8s.io/klog/v2 v2.100.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect
 	k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
+	knative.dev/networking v0.0.0-20231017124814-2a7676e912b7 // indirect
 	knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c // indirect
+	knative.dev/serving v0.39.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
diff --git a/testbdd/go.sum b/testbdd/go.sum
index 2e0cf2989..16617aeda 100644
--- a/testbdd/go.sum
+++ b/testbdd/go.sum
@@ -460,6 +460,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -718,6 +719,7 @@ github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1566,10 +1568,12 @@ knative.dev/eventing v0.26.0 h1:osDUdav7S0FuChN0onfwL5cEcsdb54Kee2hjAPMpY7o=
 knative.dev/eventing v0.26.0/go.mod h1:6tTam0lsPtBSJHJ63/195obj2VAHlTZZB7TLiBSeqk0=
 knative.dev/hack v0.0.0-20210806075220-815cd312d65c/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
 knative.dev/hack/schema v0.0.0-20210806075220-815cd312d65c/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
+knative.dev/networking v0.0.0-20231017124814-2a7676e912b7 h1:6+1icZuxiZO1paFZ4d/ysKWVG2M4WB7OxNJNyLG0P/E=
 knative.dev/pkg v0.0.0-20210914164111-4857ab6939e3/go.mod h1:jMSqkNMsrzuy+XR4Yr/BMy7SDVbUOl3KKB6+5MR+ZU8=
 knative.dev/pkg v0.0.0-20210919202233-5ae482141474/go.mod h1:jMSqkNMsrzuy+XR4Yr/BMy7SDVbUOl3KKB6+5MR+ZU8=
 knative.dev/pkg v0.0.0-20231023151236-29775d7c9e5c h1:xyPoEToTWeBdn6tinhLxXfnhJhTNQt5WzHiTNiFphRw=
 knative.dev/reconciler-test v0.0.0-20210915181908-49fac7555086/go.mod h1:6yDmb26SINSmgw6wVy9qQwgRMewiW8ddkkwGLR0ZvOY=
+knative.dev/serving v0.39.0 h1:NVt8WthHmFFMWZ3qpBblXt47del8qqrbCegqwGBVSwk=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
diff --git a/utils/common.go b/utils/common.go
index e24e86916..073bb255c 100644
--- a/utils/common.go
+++ b/utils/common.go
@@ -28,7 +28,11 @@ import (
 )
 
 const (
-	HttpScheme = "http"
+	// DefaultServicePortName default service name to increase compatibility with Knative
+	//
+	// see: https://github.com/knative/specs/blob/main/specs/serving/runtime-contract.md#protocols-and-ports
+	// By default we do support HTTP/2:https://quarkus.io/guides/http-reference#http2-support
+	DefaultServicePortName = "h2c"
 )
 
 // GetOperatorIDAnnotation to safely get the operator id annotation value.
diff --git a/workflowproj/operator.go b/workflowproj/operator.go
index 2a5c3e3f0..2383f34fa 100644
--- a/workflowproj/operator.go
+++ b/workflowproj/operator.go
@@ -121,7 +121,7 @@ func CreateNewUserPropsConfigMap(workflow *operatorapi.SonataFlow) *corev1.Confi
 	}
 }
 
-// CreateNewManagedPropsConfigMap creates a new ConfigMap object to hold the managed application properties of the workflos.
+// CreateNewManagedPropsConfigMap creates a new ConfigMap object to hold the managed application properties of the workflows.
 func CreateNewManagedPropsConfigMap(workflow *operatorapi.SonataFlow, properties string) *corev1.ConfigMap {
 	return &corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/workflowproj/workflowproj.go b/workflowproj/workflowproj.go
index bd8653d2b..3845da81e 100644
--- a/workflowproj/workflowproj.go
+++ b/workflowproj/workflowproj.go
@@ -304,9 +304,18 @@ func (w *workflowProjectHandler) addResourceConfigMapToProject(cm *corev1.Config
 
 // IsDevProfile detects if the workflow is using the Dev profile or not
 func IsDevProfile(workflow *operatorapi.SonataFlow) bool {
+	return isProfile(workflow, metadata.DevProfile)
+}
+
+// IsGitOpsProfile detects if the workflow is using the GitOps profile or not
+func IsGitOpsProfile(workflow *operatorapi.SonataFlow) bool {
+	return isProfile(workflow, metadata.GitOpsProfile)
+}
+
+func isProfile(workflow *operatorapi.SonataFlow, profileType metadata.ProfileType) bool {
 	profile := workflow.Annotations[metadata.Profile]
 	if len(profile) == 0 {
 		return false
 	}
-	return metadata.ProfileType(profile) == metadata.DevProfile
+	return metadata.ProfileType(profile) == profileType
 }
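
For reference, below is a small standalone sketch (not repository code) of the annotation check that isProfile centralizes in the last hunk. Only the sonataflow.org/profile key and the "unset annotation never matches" behavior come from the patch; the workflow stand-in type and the example values are invented for illustration.

package main

import "fmt"

// workflow stands in for the SonataFlow object; only its annotations matter here.
type workflow struct {
	annotations map[string]string
}

const profileAnnotation = "sonataflow.org/profile"

// isProfile mirrors the shape of the check above: an unset annotation never matches.
func isProfile(w workflow, profile string) bool {
	p := w.annotations[profileAnnotation]
	if len(p) == 0 {
		return false
	}
	return p == profile
}

func main() {
	gitops := workflow{annotations: map[string]string{profileAnnotation: "gitops"}}
	plain := workflow{annotations: map[string]string{}}
	fmt.Println(isProfile(gitops, "gitops")) // true
	fmt.Println(isProfile(plain, "dev"))     // false: no annotation, no match
}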