diff --git a/build-scripts/hack/sync-images.yaml b/build-scripts/hack/sync-images.yaml new file mode 100644 index 000000000..95432b5e7 --- /dev/null +++ b/build-scripts/hack/sync-images.yaml @@ -0,0 +1,31 @@ +sync: + - source: ghcr.io/canonical/k8s-snap/pause:3.10 + target: '{{ env "MIRROR" }}/canonical/k8s-snap/pause:3.10' + type: image + - source: ghcr.io/canonical/cilium-operator-generic:1.15.2-ck2 + target: '{{ env "MIRROR" }}/canonical/cilium-operator-generic:1.15.2-ck2' + type: image + - source: ghcr.io/canonical/cilium:1.15.2-ck2 + target: '{{ env "MIRROR" }}/canonical/cilium:1.15.2-ck2' + type: image + - source: ghcr.io/canonical/coredns:1.11.1-ck4 + target: '{{ env "MIRROR" }}/canonical/coredns:1.11.1-ck4' + type: image + - source: ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1 + target: '{{ env "MIRROR" }}/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1' + type: image + - source: ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.1 + target: '{{ env "MIRROR" }}/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.1' + type: image + - source: ghcr.io/canonical/k8s-snap/sig-storage/csi-resizer:v1.11.1 + target: '{{ env "MIRROR" }}/canonical/k8s-snap/sig-storage/csi-resizer:v1.11.1' + type: image + - source: ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.1 + target: '{{ env "MIRROR" }}/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.1' + type: image + - source: ghcr.io/canonical/metrics-server:0.7.0-ck0 + target: '{{ env "MIRROR" }}/canonical/metrics-server:0.7.0-ck0' + type: image + - source: ghcr.io/canonical/rawfile-localpv:0.8.0-ck5 + target: '{{ env "MIRROR" }}/canonical/rawfile-localpv:0.8.0-ck5' + type: image diff --git a/docs/src/snap/howto/install/index.md b/docs/src/snap/howto/install/index.md index a995cecaa..f2cef5104 100644 --- a/docs/src/snap/howto/install/index.md +++ b/docs/src/snap/howto/install/index.md @@ -15,4 +15,5 @@ the current How-to guides below. 
snap multipass lxd +offline ``` diff --git a/docs/src/snap/howto/install/offline.md b/docs/src/snap/howto/install/offline.md new file mode 100644 index 000000000..590f015aa --- /dev/null +++ b/docs/src/snap/howto/install/offline.md @@ -0,0 +1,311 @@ +# Installing Canonical Kubernetes in air-gapped environments + +There are situations where it is necessary or desirable to run Canonical +Kubernetes on a machine that is not connected to the internet. +Based on different degrees of separation from the network, +different solutions are offered to accomplish this goal. +This guide documents any necessary extra preparation for air-gap deployments, +as well as the steps that are needed to successfully deploy Canonical Kubernetes +in such environments. + +## Prepare for Deployment + +In preparation for the offline deployment download the Canonical +Kubernetes snap, fulfil the networking requirements based on your scenario and +handle images for workloads and Canonical Kubernetes features. + +### Download the Canonical Kubernetes snap + +From a machine with access to the internet download the +`k8s` and `core20` snaps with: + +``` +sudo snap download k8s --channel 1.30-classic/beta --basename k8s +sudo snap download core20 --basename core20 +``` + +Besides the snaps, this will also download the corresponding assert files which +are necessary to verify the integrity of the packages. + +```{note} +Update the version of k8s by adjusting the channel parameter. +For more information on channels visit the +[channels explanation](/snap/explanation/channels.md). +``` + +```{note} +Future updates to the `k8s` snap may require a different version of the core +snap. +``` + +### Network Requirements + +Air-gap deployments are typically associated with a number of constraints and +restrictions when it comes to the networking connectivity of the machines. +Below we discuss the requirements that the deployment needs to fulfil. 
+ +#### Cluster node communication + + + +Ensure that all cluster nodes are reachable from each other. + + + +#### Default Gateway + +In cases where the air-gap environment does not have a default gateway, +add a dummy default route on the `eth0` interface using the following command: + +``` +ip route add default dev eth0 +``` + +```{note} +Ensure that `eth0` is the name of the default network interface used for +pod-to-pod communication. +``` + +The dummy gateway will only be used by the Kubernetes services to +know which interface to use, actual connectivity to the internet is not +required. Ensure that the dummy gateway rule survives a node reboot. + +#### Ensure proxy access + +This section is only relevant if access to upstream image registries +(e.g. docker.io, quay.io, rocks.canonical.com, etc.) +is only allowed through an HTTP proxy (e.g. [squid][squid]). + +Ensure that all nodes can use the proxy to access the image registry. +For example, if using `http://squid.internal:3128` to access docker.io, +an easy way to test connectivity is: + +``` +export https_proxy=http://squid.internal:3128 +curl -v https://registry-1.docker.io/v2 +``` + +### Images + +All workloads in a Kubernetes cluster are run as an OCI image. +Kubernetes needs to be able to fetch these images and load them +into the container runtime. +For Canonical Kubernetes, it is also necessary to fetch the images used +by its features (network, dns, etc.) as well as any images that are +needed to run specific workloads. + +```{note} +The image options are presented in the order of +increasing complexity of implementation. +It may be helpful to combine these options for different scenarios. 
+``` + +#### List images + +If the `k8s` snap is already installed, +list the images in use with the following command: + +``` +k8s list-images +``` + +The output will look similar to the following: + +``` +ghcr.io/canonical/cilium-operator-generic:1.15.2-ck2 +ghcr.io/canonical/cilium:1.15.2-ck2 +ghcr.io/canonical/coredns:1.11.1-ck4 +ghcr.io/canonical/k8s-snap/pause:3.10 +ghcr.io/canonical/k8s-snap/sig-storage/csi-node-driver-registrar:v2.10.1 +ghcr.io/canonical/k8s-snap/sig-storage/csi-provisioner:v5.0.1 +ghcr.io/canonical/k8s-snap/sig-storage/csi-resizer:v1.11.1 +ghcr.io/canonical/k8s-snap/sig-storage/csi-snapshotter:v8.0.1 +ghcr.io/canonical/metrics-server:0.7.0-ck0 +ghcr.io/canonical/rawfile-localpv:0.8.0-ck5 +``` + +A list of images can also be found in the `images.txt` file when unsquashing the +downloaded k8s snap. + +Please ensure that the images used by workloads are tracked as well. + +#### Images Option A: via an HTTP proxy + +In many cases, the nodes of the air-gap deployment may not have direct access to +upstream registries, but can reach them through the +[use of an HTTP proxy][proxy]. + +The configuration of the proxy is out of the scope of this documentation. + +#### Images Option B: private registry mirror + +In case regulations and/or network constraints do not allow the cluster nodes +to access any upstream image registry, +it is typical to deploy a private registry mirror. +This is an image registry service that contains all the required OCI Images +(e.g. [registry](https://distribution.github.io/distribution/), +[harbor](https://goharbor.io/) or any other OCI registry) and +is reachable from all cluster nodes. + +This requires three steps: + +1. Deploy and secure the registry service. + Please follow the instructions for the desired registry deployment. +2. Using [regsync][regsync], load all images from the upstream source and + push to your registry mirror. +3. 
Configure the Canonical Kubernetes container runtime (`containerd`) to load + images from + the private registry mirror instead of the upstream source. This will be + described in the [Configure registry mirrors]( + #container-runtime-option-b-configure-registry-mirrors) section. + +In order to load images into the private registry, a machine is needed with +access to any upstream registries (e.g. `docker.io`) +and the private mirror. + +##### Load images with regsync + +We recommend using [regsync][regsync] to copy images +from the upstream registry to your private registry. Refer to the +[sync-images.yaml][sync-images-yaml] file that contains the configuration for +syncing images from the upstream registry to the private registry. Using the +output from `k8s list-images` update the images in the +[sync-images.yaml][sync-images-yaml] file if necessary. Update the file with the +appropriate mirror, and specify a mirror for ghcr.io that points to the +registry. + +After creating the `sync-images.yaml` file, use [regsync][regsync] to sync the +images. Assuming your registry mirror is at http://10.10.10.10:5050, run: + +``` +USERNAME="$username" PASSWORD="$password" MIRROR="10.10.10.10:5050" \ +./src/k8s/tools/regsync.sh once -c path/to/sync-images.yaml +``` + +An alternative to configuring a registry mirror is to download all necessary +OCI images, and then manually add them to all cluster nodes. +Instructions for this are described in +[Side-load images](#images-option-c-side-load-images). + +#### Images Option C: Side-load images + +Image side-loading is the process of loading all required OCI images directly +into the container runtime, so they do not have to be fetched at runtime. 
+ +To create a bundle of images, use the [regctl][regctl] tool +or invoke the [regctl.sh][regctl.sh] script: + +``` +./src/k8s/tools/regctl.sh image export ghcr.io/canonical/k8s-snap/pause:3.10 \ +--platform=local > pause.tar +``` + +Upon choosing this option, place all images under +`/var/snap/k8s/common/images` and they will be picked up by containerd. + +## Deploy Canonical Kubernetes + +Once you've completed all the preparatory steps for your air-gapped cluster, +you can proceed with the deployment. + +### Step 1: Install Canonical Kubernetes + +Transfer the following files to the target node: + +- `k8s.snap` +- `k8s.assert` +- `core20.snap` +- `core20.assert` + +On the target node, run the following command to install the Kubernetes snap: + +``` +sudo snap ack core20.assert && sudo snap install ./core20.snap +sudo snap ack k8s.assert && sudo snap install ./k8s.snap --classic +``` + +Repeat the above for all nodes of the cluster. + +### Step 2: Container Runtime + +The container runtime must be configured to fetch images properly. +Choose one of the following options: + +#### Container Runtime Option A: Configure HTTP proxy for registries + +Create or edit the +`/etc/systemd/system/snap.k8s.containerd.service.d/http-proxy.conf` +file on each node and set the appropriate http_proxy, https_proxy and +no_proxy variables as described in the +[adding proxy configuration section][proxy]. + +#### Container Runtime Option B: Configure registry mirrors + +This requires having already set up a registry mirror, +as explained in the preparation section on the private registry mirror. +Complete the following instructions on all nodes. +For each upstream registry that needs mirroring, create a `hosts.toml` file. 
+Here's an example that configures `http://10.10.10.10:5050` as a mirror for +`ghcr.io`: + +##### HTTP registry + +In `/var/snap/k8s/common/etc/containerd/hosts.d/ghcr.io/hosts.toml` +add the configuration: + +``` +[host."http://10.10.10.10:5050"] +capabilities = ["pull", "resolve"] +``` + +##### HTTPS registry + +HTTPS requires the additional specification of the registry CA certificate. +Copy the certificate to +`/var/snap/k8s/common/etc/containerd/hosts.d/ghcr.io/ca.crt`. +Then add the configuration in +`/var/snap/k8s/common/etc/containerd/hosts.d/ghcr.io/hosts.toml`: + +``` +[host."https://10.10.10.10:5050"] +capabilities = ["pull", "resolve"] +ca = "/var/snap/k8s/common/etc/containerd/hosts.d/ghcr.io/ca.crt" +``` + +#### Container Runtime Option C: Side-load images + +This is only required if choosing to +[side-load images](#images-option-c-side-load-images). +Make sure that the `/var/snap/k8s/common/images` directory exists, +then copy all `$image.tar` to that directory, such that containerd automatically +picks them up and imports them when it starts. +Copy the `images.tar` file(s) to `/var/snap/k8s/common/images`. +Repeat this step for all cluster nodes. + +### Step 3: Bootstrap cluster + +Now, bootstrap the cluster and replace `MY-NODE-IP` with the IP of the node +by running the command: + +``` +sudo k8s bootstrap --address MY-NODE-IP +``` + +Add and remove nodes as described in the +[add-and-remove-nodes tutorial][nodes]. + +After a while, confirm that all the cluster nodes show up in +the output of the `sudo k8s kubectl get node` command. 
+ + + +[Core20]: https://canonical.com/blog/ubuntu-core-20-secures-linux-for-iot +[svc-ports]: /snap/explanation/services-and-ports.md +[proxy]: /snap/howto/proxy.md +[sync-images-yaml]: https://github.com/canonical/k8s-snap/blob/main/build-scripts/hack/sync-images.yaml +[regsync]: https://github.com/regclient/regclient/blob/main/docs/regsync.md +[regctl]: https://github.com/regclient/regclient/blob/main/docs/regctl.md +[regctl.sh]: https://github.com/canonical/k8s-snap/blob/main/src/k8s/tools/regctl.sh +[nodes]: /snap/tutorial/add-remove-nodes.md +[squid]: https://www.squid-cache.org/ diff --git a/docs/src/snap/howto/proxy.md b/docs/src/snap/howto/proxy.md index 43786eb40..d029a6a4e 100644 --- a/docs/src/snap/howto/proxy.md +++ b/docs/src/snap/howto/proxy.md @@ -4,28 +4,35 @@ Canonical Kubernetes packages a number of utilities (eg curl, helm) which need to fetch resources they expect to find on the internet. In a constrained network environment, such access is usually controlled through proxies. -On Ubuntu and other Linux operating systems, proxies are configured through -system-wide environment variables defined in the `/etc/environment` file. +To set up a proxy using squid follow the +[how-to-install-a-squid-server][squid] tutorial. ## Adding proxy configuration for the k8s snap -Edit the `/etc/environment` file and add the relevant URLs +If necessary, create the `snap.k8s.containerd.service.d` directory: + +```bash +sudo mkdir -p /etc/systemd/system/snap.k8s.containerd.service.d +``` ```{note} It is important to add whatever address ranges are used by the cluster itself to the `NO_PROXY` and `no_proxy` variables. ``` For example, assume we have a proxy running at `http://squid.internal:3128` and -we are using the networks `10.0.0.0/8`,`192.168.0.0/16` and `172.16.0.0/12`. 
We -would edit the environment (`/etc/environment`) file to include these lines: - -``` -HTTPS_PROXY=http://squid.internal:3128 -HTTP_PROXY=http://squid.internal:3128 -NO_PROXY=10.0.0.0/8,192.168.0.0/16,127.0.0.1,172.16.0.0/12 -https_proxy=http://squid.internal:3128 -http_proxy=http://squid.internal:3128 -no_proxy=10.0.0.0/8,192.168.0.0/16,127.0.0.1,172.16.0.0/12 +we are using the networks `10.0.0.0/8`,`192.168.0.0/16` and `172.16.0.0/12`. +We would add the configuration to the +(`/etc/systemd/system/snap.k8s.containerd.service.d/http-proxy.conf`) file: + +```bash +# /etc/systemd/system/snap.k8s.containerd.service.d/http-proxy.conf +[Service] +Environment="HTTPS_PROXY=http://squid.internal:3128" +Environment="HTTP_PROXY=http://squid.internal:3128" +Environment="NO_PROXY=10.0.0.0/8,10.152.183.1,192.168.0.0/16,127.0.0.1,172.16.0.0/12" +Environment="https_proxy=http://squid.internal:3128" +Environment="http_proxy=http://squid.internal:3128" +Environment="no_proxy=10.0.0.0/8,10.152.183.1,192.168.0.0/16,127.0.0.1,172.16.0.0/12" ``` Note that you may need to restart for these settings to take effect. @@ -39,9 +46,10 @@ Note that you may need to restart for these settings to take effect. ## Adding proxy configuration for the k8s charms -Proxy confiuration is handled by Juju when deploying the `k8s` charms. Please +Proxy configuration is handled by Juju when deploying the `k8s` charms. Please see the [documentation for adding proxy configuration via Juju]. 
[documentation for adding proxy configuration via Juju]: /charm/howto/proxy +[squid]: https://ubuntu.com/server/docs/how-to-install-a-squid-server diff --git a/src/k8s/pkg/k8sd/app/hooks_bootstrap.go b/src/k8s/pkg/k8sd/app/hooks_bootstrap.go index 59c24afd4..de686fe63 100644 --- a/src/k8s/pkg/k8sd/app/hooks_bootstrap.go +++ b/src/k8s/pkg/k8sd/app/hooks_bootstrap.go @@ -520,7 +520,7 @@ func (a *App) onBootstrapControlPlane(ctx context.Context, s *state.State, boots if err := setup.KubeScheduler(snap, bootstrapConfig.ExtraNodeKubeSchedulerArgs); err != nil { return fmt.Errorf("failed to configure kube-scheduler: %w", err) } - if err := setup.KubeAPIServer(snap, cfg.Network.GetServiceCIDR(), s.Address().Path("1.0", "kubernetes", "auth", "webhook").String(), true, cfg.Datastore, cfg.APIServer.GetAuthorizationMode(), bootstrapConfig.ExtraNodeKubeAPIServerArgs); err != nil { + if err := setup.KubeAPIServer(snap, nodeIP, cfg.Network.GetServiceCIDR(), s.Address().Path("1.0", "kubernetes", "auth", "webhook").String(), true, cfg.Datastore, cfg.APIServer.GetAuthorizationMode(), bootstrapConfig.ExtraNodeKubeAPIServerArgs); err != nil { return fmt.Errorf("failed to configure kube-apiserver: %w", err) } diff --git a/src/k8s/pkg/k8sd/app/hooks_join.go b/src/k8s/pkg/k8sd/app/hooks_join.go index a2c3be965..788ff3285 100644 --- a/src/k8s/pkg/k8sd/app/hooks_join.go +++ b/src/k8s/pkg/k8sd/app/hooks_join.go @@ -258,7 +258,7 @@ func (a *App) onPostJoin(s *state.State, initConfig map[string]string) (rerr err if err := setup.KubeScheduler(snap, joinConfig.ExtraNodeKubeSchedulerArgs); err != nil { return fmt.Errorf("failed to configure kube-scheduler: %w", err) } - if err := setup.KubeAPIServer(snap, cfg.Network.GetServiceCIDR(), s.Address().Path("1.0", "kubernetes", "auth", "webhook").String(), true, cfg.Datastore, cfg.APIServer.GetAuthorizationMode(), joinConfig.ExtraNodeKubeAPIServerArgs); err != nil { + if err := setup.KubeAPIServer(snap, nodeIP, cfg.Network.GetServiceCIDR(), 
s.Address().Path("1.0", "kubernetes", "auth", "webhook").String(), true, cfg.Datastore, cfg.APIServer.GetAuthorizationMode(), joinConfig.ExtraNodeKubeAPIServerArgs); err != nil { return fmt.Errorf("failed to configure kube-apiserver: %w", err) } diff --git a/src/k8s/pkg/k8sd/setup/kube_apiserver.go b/src/k8s/pkg/k8sd/setup/kube_apiserver.go index 225f2e984..2dd0318b8 100644 --- a/src/k8s/pkg/k8sd/setup/kube_apiserver.go +++ b/src/k8s/pkg/k8sd/setup/kube_apiserver.go @@ -2,6 +2,7 @@ package setup import ( "fmt" + "net" "os" "path" "strings" @@ -48,7 +49,7 @@ var ( ) // KubeAPIServer configures kube-apiserver on the local node. -func KubeAPIServer(snap snap.Snap, serviceCIDR string, authWebhookURL string, enableFrontProxy bool, datastore types.Datastore, authorizationMode string, extraArgs map[string]*string) error { +func KubeAPIServer(snap snap.Snap, nodeIP net.IP, serviceCIDR string, authWebhookURL string, enableFrontProxy bool, datastore types.Datastore, authorizationMode string, extraArgs map[string]*string) error { authTokenWebhookConfigFile := path.Join(snap.ServiceExtraConfigDir(), "auth-token-webhook.conf") authTokenWebhookFile, err := os.OpenFile(authTokenWebhookConfigFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { @@ -86,6 +87,10 @@ func KubeAPIServer(snap snap.Snap, serviceCIDR string, authWebhookURL string, en "--tls-private-key-file": path.Join(snap.KubernetesPKIDir(), "apiserver.key"), } + if nodeIP != nil && !nodeIP.IsLoopback() { + args["--advertise-address"] = nodeIP.String() + } + switch datastore.GetType() { case "k8s-dqlite", "external": default: diff --git a/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go b/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go index 44ff635c2..c97393f2a 100644 --- a/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go +++ b/src/k8s/pkg/k8sd/setup/kube_apiserver_test.go @@ -2,6 +2,7 @@ package setup_test import ( "fmt" + "net" "os" "path" "testing" @@ -36,13 +37,14 @@ func TestKubeAPIServer(t *testing.T) { s := 
mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Call the KubeAPIServer setup function with mock arguments - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", true, types.Datastore{Type: utils.Pointer("k8s-dqlite")}, "Node,RBAC", nil)).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, net.ParseIP("192.168.0.1"), "10.0.0.0/24", "https://auth-webhook.url", true, types.Datastore{Type: utils.Pointer("k8s-dqlite")}, "Node,RBAC", nil)).To(BeNil()) // Ensure the kube-apiserver arguments file has the expected arguments and values tests := []struct { key string expectedVal string }{ + {key: "--advertise-address", expectedVal: "192.168.0.1"}, {key: "--anonymous-auth", expectedVal: "false"}, {key: "--allow-privileged", expectedVal: "true"}, {key: "--authentication-token-webhook-config-file", expectedVal: path.Join(s.Mock.ServiceExtraConfigDir, "auth-token-webhook.conf")}, @@ -94,13 +96,14 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Call the KubeAPIServer setup function with mock arguments - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("k8s-dqlite")}, "Node,RBAC", nil)).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, net.ParseIP("192.168.0.1"), "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("k8s-dqlite")}, "Node,RBAC", nil)).To(BeNil()) // Ensure the kube-apiserver arguments file has the expected arguments and values tests := []struct { key string expectedVal string }{ + {key: "--advertise-address", expectedVal: "192.168.0.1"}, {key: "--anonymous-auth", expectedVal: "false"}, {key: "--allow-privileged", expectedVal: "true"}, {key: "--authentication-token-webhook-config-file", expectedVal: path.Join(s.Mock.ServiceExtraConfigDir, "auth-token-webhook.conf")}, @@ -150,13 +153,14 @@ func TestKubeAPIServer(t *testing.T) { "--my-extra-arg": utils.Pointer("my-extra-val"), } // Call the 
KubeAPIServer setup function with mock arguments - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", true, types.Datastore{Type: utils.Pointer("k8s-dqlite")}, "Node,RBAC", extraArgs)).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, net.ParseIP("192.168.0.1"), "10.0.0.0/24", "https://auth-webhook.url", true, types.Datastore{Type: utils.Pointer("k8s-dqlite")}, "Node,RBAC", extraArgs)).To(BeNil()) // Ensure the kube-apiserver arguments file has the expected arguments and values tests := []struct { key string expectedVal string }{ + {key: "--advertise-address", expectedVal: "192.168.0.1"}, {key: "--anonymous-auth", expectedVal: "false"}, {key: "--authentication-token-webhook-config-file", expectedVal: path.Join(s.Mock.ServiceExtraConfigDir, "auth-token-webhook.conf")}, {key: "--authorization-mode", expectedVal: "Node,RBAC"}, @@ -210,7 +214,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Setup without proxy to simplify argument list - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24,fd01::/64", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("external"), ExternalServers: utils.Pointer([]string{"datastoreurl1", "datastoreurl2"})}, "Node,RBAC", nil)).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, net.ParseIP("192.168.0.1"), "10.0.0.0/24,fd01::/64", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("external"), ExternalServers: utils.Pointer([]string{"datastoreurl1", "datastoreurl2"})}, "Node,RBAC", nil)).To(BeNil()) g.Expect(snaputil.GetServiceArgument(s, "kube-apiserver", "--service-cluster-ip-range")).To(Equal("10.0.0.0/24,fd01::/64")) _, err := utils.ParseArgumentFile(path.Join(s.Mock.ServiceArgumentsDir, "kube-apiserver")) @@ -223,7 +227,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Setup without proxy to simplify argument list - g.Expect(setup.KubeAPIServer(s, "10.0.0.0/24", 
"https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("external"), ExternalServers: utils.Pointer([]string{"datastoreurl1", "datastoreurl2"})}, "Node,RBAC", nil)).To(BeNil()) + g.Expect(setup.KubeAPIServer(s, net.ParseIP("192.168.0.1"), "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("external"), ExternalServers: utils.Pointer([]string{"datastoreurl1", "datastoreurl2"})}, "Node,RBAC", nil)).To(BeNil()) g.Expect(snaputil.GetServiceArgument(s, "kube-apiserver", "--etcd-servers")).To(Equal("datastoreurl1,datastoreurl2")) _, err := utils.ParseArgumentFile(path.Join(s.Mock.ServiceArgumentsDir, "kube-apiserver")) @@ -237,7 +241,7 @@ func TestKubeAPIServer(t *testing.T) { s := mustSetupSnapAndDirectories(t, setKubeAPIServerMock) // Attempt to configure kube-apiserver with an unsupported datastore - err := setup.KubeAPIServer(s, "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("unsupported")}, "Node,RBAC", nil) + err := setup.KubeAPIServer(s, net.ParseIP("192.168.0.1"), "10.0.0.0/24", "https://auth-webhook.url", false, types.Datastore{Type: utils.Pointer("unsupported")}, "Node,RBAC", nil) g.Expect(err).To(HaveOccurred()) g.Expect(err).To(MatchError(ContainSubstring("unsupported datastore"))) })