From d2ea1746d361aabaafba9e7d78e758f933b3faf2 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 28 May 2020 14:02:22 +0300 Subject: [PATCH 01/69] Correct test-integration-kubernetes at Makefile Signed-off-by: MOZGIII --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a8267addefae1..543cde73fbcc9 100644 --- a/Makefile +++ b/Makefile @@ -281,7 +281,7 @@ ifeq ($(AUTODESPAWN), true) ${MAYBE_ENVIRONMENT_EXEC} $(CONTAINER_TOOL)-compose stop endif -PACKAGE_DEB_USE_CONTAINER ?= "$(USE_CONTAINER)" +PACKAGE_DEB_USE_CONTAINER ?= $(USE_CONTAINER) test-integration-kubernetes: ## Runs Kubernetes integration tests (Sorry, no `ENVIRONMENT=true` support) PACKAGE_DEB_USE_CONTAINER="$(PACKAGE_DEB_USE_CONTAINER)" USE_CONTAINER=none $(RUN) test-integration-kubernetes From 8eff64c496983082f8b9f97672127a6cc0237731 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 28 May 2020 14:02:49 +0300 Subject: [PATCH 02/69] Fix the tag overwrite logic at scripts/deploy-kubernetes-test.sh Signed-off-by: MOZGIII --- scripts/deploy-kubernetes-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/deploy-kubernetes-test.sh b/scripts/deploy-kubernetes-test.sh index f7e9a73755bc4..cb9d99be94c4d 100755 --- a/scripts/deploy-kubernetes-test.sh +++ b/scripts/deploy-kubernetes-test.sh @@ -55,7 +55,7 @@ up() { $VECTOR_TEST_KUBECTL create --namespace "$NAMESPACE" -f "$CUSTOM_RESOURCE_CONIFGS_FILE" fi - sed "s|timerio/vector:latest|$CONTAINER_IMAGE|" < "distribution/kubernetes/vector-namespaced.yaml" \ + sed 's|image: timberio/vector:[^$]*$'"|image: $CONTAINER_IMAGE|" < "distribution/kubernetes/vector-namespaced.yaml" \ | $VECTOR_TEST_KUBECTL create --namespace "$NAMESPACE" -f - } From 656df58bca8dd6c3aa666e84515ffab8de854ef7 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 28 May 2020 14:03:10 +0300 Subject: [PATCH 03/69] Make scripts/test-integration-kubernetes.sh more tweakable Signed-off-by: MOZGIII --- scripts/test-integration-kubernetes.sh | 96 ++++++++++++++------------ 1 file changed, 52 insertions(+), 44 deletions(-) diff --git a/scripts/test-integration-kubernetes.sh b/scripts/test-integration-kubernetes.sh index 329154d95fdec..3c072e86fd1f2 100755 --- a/scripts/test-integration-kubernetes.sh +++ b/scripts/test-integration-kubernetes.sh @@ -16,60 +16,68 @@ random-string() { echo } -# Require a repo to put the container image at. -# -# Hint #1: you can use `scripts/start-docker-registry.sh`, but it requires -# manually preparing the environment to allow insecure registries, and it can -# also not work if you k8s cluster doesn't have network connectivity to the -# registry. -# -# Hint #2: if using with minikube, set `USE_MINIKUBE_DOCKER` to `true` and use -# any value for `CONTAINER_IMAGE_REPO` (for instance, `vector-test` will do). -# -CONTAINER_IMAGE_REPO="${CONTAINER_IMAGE_REPO:?"You have to specify CONTAINER_IMAGE_REPO to upload the test image to."}" +# Build a docker image if it wasn't provided. +if [[ -z "${CONTAINER_IMAGE:-}" ]]; then + # Require a repo to put the container image at. + # + # Hint #1: you can use `scripts/start-docker-registry.sh`, but it requires + # manually preparing the environment to allow insecure registries, and it can + # also not work if you k8s cluster doesn't have network connectivity to the + # registry. + # + # Hint #2: if using with minikube, set `USE_MINIKUBE_DOCKER` to `true` and use + # any value for `CONTAINER_IMAGE_REPO` (for instance, `vector-test` will do). 
+ # + CONTAINER_IMAGE_REPO="${CONTAINER_IMAGE_REPO:?"You have to specify CONTAINER_IMAGE_REPO to upload the test image to."}" -# Whether to use minikube docker. -# After we build vector docker image, instead of pushing to the remote repo, -# we'll be exporting it to a file after (from the "host" docker engine), and -# then importing that file into the minikube in-cluster docker engine, that -# nodes have access to. -# This effectively eliminates the requirement to have a docker registry, but -# it requires that we run against minikube cluster. -USE_MINIKUBE_DOCKER="${USE_MINIKUBE_DOCKER:-"false"}" + # Assign a default test run ID if none is provided by the user. + TEST_RUN_ID="${TEST_RUN_ID:-"$(date +%s)-$(random-string)"}" -# Assign a default test run ID if none is provided by the user. -TEST_RUN_ID="${TEST_RUN_ID:-"test-$(date +%s)-$(random-string)"}" + # Package a .deb file to build a docker container, unless skipped. + if [[ -z "${SKIP_PACKAGE_DEB:-}" ]]; then + make package-deb-x86_64 USE_CONTAINER="${PACKAGE_DEB_USE_CONTAINER:-"docker"}" + fi -if [[ -z "${SKIP_PACKAGE_DEB:-}" ]]; then - make package-deb-x86_64 USE_CONTAINER="${PACKAGE_DEB_USE_CONTAINER:-"docker"}" -fi + # Prepare test image parameters. + VERSION_TAG="test-$TEST_RUN_ID" + BASE_TAG="debian" -# Prepare test image parameters. -VERSION_TAG="test-$TEST_RUN_ID" -BASE_TAG="debian" + # Build docker image with Vector - the same way it's done for releses. Don't + # do the push - we'll handle it later. + REPO="$CONTAINER_IMAGE_REPO" \ + CHANNEL="test" \ + BASE="$BASE_TAG" \ + TAG="$VERSION_TAG" \ + PUSH="" \ + scripts/build-docker.sh -# Build docker image with Vector - the same way it's done for releses. Don't -# do the push - we'll handle it later. -REPO="$CONTAINER_IMAGE_REPO" \ - CHANNEL="test" \ - BASE="$BASE_TAG" \ - TAG="$VERSION_TAG" \ - PUSH="" \ - scripts/build-docker.sh + # Prepare the container image for the deployment command. + CONTAINER_IMAGE="$CONTAINER_IMAGE_REPO:$VERSION_TAG-$BASE_TAG" +fi -# Prepare the container image for the deployment command. -export CONTAINER_IMAGE="$CONTAINER_IMAGE_REPO:$VERSION_TAG-$BASE_TAG" +if [[ -z "${SKIP_CONTAINER_IMAGE_PUBLISHING:-}" ]]; then + # Whether to use minikube docker. + # After we build vector docker image, instead of pushing to the remote repo, + # we'll be exporting it to a file after (from the "host" docker engine), and + # then importing that file into the minikube in-cluster docker engine, that + # nodes have access to. + # This effectively eliminates the requirement to have a docker registry, but + # it requires that we run against minikube cluster. + USE_MINIKUBE_DOCKER="${USE_MINIKUBE_DOCKER:-"false"}" -# Make the container image accessible to the k8s cluster. -if [[ "$USE_MINIKUBE_DOCKER" == "true" ]]; then - scripts/copy-docker-image-to-minikube.sh "$CONTAINER_IMAGE" -else - docker push "$CONTAINER_IMAGE" + # Make the container image accessible to the k8s cluster. + if [[ "$USE_MINIKUBE_DOCKER" == "true" ]]; then + scripts/copy-docker-image-to-minikube.sh "$CONTAINER_IMAGE" + else + docker push "$CONTAINER_IMAGE" + fi fi +# Export the container image to be accessible from the deployment command. +export CONTAINER_IMAGE + # Set the deployment command for integration tests. export KUBE_TEST_DEPLOY_COMMAND="scripts/deploy-kubernetes-test.sh" - # Run the tests. 
-cargo test --no-default-features --features kubernetes-integration-tests +cargo test --test kubernetes --no-default-features --features k8s-openapi -- --nocapture From e2f03f2465fb9f532c80ebe6638993512c52733d Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Mon, 15 Jun 2020 11:26:46 +0300 Subject: [PATCH 04/69] Reorder namespace and global config deletion command The idea is namespace removal takes the longest, so we'd rather leave it hanging than config deletion. Then is user gets tired of waiting and sends a SIGINT we don't leave the global config dangling - just the namespace removal, which will complete in the background. So it's just a user experience improvement. Signed-off-by: MOZGIII --- scripts/deploy-kubernetes-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/deploy-kubernetes-test.sh b/scripts/deploy-kubernetes-test.sh index cb9d99be94c4d..9cb4cf85dfef1 100755 --- a/scripts/deploy-kubernetes-test.sh +++ b/scripts/deploy-kubernetes-test.sh @@ -66,9 +66,9 @@ down() { $VECTOR_TEST_KUBECTL delete --namespace "$NAMESPACE" -f "$CUSTOM_RESOURCE_CONIFGS_FILE" fi - $VECTOR_TEST_KUBECTL delete namespace "$NAMESPACE" - templated-config-global | $VECTOR_TEST_KUBECTL delete -f - + + $VECTOR_TEST_KUBECTL delete namespace "$NAMESPACE" } case "$COMMAND" in From fd974757f5dba65cc5abc1f4fcd6ba6c5e1a80d3 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 28 May 2020 16:28:12 +0300 Subject: [PATCH 05/69] Add kubernetes-test-framework Signed-off-by: MOZGIII --- Cargo.lock | 9 ++ Cargo.toml | 1 + lib/kubernetes-test-framework/Cargo.toml | 11 ++ .../src/framework.rs | 81 ++++++++++++ .../src/interface.rs | 17 +++ lib/kubernetes-test-framework/src/lib.rs | 34 +++++ .../src/log_lookup.rs | 124 ++++++++++++++++++ .../src/namespace.rs | 45 +++++++ .../src/resource_file.rs | 27 ++++ lib/kubernetes-test-framework/src/test_pod.rs | 79 +++++++++++ lib/kubernetes-test-framework/src/vector.rs | 58 ++++++++ .../src/wait_for_resource.rs | 90 +++++++++++++ .../src/wait_for_rollout.rs | 37 ++++++ 13 files changed, 613 insertions(+) create mode 100644 lib/kubernetes-test-framework/Cargo.toml create mode 100644 lib/kubernetes-test-framework/src/framework.rs create mode 100644 lib/kubernetes-test-framework/src/interface.rs create mode 100644 lib/kubernetes-test-framework/src/lib.rs create mode 100644 lib/kubernetes-test-framework/src/log_lookup.rs create mode 100644 lib/kubernetes-test-framework/src/namespace.rs create mode 100644 lib/kubernetes-test-framework/src/resource_file.rs create mode 100644 lib/kubernetes-test-framework/src/test_pod.rs create mode 100644 lib/kubernetes-test-framework/src/vector.rs create mode 100644 lib/kubernetes-test-framework/src/wait_for_resource.rs create mode 100644 lib/kubernetes-test-framework/src/wait_for_rollout.rs diff --git a/Cargo.lock b/Cargo.lock index ba94114a33ee2..bab93c7b03c57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1907,6 +1907,15 @@ dependencies = [ "openssl-sys", ] +[[package]] +name = "kubernetes-test-framework" +version = "0.1.0" +dependencies = [ + "k8s-openapi", + "serde_json", + "tempfile", +] + [[package]] name = "lazy_static" version = "0.2.11" diff --git a/Cargo.toml b/Cargo.toml index 167b7fd14805e..8c571c4e8e9b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,7 @@ members = [ "lib/file-source", "lib/tracing-limit", "lib/vector-wasm", + "lib/kubernetes-test-framework", ] [dependencies] diff --git a/lib/kubernetes-test-framework/Cargo.toml b/lib/kubernetes-test-framework/Cargo.toml new file mode 100644 index 
0000000000000..c7bc4257f5c66 --- /dev/null +++ b/lib/kubernetes-test-framework/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "kubernetes-test-framework" +version = "0.1.0" +authors = ["MOZGIII "] +edition = "2018" +description = "Kubernetes Test Framework used to test Vector in Kubernetes" + +[dependencies] +k8s-openapi = { version = "0.9", default-features = false, features = ["v1_15"] } +serde_json = "1" +tempfile = "3" diff --git a/lib/kubernetes-test-framework/src/framework.rs b/lib/kubernetes-test-framework/src/framework.rs new file mode 100644 index 0000000000000..57fa556a69346 --- /dev/null +++ b/lib/kubernetes-test-framework/src/framework.rs @@ -0,0 +1,81 @@ +//! The test framework main entry point. + +use super::{ + log_lookup, namespace, test_pod, vector, wait_for_resource, wait_for_rollout, Interface, Result, +}; + +pub struct Framework { + interface: Interface, +} + +impl Framework { + /// Create a new [`Framework`]. + pub fn new(interface: Interface) -> Self { + Self { interface } + } + + pub fn vector(&self, namespace: &str, custom_resource: &str) -> Result { + let manager = vector::Manager::new( + self.interface.deploy_vector_command.as_str(), + namespace, + custom_resource, + )?; + manager.up()?; + Ok(manager) + } + + pub fn namespace(&self, namespace: &str) -> Result { + let manager = namespace::Manager::new(&self.interface.kubectl_command, namespace)?; + manager.up()?; + Ok(manager) + } + + pub fn test_pod(&self, config: test_pod::Config) -> Result { + let manager = test_pod::Manager::new(&self.interface.kubectl_command, config)?; + manager.up()?; + Ok(manager) + } + + pub fn logs(&self, namespace: &str, resource: &str) -> Result { + log_lookup::logs(&self.interface.kubectl_command, namespace, resource) + } + + pub fn wait<'a>( + &self, + namespace: &str, + resources: impl IntoIterator, + wait_for: wait_for_resource::WaitFor<&'_ str>, + extra: impl IntoIterator, + ) -> Result<()> { + wait_for_resource::namespace( + &self.interface.kubectl_command, + namespace, + resources, + wait_for, + extra, + ) + } + + pub fn wait_all_namespaces<'a>( + &self, + resources: impl IntoIterator, + wait_for: wait_for_resource::WaitFor<&'_ str>, + extra: impl IntoIterator, + ) -> Result<()> { + wait_for_resource::all_namespaces( + &self.interface.kubectl_command, + resources, + wait_for, + extra, + ) + } + + pub fn wait_for_rollout<'a>( + &self, + namespace: &str, + resource: &str, + extra: impl IntoIterator, + ) -> Result<()> { + wait_for_rollout::run(&self.interface.kubectl_command, namespace, resource, extra) + } +} diff --git a/lib/kubernetes-test-framework/src/interface.rs b/lib/kubernetes-test-framework/src/interface.rs new file mode 100644 index 0000000000000..d175c4e26aabd --- /dev/null +++ b/lib/kubernetes-test-framework/src/interface.rs @@ -0,0 +1,17 @@ +use std::env; + +#[derive(Debug)] +pub struct Interface { + pub deploy_vector_command: String, + pub kubectl_command: String, +} + +impl Interface { + pub fn from_env() -> Option { + Some(Self { + deploy_vector_command: env::var("KUBE_TEST_DEPLOY_COMMAND").ok()?, + kubectl_command: env::var("VECTOR_TEST_KUBECTL") + .unwrap_or_else(|_| "kubectl".to_owned()), + }) + } +} diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs new file mode 100644 index 0000000000000..e5709c32d67bc --- /dev/null +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -0,0 +1,34 @@ +//! Kubernetes test framework. +//! +//! The main goal of the design of this test framework is to wire kubernetes +//! 
components testing through the same tools that are available to the +//! developer as executable commands, rather than using a rust interface to talk +//! to k8s cluster directly. +//! This enables very trivial troubleshooting and allows us to use the same +//! deployemnt mechanisms that we use for prodcution - effectively giving us +//! the opportunity to test e2e - not just the code layer, but also the +//! deployment configuration. + +// TODO: deny +#![allow( + missing_debug_implementations, + missing_copy_implementations, + missing_docs +)] + +pub mod framework; +pub mod interface; +pub mod log_lookup; +pub mod namespace; +mod resource_file; +pub mod test_pod; +pub mod vector; +pub mod wait_for_resource; +pub mod wait_for_rollout; + +// Re-export some unit for trivial accessability. + +pub use framework::Framework; +pub use interface::Interface; + +type Result = std::result::Result>; diff --git a/lib/kubernetes-test-framework/src/log_lookup.rs b/lib/kubernetes-test-framework/src/log_lookup.rs new file mode 100644 index 0000000000000..c712d04bc3055 --- /dev/null +++ b/lib/kubernetes-test-framework/src/log_lookup.rs @@ -0,0 +1,124 @@ +use super::Result; +use std::io::{BufRead, BufReader}; +use std::process::{Child, ChildStdout, Command, ExitStatus, Stdio}; + +pub fn logs(kubectl_command: &str, namespace: &str, resource: &str) -> Result { + let mut command = Command::new(kubectl_command); + + command.stdin(Stdio::null()).stderr(Stdio::inherit()); + + command.arg("logs"); + command.arg("-f"); + command.arg("-n").arg(namespace); + command.arg(resource); + + let reader = Reader::spawn(command)?; + Ok(reader) +} + +pub struct Reader { + child: Child, + reader: BufReader, +} + +impl Reader { + pub fn spawn(mut command: Command) -> std::io::Result { + Self::prepare_stdout(&mut command); + let child = command.spawn()?; + Ok(Self::new(child)) + } + + fn prepare_stdout(command: &mut Command) { + command.stdout(Stdio::piped()); + } + + fn new(mut child: Child) -> Self { + let stdout = child.stdout.take().unwrap(); + let reader = BufReader::new(stdout); + Reader { child, reader } + } + + pub fn wait(&mut self) -> std::io::Result { + self.child.wait() + } + + pub fn kill(&mut self) -> std::io::Result<()> { + self.child.kill() + } +} + +impl Iterator for Reader { + type Item = String; + + fn next(&mut self) -> Option { + let mut s = String::new(); + let result = self.reader.read_line(&mut s); + match result { + Ok(0) => None, + Ok(_) => Some(s), + Err(err) => panic!(err), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_reader_finite() { + let mut command = Command::new("echo"); + command.arg("test"); + + let mut reader = Reader::spawn(command).expect("unable to spawn"); + + // Collect all line, expect stream to finish. + let lines: Vec<_> = (&mut reader).collect(); + // Assert we got all the lines we expected. + assert_eq!(lines, vec!["test\n".to_owned()]); + + // Ensure wait doesn't fail, and that we exit status is success. + let exit_status = reader.wait().expect("wait failed"); + assert!(exit_status.success()); + } + + #[test] + fn test_reader_inifinite() { + let mut command = Command::new("bash"); + command.arg("-c"); + command.arg(r#"NUM=0; while true; do echo "Line $NUM"; NUM=$((NUM+=1)); sleep 0.01; done"#); + + let mut reader = Reader::spawn(command).expect("unable to spawn"); + + // Read the lines and at some point ask the command we're reading from + // to stop. 
+ let mut expected_num = 0; + while let Some(line) = (&mut reader).next() { + // Assert we're getting expected lines. + assert_eq!(line, format!("Line {}\n", expected_num)); + + // On line 100 issue a `kill` to stop the infinite stream. + if expected_num == 100 { + reader.kill().expect("process already stopped") + } + + // If we are past 200 it means we issued `kill` at 100 and it wasn't + // effective. This is problem, fail the test. + // We don't to this immediately after `kill` to allow for some + // potential race condition. That kind of race is not just ok, but + // is desirable in the real-life usage to read-up the whole stdout + // buffer. + if expected_num > 200 { + panic!("went too far without stop being effective"); + } + + // Bump the expected num for the next iteration. + expected_num += 1; + } + + // Ensure wait doesn't fail. We killed the process, so expect + // a non-success exit code. + let exit_status = reader.wait().expect("wait failed"); + assert!(!exit_status.success()); + } +} diff --git a/lib/kubernetes-test-framework/src/namespace.rs b/lib/kubernetes-test-framework/src/namespace.rs new file mode 100644 index 0000000000000..9e9ed1640d6df --- /dev/null +++ b/lib/kubernetes-test-framework/src/namespace.rs @@ -0,0 +1,45 @@ +use super::Result; +use std::process::{Command, Stdio}; + +pub struct Manager { + kubectl_command: String, + namespace: String, +} + +impl Manager { + pub fn new(kubectl_command: &str, namespace: &str) -> Result { + Ok(Self { + kubectl_command: kubectl_command.to_owned(), + namespace: namespace.to_owned(), + }) + } + + pub fn up(&self) -> Result<()> { + self.exec("create") + } + + pub fn down(&self) -> Result<()> { + self.exec("delete") + } + + fn exec(&self, subcommand: &str) -> Result<()> { + if !Command::new(&self.kubectl_command) + .arg(subcommand) + .arg("namespace") + .arg(&self.namespace) + .stdin(Stdio::null()) + .spawn()? + .wait()? 
+ .success() + { + Err(format!("failed to exec: {}", subcommand))?; + } + Ok(()) + } +} + +impl Drop for Manager { + fn drop(&mut self) { + self.down().expect("namespace turndown failed"); + } +} diff --git a/lib/kubernetes-test-framework/src/resource_file.rs b/lib/kubernetes-test-framework/src/resource_file.rs new file mode 100644 index 0000000000000..9f9ac469f002a --- /dev/null +++ b/lib/kubernetes-test-framework/src/resource_file.rs @@ -0,0 +1,27 @@ +use std::path::{Path, PathBuf}; +use tempfile::{tempdir, TempDir}; + +#[derive(Debug)] +pub struct ResourceFile { + dir: TempDir, + path: PathBuf, +} + +impl ResourceFile { + pub fn new(data: &str) -> std::io::Result { + let dir = tempdir()?; + let path = dir.path().join("custom.yaml"); + std::fs::write(&path, data)?; + Ok(Self { dir, path }) + } + + pub fn path(&self) -> &Path { + self.path.as_path() + } +} + +impl Drop for ResourceFile { + fn drop(&mut self) { + std::fs::remove_file(&self.path).expect("unable to clean up custom resource file"); + } +} diff --git a/lib/kubernetes-test-framework/src/test_pod.rs b/lib/kubernetes-test-framework/src/test_pod.rs new file mode 100644 index 0000000000000..468cb23965c0c --- /dev/null +++ b/lib/kubernetes-test-framework/src/test_pod.rs @@ -0,0 +1,79 @@ +use super::{resource_file::ResourceFile, Result}; +use k8s_openapi::api::core::v1::Pod; +use std::process::{Command, Stdio}; + +#[derive(Debug)] +pub struct Config { + custom_resource_file: ResourceFile, +} + +impl Config { + pub fn from_pod(pod: &Pod) -> Result { + Self::from_resource_string(serde_json::to_string(pod)?.as_str()) + } + + pub fn from_resource_string(resource: &str) -> Result { + let custom_resource_file = ResourceFile::new(resource)?; + Ok(Self { + custom_resource_file, + }) + } +} + +#[derive(Debug)] +pub struct Manager { + kubectl_command: String, + config: Config, +} + +impl Manager { + pub fn new(kubectl_command: &str, config: Config) -> Result { + Ok(Self { + kubectl_command: kubectl_command.to_owned(), + config, + }) + } + + pub fn up(&self) -> Result<()> { + let mut command = self.prepare_command(); + + command.arg("create"); + command + .arg("-f") + .arg(self.config.custom_resource_file.path()); + Self::run_command(command)?; + + Ok(()) + } + + pub fn down(&self) -> Result<()> { + let mut command = self.prepare_command(); + + command.arg("delete"); + command + .arg("-f") + .arg(self.config.custom_resource_file.path()); + Self::run_command(command)?; + + Ok(()) + } + + fn prepare_command(&self) -> Command { + let mut command = Command::new(&self.kubectl_command); + command.stdin(Stdio::null()); + command + } + + fn run_command(mut command: Command) -> Result<()> { + if !command.spawn()?.wait()?.success() { + Err(format!("failed to exec: {:?}", &command))?; + } + Ok(()) + } +} + +impl Drop for Manager { + fn drop(&mut self) { + self.down().expect("test pod turndown failed"); + } +} diff --git a/lib/kubernetes-test-framework/src/vector.rs b/lib/kubernetes-test-framework/src/vector.rs new file mode 100644 index 0000000000000..8fac1d58ceb6e --- /dev/null +++ b/lib/kubernetes-test-framework/src/vector.rs @@ -0,0 +1,58 @@ +use super::{resource_file::ResourceFile, Result}; +use std::process::{Command, Stdio}; + +/// Takes care of deploying vector into the kubernetes cluster. +/// +/// Manages the config file secret accordingly. +#[derive(Debug)] +pub struct Manager { + interface_command: String, + namespace: String, + custom_resource_file: ResourceFile, +} + +impl Manager { + /// Create a new [`Manager`]. 
+ pub fn new(interface_command: &str, namespace: &str, custom_resource: &str) -> Result { + let custom_resource_file = ResourceFile::new(custom_resource)?; + Ok(Self { + interface_command: interface_command.to_owned(), + namespace: namespace.to_owned(), + custom_resource_file, + }) + } + + pub fn up(&self) -> Result<()> { + self.exec("up")?; + Ok(()) + } + + pub fn down(&self) -> Result<()> { + self.exec("down")?; + Ok(()) + } + + fn exec(&self, operation: &str) -> Result<()> { + if !Command::new(&self.interface_command) + .arg(operation) + .arg(&self.namespace) + .env( + "CUSTOM_RESOURCE_CONIFGS_FILE", + self.custom_resource_file.path(), + ) + .stdin(Stdio::null()) + .spawn()? + .wait()? + .success() + { + Err(format!("failed to exec: {}", operation))?; + } + Ok(()) + } +} + +impl Drop for Manager { + fn drop(&mut self) { + self.down().expect("vector turndown failed"); + } +} diff --git a/lib/kubernetes-test-framework/src/wait_for_resource.rs b/lib/kubernetes-test-framework/src/wait_for_resource.rs new file mode 100644 index 0000000000000..7b9c9a34c1970 --- /dev/null +++ b/lib/kubernetes-test-framework/src/wait_for_resource.rs @@ -0,0 +1,90 @@ +use super::Result; +use std::{ + ffi::OsStr, + process::{Command, Stdio}, +}; + +pub enum WaitFor +where + C: std::fmt::Display, +{ + Delete, + Condition(C), +} + +pub fn namespace( + kubectl_command: CMD, + namespace: NS, + resources: impl IntoIterator, + wait_for: WaitFor, + extra: impl IntoIterator, +) -> Result<()> +where + CMD: AsRef, + NS: AsRef, + R: AsRef, + COND: std::fmt::Display, + EX: AsRef, +{ + let mut command = prepare_base_command(kubectl_command, resources, wait_for, extra); + command.arg("-n").arg(namespace); + run_wait_command(command) +} + +pub fn all_namespaces( + kubectl_command: CMD, + resources: impl IntoIterator, + wait_for: WaitFor, + extra: impl IntoIterator, +) -> Result<()> +where + CMD: AsRef, + R: AsRef, + COND: std::fmt::Display, + EX: AsRef, +{ + let mut command = prepare_base_command(kubectl_command, resources, wait_for, extra); + command.arg("--all-namespaces=true"); + run_wait_command(command) +} + +pub fn prepare_base_command( + kubectl_command: CMD, + resources: impl IntoIterator, + wait_for: WaitFor, + extra: impl IntoIterator, +) -> Command +where + CMD: AsRef, + R: AsRef, + COND: std::fmt::Display, + EX: AsRef, +{ + let mut command = Command::new(kubectl_command); + + command + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + + command.arg("wait"); + command.args(resources); + + command.arg("--for"); + match wait_for { + WaitFor::Delete => command.arg("delete"), + WaitFor::Condition(cond) => command.arg(format!("condition={}", cond)), + }; + + command.args(extra); + command +} + +fn run_wait_command(mut command: Command) -> Result<()> { + let mut child = command.spawn()?; + let exit_status = child.wait()?; + if !exit_status.success() { + Err(format!("waiting for resources failed: {:?}", command))?; + } + Ok(()) +} diff --git a/lib/kubernetes-test-framework/src/wait_for_rollout.rs b/lib/kubernetes-test-framework/src/wait_for_rollout.rs new file mode 100644 index 0000000000000..481e4f8170d34 --- /dev/null +++ b/lib/kubernetes-test-framework/src/wait_for_rollout.rs @@ -0,0 +1,37 @@ +use super::Result; +use std::{ + ffi::OsStr, + process::{Command, Stdio}, +}; + +pub fn run( + kubectl_command: CMD, + namespace: NS, + resource: R, + extra: impl IntoIterator, +) -> Result<()> +where + CMD: AsRef, + NS: AsRef, + R: AsRef, + EX: AsRef, +{ + let mut command = 
Command::new(kubectl_command); + + command + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + + command.arg("rollout").arg("status"); + command.arg("-n").arg(namespace); + command.arg(resource); + command.args(extra); + + let mut child = command.spawn()?; + let exit_status = child.wait()?; + if !exit_status.success() { + Err(format!("waiting for rollout failed: {:?}", command))?; + } + Ok(()) +} From eff8b3045a923ac316754b7736677c1311e88683 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 28 May 2020 16:28:23 +0300 Subject: [PATCH 06/69] Implement a first PoC kubernetes test Signed-off-by: MOZGIII --- Cargo.lock | 1 + Cargo.toml | 2 +- tests/kubernetes.rs | 145 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 tests/kubernetes.rs diff --git a/Cargo.lock b/Cargo.lock index bab93c7b03c57..6d6c5e63f8e0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5452,6 +5452,7 @@ dependencies = [ "inventory", "jemallocator", "k8s-openapi", + "kubernetes-test-framework", "lazy_static 1.4.0", "leveldb", "libc", diff --git a/Cargo.toml b/Cargo.toml index 8c571c4e8e9b6..6aa927adf4863 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -196,6 +196,7 @@ tokio-test = "0.2" tokio = { version = "0.2", features = ["test-util"] } assert_cmd = "1.0" reqwest = { version = "0.10.6", features = ["json"] } +kubernetes-test-framework = { version = "0.1", path = "lib/kubernetes-test-framework" } [features] # Default features for *-unknown-linux-gnu and *-apple-darwin @@ -432,7 +433,6 @@ kafka-integration-tests = ["sources-kafka", "sinks-kafka"] loki-integration-tests = ["sinks-loki"] pulsar-integration-tests = ["sinks-pulsar"] splunk-integration-tests = ["sinks-splunk_hec", "warp"] -kubernetes-integration-tests = ["sources-kubernetes-logs"] shutdown-tests = ["sources","sinks-console","sinks-prometheus","sinks-blackhole","unix","rdkafka","transforms-log_to_metric","transforms-lua"] disable-resolv-conf = [] diff --git a/tests/kubernetes.rs b/tests/kubernetes.rs new file mode 100644 index 0000000000000..080671f040778 --- /dev/null +++ b/tests/kubernetes.rs @@ -0,0 +1,145 @@ +use k8s_openapi::{ + api::core::v1::{Container, Pod, PodSpec}, + apimachinery::pkg::apis::meta::v1::ObjectMeta, +}; +use kubernetes_test_framework::{test_pod, wait_for_resource::WaitFor, Framework, Interface}; + +const VECTOR_CONFIG: &str = r#" +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-config +data: + vector.toml: | + [sinks.stdout] + type = "console" + inputs = ["kubernetes_logs"] + target = "stdout" + encoding = "json" +"#; + +const BUSYBOX_IMAGE: &str = "busybox:1.28"; + +fn repeating_echo_cmd(marker: &str) -> String { + format!( + r#"echo before; i=0; while [ $i -le 600 ]; do sleep 0.1; echo "{}"; i=$((i+1)); done"#, + marker + ) +} + +fn make_framework() -> Framework { + let interface = Interface::from_env().expect("interface is not ready"); + Framework::new(interface) +} + +fn make_test_pod(namespace: &str, name: &str, command: &str) -> Pod { + Pod { + metadata: ObjectMeta { + name: Some(name.to_owned()), + namespace: Some(namespace.to_owned()), + ..ObjectMeta::default() + }, + spec: Some(PodSpec { + containers: vec![Container { + name: name.to_owned(), + image: Some(BUSYBOX_IMAGE.to_owned()), + command: Some(vec!["sh".to_owned()]), + args: Some(vec!["-c".to_owned(), command.to_owned()]), + ..Container::default() + }], + restart_policy: Some("Never".to_owned()), + ..PodSpec::default() + }), + ..Pod::default() + } +} + +fn parse_json(s: &str) -> 
Result> { + Ok(serde_json::from_str(s)?) +} + +#[test] +fn test() -> Result<(), Box> { + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG)?; + framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + + let test_namespace = framework.namespace("test-vector-test-pod")?; + + let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo MARKER", + ))?)?; + framework.wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + )?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + + // Wait for first line as a smoke check. + let first_line = log_reader.next().expect("unable to read first line"); + let expected_pat = "INFO vector: Log level \"info\" is enabled.\n"; + assert!( + first_line.ends_with(expected_pat), + "Expected a line ending with {:?} but got {:?}; vector might be malfunctioning", + expected_pat, + first_line + ); + + // Read the rest of the log lines. + let mut lines_till_we_give_up = 10000; + let mut got_marker = false; + while let Some(line) = log_reader.next() { + println!("Got line: {:?}", line); + + lines_till_we_give_up -= 1; + if lines_till_we_give_up <= 0 { + println!("Giving up"); + log_reader.kill()?; + break; + } + + if !line.starts_with("{") { + // This isn't a json, must be an entry from Vector's own log stream. + continue; + } + + let val = parse_json(&line)?; + + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + continue; + } + + // Ensure we got the marker. + assert_eq!(val["message"], "MARKER"); + + if got_marker { + // We've already seen one marker! This is not good, we only emitted + // one. + panic!("marker seen more than once"); + } + + // If we did, remember it. + got_marker = true; + + // We got a marker, so we're pretty much done. + log_reader.kill()?; + } + + // Ensure log reader exited. + log_reader.wait().expect("log reader wait failed"); + + assert!(got_marker); + + drop(test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} From 3b4fd8ea4744261ec76f86bd7b011bdcb653a9b8 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Mon, 1 Jun 2020 13:52:15 +0300 Subject: [PATCH 07/69] K8s integration test is really an e2e test, rename accordingly Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 20 +++++++++++++++---- Makefile | 4 ++-- ...n-kubernetes.sh => test-e2e-kubernetes.sh} | 6 +++--- tests/{kubernetes.rs => kubernetes-e2e.rs} | 0 4 files changed, 21 insertions(+), 9 deletions(-) rename scripts/{test-integration-kubernetes.sh => test-e2e-kubernetes.sh} (93%) rename tests/{kubernetes.rs => kubernetes-e2e.rs} (100%) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 02fcdee2c9227..d81345c69dc18 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -265,8 +265,8 @@ jobs: - run: make slim-builds - run: make test-integration-splunk - test-integration-kubernetes: - name: Integration - Linux, Kubernetes, flaky + test-e2e-kubernetes: + name: E2E - Linux, Kubernetes, flaky # This is an experimental test. Allow it to fail without failing the whole # workflow, but keep it executing on every build to gather stats. 
continue-on-error: true @@ -281,5 +281,17 @@ jobs: - v1.14.10 fail-fast: false steps: - - name: Temporarily off - run: "true" + - name: Setup Minikube + uses: manusa/actions-setup-minikube@v1.0.2 + with: + minikube version: 'v1.9.2' + kubernetes version: '${{ matrix.kubernetes }}' + github token: '${{ secrets.GITHUB_TOKEN }}' + - name: Checkout + uses: actions/checkout@v1 + - run: USE_CONTAINER=none make slim-builds + - run: make test-e2e-kubernetes + env: + USE_MINIKUBE_DOCKER: "true" + CONTAINER_IMAGE_REPO: vector-test + PACKAGE_DEB_USE_CONTAINER: docker diff --git a/Makefile b/Makefile index 543cde73fbcc9..1d3d80551743d 100644 --- a/Makefile +++ b/Makefile @@ -282,8 +282,8 @@ ifeq ($(AUTODESPAWN), true) endif PACKAGE_DEB_USE_CONTAINER ?= $(USE_CONTAINER) -test-integration-kubernetes: ## Runs Kubernetes integration tests (Sorry, no `ENVIRONMENT=true` support) - PACKAGE_DEB_USE_CONTAINER="$(PACKAGE_DEB_USE_CONTAINER)" USE_CONTAINER=none $(RUN) test-integration-kubernetes +test-e2e-kubernetes: ## Runs Kubernetes E2E tests (Sorry, no `ENVIRONMENT=true` support) + PACKAGE_DEB_USE_CONTAINER="$(PACKAGE_DEB_USE_CONTAINER)" USE_CONTAINER=none $(RUN) test-e2e-kubernetes test-shutdown: ## Runs shutdown tests ifeq ($(AUTOSPAWN), true) diff --git a/scripts/test-integration-kubernetes.sh b/scripts/test-e2e-kubernetes.sh similarity index 93% rename from scripts/test-integration-kubernetes.sh rename to scripts/test-e2e-kubernetes.sh index 3c072e86fd1f2..e6fa21133f146 100755 --- a/scripts/test-integration-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash set -euo pipefail -# test-integration-kubernetes.sh +# test-e2e-kubernetes.sh # # SUMMARY # -# Run integration tests for Kubernetes components only. +# Run E2E tests for Kubernetes. random-string() { local CHARS="abcdefghijklmnopqrstuvwxyz0123456789" @@ -80,4 +80,4 @@ export CONTAINER_IMAGE export KUBE_TEST_DEPLOY_COMMAND="scripts/deploy-kubernetes-test.sh" # Run the tests. 
-cargo test --test kubernetes --no-default-features --features k8s-openapi -- --nocapture +cargo test --test kubernetes-e2e --no-default-features --features k8s-openapi -- --nocapture diff --git a/tests/kubernetes.rs b/tests/kubernetes-e2e.rs similarity index 100% rename from tests/kubernetes.rs rename to tests/kubernetes-e2e.rs From ff65e042f7d1941a557193f2a98443dafca77070 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Mon, 1 Jun 2020 13:53:33 +0300 Subject: [PATCH 08/69] Do not even publish container image at CI since we use "none" minikube driver Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d81345c69dc18..a97f3c5affa36 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -292,6 +292,6 @@ jobs: - run: USE_CONTAINER=none make slim-builds - run: make test-e2e-kubernetes env: - USE_MINIKUBE_DOCKER: "true" + SKIP_CONTAINER_IMAGE_PUBLISHING: "true" CONTAINER_IMAGE_REPO: vector-test PACKAGE_DEB_USE_CONTAINER: docker From 17d6af99c16f1bd92b0ce890f3a33e9bccaef050 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Mon, 1 Jun 2020 13:54:41 +0300 Subject: [PATCH 09/69] Isolate kubernetes e2e tests via requried-features Signed-off-by: MOZGIII --- Cargo.toml | 7 +++++++ scripts/test-e2e-kubernetes.sh | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 6aa927adf4863..7275adc94f99c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -437,6 +437,9 @@ splunk-integration-tests = ["sinks-splunk_hec", "warp"] shutdown-tests = ["sources","sinks-console","sinks-prometheus","sinks-blackhole","unix","rdkafka","transforms-log_to_metric","transforms-lua"] disable-resolv-conf = [] +# E2E tests +kubernetes-e2e-tests = ["k8s-openapi"] + [[bench]] name = "bench" harness = false @@ -454,5 +457,9 @@ name = "wasm" harness = false required-features = ["transforms-wasm", "transforms-lua"] +[[test]] +name = "kubernetes-e2e" +required-features = ["kubernetes-e2e-tests"] + [patch.'https://github.com/tower-rs/tower'] tower-layer = "0.3" diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index e6fa21133f146..66f99a9409891 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -80,4 +80,4 @@ export CONTAINER_IMAGE export KUBE_TEST_DEPLOY_COMMAND="scripts/deploy-kubernetes-test.sh" # Run the tests. 
-cargo test --test kubernetes-e2e --no-default-features --features k8s-openapi -- --nocapture +cargo test --test kubernetes-e2e --no-default-features --features kubernetes-e2e-tests -- --nocapture From 81fc716865d673e7106739bb832ab8f18128af9f Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Wed, 10 Jun 2020 06:33:35 +0300 Subject: [PATCH 10/69] Add lock to the test framework Signed-off-by: MOZGIII --- Cargo.lock | 1 + lib/kubernetes-test-framework/Cargo.toml | 1 + lib/kubernetes-test-framework/src/lib.rs | 2 ++ lib/kubernetes-test-framework/src/lock.rs | 15 +++++++++++++++ 4 files changed, 19 insertions(+) create mode 100644 lib/kubernetes-test-framework/src/lock.rs diff --git a/Cargo.lock b/Cargo.lock index 6d6c5e63f8e0c..52136eb3029e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1912,6 +1912,7 @@ name = "kubernetes-test-framework" version = "0.1.0" dependencies = [ "k8s-openapi", + "once_cell", "serde_json", "tempfile", ] diff --git a/lib/kubernetes-test-framework/Cargo.toml b/lib/kubernetes-test-framework/Cargo.toml index c7bc4257f5c66..3f27174f302bd 100644 --- a/lib/kubernetes-test-framework/Cargo.toml +++ b/lib/kubernetes-test-framework/Cargo.toml @@ -9,3 +9,4 @@ description = "Kubernetes Test Framework used to test Vector in Kubernetes" k8s-openapi = { version = "0.9", default-features = false, features = ["v1_15"] } serde_json = "1" tempfile = "3" +once_cell = "1" diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs index e5709c32d67bc..2e9b2eb1830c6 100644 --- a/lib/kubernetes-test-framework/src/lib.rs +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -18,6 +18,7 @@ pub mod framework; pub mod interface; +mod lock; pub mod log_lookup; pub mod namespace; mod resource_file; @@ -30,5 +31,6 @@ pub mod wait_for_rollout; pub use framework::Framework; pub use interface::Interface; +pub use lock::lock; type Result = std::result::Result>; diff --git a/lib/kubernetes-test-framework/src/lock.rs b/lib/kubernetes-test-framework/src/lock.rs new file mode 100644 index 0000000000000..ecec583c33039 --- /dev/null +++ b/lib/kubernetes-test-framework/src/lock.rs @@ -0,0 +1,15 @@ +use once_cell::sync::OnceCell; +use std::sync::{Mutex, MutexGuard}; + +/// A shared lock to use commonly among the tests. +/// The goal is to guranatee that only one test is executing concurrently, since +/// tests use a shared resource - a k8s cluster - and will conflict with each +/// other unless they're executing sequentially. +pub fn lock() -> MutexGuard<'static, ()> { + static INSTANCE: OnceCell> = OnceCell::new(); + match INSTANCE.get_or_init(|| Mutex::new(())).lock() { + Ok(guard) => guard, + // Ignore poison error. + Err(err) => err.into_inner(), + } +} From ef47617eebbef81436b615870a8b62ed5533367e Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Wed, 10 Jun 2020 06:35:34 +0300 Subject: [PATCH 11/69] Add some test cases to k8s e2e tests Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 451 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 421 insertions(+), 30 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index 080671f040778..be58f864dee0f 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -1,8 +1,14 @@ +//! This test is optimized for very quick rebuilds as it doesn't use anything +//! from the `vector` crate, and thus doesn't waste time in a tremendously long +//! link step. 
+ use k8s_openapi::{ api::core::v1::{Container, Pod, PodSpec}, apimachinery::pkg::apis::meta::v1::ObjectMeta, }; -use kubernetes_test_framework::{test_pod, wait_for_resource::WaitFor, Framework, Interface}; +use kubernetes_test_framework::{ + lock, log_lookup, test_pod, wait_for_resource::WaitFor, Framework, Interface, +}; const VECTOR_CONFIG: &str = r#" apiVersion: v1 @@ -32,11 +38,26 @@ fn make_framework() -> Framework { Framework::new(interface) } -fn make_test_pod(namespace: &str, name: &str, command: &str) -> Pod { +fn make_test_pod<'a>( + namespace: &'a str, + name: &'a str, + command: &'a str, + labels: impl IntoIterator + 'a, +) -> Pod { + let labels: std::collections::BTreeMap = labels + .into_iter() + .map(|(key, val)| (key.to_owned(), val.to_owned())) + .collect(); + let labels = if labels.is_empty() { + None + } else { + Some(labels) + }; Pod { metadata: ObjectMeta { name: Some(name.to_owned()), namespace: Some(namespace.to_owned()), + labels, ..ObjectMeta::default() }, spec: Some(PodSpec { @@ -58,8 +79,87 @@ fn parse_json(s: &str) -> Result> Ok(serde_json::from_str(s)?) } +fn generate_long_string(a: usize, b: usize) -> String { + (0..a).fold(String::new(), |mut acc, i| { + let istr = i.to_string(); + for _ in 0..b { + acc.push_str(&istr); + } + acc + }) +} + +/// Read the first line from vector logs and assert that it matches the expected +/// one. +/// This allows detecting the situations where things have gone very wrong. +fn smoke_check_first_line(log_reader: &mut log_lookup::Reader) { + // Wait for first line as a smoke check. + let first_line = log_reader.next().expect("unable to read first line"); + let expected_pat = "INFO vector: Log level \"info\" is enabled.\n"; + assert!( + first_line.ends_with(expected_pat), + "Expected a line ending with {:?} but got {:?}; vector might be malfunctioning", + expected_pat, + first_line + ); +} + +enum FlowControlCommand { + GoOn, + Terminate, +} + +fn look_for_log_line
<P>
( + log_reader: &mut log_lookup::Reader, + mut predicate: P, +) -> Result<(), Box> +where + P: FnMut(serde_json::Value) -> FlowControlCommand, +{ + let mut lines_till_we_give_up = 10000; + while let Some(line) = log_reader.next() { + println!("Got line: {:?}", line); + + lines_till_we_give_up -= 1; + if lines_till_we_give_up <= 0 { + println!("Giving up"); + log_reader.kill()?; + break; + } + + if !line.starts_with("{") { + // This isn't a json, must be an entry from Vector's own log stream. + continue; + } + + let val = parse_json(&line)?; + + match predicate(val) { + FlowControlCommand::GoOn => { + // Not what we were looking for, go on. + } + FlowControlCommand::Terminate => { + // We are told we should stop, request that log reader is + // killed. + // This doesn't immediately stop the reading because we want to + // process the pending buffers first. + log_reader.kill()?; + } + } + } + + // Ensure log reader exited. + log_reader.wait().expect("log reader wait failed"); + + Ok(()) +} + +/// This test validates that vector picks up logs at the simplest case +/// possible - a new pod is deployed and prints to stdout, and we assert that +/// vector picks that up. #[test] -fn test() -> Result<(), Box> { +fn simple() -> Result<(), Box> { + let _guard = lock(); let framework = make_framework(); let vector = framework.vector("test-vector", VECTOR_CONFIG)?; @@ -71,6 +171,7 @@ fn test() -> Result<(), Box> { "test-vector-test-pod", "test-pod", "echo MARKER", + vec![], ))?)?; framework.wait( "test-vector-test-pod", @@ -80,41 +181,326 @@ fn test() -> Result<(), Box> { )?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader); - // Wait for first line as a smoke check. - let first_line = log_reader.next().expect("unable to read first line"); - let expected_pat = "INFO vector: Log level \"info\" is enabled.\n"; - assert!( - first_line.ends_with(expected_pat), - "Expected a line ending with {:?} but got {:?}; vector might be malfunctioning", - expected_pat, - first_line - ); + // Read the rest of the log lines. + let mut got_marker = false; + look_for_log_line(&mut log_reader, |val| { + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + return FlowControlCommand::GoOn; + } + + // Ensure we got the marker. + assert_eq!(val["message"], "MARKER"); + + if got_marker { + // We've already seen one marker! This is not good, we only emitted + // one. + panic!("marker seen more than once"); + } + + // If we did, remember it. + got_marker = true; + + // Request to stop the flow. + FlowControlCommand::Terminate + })?; + + assert!(got_marker); + + drop(test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} + +/// This test validates that vector properly merges a log message that +/// kubernetes has internally split into multiple partial log lines. 
+#[test] +fn partial_merge() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG)?; + framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + + let test_namespace = framework.namespace("test-vector-test-pod")?; + + let test_message = generate_long_string(8, 8 * 1024); // 64 KiB + let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + &format!("echo {}", test_message), + vec![], + ))?)?; + framework.wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + )?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader); + + // Read the rest of the log lines. + let mut got_expected_line = false; + look_for_log_line(&mut log_reader, |val| { + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + return FlowControlCommand::GoOn; + } + + // Ensure the message we got matches the one we emitted. + assert_eq!(val["message"], test_message); + + if got_expected_line { + // We've already seen our expected line once! This is not good, we + // only emitted one. + panic!("test message seen more than once"); + } + + // If we did, remember it. + got_expected_line = true; + + // Request to stop the flow. + FlowControlCommand::Terminate + })?; + + assert!(got_expected_line); + + drop(test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} + +/// This test validates that vector partail message merging mechanism doesn't +/// interfere with the non-partial messages that don't end with newline. +#[test] +fn no_newline_at_eol() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG)?; + framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + + let test_namespace = framework.namespace("test-vector-test-pod")?; + + let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo -n MARKER", // `-n` doesn't print newline + vec![], + ))?)?; + framework.wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + )?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader); + + // Read the rest of the log lines. + let mut got_expected_line = false; + look_for_log_line(&mut log_reader, |val| { + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + return FlowControlCommand::GoOn; + } + + // Ensure the message we got matches the one we emitted. + assert_eq!(val["message"], "MARKER"); + + if got_expected_line { + // We've already seen our expected line once! This is not good, we + // only emitted one. + panic!("test message seen more than once"); + } + + // If we did, remember it. + got_expected_line = true; + + // Request to stop the flow. + FlowControlCommand::Terminate + })?; + + assert!(got_expected_line); + + drop(test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} + +/// This test validates that vector picks up preexisting logs - logs that +/// existed before vector was deployed. 
+#[test] +fn preexisting() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let test_namespace = framework.namespace("test-vector-test-pod")?; + + let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo MARKER", + vec![], + ))?)?; + framework.wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + )?; + + // Wait for some extra time to ensure pod completes. + std::thread::sleep(std::time::Duration::from_secs(10)); + + let vector = framework.vector("test-vector", VECTOR_CONFIG)?; + framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader); // Read the rest of the log lines. - let mut lines_till_we_give_up = 10000; let mut got_marker = false; - while let Some(line) = log_reader.next() { - println!("Got line: {:?}", line); + look_for_log_line(&mut log_reader, |val| { + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + return FlowControlCommand::GoOn; + } - lines_till_we_give_up -= 1; - if lines_till_we_give_up <= 0 { - println!("Giving up"); - log_reader.kill()?; - break; + // Ensure we got the marker. + assert_eq!(val["message"], "MARKER"); + + if got_marker { + // We've already seen one marker! This is not good, we only emitted + // one. + panic!("marker seen more than once"); } - if !line.starts_with("{") { - // This isn't a json, must be an entry from Vector's own log stream. - continue; + // If we did, remember it. + got_marker = true; + + // Request to stop the flow. + FlowControlCommand::Terminate + })?; + + assert!(got_marker); + + drop(test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} + +/// This test validates that vector picks up multiple log lines, and that they +/// arrive at the proper order. +#[test] +fn multiple_lines() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG)?; + framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + + let test_namespace = framework.namespace("test-vector-test-pod")?; + + let test_messages = vec!["MARKER1", "MARKER2", "MARKER3", "MARKER4", "MARKER5"]; + let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + &format!("echo -e {}", test_messages.join(r"\\n")), + vec![], + ))?)?; + framework.wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + )?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader); + + // Read the rest of the log lines. + let mut test_messages_iter = test_messages.into_iter().peekable(); + look_for_log_line(&mut log_reader, |val| { + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + return FlowControlCommand::GoOn; } - let val = parse_json(&line)?; + // Take the next marker. + let current_marker = test_messages_iter + .next() + .expect("expected no more lines since the test messages iter is exausted"); + + // Ensure we got the marker. 
+ assert_eq!(val["message"], current_marker); + + if test_messages_iter.peek().is_some() { + // We're not done yet, so go on. + return FlowControlCommand::GoOn; + } + + // Request to stop the flow. + FlowControlCommand::Terminate + })?; + assert!(test_messages_iter.next().is_none()); + + drop(test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} + +/// This test validates that vector properly annotates log events with pod +/// metadata obtained from the k8s API. +#[test] +fn pod_metadata_annotation() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG)?; + framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + + let test_namespace = framework.namespace("test-vector-test-pod")?; + + let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo MARKER", + vec![("label1", "hello"), ("label2", "world")], + ))?)?; + framework.wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + )?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader); + + // Read the rest of the log lines. + let mut got_marker = false; + look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { // A log from something other than our test pod, predend we don't // see it. - continue; + return FlowControlCommand::GoOn; } // Ensure we got the marker. @@ -129,12 +515,17 @@ fn test() -> Result<(), Box> { // If we did, remember it. got_marker = true; - // We got a marker, so we're pretty much done. - log_reader.kill()?; - } + // Assert pod the event is properly annotated with pod metadata. + assert_eq!(val["kubernetes"]["pod_name"], "test-pod"); + // We've already asserted this above, but repeat for completeness. + assert_eq!(val["kubernetes"]["pod_namespace"], "test-vector-test-pod"); + assert_eq!(val["kubernetes"]["pod_uid"].as_str().unwrap().len(), 36); // 36 is a standard UUID string length + assert_eq!(val["kubernetes"]["pod_labels"]["label1"], "hello"); + assert_eq!(val["kubernetes"]["pod_labels"]["label2"], "world"); - // Ensure log reader exited. - log_reader.wait().expect("log reader wait failed"); + // Request to stop the flow. + FlowControlCommand::Terminate + })?; assert!(got_marker); From c1c36ac16c20e7ee1cbc6fba5b16b997b08113bb Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 00:37:24 +0300 Subject: [PATCH 12/69] Add the ability to use quick debug builds in e2e tests Useful to speed up the development cycles Signed-off-by: MOZGIII --- scripts/test-e2e-kubernetes.sh | 52 +++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index 66f99a9409891..81226747aa85f 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -33,26 +33,40 @@ if [[ -z "${CONTAINER_IMAGE:-}" ]]; then # Assign a default test run ID if none is provided by the user. TEST_RUN_ID="${TEST_RUN_ID:-"$(date +%s)-$(random-string)"}" - # Package a .deb file to build a docker container, unless skipped. - if [[ -z "${SKIP_PACKAGE_DEB:-}" ]]; then - make package-deb-x86_64 USE_CONTAINER="${PACKAGE_DEB_USE_CONTAINER:-"docker"}" - fi + if [[ "${QUICK_BUILD:-"false"}" == "true" ]]; then + # Build in debug mode. 
+ cargo build + + # Prepare test image parameters. + VERSION_TAG="test-$TEST_RUN_ID" + + # Prepare the container image for the deployment command and docker build. + CONTAINER_IMAGE="$CONTAINER_IMAGE_REPO:$VERSION_TAG-debug" + + # Build docker image. + docker build --tag "$CONTAINER_IMAGE" -f skaffold/docker/Dockerfile target/debug + else + # Package a .deb file to build a docker container, unless skipped. + if [[ -z "${SKIP_PACKAGE_DEB:-}" ]]; then + make package-deb-x86_64 USE_CONTAINER="${PACKAGE_DEB_USE_CONTAINER:-"docker"}" + fi + + # Prepare test image parameters. + VERSION_TAG="test-$TEST_RUN_ID" + BASE_TAG="debian" - # Prepare test image parameters. - VERSION_TAG="test-$TEST_RUN_ID" - BASE_TAG="debian" - - # Build docker image with Vector - the same way it's done for releses. Don't - # do the push - we'll handle it later. - REPO="$CONTAINER_IMAGE_REPO" \ - CHANNEL="test" \ - BASE="$BASE_TAG" \ - TAG="$VERSION_TAG" \ - PUSH="" \ - scripts/build-docker.sh - - # Prepare the container image for the deployment command. - CONTAINER_IMAGE="$CONTAINER_IMAGE_REPO:$VERSION_TAG-$BASE_TAG" + # Build docker image with Vector - the same way it's done for releses. Don't + # do the push - we'll handle it later. + REPO="$CONTAINER_IMAGE_REPO" \ + CHANNEL="test" \ + BASE="$BASE_TAG" \ + TAG="$VERSION_TAG" \ + PUSH="" \ + scripts/build-docker.sh + + # Prepare the container image for the deployment command. + CONTAINER_IMAGE="$CONTAINER_IMAGE_REPO:$VERSION_TAG-$BASE_TAG" + fi fi if [[ -z "${SKIP_CONTAINER_IMAGE_PUBLISHING:-}" ]]; then From ab5703da2e1394ec4246c1f30b4e8a4efe03452f Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 00:46:43 +0300 Subject: [PATCH 13/69] Use a single thread for test Signed-off-by: MOZGIII --- scripts/test-e2e-kubernetes.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index 81226747aa85f..19e1624023a7a 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -94,4 +94,4 @@ export CONTAINER_IMAGE export KUBE_TEST_DEPLOY_COMMAND="scripts/deploy-kubernetes-test.sh" # Run the tests. 
-cargo test --test kubernetes-e2e --no-default-features --features kubernetes-e2e-tests -- --nocapture +cargo test --test kubernetes-e2e --no-default-features --features kubernetes-e2e-tests -- --nocapture --test-threads 1 From 0783690ff69cc1075bf4d9232ddb58806b2a020c Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 01:49:08 +0300 Subject: [PATCH 14/69] Made test framework async Signed-off-by: MOZGIII --- Cargo.lock | 1 + lib/kubernetes-test-framework/Cargo.toml | 4 + .../src/framework.rs | 41 ++- lib/kubernetes-test-framework/src/lib.rs | 2 + .../src/log_lookup.rs | 41 +-- .../src/namespace.rs | 51 ++- lib/kubernetes-test-framework/src/test_pod.rs | 61 +--- lib/kubernetes-test-framework/src/up_down.rs | 79 +++++ lib/kubernetes-test-framework/src/util.rs | 18 ++ lib/kubernetes-test-framework/src/vector.rs | 66 ++-- .../src/wait_for_resource.rs | 24 +- .../src/wait_for_rollout.rs | 15 +- tests/kubernetes-e2e.rs | 299 ++++++++++-------- 13 files changed, 398 insertions(+), 304 deletions(-) create mode 100644 lib/kubernetes-test-framework/src/up_down.rs create mode 100644 lib/kubernetes-test-framework/src/util.rs diff --git a/Cargo.lock b/Cargo.lock index 52136eb3029e9..1cd3b18f67d54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1915,6 +1915,7 @@ dependencies = [ "once_cell", "serde_json", "tempfile", + "tokio 0.2.21", ] [[package]] diff --git a/lib/kubernetes-test-framework/Cargo.toml b/lib/kubernetes-test-framework/Cargo.toml index 3f27174f302bd..a6df1b66afc12 100644 --- a/lib/kubernetes-test-framework/Cargo.toml +++ b/lib/kubernetes-test-framework/Cargo.toml @@ -10,3 +10,7 @@ k8s-openapi = { version = "0.9", default-features = false, features = ["v1_15"] serde_json = "1" tempfile = "3" once_cell = "1" +tokio = { version = "0.2", features = ["process", "io-util"] } + +[dev-dependencies] +tokio = { version = "0.2", features = ["macros", "rt-threaded"] } diff --git a/lib/kubernetes-test-framework/src/framework.rs b/lib/kubernetes-test-framework/src/framework.rs index 57fa556a69346..ae4ad716fa6e0 100644 --- a/lib/kubernetes-test-framework/src/framework.rs +++ b/lib/kubernetes-test-framework/src/framework.rs @@ -1,7 +1,8 @@ //! The test framework main entry point. 
use super::{ - log_lookup, namespace, test_pod, vector, wait_for_resource, wait_for_rollout, Interface, Result, + log_lookup, namespace, test_pod, up_down, vector, wait_for_resource, wait_for_rollout, + Interface, Result, }; pub struct Framework { @@ -14,25 +15,35 @@ impl Framework { Self { interface } } - pub fn vector(&self, namespace: &str, custom_resource: &str) -> Result { - let manager = vector::Manager::new( + pub async fn vector( + &self, + namespace: &str, + custom_resource: &str, + ) -> Result> { + let mut manager = vector::manager( self.interface.deploy_vector_command.as_str(), namespace, custom_resource, )?; - manager.up()?; + manager.up().await?; Ok(manager) } - pub fn namespace(&self, namespace: &str) -> Result { - let manager = namespace::Manager::new(&self.interface.kubectl_command, namespace)?; - manager.up()?; + pub async fn namespace( + &self, + namespace: &str, + ) -> Result> { + let mut manager = namespace::manager(&self.interface.kubectl_command, namespace); + manager.up().await?; Ok(manager) } - pub fn test_pod(&self, config: test_pod::Config) -> Result { - let manager = test_pod::Manager::new(&self.interface.kubectl_command, config)?; - manager.up()?; + pub async fn test_pod( + &self, + config: test_pod::Config, + ) -> Result> { + let mut manager = test_pod::manager(&self.interface.kubectl_command, config); + manager.up().await?; Ok(manager) } @@ -40,7 +51,7 @@ impl Framework { log_lookup::logs(&self.interface.kubectl_command, namespace, resource) } - pub fn wait<'a>( + pub async fn wait<'a>( &self, namespace: &str, resources: impl IntoIterator, @@ -54,9 +65,10 @@ impl Framework { wait_for, extra, ) + .await } - pub fn wait_all_namespaces<'a>( + pub async fn wait_all_namespaces<'a>( &self, resources: impl IntoIterator, wait_for: wait_for_resource::WaitFor<&'_ str>, @@ -68,14 +80,15 @@ impl Framework { wait_for, extra, ) + .await } - pub fn wait_for_rollout<'a>( + pub async fn wait_for_rollout<'a>( &self, namespace: &str, resource: &str, extra: impl IntoIterator, ) -> Result<()> { - wait_for_rollout::run(&self.interface.kubectl_command, namespace, resource, extra) + wait_for_rollout::run(&self.interface.kubectl_command, namespace, resource, extra).await } } diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs index 2e9b2eb1830c6..aae933267293a 100644 --- a/lib/kubernetes-test-framework/src/lib.rs +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -23,6 +23,8 @@ pub mod log_lookup; pub mod namespace; mod resource_file; pub mod test_pod; +mod up_down; +mod util; pub mod vector; pub mod wait_for_resource; pub mod wait_for_rollout; diff --git a/lib/kubernetes-test-framework/src/log_lookup.rs b/lib/kubernetes-test-framework/src/log_lookup.rs index c712d04bc3055..a40a7661242ce 100644 --- a/lib/kubernetes-test-framework/src/log_lookup.rs +++ b/lib/kubernetes-test-framework/src/log_lookup.rs @@ -1,6 +1,7 @@ use super::Result; -use std::io::{BufRead, BufReader}; -use std::process::{Child, ChildStdout, Command, ExitStatus, Stdio}; +use std::process::{ExitStatus, Stdio}; +use tokio::io::{AsyncBufReadExt, BufReader}; +use tokio::process::{Child, ChildStdout, Command}; pub fn logs(kubectl_command: &str, namespace: &str, resource: &str) -> Result { let mut command = Command::new(kubectl_command); @@ -38,52 +39,56 @@ impl Reader { Reader { child, reader } } - pub fn wait(&mut self) -> std::io::Result { - self.child.wait() + pub async fn wait(&mut self) -> std::io::Result { + (&mut self.child).await } pub fn kill(&mut self) -> 
std::io::Result<()> { self.child.kill() } -} - -impl Iterator for Reader { - type Item = String; - fn next(&mut self) -> Option { + pub async fn read_line(&mut self) -> Option { let mut s = String::new(); - let result = self.reader.read_line(&mut s); + let result = self.reader.read_line(&mut s).await; match result { Ok(0) => None, Ok(_) => Some(s), Err(err) => panic!(err), } } + + pub async fn collect(&mut self) -> Vec { + let mut list = Vec::new(); + while let Some(line) = self.read_line().await { + list.push(line) + } + list + } } #[cfg(test)] mod tests { use super::*; - #[test] - fn test_reader_finite() { + #[tokio::test] + async fn test_reader_finite() { let mut command = Command::new("echo"); command.arg("test"); let mut reader = Reader::spawn(command).expect("unable to spawn"); // Collect all line, expect stream to finish. - let lines: Vec<_> = (&mut reader).collect(); + let lines = reader.collect().await; // Assert we got all the lines we expected. assert_eq!(lines, vec!["test\n".to_owned()]); // Ensure wait doesn't fail, and that we exit status is success. - let exit_status = reader.wait().expect("wait failed"); + let exit_status = reader.wait().await.expect("wait failed"); assert!(exit_status.success()); } - #[test] - fn test_reader_inifinite() { + #[tokio::test] + async fn test_reader_inifinite() { let mut command = Command::new("bash"); command.arg("-c"); command.arg(r#"NUM=0; while true; do echo "Line $NUM"; NUM=$((NUM+=1)); sleep 0.01; done"#); @@ -93,7 +98,7 @@ mod tests { // Read the lines and at some point ask the command we're reading from // to stop. let mut expected_num = 0; - while let Some(line) = (&mut reader).next() { + while let Some(line) = reader.read_line().await { // Assert we're getting expected lines. assert_eq!(line, format!("Line {}\n", expected_num)); @@ -118,7 +123,7 @@ mod tests { // Ensure wait doesn't fail. We killed the process, so expect // a non-success exit code. - let exit_status = reader.wait().expect("wait failed"); + let exit_status = reader.wait().await.expect("wait failed"); assert!(!exit_status.success()); } } diff --git a/lib/kubernetes-test-framework/src/namespace.rs b/lib/kubernetes-test-framework/src/namespace.rs index 9e9ed1640d6df..d33a65307db75 100644 --- a/lib/kubernetes-test-framework/src/namespace.rs +++ b/lib/kubernetes-test-framework/src/namespace.rs @@ -1,45 +1,30 @@ -use super::Result; +use crate::up_down; use std::process::{Command, Stdio}; -pub struct Manager { +#[derive(Debug)] +pub struct CommandBuilder { kubectl_command: String, namespace: String, } -impl Manager { - pub fn new(kubectl_command: &str, namespace: &str) -> Result { - Ok(Self { - kubectl_command: kubectl_command.to_owned(), - namespace: namespace.to_owned(), - }) - } - - pub fn up(&self) -> Result<()> { - self.exec("create") - } - - pub fn down(&self) -> Result<()> { - self.exec("delete") - } - - fn exec(&self, subcommand: &str) -> Result<()> { - if !Command::new(&self.kubectl_command) - .arg(subcommand) +impl up_down::CommandBuilder for CommandBuilder { + fn build(&self, command_to_build: up_down::CommandToBuild) -> Command { + let mut command = Command::new(&self.kubectl_command); + command + .arg(match command_to_build { + up_down::CommandToBuild::Up => "create", + up_down::CommandToBuild::Down => "delete", + }) .arg("namespace") .arg(&self.namespace) - .stdin(Stdio::null()) - .spawn()? - .wait()? 
- .success() - { - Err(format!("failed to exec: {}", subcommand))?; - } - Ok(()) + .stdin(Stdio::null()); + command } } -impl Drop for Manager { - fn drop(&mut self) { - self.down().expect("namespace turndown failed"); - } +pub fn manager(kubectl_command: &str, namespace: &str) -> up_down::Manager { + up_down::Manager::new(CommandBuilder { + kubectl_command: kubectl_command.to_owned(), + namespace: namespace.to_owned(), + }) } diff --git a/lib/kubernetes-test-framework/src/test_pod.rs b/lib/kubernetes-test-framework/src/test_pod.rs index 468cb23965c0c..384abbe84d65e 100644 --- a/lib/kubernetes-test-framework/src/test_pod.rs +++ b/lib/kubernetes-test-framework/src/test_pod.rs @@ -1,4 +1,5 @@ use super::{resource_file::ResourceFile, Result}; +use crate::up_down; use k8s_openapi::api::core::v1::Pod; use std::process::{Command, Stdio}; @@ -21,59 +22,29 @@ impl Config { } #[derive(Debug)] -pub struct Manager { +pub struct CommandBuilder { kubectl_command: String, config: Config, } -impl Manager { - pub fn new(kubectl_command: &str, config: Config) -> Result { - Ok(Self { - kubectl_command: kubectl_command.to_owned(), - config, - }) - } - - pub fn up(&self) -> Result<()> { - let mut command = self.prepare_command(); - - command.arg("create"); - command - .arg("-f") - .arg(self.config.custom_resource_file.path()); - Self::run_command(command)?; - - Ok(()) - } - - pub fn down(&self) -> Result<()> { - let mut command = self.prepare_command(); - - command.arg("delete"); +impl up_down::CommandBuilder for CommandBuilder { + fn build(&self, command_to_build: up_down::CommandToBuild) -> Command { + let mut command = Command::new(&self.kubectl_command); command + .arg(match command_to_build { + up_down::CommandToBuild::Up => "create", + up_down::CommandToBuild::Down => "delete", + }) .arg("-f") - .arg(self.config.custom_resource_file.path()); - Self::run_command(command)?; - - Ok(()) - } - - fn prepare_command(&self) -> Command { - let mut command = Command::new(&self.kubectl_command); - command.stdin(Stdio::null()); + .arg(self.config.custom_resource_file.path()) + .stdin(Stdio::null()); command } - - fn run_command(mut command: Command) -> Result<()> { - if !command.spawn()?.wait()?.success() { - Err(format!("failed to exec: {:?}", &command))?; - } - Ok(()) - } } -impl Drop for Manager { - fn drop(&mut self) { - self.down().expect("test pod turndown failed"); - } +pub fn manager(kubectl_command: &str, config: Config) -> up_down::Manager { + up_down::Manager::new(CommandBuilder { + kubectl_command: kubectl_command.to_owned(), + config, + }) } diff --git a/lib/kubernetes-test-framework/src/up_down.rs b/lib/kubernetes-test-framework/src/up_down.rs new file mode 100644 index 0000000000000..38c2a441ef3ee --- /dev/null +++ b/lib/kubernetes-test-framework/src/up_down.rs @@ -0,0 +1,79 @@ +use super::Result; +use crate::util::{run_command, run_command_blocking}; +use std::process::Command; + +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum CommandToBuild { + Up, + Down, +} + +pub trait CommandBuilder { + fn build(&self, command_to_build: CommandToBuild) -> Command; +} + +#[derive(Debug)] +pub struct Manager +where + B: CommandBuilder, +{ + command_builder: B, + needs_drop: bool, +} + +impl Manager +where + B: CommandBuilder, +{ + pub fn new(command_builder: B) -> Self { + Self { + command_builder, + needs_drop: false, + } + } + + pub async fn up(&mut self) -> Result<()> { + self.needs_drop = true; + self.exec(CommandToBuild::Up).await + } + + pub async fn down(&mut self) -> Result<()> { + self.needs_drop 
= false; + self.exec(CommandToBuild::Down).await + } + + pub fn up_blocking(&mut self) -> Result<()> { + self.needs_drop = true; + self.exec_blocking(CommandToBuild::Up) + } + + pub fn down_blocking(&mut self) -> Result<()> { + self.needs_drop = false; + self.exec_blocking(CommandToBuild::Down) + } + + fn build(&self, command_to_build: CommandToBuild) -> Command { + self.command_builder.build(command_to_build) + } + + async fn exec(&self, command_to_build: CommandToBuild) -> Result<()> { + let command = self.build(command_to_build); + run_command(tokio::process::Command::from(command)).await + } + + fn exec_blocking(&self, command_to_build: CommandToBuild) -> Result<()> { + let command = self.build(command_to_build); + run_command_blocking(command) + } +} + +impl Drop for Manager +where + B: CommandBuilder, +{ + fn drop(&mut self) { + if self.needs_drop { + self.down_blocking().expect("turndown failed"); + } + } +} diff --git a/lib/kubernetes-test-framework/src/util.rs b/lib/kubernetes-test-framework/src/util.rs new file mode 100644 index 0000000000000..5d8527d524f2f --- /dev/null +++ b/lib/kubernetes-test-framework/src/util.rs @@ -0,0 +1,18 @@ +use crate::Result; + +pub async fn run_command(mut command: tokio::process::Command) -> Result<()> { + let exit_status = command.spawn()?.await?; + if !exit_status.success() { + Err(format!("exec failed: {:?}", command))?; + } + Ok(()) +} + +pub fn run_command_blocking(mut command: std::process::Command) -> Result<()> { + let mut child = command.spawn()?; + let exit_status = child.wait()?; + if !exit_status.success() { + Err(format!("exec failed: {:?}", command))?; + } + Ok(()) +} diff --git a/lib/kubernetes-test-framework/src/vector.rs b/lib/kubernetes-test-framework/src/vector.rs index 8fac1d58ceb6e..2cfeeaf2c3bd8 100644 --- a/lib/kubernetes-test-framework/src/vector.rs +++ b/lib/kubernetes-test-framework/src/vector.rs @@ -1,58 +1,44 @@ use super::{resource_file::ResourceFile, Result}; +use crate::up_down; use std::process::{Command, Stdio}; -/// Takes care of deploying vector into the kubernetes cluster. -/// -/// Manages the config file secret accordingly. #[derive(Debug)] -pub struct Manager { +pub struct CommandBuilder { interface_command: String, namespace: String, custom_resource_file: ResourceFile, } -impl Manager { - /// Create a new [`Manager`]. - pub fn new(interface_command: &str, namespace: &str, custom_resource: &str) -> Result { - let custom_resource_file = ResourceFile::new(custom_resource)?; - Ok(Self { - interface_command: interface_command.to_owned(), - namespace: namespace.to_owned(), - custom_resource_file, - }) - } - - pub fn up(&self) -> Result<()> { - self.exec("up")?; - Ok(()) - } - - pub fn down(&self) -> Result<()> { - self.exec("down")?; - Ok(()) - } - - fn exec(&self, operation: &str) -> Result<()> { - if !Command::new(&self.interface_command) - .arg(operation) +impl up_down::CommandBuilder for CommandBuilder { + fn build(&self, command_to_build: up_down::CommandToBuild) -> Command { + let mut command = Command::new(&self.interface_command); + command + .arg(match command_to_build { + up_down::CommandToBuild::Up => "up", + up_down::CommandToBuild::Down => "down", + }) .arg(&self.namespace) .env( "CUSTOM_RESOURCE_CONIFGS_FILE", self.custom_resource_file.path(), ) - .stdin(Stdio::null()) - .spawn()? - .wait()? 
- .success() - { - Err(format!("failed to exec: {}", operation))?; - } - Ok(()) + .stdin(Stdio::null()); + command } } -impl Drop for Manager { - fn drop(&mut self) { - self.down().expect("vector turndown failed"); - } +/// Takes care of deploying vector into the kubernetes cluster. +/// +/// Manages the config file secret accordingly. +pub fn manager( + interface_command: &str, + namespace: &str, + custom_resource: &str, +) -> Result> { + let custom_resource_file = ResourceFile::new(custom_resource)?; + Ok(up_down::Manager::new(CommandBuilder { + interface_command: interface_command.to_owned(), + namespace: namespace.to_owned(), + custom_resource_file, + })) } diff --git a/lib/kubernetes-test-framework/src/wait_for_resource.rs b/lib/kubernetes-test-framework/src/wait_for_resource.rs index 7b9c9a34c1970..539761410d310 100644 --- a/lib/kubernetes-test-framework/src/wait_for_resource.rs +++ b/lib/kubernetes-test-framework/src/wait_for_resource.rs @@ -1,8 +1,7 @@ use super::Result; -use std::{ - ffi::OsStr, - process::{Command, Stdio}, -}; +use crate::util::run_command; +use std::{ffi::OsStr, process::Stdio}; +use tokio::process::Command; pub enum WaitFor where @@ -12,7 +11,7 @@ where Condition(C), } -pub fn namespace( +pub async fn namespace( kubectl_command: CMD, namespace: NS, resources: impl IntoIterator, @@ -28,10 +27,10 @@ where { let mut command = prepare_base_command(kubectl_command, resources, wait_for, extra); command.arg("-n").arg(namespace); - run_wait_command(command) + run_command(command).await } -pub fn all_namespaces( +pub async fn all_namespaces( kubectl_command: CMD, resources: impl IntoIterator, wait_for: WaitFor, @@ -45,7 +44,7 @@ where { let mut command = prepare_base_command(kubectl_command, resources, wait_for, extra); command.arg("--all-namespaces=true"); - run_wait_command(command) + run_command(command).await } pub fn prepare_base_command( @@ -79,12 +78,3 @@ where command.args(extra); command } - -fn run_wait_command(mut command: Command) -> Result<()> { - let mut child = command.spawn()?; - let exit_status = child.wait()?; - if !exit_status.success() { - Err(format!("waiting for resources failed: {:?}", command))?; - } - Ok(()) -} diff --git a/lib/kubernetes-test-framework/src/wait_for_rollout.rs b/lib/kubernetes-test-framework/src/wait_for_rollout.rs index 481e4f8170d34..bff7c86d41770 100644 --- a/lib/kubernetes-test-framework/src/wait_for_rollout.rs +++ b/lib/kubernetes-test-framework/src/wait_for_rollout.rs @@ -1,10 +1,9 @@ use super::Result; -use std::{ - ffi::OsStr, - process::{Command, Stdio}, -}; +use crate::util::run_command; +use std::{ffi::OsStr, process::Stdio}; +use tokio::process::Command; -pub fn run( +pub async fn run( kubectl_command: CMD, namespace: NS, resource: R, @@ -28,10 +27,6 @@ where command.arg(resource); command.args(extra); - let mut child = command.spawn()?; - let exit_status = child.wait()?; - if !exit_status.success() { - Err(format!("waiting for rollout failed: {:?}", command))?; - } + run_command(command).await?; Ok(()) } diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index be58f864dee0f..d623ee25c0682 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -92,9 +92,12 @@ fn generate_long_string(a: usize, b: usize) -> String { /// Read the first line from vector logs and assert that it matches the expected /// one. /// This allows detecting the situations where things have gone very wrong. 
-fn smoke_check_first_line(log_reader: &mut log_lookup::Reader) { +async fn smoke_check_first_line(log_reader: &mut log_lookup::Reader) { // Wait for first line as a smoke check. - let first_line = log_reader.next().expect("unable to read first line"); + let first_line = log_reader + .read_line() + .await + .expect("unable to read first line"); let expected_pat = "INFO vector: Log level \"info\" is enabled.\n"; assert!( first_line.ends_with(expected_pat), @@ -109,7 +112,7 @@ enum FlowControlCommand { Terminate, } -fn look_for_log_line

(
+async fn look_for_log_line

( log_reader: &mut log_lookup::Reader, mut predicate: P, ) -> Result<(), Box> @@ -117,7 +120,7 @@ where P: FnMut(serde_json::Value) -> FlowControlCommand, { let mut lines_till_we_give_up = 10000; - while let Some(line) = log_reader.next() { + while let Some(line) = log_reader.read_line().await { println!("Got line: {:?}", line); lines_till_we_give_up -= 1; @@ -149,7 +152,7 @@ where } // Ensure log reader exited. - log_reader.wait().expect("log reader wait failed"); + log_reader.wait().await.expect("log reader wait failed"); Ok(()) } @@ -157,31 +160,37 @@ where /// This test validates that vector picks up logs at the simplest case /// possible - a new pod is deployed and prints to stdout, and we assert that /// vector picks that up. -#[test] -fn simple() -> Result<(), Box> { +#[tokio::test] +async fn simple() -> Result<(), Box> { let _guard = lock(); let framework = make_framework(); - let vector = framework.vector("test-vector", VECTOR_CONFIG)?; - framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; - - let test_namespace = framework.namespace("test-vector-test-pod")?; - - let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - "echo MARKER", - vec![], - ))?)?; - framework.wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - )?; + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; + + let test_namespace = framework.namespace("test-vector-test-pod").await?; + + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo MARKER", + vec![], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader); + smoke_check_first_line(&mut log_reader).await; // Read the rest of the log lines. let mut got_marker = false; @@ -206,7 +215,8 @@ fn simple() -> Result<(), Box> { // Request to stop the flow. FlowControlCommand::Terminate - })?; + }) + .await?; assert!(got_marker); @@ -218,32 +228,38 @@ fn simple() -> Result<(), Box> { /// This test validates that vector properly merges a log message that /// kubernetes has internally split into multiple partial log lines. 
-#[test] -fn partial_merge() -> Result<(), Box> { +#[tokio::test] +async fn partial_merge() -> Result<(), Box> { let _guard = lock(); let framework = make_framework(); - let vector = framework.vector("test-vector", VECTOR_CONFIG)?; - framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; - let test_namespace = framework.namespace("test-vector-test-pod")?; + let test_namespace = framework.namespace("test-vector-test-pod").await?; let test_message = generate_long_string(8, 8 * 1024); // 64 KiB - let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - &format!("echo {}", test_message), - vec![], - ))?)?; - framework.wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - )?; + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + &format!("echo {}", test_message), + vec![], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader); + smoke_check_first_line(&mut log_reader).await; // Read the rest of the log lines. let mut got_expected_line = false; @@ -268,7 +284,8 @@ fn partial_merge() -> Result<(), Box> { // Request to stop the flow. FlowControlCommand::Terminate - })?; + }) + .await?; assert!(got_expected_line); @@ -280,31 +297,37 @@ fn partial_merge() -> Result<(), Box> { /// This test validates that vector partail message merging mechanism doesn't /// interfere with the non-partial messages that don't end with newline. -#[test] -fn no_newline_at_eol() -> Result<(), Box> { +#[tokio::test] +async fn no_newline_at_eol() -> Result<(), Box> { let _guard = lock(); let framework = make_framework(); - let vector = framework.vector("test-vector", VECTOR_CONFIG)?; - framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; - - let test_namespace = framework.namespace("test-vector-test-pod")?; - - let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - "echo -n MARKER", // `-n` doesn't print newline - vec![], - ))?)?; - framework.wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - )?; + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; + + let test_namespace = framework.namespace("test-vector-test-pod").await?; + + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo -n MARKER", // `-n` doesn't print newline + vec![], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader); + smoke_check_first_line(&mut log_reader).await; // Read the rest of the log lines. 
let mut got_expected_line = false; @@ -329,7 +352,8 @@ fn no_newline_at_eol() -> Result<(), Box> { // Request to stop the flow. FlowControlCommand::Terminate - })?; + }) + .await?; assert!(got_expected_line); @@ -341,34 +365,40 @@ fn no_newline_at_eol() -> Result<(), Box> { /// This test validates that vector picks up preexisting logs - logs that /// existed before vector was deployed. -#[test] -fn preexisting() -> Result<(), Box> { +#[tokio::test] +async fn preexisting() -> Result<(), Box> { let _guard = lock(); let framework = make_framework(); - let test_namespace = framework.namespace("test-vector-test-pod")?; - - let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - "echo MARKER", - vec![], - ))?)?; - framework.wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - )?; + let test_namespace = framework.namespace("test-vector-test-pod").await?; + + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo MARKER", + vec![], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; // Wait for some extra time to ensure pod completes. - std::thread::sleep(std::time::Duration::from_secs(10)); + tokio::time::delay_for(std::time::Duration::from_secs(10)).await; - let vector = framework.vector("test-vector", VECTOR_CONFIG)?; - framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader); + smoke_check_first_line(&mut log_reader).await; // Read the rest of the log lines. let mut got_marker = false; @@ -393,7 +423,8 @@ fn preexisting() -> Result<(), Box> { // Request to stop the flow. FlowControlCommand::Terminate - })?; + }) + .await?; assert!(got_marker); @@ -405,32 +436,38 @@ fn preexisting() -> Result<(), Box> { /// This test validates that vector picks up multiple log lines, and that they /// arrive at the proper order. 
-#[test] -fn multiple_lines() -> Result<(), Box> { +#[tokio::test] +async fn multiple_lines() -> Result<(), Box> { let _guard = lock(); let framework = make_framework(); - let vector = framework.vector("test-vector", VECTOR_CONFIG)?; - framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; - let test_namespace = framework.namespace("test-vector-test-pod")?; + let test_namespace = framework.namespace("test-vector-test-pod").await?; let test_messages = vec!["MARKER1", "MARKER2", "MARKER3", "MARKER4", "MARKER5"]; - let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - &format!("echo -e {}", test_messages.join(r"\\n")), - vec![], - ))?)?; - framework.wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - )?; + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + &format!("echo -e {}", test_messages.join(r"\\n")), + vec![], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader); + smoke_check_first_line(&mut log_reader).await; // Read the rest of the log lines. let mut test_messages_iter = test_messages.into_iter().peekable(); @@ -456,7 +493,8 @@ fn multiple_lines() -> Result<(), Box> { // Request to stop the flow. FlowControlCommand::Terminate - })?; + }) + .await?; assert!(test_messages_iter.next().is_none()); @@ -468,31 +506,37 @@ fn multiple_lines() -> Result<(), Box> { /// This test validates that vector properly annotates log events with pod /// metadata obtained from the k8s API. -#[test] -fn pod_metadata_annotation() -> Result<(), Box> { +#[tokio::test] +async fn pod_metadata_annotation() -> Result<(), Box> { let _guard = lock(); let framework = make_framework(); - let vector = framework.vector("test-vector", VECTOR_CONFIG)?; - framework.wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"])?; - - let test_namespace = framework.namespace("test-vector-test-pod")?; - - let test_pod = framework.test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - "echo MARKER", - vec![("label1", "hello"), ("label2", "world")], - ))?)?; - framework.wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - )?; + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; + + let test_namespace = framework.namespace("test-vector-test-pod").await?; + + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod", + "echo MARKER", + vec![("label1", "hello"), ("label2", "world")], + ))?) 
+ .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader); + smoke_check_first_line(&mut log_reader).await; // Read the rest of the log lines. let mut got_marker = false; @@ -525,7 +569,8 @@ fn pod_metadata_annotation() -> Result<(), Box> { // Request to stop the flow. FlowControlCommand::Terminate - })?; + }) + .await?; assert!(got_marker); From 0d5a541b390aaf0a3a3a5738967bbc455925cc3e Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 02:12:49 +0300 Subject: [PATCH 15/69] Allow specifying scope Signed-off-by: MOZGIII --- scripts/test-e2e-kubernetes.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index 19e1624023a7a..281f928184217 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -94,4 +94,4 @@ export CONTAINER_IMAGE export KUBE_TEST_DEPLOY_COMMAND="scripts/deploy-kubernetes-test.sh" # Run the tests. -cargo test --test kubernetes-e2e --no-default-features --features kubernetes-e2e-tests -- --nocapture --test-threads 1 +cargo test --test kubernetes-e2e --no-default-features --features kubernetes-e2e-tests -- --nocapture --test-threads 1 ${SCOPE:-} From 291e51349f80a4c438ebdcb2d2c6401b7995b7c3 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 13:14:21 +0300 Subject: [PATCH 16/69] Correct arguments preparation for cargo test at scripts/test-e2e-kubernetes.sh Signed-off-by: MOZGIII --- scripts/test-e2e-kubernetes.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index 281f928184217..78d9c54e95188 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -93,5 +93,18 @@ export CONTAINER_IMAGE # Set the deployment command for integration tests. export KUBE_TEST_DEPLOY_COMMAND="scripts/deploy-kubernetes-test.sh" +# Prepare args. +CARGO_TEST_ARGS=() +if [[ -n "${SCOPE:-}" && "$SCOPE" != '""' ]]; then + CARGO_TEST_ARGS+=("$SCOPE") +fi + # Run the tests. 
-cargo test --test kubernetes-e2e --no-default-features --features kubernetes-e2e-tests -- --nocapture --test-threads 1 ${SCOPE:-} +cargo test \ + --test kubernetes-e2e \ + --no-default-features \ + --features kubernetes-e2e-tests \ + -- \ + --nocapture \ + --test-threads 1 \ + "${CARGO_TEST_ARGS[@]}" From b0672586120cbeda5ba4c2075fc1034493852df2 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 14:34:22 +0300 Subject: [PATCH 17/69] Get rid of $(RUN) at test-e2e-kubernetes target at Makefile Signed-off-by: MOZGIII --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1d3d80551743d..822ca2bb7aec3 100644 --- a/Makefile +++ b/Makefile @@ -283,7 +283,7 @@ endif PACKAGE_DEB_USE_CONTAINER ?= $(USE_CONTAINER) test-e2e-kubernetes: ## Runs Kubernetes E2E tests (Sorry, no `ENVIRONMENT=true` support) - PACKAGE_DEB_USE_CONTAINER="$(PACKAGE_DEB_USE_CONTAINER)" USE_CONTAINER=none $(RUN) test-e2e-kubernetes + PACKAGE_DEB_USE_CONTAINER="$(PACKAGE_DEB_USE_CONTAINER)" scripts/test-e2e-kubernetes.sh test-shutdown: ## Runs shutdown tests ifeq ($(AUTOSPAWN), true) From e241e629b8898ee2e4c5c3f48206bac5a1b28676 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 16 Jun 2020 15:19:16 +0300 Subject: [PATCH 18/69] Set LOG at distribution/kubernetes/vector-namespaced.yaml Signed-off-by: MOZGIII --- distribution/kubernetes/vector-namespaced.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/distribution/kubernetes/vector-namespaced.yaml b/distribution/kubernetes/vector-namespaced.yaml index 6e52aaba2ff7f..493ce619e091d 100644 --- a/distribution/kubernetes/vector-namespaced.yaml +++ b/distribution/kubernetes/vector-namespaced.yaml @@ -49,6 +49,11 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + # Set a reasonable log level to avoid issues with internal logs + # overwriting console output at E2E tests. Fell free to change at + # a real deployment. + - name: LOG + value: info volumeMounts: - name: var-log mountPath: /var/log/ From 5459179c6a1a7124e986174185c4515020e0be18 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 18 Jun 2020 19:46:32 +0300 Subject: [PATCH 19/69] Add a test to validate the pods are properly excluded This tool a while to implement, and required that we make framework async. Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 133 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index d623ee25c0682..055bdce745c1b 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -2,6 +2,7 @@ //! from the `vector` crate, and thus doesn't waste time in a tremendously long //! link step. +use futures::{SinkExt, StreamExt}; use k8s_openapi::{ api::core::v1::{Container, Pod, PodSpec}, apimachinery::pkg::apis::meta::v1::ObjectMeta, @@ -579,3 +580,135 @@ async fn pod_metadata_annotation() -> Result<(), Box> { drop(vector); Ok(()) } + +/// This test validates that vector properly filters out the logs that are +/// requested to be excluded from collection, based on k8s API `Pod` labels. 
+#[tokio::test] +async fn pod_filtering() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; + + let test_namespace = framework.namespace("test-vector-test-pod").await?; + + let excluded_test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod-excluded", + "echo EXCLUDED_MARKER", + vec![("vector.dev/exclude", "true")], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod-excluded"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; + + let control_test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + "test-vector-test-pod", + "test-pod-control", + "echo CONTROL_MARKER", + vec![], + ))?) + .await?; + framework + .wait( + "test-vector-test-pod", + vec!["pods/test-pod-control"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader).await; + + // Read the log lines until the reasoable amount of time passes for us + // to be confident that vector shoud've picked up the excluded message + // if it wasn't fitlering it. + let mut got_control_marker = false; + let mut lines_till_we_give_up: usize = 10000; + let (stop_tx, mut stop_rx) = futures::channel::mpsc::channel(0); + loop { + let line = tokio::select! { + result = stop_rx.next() => { + result.unwrap(); + log_reader.kill()?; + continue; + } + line = log_reader.read_line() => line, + }; + let line = match line { + Some(line) => line, + None => break, + }; + println!("Got line: {:?}", line); + + lines_till_we_give_up -= 1; + if lines_till_we_give_up <= 0 { + println!("Giving up"); + log_reader.kill()?; + break; + } + + if !line.starts_with("{") { + // This isn't a json, must be an entry from Vector's own log stream. + continue; + } + + let val = parse_json(&line)?; + + if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { + // A log from something other than our test pod, predend we don't + // see it. + continue; + } + + // Ensure we got the log event from the control pod. + assert_eq!(val["kubernetes"]["pod_name"], "test-pod-control"); + + // Ensure the test sanity by validating that we got the control marker. + // If we get an excluded marker here - it's an error. + assert_eq!(val["message"], "CONTROL_MARKER"); + + if got_control_marker { + // We've already seen one control marker! This is not good, we only + // emitted one. + panic!("control marker seen more than once"); + } + + // Remember that we've seen a control marker. + got_control_marker = true; + + // Request termination in a while. + let mut stop_tx = stop_tx.clone(); + tokio::spawn(async move { + // Wait for two minutes - a reasonable time for vector internals to + // pick up new `Pod` and collect events from them in idle load. + println!("Starting stop timer"); + tokio::time::delay_for(std::time::Duration::from_secs(120)).await; + println!("Stop timer complete"); + stop_tx.send(()).await.unwrap(); + }); + } + + // Ensure log reader exited. 
+ log_reader.wait().await.expect("log reader wait failed"); + + assert!(got_control_marker); + + drop(excluded_test_pod); + drop(control_test_pod); + drop(test_namespace); + drop(vector); + Ok(()) +} From 2289834e7d6a3bb385206d31c19f881cacdd7900 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 18 Jun 2020 20:20:21 +0300 Subject: [PATCH 20/69] Fix a typo Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index 055bdce745c1b..e81d9e7c5b72f 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -197,7 +197,7 @@ async fn simple() -> Result<(), Box> { let mut got_marker = false; look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. return FlowControlCommand::GoOn; } @@ -266,7 +266,7 @@ async fn partial_merge() -> Result<(), Box> { let mut got_expected_line = false; look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. return FlowControlCommand::GoOn; } @@ -334,7 +334,7 @@ async fn no_newline_at_eol() -> Result<(), Box> { let mut got_expected_line = false; look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. return FlowControlCommand::GoOn; } @@ -405,7 +405,7 @@ async fn preexisting() -> Result<(), Box> { let mut got_marker = false; look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. return FlowControlCommand::GoOn; } @@ -474,7 +474,7 @@ async fn multiple_lines() -> Result<(), Box> { let mut test_messages_iter = test_messages.into_iter().peekable(); look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. return FlowControlCommand::GoOn; } @@ -543,7 +543,7 @@ async fn pod_metadata_annotation() -> Result<(), Box> { let mut got_marker = false; look_for_log_line(&mut log_reader, |val| { if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. return FlowControlCommand::GoOn; } @@ -668,7 +668,7 @@ async fn pod_filtering() -> Result<(), Box> { let val = parse_json(&line)?; if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, predend we don't + // A log from something other than our test pod, pretend we don't // see it. 
continue; } From c96c14c2346c5bc436161a706d4f953e3ae3d332 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 18 Jun 2020 20:47:45 +0300 Subject: [PATCH 21/69] Add test to assert we properly collect logs from multiple namespaces Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 85 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index e81d9e7c5b72f..2ca2408be26f7 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -10,6 +10,7 @@ use k8s_openapi::{ use kubernetes_test_framework::{ lock, log_lookup, test_pod, wait_for_resource::WaitFor, Framework, Interface, }; +use std::collections::HashSet; const VECTOR_CONFIG: &str = r#" apiVersion: v1 @@ -712,3 +713,87 @@ async fn pod_filtering() -> Result<(), Box> { drop(vector); Ok(()) } + +/// This test validates that vector properly collects logs from multiple +/// `Namespace`s and `Pod`s. +#[tokio::test] +async fn multiple_ns() -> Result<(), Box> { + let _guard = lock(); + let framework = make_framework(); + + let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; + framework + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .await?; + + const NS_PREFIX: &str = "test-vector-test-pod"; + + let mut test_namespaces = vec![]; + let mut expected_namespaces = HashSet::new(); + for i in 0..10 { + let name = format!("{}-{}", NS_PREFIX, i); + test_namespaces.push(framework.namespace(&name).await?); + expected_namespaces.insert(name); + } + + let mut test_pods = vec![]; + for ns in &expected_namespaces { + let test_pod = framework + .test_pod(test_pod::Config::from_pod(&make_test_pod( + ns, + "test-pod", + "echo MARKER", + vec![], + ))?) + .await?; + framework + .wait( + ns, + vec!["pods/test-pod"], + WaitFor::Condition("initialized"), + vec!["--timeout=30s"], + ) + .await?; + test_pods.push(test_pod); + } + + let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; + smoke_check_first_line(&mut log_reader).await; + + // Read the rest of the log lines. + look_for_log_line(&mut log_reader, |val| { + let ns = match val["kubernetes"]["pod_namespace"].as_str() { + Some(val) if val.starts_with(NS_PREFIX) => val, + _ => { + // A log from something other than our test pod, pretend we + // don't see it. + return FlowControlCommand::GoOn; + } + }; + + // Ensure we got the marker. + assert_eq!(val["message"], "MARKER"); + + // Remove the namespace from the list of namespaces we still expect to + // get. + let as_expected = expected_namespaces.remove(ns); + assert!(as_expected); + + if expected_namespaces.is_empty() { + // We got all the messages we expected, request to stop the flow. + FlowControlCommand::Terminate + } else { + // We didn't get all the messages yet. + FlowControlCommand::GoOn + } + }) + .await?; + + // Ensure that we have collected messages from all the namespaces. 
+ assert!(expected_namespaces.is_empty()); + + drop(test_pods); + drop(test_namespaces); + drop(vector); + Ok(()) +} From b22a26dbef8caefe86453dfc2e105169e5f1a49b Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 19 Jun 2020 01:51:19 +0300 Subject: [PATCH 22/69] Polish the test framework API Signed-off-by: MOZGIII --- .../src/framework.rs | 18 ++++++++++- .../src/interface.rs | 10 +++++++ lib/kubernetes-test-framework/src/lib.rs | 3 +- .../src/log_lookup.rs | 30 +++++++++++++------ .../src/namespace.rs | 5 ++++ lib/kubernetes-test-framework/src/test_pod.rs | 16 +++++++--- lib/kubernetes-test-framework/src/vector.rs | 6 +++- .../src/wait_for_resource.rs | 14 ++++++++- .../src/wait_for_rollout.rs | 5 ++++ 9 files changed, 89 insertions(+), 18 deletions(-) diff --git a/lib/kubernetes-test-framework/src/framework.rs b/lib/kubernetes-test-framework/src/framework.rs index ae4ad716fa6e0..a1afe6dce9976 100644 --- a/lib/kubernetes-test-framework/src/framework.rs +++ b/lib/kubernetes-test-framework/src/framework.rs @@ -5,16 +5,20 @@ use super::{ Interface, Result, }; +/// Framework wraps the interface to the system with an easy-to-use rust API +/// optimized for implementing test cases. +#[derive(Debug)] pub struct Framework { interface: Interface, } impl Framework { - /// Create a new [`Framework`]. + /// Create a new [`Framework`] powered by the passed interface. pub fn new(interface: Interface) -> Self { Self { interface } } + /// Deploy `vector` into a cluster. pub async fn vector( &self, namespace: &str, @@ -29,6 +33,7 @@ impl Framework { Ok(manager) } + /// Create a new namespace. pub async fn namespace( &self, namespace: &str, @@ -38,6 +43,7 @@ impl Framework { Ok(manager) } + /// Create a new test `Pod`. pub async fn test_pod( &self, config: test_pod::Config, @@ -47,10 +53,15 @@ impl Framework { Ok(manager) } + /// Initialize log lookup for a particular `resouurce` in a particular + /// `namespace`. pub fn logs(&self, namespace: &str, resource: &str) -> Result { log_lookup::logs(&self.interface.kubectl_command, namespace, resource) } + /// Wait for a set of `resources` in a specified `namespace` to acheive + /// `wait_for` state. + /// Use `extra` to pass additional arguments to `kubectl`. pub async fn wait<'a>( &self, namespace: &str, @@ -68,6 +79,9 @@ impl Framework { .await } + /// Wait for a set of `resources` in any namespace to acheive `wait_for` + /// state. + /// Use `extra` to pass additional arguments to `kubectl`. pub async fn wait_all_namespaces<'a>( &self, resources: impl IntoIterator, @@ -83,6 +97,8 @@ impl Framework { .await } + /// Wait for a rollout of a `resource` to complete. + /// Use `extra` to pass additional arguments to `kubectl`. pub async fn wait_for_rollout<'a>( &self, namespace: &str, diff --git a/lib/kubernetes-test-framework/src/interface.rs b/lib/kubernetes-test-framework/src/interface.rs index d175c4e26aabd..fa722f25e2886 100644 --- a/lib/kubernetes-test-framework/src/interface.rs +++ b/lib/kubernetes-test-framework/src/interface.rs @@ -1,12 +1,22 @@ +//! An interface into the system. + use std::env; +/// An interface between the test framework and external CLI commands and test +/// utilities. #[derive(Debug)] pub struct Interface { + /// A command used to deploy `vector` into the kubernetes cluster and + /// delete if from there. pub deploy_vector_command: String, + + /// A `kubectl` command used to pub kubectl_command: String, } impl Interface { + /// Create a new [`Interface`] instance with the parameters obtained from + /// the process environment. 
pub fn from_env() -> Option { Some(Self { deploy_vector_command: env::var("KUBE_TEST_DEPLOY_COMMAND").ok()?, diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs index aae933267293a..81b0972416f10 100644 --- a/lib/kubernetes-test-framework/src/lib.rs +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -9,8 +9,7 @@ //! the opportunity to test e2e - not just the code layer, but also the //! deployment configuration. -// TODO: deny -#![allow( +#![deny( missing_debug_implementations, missing_copy_implementations, missing_docs diff --git a/lib/kubernetes-test-framework/src/log_lookup.rs b/lib/kubernetes-test-framework/src/log_lookup.rs index a40a7661242ce..14f780b3f2201 100644 --- a/lib/kubernetes-test-framework/src/log_lookup.rs +++ b/lib/kubernetes-test-framework/src/log_lookup.rs @@ -1,8 +1,13 @@ +//! Perform a log lookup. + use super::Result; use std::process::{ExitStatus, Stdio}; use tokio::io::{AsyncBufReadExt, BufReader}; use tokio::process::{Child, ChildStdout, Command}; +/// Initiate a log lookup (`kubectl log`) with the specified `kubectl_command` +/// for the specified `resource` at the specified `namespace`. +/// Returns a [`Reader`] that managed the reading process. pub fn logs(kubectl_command: &str, namespace: &str, resource: &str) -> Result { let mut command = Command::new(kubectl_command); @@ -17,13 +22,17 @@ pub fn logs(kubectl_command: &str, namespace: &str, resource: &str) -> Result, } impl Reader { - pub fn spawn(mut command: Command) -> std::io::Result { + /// Spawn a new `kubectl logs` process. + fn spawn(mut command: Command) -> std::io::Result { Self::prepare_stdout(&mut command); let child = command.spawn()?; Ok(Self::new(child)) @@ -39,14 +48,17 @@ impl Reader { Reader { child, reader } } + /// Wait for the `kubectl logs` process to exit and return the exit code. pub async fn wait(&mut self) -> std::io::Result { (&mut self.child).await } + /// Send a termination signal to the `kubectl logs` process. pub fn kill(&mut self) -> std::io::Result<()> { self.child.kill() } + /// Read one line from the stdout of the `kubectl logs` process. pub async fn read_line(&mut self) -> Option { let mut s = String::new(); let result = self.reader.read_line(&mut s).await; @@ -56,19 +68,19 @@ impl Reader { Err(err) => panic!(err), } } +} + +#[cfg(test)] +mod tests { + use super::*; - pub async fn collect(&mut self) -> Vec { + async fn collect(reader: &mut Reader) -> Vec { let mut list = Vec::new(); - while let Some(line) = self.read_line().await { + while let Some(line) = reader.read_line().await { list.push(line) } list } -} - -#[cfg(test)] -mod tests { - use super::*; #[tokio::test] async fn test_reader_finite() { @@ -78,7 +90,7 @@ mod tests { let mut reader = Reader::spawn(command).expect("unable to spawn"); // Collect all line, expect stream to finish. - let lines = reader.collect().await; + let lines = collect(&mut reader).await; // Assert we got all the lines we expected. assert_eq!(lines, vec!["test\n".to_owned()]); diff --git a/lib/kubernetes-test-framework/src/namespace.rs b/lib/kubernetes-test-framework/src/namespace.rs index d33a65307db75..8143b674f1e07 100644 --- a/lib/kubernetes-test-framework/src/namespace.rs +++ b/lib/kubernetes-test-framework/src/namespace.rs @@ -1,6 +1,9 @@ +//! Manage namespaces. + use crate::up_down; use std::process::{Command, Stdio}; +/// Parameters required to build a `kubectl` command to manage the namespace. 
#[derive(Debug)] pub struct CommandBuilder { kubectl_command: String, @@ -22,6 +25,8 @@ impl up_down::CommandBuilder for CommandBuilder { } } +/// Create a new [`up_down::Manager`] for the specified `namespace` and using +/// the specified `kubectl_command`. pub fn manager(kubectl_command: &str, namespace: &str) -> up_down::Manager { up_down::Manager::new(CommandBuilder { kubectl_command: kubectl_command.to_owned(), diff --git a/lib/kubernetes-test-framework/src/test_pod.rs b/lib/kubernetes-test-framework/src/test_pod.rs index 384abbe84d65e..e58e7d026ee12 100644 --- a/lib/kubernetes-test-framework/src/test_pod.rs +++ b/lib/kubernetes-test-framework/src/test_pod.rs @@ -1,26 +1,32 @@ +//! Manage test pods. + use super::{resource_file::ResourceFile, Result}; use crate::up_down; use k8s_openapi::api::core::v1::Pod; use std::process::{Command, Stdio}; +/// A config that holds a test `Pod` resource file. #[derive(Debug)] pub struct Config { - custom_resource_file: ResourceFile, + test_pod_resource_file: ResourceFile, } impl Config { + /// Create a [`Config`] using a structured [`Pod`] object. pub fn from_pod(pod: &Pod) -> Result { Self::from_resource_string(serde_json::to_string(pod)?.as_str()) } + /// Create a [`Config`] using an unstructured resource string. pub fn from_resource_string(resource: &str) -> Result { - let custom_resource_file = ResourceFile::new(resource)?; + let test_pod_resource_file = ResourceFile::new(resource)?; Ok(Self { - custom_resource_file, + test_pod_resource_file, }) } } +/// Parameters required to build a `kubectl` command to manage the test `Pod`. #[derive(Debug)] pub struct CommandBuilder { kubectl_command: String, @@ -36,12 +42,14 @@ impl up_down::CommandBuilder for CommandBuilder { up_down::CommandToBuild::Down => "delete", }) .arg("-f") - .arg(self.config.custom_resource_file.path()) + .arg(self.config.test_pod_resource_file.path()) .stdin(Stdio::null()); command } } +/// Create a new [`up_down::Manager`] with the specified `config` and using +/// the specified `kubectl_command`. pub fn manager(kubectl_command: &str, config: Config) -> up_down::Manager { up_down::Manager::new(CommandBuilder { kubectl_command: kubectl_command.to_owned(), diff --git a/lib/kubernetes-test-framework/src/vector.rs b/lib/kubernetes-test-framework/src/vector.rs index 2cfeeaf2c3bd8..41cf1cf1bf7ae 100644 --- a/lib/kubernetes-test-framework/src/vector.rs +++ b/lib/kubernetes-test-framework/src/vector.rs @@ -1,7 +1,11 @@ +//! Manage Vector. + use super::{resource_file::ResourceFile, Result}; use crate::up_down; use std::process::{Command, Stdio}; +/// Parameters required to build a `kubectl` command to manage Vector in the +/// Kubernetes cluster. #[derive(Debug)] pub struct CommandBuilder { interface_command: String, @@ -27,7 +31,7 @@ impl up_down::CommandBuilder for CommandBuilder { } } -/// Takes care of deploying vector into the kubernetes cluster. +/// Takes care of deploying Vector into the Kubernetes cluster. /// /// Manages the config file secret accordingly. pub fn manager( diff --git a/lib/kubernetes-test-framework/src/wait_for_resource.rs b/lib/kubernetes-test-framework/src/wait_for_resource.rs index 539761410d310..ac11c2dccac88 100644 --- a/lib/kubernetes-test-framework/src/wait_for_resource.rs +++ b/lib/kubernetes-test-framework/src/wait_for_resource.rs @@ -1,16 +1,25 @@ +//! Wait for a resource to reach a certain condition. 
+ use super::Result; use crate::util::run_command; use std::{ffi::OsStr, process::Stdio}; use tokio::process::Command; +/// Specify what condition to wait for. +#[derive(Debug)] pub enum WaitFor where C: std::fmt::Display, { + /// Wait for resource deletion. Delete, + /// Wait for the specified condition. Condition(C), } +/// Wait for a set of `resources` within a `namespace` to reach a `wait_for` +/// condition. +/// Use `extra` to pass additional arguments to `kubectl`. pub async fn namespace( kubectl_command: CMD, namespace: NS, @@ -30,6 +39,9 @@ where run_command(command).await } +/// Wait for a set of `resources` at any namespace to reach a `wait_for` +/// condition. +/// Use `extra` to pass additional arguments to `kubectl`. pub async fn all_namespaces( kubectl_command: CMD, resources: impl IntoIterator, @@ -47,7 +59,7 @@ where run_command(command).await } -pub fn prepare_base_command( +fn prepare_base_command( kubectl_command: CMD, resources: impl IntoIterator, wait_for: WaitFor, diff --git a/lib/kubernetes-test-framework/src/wait_for_rollout.rs b/lib/kubernetes-test-framework/src/wait_for_rollout.rs index bff7c86d41770..704bc722f1009 100644 --- a/lib/kubernetes-test-framework/src/wait_for_rollout.rs +++ b/lib/kubernetes-test-framework/src/wait_for_rollout.rs @@ -1,8 +1,13 @@ +//! Wait for a resource rollout to complete. + use super::Result; use crate::util::run_command; use std::{ffi::OsStr, process::Stdio}; use tokio::process::Command; +/// Wait for a rollout of a `resource` within a `namespace` to complete via +/// the specifed `kubectl_command`. +/// Use `extra` to pass additional arguments to `kubectl`. pub async fn run( kubectl_command: CMD, namespace: NS, From ea926d7c6f4259c135f0195ce79d1b4280508ccd Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 19 Jun 2020 02:18:44 +0300 Subject: [PATCH 23/69] Add E2E tests section to the contribution guide Signed-off-by: MOZGIII --- CONTRIBUTING.md | 80 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 76 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 806e324db6729..0f8f0b339e2d3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,6 +37,8 @@ expanding into more specifics. 1. [Benchmarking](#benchmarking) 1. [Profiling](#profiling) 1. [Kubernetes](#kubernetes) + 1. [Dev flow](#kubernetes-dev-flow) + 1. [E2E tests](#kubernetes-e2e-tests) 1. [Humans](#humans) 1. [Documentation](#documentation) 1. [Changelog](#changelog) @@ -550,13 +552,15 @@ navigated in your favorite web browser. ### Kubernetes +#### Kubernetes Dev Flow + There is a special flow for when you develop portions of Vector that are designed to work with Kubernetes, like `kubernetes_logs` source or the `deployment/kubernetes/*.yaml` configs. This flow facilitates building Vector and deploying it into a cluster. -#### Requirements +##### Requirements There are some extra requirements besides what you'd normally need to work on Vector: @@ -570,7 +574,7 @@ Vector: * [`minikube`](https://minikube.sigs.k8s.io/)-powered or other k8s cluster * [`cargo watch`](https://github.com/passcod/cargo-watch) -#### The dev flow +##### The dev flow Once you have the requirements, use the `scripts/skaffold.sh dev` command. @@ -596,7 +600,7 @@ the cluster state and exit. `scripts/skaffold.sh` wraps `skaffold`, you can use other `skaffold` subcommands if it fits you better. 
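+
+For example, a one-off build and deploy without the change-watching loop
+should be possible with something along these lines (assuming the `skaffold`
+version you have supports the `run` subcommand):
+
+```shell
+scripts/skaffold.sh run
+```
+
+and `scripts/skaffold.sh delete` should then clean up whatever `run` deployed.
+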
-#### Troubleshooting +##### Troubleshooting You might need to tweak `skaffold`, here are some hints: @@ -614,7 +618,7 @@ You might need to tweak `skaffold`, here are some hints: * For the rest of the `skaffold` tweaks you might want to apply check out [this page](https://skaffold.dev/docs/environment/). -#### Going through the dev flow manually +##### Going through the dev flow manually Is some cases `skaffold` may not work. It's possible to go through the dev flow manually, without `skaffold`. @@ -627,6 +631,74 @@ required. Essentially, the steps you have to take to deploy manually are the same that `skaffold` will perform, and they're outlined at the previous section. +#### Kubernetes E2E tests + +Kubernetes integration is quite complicated, a lot of things can go wrong. + +To cope with the complexity and ensure we maintain high quality, we use +E2E (end-to-end) tests. + +> E2E tests normally run at CI, so there's typically no need to run them +> manually. + +##### Requirements + +* `kubernetes` cluster (`minikube` has special support, but any cluster should + work) +* `docker` +* `kubectl` +* `bash` + +Vector release artifacts are prepared for E2E tests, so the ability to do that +is required too, see Vector [docs](https://vector.dev) for more details. + +##### Running the E2E tests + +To run the E2E tests, use the following command: + +```shell +CONTAINER_IMAGE_REPO=/vector-test make test-e2e-kubernetes +``` + +Where `CONTAINER_IMAGE_REPO` is the docker image repo name to use, without part +after the `:`. Replace `` with your Docker Hub username. + +This will run Kubernetes E2E tests suite. If the command exit with 0 status +code - tests passed. + +You can also pass additional parameters to adjust the behavior of the test: + +* `QUICK_BUILD=true` - use development build and a skaffold image from the dev + flow instead of a production docker image. Significantly speeds up the + preparation process, but doesn't guarantee the correctness in the release + build. Useful for development of the tests or Vector code to speed up the + iteration cycles. + +* `USE_MINIKUBE_DOCKER=true` - instead of pushing the built docker image to the + registry under the specified name, directly inject the image into a docker + instance running inside of a `minikube`-controlled cluster node. + Requires you to test against a `minikube` cluster. Eliminates the need to have + a registry to run tests. + +* `CONTAINER_IMAGE_REPO=/vector-test:tag` - completely skip the step + of building the Vector docker image, and use the specified image instead. + Useful to speed up the iterations speed when you already have a Vector docker + image you want to test against. + +* `SKIP_CONTAINER_IMAGE_PUBLISHING=true` - completely skip the image publishing + step. Useful when you want to speed up the iteration speed and when you know + the Vector image you want to test is already available to the cluster you're + testing against. + +* `SCOPE` - pass a filter to the `cargo test` command to filter out the tests, + effectively equivalent to `cargo test -- $SCOPE`. 
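+
+For instance, to run only the `pod_filtering` test from the suite (any
+`cargo test` name filter should work here):
+
+```shell
+SCOPE=pod_filtering CONTAINER_IMAGE_REPO=<your username>/vector-test make test-e2e-kubernetes
+```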
+ +Passing additional commands is done like so: + +```shell +QUICK_BUILD=true USE_MINIKUBE_DOCKER=true CONTAINER_IMAGE_REPO=/vector-test make test-e2e-kubernetes +``` + ## Humans After making your change, you'll want to prepare it for Vector's users From 3a23d852b686d425fd837dc6f7f34d3749334fb2 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 19 Jun 2020 02:38:43 +0300 Subject: [PATCH 24/69] Kubernetes E2E tests are no longer experimental, should work consistently Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a97f3c5affa36..de382925c50cc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -266,10 +266,7 @@ jobs: - run: make test-integration-splunk test-e2e-kubernetes: - name: E2E - Linux, Kubernetes, flaky - # This is an experimental test. Allow it to fail without failing the whole - # workflow, but keep it executing on every build to gather stats. - continue-on-error: true + name: E2E - Linux, Kubernetes runs-on: ubuntu-latest strategy: matrix: From d82e44b34e44e31e561e0ec3b5fac71ba316f717 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 19 Jun 2020 02:41:23 +0300 Subject: [PATCH 25/69] Add kubernetes version to the test name Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index de382925c50cc..7d71746690e5e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -266,7 +266,7 @@ jobs: - run: make test-integration-splunk test-e2e-kubernetes: - name: E2E - Linux, Kubernetes + name: E2E - Linux, Kubernetes ${{ matrix.kubernetes }} runs-on: ubuntu-latest strategy: matrix: From 1716843213befedcedfd8ab7fbf17bd840c9ddba Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 19 Jun 2020 02:46:27 +0300 Subject: [PATCH 26/69] Bump minikube Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7d71746690e5e..67695724ea699 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -281,7 +281,7 @@ jobs: - name: Setup Minikube uses: manusa/actions-setup-minikube@v1.0.2 with: - minikube version: 'v1.9.2' + minikube version: 'v1.11.0' kubernetes version: '${{ matrix.kubernetes }}' github token: '${{ secrets.GITHUB_TOKEN }}' - name: Checkout From 1b3631c7d2f80c65cc3116a416d4bbcdfa7fa227 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 19 Jun 2020 02:48:41 +0300 Subject: [PATCH 27/69] Bump kubernetes releases Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 67695724ea699..309d5e08f320b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -271,10 +271,10 @@ jobs: strategy: matrix: kubernetes: - - v1.18.2 - - v1.17.5 - - v1.16.9 - - v1.15.11 + - v1.18.4 + - v1.17.7 + - v1.16.11 + - v1.15.12 - v1.14.10 fail-fast: false steps: From c3656b9b175ba1131fcb6084c0bde308a2f84ba0 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 23 Jun 2020 11:37:41 +0300 Subject: [PATCH 28/69] Use minikube cache instead of manually moving image around Signed-off-by: MOZGIII --- CONTRIBUTING.md | 6 ++-- scripts/copy-docker-image-to-minikube.sh | 38 ------------------------ scripts/minikube-docker-env.sh | 9 
------ scripts/test-e2e-kubernetes.sh | 14 ++++----- 4 files changed, 9 insertions(+), 58 deletions(-) delete mode 100755 scripts/copy-docker-image-to-minikube.sh delete mode 100644 scripts/minikube-docker-env.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0f8f0b339e2d3..0885560ca3f22 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -674,9 +674,9 @@ You can also pass additional parameters to adjust the behavior of the test: build. Useful for development of the tests or Vector code to speed up the iteration cycles. -* `USE_MINIKUBE_DOCKER=true` - instead of pushing the built docker image to the - registry under the specified name, directly inject the image into a docker - instance running inside of a `minikube`-controlled cluster node. +* `USE_MINIKUBE_CACHE=true` - instead of pushing the built docker image to the + registry under the specified name, directly load the image into + a `minikube`-controlled cluster node. Requires you to test against a `minikube` cluster. Eliminates the need to have a registry to run tests. diff --git a/scripts/copy-docker-image-to-minikube.sh b/scripts/copy-docker-image-to-minikube.sh deleted file mode 100755 index 0efe834b69e24..0000000000000 --- a/scripts/copy-docker-image-to-minikube.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# copy-docker-image-to-minikube.sh -# -# SUMMARY -# -# Copies a list of images from the host docker engine to the minikube docker -# engine via save/load commands. -# -# Requires minikube and docker to be available. -# -# USAGE -# -# copy-docker-image-to-minikube.sh timberio/vector:latest - -# Image to copy. -IMAGES=("${@:?"Specify the images to copy in the arguments list"}") - -# Prepare temp dir to store the images archive. -TD="$(mktemp -d)" -IMAGES_ARCHIVE="$TD/images.tar.gz" - -# Save images. -docker save "${IMAGES[@]}" | gzip >"$IMAGES_ARCHIVE" - -# Start a subshell to preserve the env state. -( - # Switch to minikube docker. - # shellcheck source=minikube-docker-env.sh disable=SC1091 - . scripts/minikube-docker-env.sh - - # Load images. - docker load -i "$IMAGES_ARCHIVE" -) - -# Clear temp dir. -rm -rf "$TD" diff --git a/scripts/minikube-docker-env.sh b/scripts/minikube-docker-env.sh deleted file mode 100644 index fc2ffdd8ab9f0..0000000000000 --- a/scripts/minikube-docker-env.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -if ! COMMANDS="$(minikube --shell bash docker-env)"; then - echo "Unable to obtain docker env from minikube; is minikube started?" >&2 - exit 7 -fi - -eval "$COMMANDS" diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index 78d9c54e95188..29eee9bb87085 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -25,7 +25,7 @@ if [[ -z "${CONTAINER_IMAGE:-}" ]]; then # also not work if you k8s cluster doesn't have network connectivity to the # registry. # - # Hint #2: if using with minikube, set `USE_MINIKUBE_DOCKER` to `true` and use + # Hint #2: if using with minikube, set `USE_MINIKUBE_CACHE` to `true` and use # any value for `CONTAINER_IMAGE_REPO` (for instance, `vector-test` will do). # CONTAINER_IMAGE_REPO="${CONTAINER_IMAGE_REPO:?"You have to specify CONTAINER_IMAGE_REPO to upload the test image to."}" @@ -70,18 +70,16 @@ if [[ -z "${CONTAINER_IMAGE:-}" ]]; then fi if [[ -z "${SKIP_CONTAINER_IMAGE_PUBLISHING:-}" ]]; then - # Whether to use minikube docker. + # Whether to use minikube cache to pass image to the k8s cluster. 
# After we build vector docker image, instead of pushing to the remote repo, - # we'll be exporting it to a file after (from the "host" docker engine), and - # then importing that file into the minikube in-cluster docker engine, that - # nodes have access to. + # we'll be using `minikube cache` to make image available to the cluster. # This effectively eliminates the requirement to have a docker registry, but # it requires that we run against minikube cluster. - USE_MINIKUBE_DOCKER="${USE_MINIKUBE_DOCKER:-"false"}" + USE_MINIKUBE_CACHE="${USE_MINIKUBE_CACHE:-"false"}" # Make the container image accessible to the k8s cluster. - if [[ "$USE_MINIKUBE_DOCKER" == "true" ]]; then - scripts/copy-docker-image-to-minikube.sh "$CONTAINER_IMAGE" + if [[ "$USE_MINIKUBE_CACHE" == "true" ]]; then + minikube cache add "$CONTAINER_IMAGE" else docker push "$CONTAINER_IMAGE" fi From ae628158aca52fc523c80453fbc897119b0640da Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 23 Jun 2020 11:01:50 +0300 Subject: [PATCH 29/69] Test against multiple container runtimes Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 44 +++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 309d5e08f320b..05736ad2241e2 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -266,24 +266,44 @@ jobs: - run: make test-integration-splunk test-e2e-kubernetes: - name: E2E - Linux, Kubernetes ${{ matrix.kubernetes }} + name: E2E - Kubernetes ${{ matrix.kubernetes_version }} / ${{ matrix.container_runtime }} runs-on: ubuntu-latest strategy: matrix: - kubernetes: - - v1.18.4 - - v1.17.7 - - v1.16.11 - - v1.15.12 - - v1.14.10 + minikube_version: + - 'v1.11.0' + kubernetes_version: + - 'v1.18.4' + - 'v1.17.7' + - 'v1.16.11' + - 'v1.15.12' + - 'v1.14.10' + container_runtime: + - docker + - crio + - containerd fail-fast: false steps: - name: Setup Minikube - uses: manusa/actions-setup-minikube@v1.0.2 - with: - minikube version: 'v1.11.0' - kubernetes version: '${{ matrix.kubernetes }}' - github token: '${{ secrets.GITHUB_TOKEN }}' + run: | + set -xeuo pipefail + + sudo apt-get install conntrack + + curl -Lo kubectl \ + 'https://storage.googleapis.com/kubernetes-release/release/${{ matrix.kubernetes_version }}/bin/linux/amd64/kubectl' + sudo install kubectl /usr/local/bin/ + + curl -Lo minikube \ + 'https://storage.googleapis.com/minikube/releases/${{ matrix.minikube_version }}/minikube-linux-amd64' + sudo install minikube /usr/local/bin/ + + minikube config set profile minikube + minikube config set vm-driver none + minikube config set kubernetes-version '${{ matrix.kubernetes_version }}' + minikube config set container-runtime '${{ matrix.container_runtime }}' + sudo minikube start + kubectl cluster-info - name: Checkout uses: actions/checkout@v1 - run: USE_CONTAINER=none make slim-builds From e27ade715ed7d21c20f224c5a810bc5b3c53bd38 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 23 Jun 2020 11:52:04 +0300 Subject: [PATCH 30/69] Remove unused repeating_echo_cmd Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index 2ca2408be26f7..bdf2788ba3528 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -28,13 +28,6 @@ data: const BUSYBOX_IMAGE: &str = "busybox:1.28"; -fn repeating_echo_cmd(marker: &str) -> String { - format!( - r#"echo before; i=0; while [ $i -le 600 ]; do sleep 0.1; 
echo "{}"; i=$((i+1)); done"#, - marker - ) -} - fn make_framework() -> Framework { let interface = Interface::from_env().expect("interface is not ready"); Framework::new(interface) From b34f78ff5c647e8c1fe679a75fb69f0afc15618a Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 23 Jun 2020 12:51:11 +0300 Subject: [PATCH 31/69] Display timeout Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index bdf2788ba3528..a43c6db783772 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -688,8 +688,9 @@ async fn pod_filtering() -> Result<(), Box> { tokio::spawn(async move { // Wait for two minutes - a reasonable time for vector internals to // pick up new `Pod` and collect events from them in idle load. - println!("Starting stop timer"); - tokio::time::delay_for(std::time::Duration::from_secs(120)).await; + let duration = std::time::Duration::from_secs(120); + println!("Starting stop timer, due in {} seconds", duration.as_secs()); + tokio::time::delay_for(duration).await; println!("Stop timer complete"); stop_tx.send(()).await.unwrap(); }); From a6194536dcae0e492dd7210b08d2eafcd90a7125 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 23 Jun 2020 12:57:43 +0300 Subject: [PATCH 32/69] Shorter title Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 05736ad2241e2..3c488b6ab41ad 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -266,7 +266,7 @@ jobs: - run: make test-integration-splunk test-e2e-kubernetes: - name: E2E - Kubernetes ${{ matrix.kubernetes_version }} / ${{ matrix.container_runtime }} + name: E2E - K8s ${{ matrix.kubernetes_version }} / ${{ matrix.container_runtime }} runs-on: ubuntu-latest strategy: matrix: From 4b565809e2236c4c5ae067bb1e0a76aec71ee450 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 23 Jun 2020 16:26:06 +0300 Subject: [PATCH 33/69] Switch to docker driver at minikube Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3c488b6ab41ad..52c1e9a3cbef7 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -288,8 +288,6 @@ jobs: run: | set -xeuo pipefail - sudo apt-get install conntrack - curl -Lo kubectl \ 'https://storage.googleapis.com/kubernetes-release/release/${{ matrix.kubernetes_version }}/bin/linux/amd64/kubectl' sudo install kubectl /usr/local/bin/ @@ -299,16 +297,16 @@ jobs: sudo install minikube /usr/local/bin/ minikube config set profile minikube - minikube config set vm-driver none + minikube config set vm-driver docker minikube config set kubernetes-version '${{ matrix.kubernetes_version }}' minikube config set container-runtime '${{ matrix.container_runtime }}' - sudo minikube start + minikube start kubectl cluster-info - name: Checkout uses: actions/checkout@v1 - run: USE_CONTAINER=none make slim-builds - run: make test-e2e-kubernetes env: - SKIP_CONTAINER_IMAGE_PUBLISHING: "true" + USE_MINIKUBE_CACHE: "true" CONTAINER_IMAGE_REPO: vector-test PACKAGE_DEB_USE_CONTAINER: docker From 6aaa21d19e2637037330b7588549f4d79d8bb80c Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 25 Jun 2020 06:38:18 +0300 Subject: [PATCH 34/69] Remove the no_newline_at_eol test Turns out, this test was invalid. 
The root cause with this is that, in essence, Kubernetes expects logs to consist of line, with line being defined as in POSIX - a sequence of characters *ending with \n*. Thus it's *not valid* to emit a log line without the terminating newline symbol in Kubernetes. One effect of this is that when using the CRI log format, lines won't be considered complete until we emit a newline character arrives - and the additional content before the newline will be added to the log line that's missing the newline. Given all of the above, there's no reason for this test to exist. The reason it was added was the behaviour detail of the docker log driver, but it's a mere implementation detail, and it we should abstract from it. The original statement of the test is also ill-posed, cause, as explained above, it's non-partial messages (and, generally speaking, any message) that doesn't end with newline isn't a valid log line in the first place. Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 68 ----------------------------------------- 1 file changed, 68 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index a43c6db783772..e0ea24e4d1d60 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -290,74 +290,6 @@ async fn partial_merge() -> Result<(), Box> { Ok(()) } -/// This test validates that vector partail message merging mechanism doesn't -/// interfere with the non-partial messages that don't end with newline. -#[tokio::test] -async fn no_newline_at_eol() -> Result<(), Box> { - let _guard = lock(); - let framework = make_framework(); - - let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; - framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) - .await?; - - let test_namespace = framework.namespace("test-vector-test-pod").await?; - - let test_pod = framework - .test_pod(test_pod::Config::from_pod(&make_test_pod( - "test-vector-test-pod", - "test-pod", - "echo -n MARKER", // `-n` doesn't print newline - vec![], - ))?) - .await?; - framework - .wait( - "test-vector-test-pod", - vec!["pods/test-pod"], - WaitFor::Condition("initialized"), - vec!["--timeout=30s"], - ) - .await?; - - let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; - smoke_check_first_line(&mut log_reader).await; - - // Read the rest of the log lines. - let mut got_expected_line = false; - look_for_log_line(&mut log_reader, |val| { - if val["kubernetes"]["pod_namespace"] != "test-vector-test-pod" { - // A log from something other than our test pod, pretend we don't - // see it. - return FlowControlCommand::GoOn; - } - - // Ensure the message we got matches the one we emitted. - assert_eq!(val["message"], "MARKER"); - - if got_expected_line { - // We've already seen our expected line once! This is not good, we - // only emitted one. - panic!("test message seen more than once"); - } - - // If we did, remember it. - got_expected_line = true; - - // Request to stop the flow. - FlowControlCommand::Terminate - }) - .await?; - - assert!(got_expected_line); - - drop(test_pod); - drop(test_namespace); - drop(vector); - Ok(()) -} - /// This test validates that vector picks up preexisting logs - logs that /// existed before vector was deployed. 
#[tokio::test] From d0d02cbff9b4dbb7c0dd1b4f4696505c804cb698 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 25 Jun 2020 07:06:15 +0300 Subject: [PATCH 35/69] Increase timeout to rollout vector to 30s Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index e0ea24e4d1d60..4638e774fa44d 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -162,7 +162,7 @@ async fn simple() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -230,7 +230,7 @@ async fn partial_merge() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -321,7 +321,7 @@ async fn preexisting() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; @@ -370,7 +370,7 @@ async fn multiple_lines() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -440,7 +440,7 @@ async fn pod_metadata_annotation() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -516,7 +516,7 @@ async fn pod_filtering() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -649,7 +649,7 @@ async fn multiple_ns() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=10s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) .await?; const NS_PREFIX: &str = "test-vector-test-pod"; From 945ac528d3518701fdfbf93ee752abc433bae8f1 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 25 Jun 2020 07:34:38 +0300 Subject: [PATCH 36/69] Temporarily disable crio Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 52c1e9a3cbef7..33a9077fa4d61 100644 --- 
a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -280,8 +280,10 @@ jobs: - 'v1.14.10' container_runtime: - docker - - crio - containerd + # TODO: re-enable crio when + # https://github.com/kubernetes/minikube/issues/8554 is fixed + # - crio fail-fast: false steps: - name: Setup Minikube From 977d5fc5a0e4fc5d57ef9b4ff4d6729b50d3e2f7 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 25 Jun 2020 12:33:21 +0300 Subject: [PATCH 37/69] Apply workaround for CRIO Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 33a9077fa4d61..e1592b511ade4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -281,9 +281,7 @@ jobs: container_runtime: - docker - containerd - # TODO: re-enable crio when - # https://github.com/kubernetes/minikube/issues/8554 is fixed - # - crio + - crio fail-fast: false steps: - name: Setup Minikube @@ -310,5 +308,9 @@ jobs: - run: make test-e2e-kubernetes env: USE_MINIKUBE_CACHE: "true" - CONTAINER_IMAGE_REPO: vector-test + # CRIO prefixes the image name with `localhost/` when it's passed via + # `minikube cache`, so we specify the prefix ourselves so that + # the effective image name on the minikube node matches the one we + # expect in tests. + CONTAINER_IMAGE_REPO: localhost/vector-test PACKAGE_DEB_USE_CONTAINER: docker From 3f2e1d0927285d579c267e34dfad1cb287083df1 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 25 Jun 2020 16:47:46 +0300 Subject: [PATCH 38/69] Fix clippy Signed-off-by: MOZGIII --- lib/kubernetes-test-framework/src/util.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/kubernetes-test-framework/src/util.rs b/lib/kubernetes-test-framework/src/util.rs index 5d8527d524f2f..87c53585589b8 100644 --- a/lib/kubernetes-test-framework/src/util.rs +++ b/lib/kubernetes-test-framework/src/util.rs @@ -3,7 +3,7 @@ use crate::Result; pub async fn run_command(mut command: tokio::process::Command) -> Result<()> { let exit_status = command.spawn()?.await?; if !exit_status.success() { - Err(format!("exec failed: {:?}", command))?; + return Err(format!("exec failed: {:?}", command).into()); } Ok(()) } @@ -12,7 +12,7 @@ pub fn run_command_blocking(mut command: std::process::Command) -> Result<()> { let mut child = command.spawn()?; let exit_status = child.wait()?; if !exit_status.success() { - Err(format!("exec failed: {:?}", command))?; + return Err(format!("exec failed: {:?}", command).into()); } Ok(()) } From c793ab30fa899f5f62767735d3c924485a2c674b Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 2 Jul 2020 02:06:25 +0300 Subject: [PATCH 39/69] Unset log level in skaffold dev config to fallback to the one set in container Signed-off-by: MOZGIII --- kustomization.yaml | 3 +++ skaffold/manifests/patches/env.yaml | 12 ++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 skaffold/manifests/patches/env.yaml diff --git a/kustomization.yaml b/kustomization.yaml index f8145e36474c6..0ad86d586077d 100644 --- a/kustomization.yaml +++ b/kustomization.yaml @@ -8,3 +8,6 @@ resources: - skaffold/manifests/namespace.yaml - skaffold/manifests/config.yaml - distribution/kubernetes/vector-namespaced.yaml + +patchesStrategicMerge: + - skaffold/manifests/patches/env.yaml diff --git a/skaffold/manifests/patches/env.yaml b/skaffold/manifests/patches/env.yaml new file mode 100644 index 0000000000000..0b67aee5789f5 --- /dev/null +++ b/skaffold/manifests/patches/env.yaml @@ 
-0,0 +1,12 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: vector +spec: + template: + spec: + containers: + - name: vector + env: + - name: LOG + $patch: delete From 9bcd7a9c9ef5c312df464d6420c9658496b62a9e Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 2 Jul 2020 15:39:57 +0300 Subject: [PATCH 40/69] Add exec_tail to the test framework Signed-off-by: MOZGIII --- .../src/exec_tail.rs | 32 +++++ .../src/framework.rs | 17 ++- lib/kubernetes-test-framework/src/lib.rs | 7 +- .../src/log_lookup.rs | 127 +----------------- lib/kubernetes-test-framework/src/reader.rs | 123 +++++++++++++++++ tests/kubernetes-e2e.rs | 6 +- 6 files changed, 180 insertions(+), 132 deletions(-) create mode 100644 lib/kubernetes-test-framework/src/exec_tail.rs create mode 100644 lib/kubernetes-test-framework/src/reader.rs diff --git a/lib/kubernetes-test-framework/src/exec_tail.rs b/lib/kubernetes-test-framework/src/exec_tail.rs new file mode 100644 index 0000000000000..076392b108ff5 --- /dev/null +++ b/lib/kubernetes-test-framework/src/exec_tail.rs @@ -0,0 +1,32 @@ +//! Perform a log lookup. + +use super::{Reader, Result}; +use std::process::Stdio; +use tokio::process::Command; + +/// Exec a `tail` command reading the specified `file` within a `Container` +/// in a `Pod` of a specified `resource` at the specified `namespace` via the +/// specified `kubectl_command`. +/// Returns a [`Reader`] that managed the reading process. +pub fn exec_tail( + kubectl_command: &str, + namespace: &str, + resource: &str, + file: &str, +) -> Result { + let mut command = Command::new(kubectl_command); + + command.stdin(Stdio::null()).stderr(Stdio::inherit()); + + command.arg("exec"); + command.arg("-n").arg(namespace); + command.arg(resource); + command.arg("--"); + command.arg("tail"); + command.arg("--follow=name"); + command.arg("--retry"); + command.arg(file); + + let reader = Reader::spawn(command)?; + Ok(reader) +} diff --git a/lib/kubernetes-test-framework/src/framework.rs b/lib/kubernetes-test-framework/src/framework.rs index a1afe6dce9976..1e1c099fff500 100644 --- a/lib/kubernetes-test-framework/src/framework.rs +++ b/lib/kubernetes-test-framework/src/framework.rs @@ -1,8 +1,8 @@ //! The test framework main entry point. use super::{ - log_lookup, namespace, test_pod, up_down, vector, wait_for_resource, wait_for_rollout, - Interface, Result, + exec_tail, log_lookup, namespace, test_pod, up_down, vector, wait_for_resource, + wait_for_rollout, Interface, Reader, Result, }; /// Framework wraps the interface to the system with an easy-to-use rust API @@ -53,10 +53,17 @@ impl Framework { Ok(manager) } - /// Initialize log lookup for a particular `resouurce` in a particular + /// Initialize log lookup for a particular `resource` in a particular /// `namespace`. - pub fn logs(&self, namespace: &str, resource: &str) -> Result { - log_lookup::logs(&self.interface.kubectl_command, namespace, resource) + pub fn logs(&self, namespace: &str, resource: &str) -> Result { + log_lookup(&self.interface.kubectl_command, namespace, resource) + } + + /// Exec a `tail -f` command reading the specified `file` within + /// a `Container` in a `Pod` of a specified `resource` at the specified + /// `namespace`. 
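+    ///
+    /// A rough usage sketch (the namespace, resource, and file path here are
+    /// purely illustrative):
+    /// `framework.exec_tail("test-namespace", "test-pod", "/var/log/test.log")`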
+ pub fn exec_tail(&self, namespace: &str, resource: &str, file: &str) -> Result { + exec_tail(&self.interface.kubectl_command, namespace, resource, file) } /// Wait for a set of `resources` in a specified `namespace` to acheive diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs index 81b0972416f10..dbe98d553459d 100644 --- a/lib/kubernetes-test-framework/src/lib.rs +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -15,11 +15,13 @@ missing_docs )] +mod exec_tail; pub mod framework; pub mod interface; mod lock; -pub mod log_lookup; +mod log_lookup; pub mod namespace; +mod reader; mod resource_file; pub mod test_pod; mod up_down; @@ -30,8 +32,11 @@ pub mod wait_for_rollout; // Re-export some unit for trivial accessability. +use exec_tail::exec_tail; pub use framework::Framework; pub use interface::Interface; pub use lock::lock; +use log_lookup::log_lookup; +pub use reader::Reader; type Result = std::result::Result>; diff --git a/lib/kubernetes-test-framework/src/log_lookup.rs b/lib/kubernetes-test-framework/src/log_lookup.rs index 14f780b3f2201..6906e5b7058fb 100644 --- a/lib/kubernetes-test-framework/src/log_lookup.rs +++ b/lib/kubernetes-test-framework/src/log_lookup.rs @@ -1,14 +1,13 @@ //! Perform a log lookup. -use super::Result; -use std::process::{ExitStatus, Stdio}; -use tokio::io::{AsyncBufReadExt, BufReader}; -use tokio::process::{Child, ChildStdout, Command}; +use super::{Reader, Result}; +use std::process::Stdio; +use tokio::process::Command; /// Initiate a log lookup (`kubectl log`) with the specified `kubectl_command` /// for the specified `resource` at the specified `namespace`. /// Returns a [`Reader`] that managed the reading process. -pub fn logs(kubectl_command: &str, namespace: &str, resource: &str) -> Result { +pub fn log_lookup(kubectl_command: &str, namespace: &str, resource: &str) -> Result { let mut command = Command::new(kubectl_command); command.stdin(Stdio::null()).stderr(Stdio::inherit()); @@ -21,121 +20,3 @@ pub fn logs(kubectl_command: &str, namespace: &str, resource: &str) -> Result, -} - -impl Reader { - /// Spawn a new `kubectl logs` process. - fn spawn(mut command: Command) -> std::io::Result { - Self::prepare_stdout(&mut command); - let child = command.spawn()?; - Ok(Self::new(child)) - } - - fn prepare_stdout(command: &mut Command) { - command.stdout(Stdio::piped()); - } - - fn new(mut child: Child) -> Self { - let stdout = child.stdout.take().unwrap(); - let reader = BufReader::new(stdout); - Reader { child, reader } - } - - /// Wait for the `kubectl logs` process to exit and return the exit code. - pub async fn wait(&mut self) -> std::io::Result { - (&mut self.child).await - } - - /// Send a termination signal to the `kubectl logs` process. - pub fn kill(&mut self) -> std::io::Result<()> { - self.child.kill() - } - - /// Read one line from the stdout of the `kubectl logs` process. 
- pub async fn read_line(&mut self) -> Option { - let mut s = String::new(); - let result = self.reader.read_line(&mut s).await; - match result { - Ok(0) => None, - Ok(_) => Some(s), - Err(err) => panic!(err), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - async fn collect(reader: &mut Reader) -> Vec { - let mut list = Vec::new(); - while let Some(line) = reader.read_line().await { - list.push(line) - } - list - } - - #[tokio::test] - async fn test_reader_finite() { - let mut command = Command::new("echo"); - command.arg("test"); - - let mut reader = Reader::spawn(command).expect("unable to spawn"); - - // Collect all line, expect stream to finish. - let lines = collect(&mut reader).await; - // Assert we got all the lines we expected. - assert_eq!(lines, vec!["test\n".to_owned()]); - - // Ensure wait doesn't fail, and that we exit status is success. - let exit_status = reader.wait().await.expect("wait failed"); - assert!(exit_status.success()); - } - - #[tokio::test] - async fn test_reader_inifinite() { - let mut command = Command::new("bash"); - command.arg("-c"); - command.arg(r#"NUM=0; while true; do echo "Line $NUM"; NUM=$((NUM+=1)); sleep 0.01; done"#); - - let mut reader = Reader::spawn(command).expect("unable to spawn"); - - // Read the lines and at some point ask the command we're reading from - // to stop. - let mut expected_num = 0; - while let Some(line) = reader.read_line().await { - // Assert we're getting expected lines. - assert_eq!(line, format!("Line {}\n", expected_num)); - - // On line 100 issue a `kill` to stop the infinite stream. - if expected_num == 100 { - reader.kill().expect("process already stopped") - } - - // If we are past 200 it means we issued `kill` at 100 and it wasn't - // effective. This is problem, fail the test. - // We don't to this immediately after `kill` to allow for some - // potential race condition. That kind of race is not just ok, but - // is desirable in the real-life usage to read-up the whole stdout - // buffer. - if expected_num > 200 { - panic!("went too far without stop being effective"); - } - - // Bump the expected num for the next iteration. - expected_num += 1; - } - - // Ensure wait doesn't fail. We killed the process, so expect - // a non-success exit code. - let exit_status = reader.wait().await.expect("wait failed"); - assert!(!exit_status.success()); - } -} diff --git a/lib/kubernetes-test-framework/src/reader.rs b/lib/kubernetes-test-framework/src/reader.rs new file mode 100644 index 0000000000000..9d8c57338013e --- /dev/null +++ b/lib/kubernetes-test-framework/src/reader.rs @@ -0,0 +1,123 @@ +//! Read process output. + +use std::process::{ExitStatus, Stdio}; +use tokio::io::{AsyncBufReadExt, BufReader}; +use tokio::process::{Child, ChildStdout, Command}; + +/// Keeps track of the command invocation, proving the interface to +/// read the output and send a termination signal. +#[derive(Debug)] +pub struct Reader { + child: Child, + reader: BufReader, +} + +impl Reader { + /// Spawn a command and provide a [`Reader`]. + pub fn spawn(mut command: Command) -> std::io::Result { + Self::prepare_stdout(&mut command); + let child = command.spawn()?; + Ok(Self::new(child)) + } + + fn prepare_stdout(command: &mut Command) { + command.stdout(Stdio::piped()); + } + + fn new(mut child: Child) -> Self { + let stdout = child.stdout.take().unwrap(); + let reader = BufReader::new(stdout); + Reader { child, reader } + } + + /// Wait for the `kubectl logs` process to exit and return the exit code. 
+ pub async fn wait(&mut self) -> std::io::Result { + (&mut self.child).await + } + + /// Send a termination signal to the `kubectl logs` process. + pub fn kill(&mut self) -> std::io::Result<()> { + self.child.kill() + } + + /// Read one line from the stdout of the `kubectl logs` process. + pub async fn read_line(&mut self) -> Option { + let mut s = String::new(); + let result = self.reader.read_line(&mut s).await; + match result { + Ok(0) => None, + Ok(_) => Some(s), + Err(err) => panic!(err), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + async fn collect(reader: &mut Reader) -> Vec { + let mut list = Vec::new(); + while let Some(line) = reader.read_line().await { + list.push(line) + } + list + } + + #[tokio::test] + async fn test_reader_finite() { + let mut command = Command::new("echo"); + command.arg("test"); + + let mut reader = Reader::spawn(command).expect("unable to spawn"); + + // Collect all line, expect stream to finish. + let lines = collect(&mut reader).await; + // Assert we got all the lines we expected. + assert_eq!(lines, vec!["test\n".to_owned()]); + + // Ensure wait doesn't fail, and that we exit status is success. + let exit_status = reader.wait().await.expect("wait failed"); + assert!(exit_status.success()); + } + + #[tokio::test] + async fn test_reader_inifinite() { + let mut command = Command::new("bash"); + command.arg("-c"); + command.arg(r#"NUM=0; while true; do echo "Line $NUM"; NUM=$((NUM+=1)); sleep 0.01; done"#); + + let mut reader = Reader::spawn(command).expect("unable to spawn"); + + // Read the lines and at some point ask the command we're reading from + // to stop. + let mut expected_num = 0; + while let Some(line) = reader.read_line().await { + // Assert we're getting expected lines. + assert_eq!(line, format!("Line {}\n", expected_num)); + + // On line 100 issue a `kill` to stop the infinite stream. + if expected_num == 100 { + reader.kill().expect("process already stopped") + } + + // If we are past 200 it means we issued `kill` at 100 and it wasn't + // effective. This is problem, fail the test. + // We don't to this immediately after `kill` to allow for some + // potential race condition. That kind of race is not just ok, but + // is desirable in the real-life usage to read-up the whole stdout + // buffer. + if expected_num > 200 { + panic!("went too far without stop being effective"); + } + + // Bump the expected num for the next iteration. + expected_num += 1; + } + + // Ensure wait doesn't fail. We killed the process, so expect + // a non-success exit code. + let exit_status = reader.wait().await.expect("wait failed"); + assert!(!exit_status.success()); + } +} diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index 4638e774fa44d..dc1e89661d018 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -8,7 +8,7 @@ use k8s_openapi::{ apimachinery::pkg::apis::meta::v1::ObjectMeta, }; use kubernetes_test_framework::{ - lock, log_lookup, test_pod, wait_for_resource::WaitFor, Framework, Interface, + lock, test_pod, wait_for_resource::WaitFor, Framework, Interface, Reader, }; use std::collections::HashSet; @@ -87,7 +87,7 @@ fn generate_long_string(a: usize, b: usize) -> String { /// Read the first line from vector logs and assert that it matches the expected /// one. /// This allows detecting the situations where things have gone very wrong. -async fn smoke_check_first_line(log_reader: &mut log_lookup::Reader) { +async fn smoke_check_first_line(log_reader: &mut Reader) { // Wait for first line as a smoke check. 
let first_line = log_reader .read_line() @@ -108,7 +108,7 @@ enum FlowControlCommand { } async fn look_for_log_line
<P>
( - log_reader: &mut log_lookup::Reader, + log_reader: &mut Reader, mut predicate: P, ) -> Result<(), Box> where From e9548ccef7d7b9c2127cc3bdb7868a26f6c29368 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 7 Jul 2020 20:35:34 +0300 Subject: [PATCH 41/69] Fix a typo at the comment Signed-off-by: MOZGIII --- lib/kubernetes-test-framework/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs index dbe98d553459d..d206f3cc97467 100644 --- a/lib/kubernetes-test-framework/src/lib.rs +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -30,7 +30,7 @@ pub mod vector; pub mod wait_for_resource; pub mod wait_for_rollout; -// Re-export some unit for trivial accessability. +// Re-export some unit for trivial accessibility. use exec_tail::exec_tail; pub use framework::Framework; From 2691e10a2bc8bfa9e7964886a0d736363c75a568 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 7 Jul 2020 20:37:23 +0300 Subject: [PATCH 42/69] Fix the typos and styling at the crate doccomment Signed-off-by: MOZGIII --- lib/kubernetes-test-framework/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/kubernetes-test-framework/src/lib.rs index d206f3cc97467..eafdd10af9215 100644 --- a/lib/kubernetes-test-framework/src/lib.rs +++ b/lib/kubernetes-test-framework/src/lib.rs @@ -5,8 +5,8 @@ //! developer as executable commands, rather than using a rust interface to talk //! to k8s cluster directly. //! This enables very trivial troubleshooting and allows us to use the same -//! deployemnt mechanisms that we use for prodcution - effectively giving us -//! the opportunity to test e2e - not just the code layer, but also the +//! deployment mechanisms that we use for prodcution - effectively giving us +//! the opportunity to test e2e: not just the code layer, but also the //! deployment configuration. #![deny( From 71690f36458dcb7be715c2df073d6a73f76d558a Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 7 Jul 2020 23:45:21 +0300 Subject: [PATCH 43/69] Bump k8s versions for E2E tests at CI Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e1592b511ade4..fbb4009de82af 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -273,9 +273,9 @@ jobs: minikube_version: - 'v1.11.0' kubernetes_version: - - 'v1.18.4' - - 'v1.17.7' - - 'v1.16.11' + - 'v1.18.5' + - 'v1.17.8' + - 'v1.16.12' - 'v1.15.12' - 'v1.14.10' container_runtime: From 7cb5dc709ae22fe046aab2a902ec60d28a7dbc2b Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Sun, 19 Jul 2020 04:20:09 +0300 Subject: [PATCH 44/69] Rename template params to pascal case Signed-off-by: MOZGIII --- .../src/wait_for_resource.rs | 42 +++++++++---------- .../src/wait_for_rollout.rs | 10 ++--- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/lib/kubernetes-test-framework/src/wait_for_resource.rs b/lib/kubernetes-test-framework/src/wait_for_resource.rs index ac11c2dccac88..944872cfe4ef7 100644 --- a/lib/kubernetes-test-framework/src/wait_for_resource.rs +++ b/lib/kubernetes-test-framework/src/wait_for_resource.rs @@ -20,19 +20,19 @@ where /// Wait for a set of `resources` within a `namespace` to reach a `wait_for` /// condition. /// Use `extra` to pass additional arguments to `kubectl`. 
-pub async fn namespace( - kubectl_command: CMD, +pub async fn namespace( + kubectl_command: Cmd, namespace: NS, resources: impl IntoIterator, - wait_for: WaitFor, - extra: impl IntoIterator, + wait_for: WaitFor, + extra: impl IntoIterator, ) -> Result<()> where - CMD: AsRef, + Cmd: AsRef, NS: AsRef, R: AsRef, - COND: std::fmt::Display, - EX: AsRef, + Cond: std::fmt::Display, + Ex: AsRef, { let mut command = prepare_base_command(kubectl_command, resources, wait_for, extra); command.arg("-n").arg(namespace); @@ -42,34 +42,34 @@ where /// Wait for a set of `resources` at any namespace to reach a `wait_for` /// condition. /// Use `extra` to pass additional arguments to `kubectl`. -pub async fn all_namespaces( - kubectl_command: CMD, +pub async fn all_namespaces( + kubectl_command: Cmd, resources: impl IntoIterator, - wait_for: WaitFor, - extra: impl IntoIterator, + wait_for: WaitFor, + extra: impl IntoIterator, ) -> Result<()> where - CMD: AsRef, + Cmd: AsRef, R: AsRef, - COND: std::fmt::Display, - EX: AsRef, + Cond: std::fmt::Display, + Ex: AsRef, { let mut command = prepare_base_command(kubectl_command, resources, wait_for, extra); command.arg("--all-namespaces=true"); run_command(command).await } -fn prepare_base_command( - kubectl_command: CMD, +fn prepare_base_command( + kubectl_command: Cmd, resources: impl IntoIterator, - wait_for: WaitFor, - extra: impl IntoIterator, + wait_for: WaitFor, + extra: impl IntoIterator, ) -> Command where - CMD: AsRef, + Cmd: AsRef, R: AsRef, - COND: std::fmt::Display, - EX: AsRef, + Cond: std::fmt::Display, + Ex: AsRef, { let mut command = Command::new(kubectl_command); diff --git a/lib/kubernetes-test-framework/src/wait_for_rollout.rs b/lib/kubernetes-test-framework/src/wait_for_rollout.rs index 704bc722f1009..a017232afe9ca 100644 --- a/lib/kubernetes-test-framework/src/wait_for_rollout.rs +++ b/lib/kubernetes-test-framework/src/wait_for_rollout.rs @@ -8,17 +8,17 @@ use tokio::process::Command; /// Wait for a rollout of a `resource` within a `namespace` to complete via /// the specifed `kubectl_command`. /// Use `extra` to pass additional arguments to `kubectl`. 
-pub async fn run( - kubectl_command: CMD, +pub async fn run( + kubectl_command: Cmd, namespace: NS, resource: R, - extra: impl IntoIterator, + extra: impl IntoIterator, ) -> Result<()> where - CMD: AsRef, + Cmd: AsRef, NS: AsRef, R: AsRef, - EX: AsRef, + Ex: AsRef, { let mut command = Command::new(kubectl_command); From db531ca86411eb004ba78ba9a5eb4febe67b196e Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Sun, 19 Jul 2020 04:27:07 +0300 Subject: [PATCH 45/69] Remove Drop from ResourceFile Signed-off-by: MOZGIII --- lib/kubernetes-test-framework/src/resource_file.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/kubernetes-test-framework/src/resource_file.rs b/lib/kubernetes-test-framework/src/resource_file.rs index 9f9ac469f002a..3a4f8f170c3b2 100644 --- a/lib/kubernetes-test-framework/src/resource_file.rs +++ b/lib/kubernetes-test-framework/src/resource_file.rs @@ -19,9 +19,3 @@ impl ResourceFile { self.path.as_path() } } - -impl Drop for ResourceFile { - fn drop(&mut self) { - std::fs::remove_file(&self.path).expect("unable to clean up custom resource file"); - } -} From de903fddc65a7159e0e6d64a16983ee2fe0985bc Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 21 Jul 2020 07:58:33 +0300 Subject: [PATCH 46/69] Proper authors Signed-off-by: MOZGIII --- lib/kubernetes-test-framework/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/kubernetes-test-framework/Cargo.toml b/lib/kubernetes-test-framework/Cargo.toml index a6df1b66afc12..26eed0793ae6d 100644 --- a/lib/kubernetes-test-framework/Cargo.toml +++ b/lib/kubernetes-test-framework/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "kubernetes-test-framework" version = "0.1.0" -authors = ["MOZGIII "] +authors = ["Vector Contributors "] edition = "2018" description = "Kubernetes Test Framework used to test Vector in Kubernetes" From 97af531d7a72058c9bdbb285c1ef739f3caac219 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 21 Jul 2020 08:01:49 +0300 Subject: [PATCH 47/69] Rename crate to k8s-test-framework More in-line with the naming patterns of the rest of the k8s-related crates. 
Signed-off-by: MOZGIII --- Cargo.lock | 24 +++++++++---------- Cargo.toml | 4 ++-- .../Cargo.toml | 2 +- .../src/exec_tail.rs | 0 .../src/framework.rs | 0 .../src/interface.rs | 0 .../src/lib.rs | 0 .../src/lock.rs | 0 .../src/log_lookup.rs | 0 .../src/namespace.rs | 0 .../src/reader.rs | 0 .../src/resource_file.rs | 0 .../src/test_pod.rs | 0 .../src/up_down.rs | 0 .../src/util.rs | 0 .../src/vector.rs | 0 .../src/wait_for_resource.rs | 0 .../src/wait_for_rollout.rs | 0 tests/kubernetes-e2e.rs | 2 +- 19 files changed, 16 insertions(+), 16 deletions(-) rename lib/{kubernetes-test-framework => k8s-test-framework}/Cargo.toml (93%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/exec_tail.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/framework.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/interface.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/lib.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/lock.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/log_lookup.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/namespace.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/reader.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/resource_file.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/test_pod.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/up_down.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/util.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/vector.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/wait_for_resource.rs (100%) rename lib/{kubernetes-test-framework => k8s-test-framework}/src/wait_for_rollout.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 1cd3b18f67d54..e037183707f35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1887,6 +1887,17 @@ dependencies = [ "url 2.1.1", ] +[[package]] +name = "k8s-test-framework" +version = "0.1.0" +dependencies = [ + "k8s-openapi", + "once_cell", + "serde_json", + "tempfile", + "tokio 0.2.21", +] + [[package]] name = "kernel32-sys" version = "0.2.2" @@ -1907,17 +1918,6 @@ dependencies = [ "openssl-sys", ] -[[package]] -name = "kubernetes-test-framework" -version = "0.1.0" -dependencies = [ - "k8s-openapi", - "once_cell", - "serde_json", - "tempfile", - "tokio 0.2.21", -] - [[package]] name = "lazy_static" version = "0.2.11" @@ -5454,7 +5454,7 @@ dependencies = [ "inventory", "jemallocator", "k8s-openapi", - "kubernetes-test-framework", + "k8s-test-framework", "lazy_static 1.4.0", "leveldb", "libc", diff --git a/Cargo.toml b/Cargo.toml index 7275adc94f99c..bd3449c257244 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,7 +31,7 @@ members = [ "lib/file-source", "lib/tracing-limit", "lib/vector-wasm", - "lib/kubernetes-test-framework", + "lib/k8s-test-framework", ] [dependencies] @@ -196,7 +196,7 @@ tokio-test = "0.2" tokio = { version = "0.2", features = ["test-util"] } assert_cmd = "1.0" reqwest = { version = "0.10.6", features = ["json"] } -kubernetes-test-framework = { version = "0.1", path = "lib/kubernetes-test-framework" } +k8s-test-framework = { version = "0.1", path = "lib/k8s-test-framework" } [features] # Default features for *-unknown-linux-gnu and *-apple-darwin diff --git a/lib/kubernetes-test-framework/Cargo.toml b/lib/k8s-test-framework/Cargo.toml similarity index 93% rename 
from lib/kubernetes-test-framework/Cargo.toml rename to lib/k8s-test-framework/Cargo.toml index 26eed0793ae6d..7ec283a159c83 100644 --- a/lib/kubernetes-test-framework/Cargo.toml +++ b/lib/k8s-test-framework/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "kubernetes-test-framework" +name = "k8s-test-framework" version = "0.1.0" authors = ["Vector Contributors "] edition = "2018" diff --git a/lib/kubernetes-test-framework/src/exec_tail.rs b/lib/k8s-test-framework/src/exec_tail.rs similarity index 100% rename from lib/kubernetes-test-framework/src/exec_tail.rs rename to lib/k8s-test-framework/src/exec_tail.rs diff --git a/lib/kubernetes-test-framework/src/framework.rs b/lib/k8s-test-framework/src/framework.rs similarity index 100% rename from lib/kubernetes-test-framework/src/framework.rs rename to lib/k8s-test-framework/src/framework.rs diff --git a/lib/kubernetes-test-framework/src/interface.rs b/lib/k8s-test-framework/src/interface.rs similarity index 100% rename from lib/kubernetes-test-framework/src/interface.rs rename to lib/k8s-test-framework/src/interface.rs diff --git a/lib/kubernetes-test-framework/src/lib.rs b/lib/k8s-test-framework/src/lib.rs similarity index 100% rename from lib/kubernetes-test-framework/src/lib.rs rename to lib/k8s-test-framework/src/lib.rs diff --git a/lib/kubernetes-test-framework/src/lock.rs b/lib/k8s-test-framework/src/lock.rs similarity index 100% rename from lib/kubernetes-test-framework/src/lock.rs rename to lib/k8s-test-framework/src/lock.rs diff --git a/lib/kubernetes-test-framework/src/log_lookup.rs b/lib/k8s-test-framework/src/log_lookup.rs similarity index 100% rename from lib/kubernetes-test-framework/src/log_lookup.rs rename to lib/k8s-test-framework/src/log_lookup.rs diff --git a/lib/kubernetes-test-framework/src/namespace.rs b/lib/k8s-test-framework/src/namespace.rs similarity index 100% rename from lib/kubernetes-test-framework/src/namespace.rs rename to lib/k8s-test-framework/src/namespace.rs diff --git a/lib/kubernetes-test-framework/src/reader.rs b/lib/k8s-test-framework/src/reader.rs similarity index 100% rename from lib/kubernetes-test-framework/src/reader.rs rename to lib/k8s-test-framework/src/reader.rs diff --git a/lib/kubernetes-test-framework/src/resource_file.rs b/lib/k8s-test-framework/src/resource_file.rs similarity index 100% rename from lib/kubernetes-test-framework/src/resource_file.rs rename to lib/k8s-test-framework/src/resource_file.rs diff --git a/lib/kubernetes-test-framework/src/test_pod.rs b/lib/k8s-test-framework/src/test_pod.rs similarity index 100% rename from lib/kubernetes-test-framework/src/test_pod.rs rename to lib/k8s-test-framework/src/test_pod.rs diff --git a/lib/kubernetes-test-framework/src/up_down.rs b/lib/k8s-test-framework/src/up_down.rs similarity index 100% rename from lib/kubernetes-test-framework/src/up_down.rs rename to lib/k8s-test-framework/src/up_down.rs diff --git a/lib/kubernetes-test-framework/src/util.rs b/lib/k8s-test-framework/src/util.rs similarity index 100% rename from lib/kubernetes-test-framework/src/util.rs rename to lib/k8s-test-framework/src/util.rs diff --git a/lib/kubernetes-test-framework/src/vector.rs b/lib/k8s-test-framework/src/vector.rs similarity index 100% rename from lib/kubernetes-test-framework/src/vector.rs rename to lib/k8s-test-framework/src/vector.rs diff --git a/lib/kubernetes-test-framework/src/wait_for_resource.rs b/lib/k8s-test-framework/src/wait_for_resource.rs similarity index 100% rename from lib/kubernetes-test-framework/src/wait_for_resource.rs rename to 
lib/k8s-test-framework/src/wait_for_resource.rs diff --git a/lib/kubernetes-test-framework/src/wait_for_rollout.rs b/lib/k8s-test-framework/src/wait_for_rollout.rs similarity index 100% rename from lib/kubernetes-test-framework/src/wait_for_rollout.rs rename to lib/k8s-test-framework/src/wait_for_rollout.rs diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index dc1e89661d018..4b78587cda938 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -7,7 +7,7 @@ use k8s_openapi::{ api::core::v1::{Container, Pod, PodSpec}, apimachinery::pkg::apis::meta::v1::ObjectMeta, }; -use kubernetes_test_framework::{ +use k8s_test_framework::{ lock, test_pod, wait_for_resource::WaitFor, Framework, Interface, Reader, }; use std::collections::HashSet; From 0ca655d73086ecd62eb982502c85407db2095dc2 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 21 Jul 2020 08:04:15 +0300 Subject: [PATCH 48/69] Correct kubectl comment at the interface Signed-off-by: MOZGIII --- lib/k8s-test-framework/src/interface.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/k8s-test-framework/src/interface.rs b/lib/k8s-test-framework/src/interface.rs index fa722f25e2886..0b305e5b99f59 100644 --- a/lib/k8s-test-framework/src/interface.rs +++ b/lib/k8s-test-framework/src/interface.rs @@ -10,7 +10,7 @@ pub struct Interface { /// delete if from there. pub deploy_vector_command: String, - /// A `kubectl` command used to + /// A `kubectl` command used for generic cluster interaction. pub kubectl_command: String, } From 3ae989ffdf05381de1ae6f993b8ff70626c8de4f Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Wed, 22 Jul 2020 11:12:43 +0300 Subject: [PATCH 49/69] Bumped k8s and minikube versions at CI Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fbb4009de82af..7e29760099de7 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -271,11 +271,11 @@ jobs: strategy: matrix: minikube_version: - - 'v1.11.0' + - 'v1.12.1' kubernetes_version: - - 'v1.18.5' - - 'v1.17.8' - - 'v1.16.12' + - 'v1.18.6' + - 'v1.17.9' + - 'v1.16.13' - 'v1.15.12' - 'v1.14.10' container_runtime: From 3110bb8667d69d97edf431d87d65f85591b0dc29 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Wed, 22 Jul 2020 11:38:42 +0300 Subject: [PATCH 50/69] Add a comment explaining the timeout at pod filtering test Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index 4b78587cda938..1814ed73bbeb2 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -620,6 +620,13 @@ async fn pod_filtering() -> Result<(), Box> { tokio::spawn(async move { // Wait for two minutes - a reasonable time for vector internals to // pick up new `Pod` and collect events from them in idle load. + // Here, we're assuming that if the `Pod` that was supposed to be + // ignored was in fact collected (meaning something's wrong with + // the exclusion logic), we'd see it's data within this time frame. + // It's not enough to just wait for `Pod` complete, we should still + // apply a reasonably big timeout before we stop waiting for the + // logs to appear to have high confidence that Vector has enough + // time to pick them up and spit them out. 
let duration = std::time::Duration::from_secs(120); println!("Starting stop timer, due in {} seconds", duration.as_secs()); tokio::time::delay_for(duration).await; From 5b7f17bd70ace250176a571ec6003c91c1140e57 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Wed, 22 Jul 2020 19:33:33 +0300 Subject: [PATCH 51/69] Rollback minikube to 0.11.0 Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7e29760099de7..bde224d2d1bb5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -271,7 +271,7 @@ jobs: strategy: matrix: minikube_version: - - 'v1.12.1' + - 'v1.11.0' # https://github.com/kubernetes/minikube/issues/8799 kubernetes_version: - 'v1.18.6' - 'v1.17.9' From bf37696c5d5447949cea4843276e589acd38ac8c Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 23 Jul 2020 21:11:35 +0300 Subject: [PATCH 52/69] Update CONTRIBUTING.md Co-authored-by: Ana Hobden Signed-off-by: MOZGIII --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0885560ca3f22..c06903b87da48 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -633,7 +633,7 @@ Essentially, the steps you have to take to deploy manually are the same that #### Kubernetes E2E tests -Kubernetes integration is quite complicated, a lot of things can go wrong. +Kubernetes integration has a lot of parts that can go wrong. To cope with the complexity and ensure we maintain high quality, we use E2E (end-to-end) tests. From dba975f6fc4fe12a34048800761a0fac43bed2b9 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 23 Jul 2020 21:49:39 +0300 Subject: [PATCH 53/69] Update distribution/kubernetes/vector-namespaced.yaml Co-authored-by: Ana Hobden Signed-off-by: MOZGIII --- distribution/kubernetes/vector-namespaced.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distribution/kubernetes/vector-namespaced.yaml b/distribution/kubernetes/vector-namespaced.yaml index 493ce619e091d..c92e2bc33c5cb 100644 --- a/distribution/kubernetes/vector-namespaced.yaml +++ b/distribution/kubernetes/vector-namespaced.yaml @@ -50,7 +50,7 @@ spec: fieldRef: fieldPath: metadata.namespace # Set a reasonable log level to avoid issues with internal logs - # overwriting console output at E2E tests. Fell free to change at + # overwriting console output at E2E tests. Feel free to change at # a real deployment. - name: LOG value: info From a173f5d226315677a9726fd98e8e3dbc11b996bb Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 23 Jul 2020 22:38:10 +0300 Subject: [PATCH 54/69] Fix an error at CONTRIBUTING.md Signed-off-by: MOZGIII --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c06903b87da48..7c6ba5c119690 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -680,7 +680,7 @@ You can also pass additional parameters to adjust the behavior of the test: Requires you to test against a `minikube` cluster. Eliminates the need to have a registry to run tests. -* `CONTAINER_IMAGE_REPO=/vector-test:tag` - completely skip the step +* `CONTAINER_IMAGE=/vector-test:tag` - completely skip the step of building the Vector docker image, and use the specified image instead. Useful to speed up the iterations speed when you already have a Vector docker image you want to test against. 
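For illustration only (the image name below is a made-up example, not one defined in this patch series), skipping the build step with a prebuilt image as described in the bullet above might look like:

```shell
# Point the E2E suite at an existing image instead of building one.
CONTAINER_IMAGE=docker.io/example-user/vector-test:some-tag make test-e2e-kubernetes
```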
From ff84036bf38580c4129d68cdcfbf81dd82452f31 Mon Sep 17 00:00:00 2001
From: MOZGIII
Date: Thu, 23 Jul 2020 23:01:25 +0300
Subject: [PATCH 55/69] Remove a trivial line from the doc

Signed-off-by: MOZGIII
---
 CONTRIBUTING.md | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7c6ba5c119690..f6e315a3270f2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -663,9 +663,6 @@ CONTAINER_IMAGE_REPO=/vector-test make test-e2e-kubernetes
 Where `CONTAINER_IMAGE_REPO` is the docker image repo name to use, without
 part after the `:`. Replace `` with your Docker Hub username.
 
-This will run Kubernetes E2E tests suite. If the command exit with 0 status
-code - tests passed.
-
 You can also pass additional parameters to adjust the behavior of the test:
 
 * `QUICK_BUILD=true` - use development build and a skaffold image from the dev

From f29e56f2c82cc8c5a0391f51d3eaefe03a29b094 Mon Sep 17 00:00:00 2001
From: MOZGIII
Date: Fri, 24 Jul 2020 15:51:16 +0300
Subject: [PATCH 56/69] Do second attempt to start up minikube if the first one failed

Signed-off-by: MOZGIII
---
 .github/workflows/tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index bde224d2d1bb5..5c021d031c2d8 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -300,7 +300,7 @@ jobs:
         minikube config set vm-driver docker
         minikube config set kubernetes-version '${{ matrix.kubernetes_version }}'
         minikube config set container-runtime '${{ matrix.container_runtime }}'
-        minikube start
+        minikube start || { minikube delete && minikube start } # try again once if failed initially
         kubectl cluster-info
     - name: Checkout
      uses: actions/checkout@v1

From 3f4fdd741ff8c3186021b4b74b20fdb5e9784237 Mon Sep 17 00:00:00 2001
From: MOZGIII
Date: Fri, 24 Jul 2020 15:52:19 +0300
Subject: [PATCH 57/69] Print minikube logs if it fails to start

Signed-off-by: MOZGIII
---
 .github/workflows/tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 5c021d031c2d8..3563fd16fa923 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -300,7 +300,7 @@ jobs:
         minikube config set vm-driver docker
         minikube config set kubernetes-version '${{ matrix.kubernetes_version }}'
         minikube config set container-runtime '${{ matrix.container_runtime }}'
-        minikube start || { minikube delete && minikube start } # try again once if failed initially
+        minikube start || { minikube delete && minikube start } || minikube logs # try again once if failed initially
         kubectl cluster-info
     - name: Checkout
       uses: actions/checkout@v1

From f1bd433baa3c8a6184bda90e5dac5dae4f0512ef Mon Sep 17 00:00:00 2001
From: MOZGIII
Date: Fri, 24 Jul 2020 16:16:00 +0300
Subject: [PATCH 58/69] Provide a default for CONTAINER_IMAGE_REPO if USE_MINIKUBE_CACHE is set

Signed-off-by: MOZGIII
---
 .github/workflows/tests.yml    |  5 -----
 scripts/test-e2e-kubernetes.sh | 39 ++++++++++++++++++++++++----------
 2 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 3563fd16fa923..736492d3d320a 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -308,9 +308,4 @@ jobs:
     - run: make test-e2e-kubernetes
       env:
         USE_MINIKUBE_CACHE: "true"
-        # CRIO prefixes the image name with `localhost/` when it's passed via
-        # `minikube cache`, so we specify the prefix ourselves so that
-        # the effective image name on the minikube node matches the one we
-        # expect in tests.
-        CONTAINER_IMAGE_REPO: localhost/vector-test
         PACKAGE_DEB_USE_CONTAINER: docker
diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh
index 29eee9bb87085..e71830812818e 100755
--- a/scripts/test-e2e-kubernetes.sh
+++ b/scripts/test-e2e-kubernetes.sh
@@ -16,6 +16,15 @@ random-string() {
   echo
 }
 
+# Whether to use `minikube cache` to pass image to the k8s cluster.
+# After we build vector docker image, instead of pushing to the remote repo,
+# we'll be using `minikube cache` to make image available to the cluster.
+# This effectively eliminates the requirement to have a docker registry, but
+# it requires that we run against minikube cluster.
+is_minikube_cache_enabled() {
+  [[ "${USE_MINIKUBE_CACHE:-"false"}" == "true" ]]
+}
+
 # Build a docker image if it wasn't provided.
 if [[ -z "${CONTAINER_IMAGE:-}" ]]; then
   # Require a repo to put the container image at.
@@ -25,10 +34,25 @@ if [[ -z "${CONTAINER_IMAGE:-}" ]]; then
   # also not work if you k8s cluster doesn't have network connectivity to the
   # registry.
   #
-  # Hint #2: if using with minikube, set `USE_MINIKUBE_CACHE` to `true` and use
-  # any value for `CONTAINER_IMAGE_REPO` (for instance, `vector-test` will do).
+  # Hint #2: if using with minikube, set `USE_MINIKUBE_CACHE` to `true` and you
+  # can omit the `CONTAINER_IMAGE_REPO`.
   #
-  CONTAINER_IMAGE_REPO="${CONTAINER_IMAGE_REPO:?"You have to specify CONTAINER_IMAGE_REPO to upload the test image to."}"
+  if is_minikube_cache_enabled; then
+    # If `minikube cache` will be used, push access to the docker repo
+    # is not required, and we can provide a default value for the
+    # `CONTAINER_IMAGE_REPO`.
+    # CRIO prefixes the image name with `localhost/` when it's passed via
+    # `minikube cache`, so, in our default value, to work around that
+    # issue, we use the repo name that already contains that prefix, such that
+    # the effective image name on the minikube node matches the one expected in
+    # tests.
+    CONTAINER_IMAGE_REPO="${CONTAINER_IMAGE_REPO:-"localhost/vector-test"}"
+  else
+    # If not using `minikube cache`, it's mandatory to have push access to the
+    # repo, so we don't offer a default value and explicitly require the user to
+    # specify a `CONTAINER_IMAGE_REPO`.
+    CONTAINER_IMAGE_REPO="${CONTAINER_IMAGE_REPO:?"You have to specify CONTAINER_IMAGE_REPO to upload the test image to."}"
+  fi
 
   # Assign a default test run ID if none is provided by the user.
   TEST_RUN_ID="${TEST_RUN_ID:-"$(date +%s)-$(random-string)"}"
@@ -70,15 +94,8 @@ if [[ -z "${CONTAINER_IMAGE:-}" ]]; then
 fi
 
 if [[ -z "${SKIP_CONTAINER_IMAGE_PUBLISHING:-}" ]]; then
-  # Whether to use minikube cache to pass image to the k8s cluster.
-  # After we build vector docker image, instead of pushing to the remote repo,
-  # we'll be using `minikube cache` to make image available to the cluster.
-  # This effectively eliminates the requirement to have a docker registry, but
-  # it requires that we run against minikube cluster.
-  USE_MINIKUBE_CACHE="${USE_MINIKUBE_CACHE:-"false"}"
-
   # Make the container image accessible to the k8s cluster.
- if [[ "$USE_MINIKUBE_CACHE" == "true" ]]; then + if is_minikube_cache_enabled; then minikube cache add "$CONTAINER_IMAGE" else docker push "$CONTAINER_IMAGE" From b418c479c2faf9d649e0273aa25a69128f33ec44 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 24 Jul 2020 16:17:38 +0300 Subject: [PATCH 59/69] Update the CONTRIBUTING.md for CONTAINER_IMAGE_REPO default if USE_MINIKUBE_CACHE is set Signed-off-by: MOZGIII --- CONTRIBUTING.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f6e315a3270f2..71e72617ad619 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -676,6 +676,8 @@ You can also pass additional parameters to adjust the behavior of the test: a `minikube`-controlled cluster node. Requires you to test against a `minikube` cluster. Eliminates the need to have a registry to run tests. + When `USE_MINIKUBE_CACHE=true` is set, we provide a default value for the + `CONTAINER_IMAGE_REPO` so it can be omitted. * `CONTAINER_IMAGE=/vector-test:tag` - completely skip the step of building the Vector docker image, and use the specified image instead. @@ -693,7 +695,13 @@ You can also pass additional parameters to adjust the behavior of the test: Passing additional commands is done like so: ```shell -QUICK_BUILD=true USE_MINIKUBE_DOCKER=true CONTAINER_IMAGE_REPO=/vector-test make test-e2e-kubernetes +QUICK_BUILD=true USE_MINIKUBE_CACHE=true make test-e2e-kubernetes +``` + +or + +```shell +QUICK_BUILD=true CONTAINER_IMAGE_REPO=/vector-test make test-e2e-kubernetes ``` ## Humans From 949cae437e37c68949f21110556dcca3a1c750c0 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 24 Jul 2020 16:20:32 +0300 Subject: [PATCH 60/69] Increase all rollout/wait timeouts to one minute Signed-off-by: MOZGIII --- tests/kubernetes-e2e.rs | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tests/kubernetes-e2e.rs b/tests/kubernetes-e2e.rs index 1814ed73bbeb2..f9c5d1e58da4f 100644 --- a/tests/kubernetes-e2e.rs +++ b/tests/kubernetes-e2e.rs @@ -162,7 +162,7 @@ async fn simple() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -180,7 +180,7 @@ async fn simple() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -230,7 +230,7 @@ async fn partial_merge() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -249,7 +249,7 @@ async fn partial_merge() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -312,7 +312,7 @@ async fn preexisting() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -321,7 +321,7 @@ async fn preexisting() -> Result<(), Box> { let vector = framework.vector("test-vector", 
VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; let mut log_reader = framework.logs("test-vector", "daemonset/vector")?; @@ -370,7 +370,7 @@ async fn multiple_lines() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -389,7 +389,7 @@ async fn multiple_lines() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -440,7 +440,7 @@ async fn pod_metadata_annotation() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -458,7 +458,7 @@ async fn pod_metadata_annotation() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -516,7 +516,7 @@ async fn pod_filtering() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; let test_namespace = framework.namespace("test-vector-test-pod").await?; @@ -534,7 +534,7 @@ async fn pod_filtering() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod-excluded"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -551,7 +551,7 @@ async fn pod_filtering() -> Result<(), Box> { "test-vector-test-pod", vec!["pods/test-pod-control"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; @@ -656,7 +656,7 @@ async fn multiple_ns() -> Result<(), Box> { let vector = framework.vector("test-vector", VECTOR_CONFIG).await?; framework - .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=30s"]) + .wait_for_rollout("test-vector", "daemonset/vector", vec!["--timeout=60s"]) .await?; const NS_PREFIX: &str = "test-vector-test-pod"; @@ -684,7 +684,7 @@ async fn multiple_ns() -> Result<(), Box> { ns, vec!["pods/test-pod"], WaitFor::Condition("initialized"), - vec!["--timeout=30s"], + vec!["--timeout=60s"], ) .await?; test_pods.push(test_pod); From b995cbc01703fc106a6806c01cc20bd5437be5cf Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Fri, 24 Jul 2020 16:22:47 +0300 Subject: [PATCH 61/69] Fix syntax error around minikube start command Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 736492d3d320a..e327c837779f5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -300,7 +300,9 @@ jobs: minikube config set vm-driver docker minikube config set kubernetes-version '${{ matrix.kubernetes_version }}' minikube config set container-runtime '${{ matrix.container_runtime }}' 
- minikube start || { minikube delete && minikube start } || minikube logs # try again once if failed initially + # Start minikube, try again once if fails and print logs if the second + # attempt fails too. + minikube start || minikube delete && minikube start || minikube logs kubectl cluster-info - name: Checkout uses: actions/checkout@v1 From c73c9688e37b2f01a5001a32f880f7ef36c5255c Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Sat, 25 Jul 2020 19:10:11 +0300 Subject: [PATCH 62/69] Rollback k8s v1.16.13 to v1.16.12 at CI Signed-off-by: MOZGIII --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e327c837779f5..fe17b1ddccdbf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -275,7 +275,7 @@ jobs: kubernetes_version: - 'v1.18.6' - 'v1.17.9' - - 'v1.16.13' + - 'v1.16.12' # v1.16.13 is broken, see https://github.com/kubernetes/minikube/issues/8840 - 'v1.15.12' - 'v1.14.10' container_runtime: From a9b63c85cad718f9fdfdffb8a17472fdfa195bef Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 17:22:06 +0300 Subject: [PATCH 63/69] Add minikube cache autodetection Signed-off-by: MOZGIII --- scripts/test-e2e-kubernetes.sh | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index e71830812818e..cb7632cc7ebdf 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -16,13 +16,29 @@ random-string() { echo } +# Detect if current kubectl context is `minikube`. +is_kubectl_context_minikube() { + [[ "$(kubectl config current-context || true)" == "minikube" ]] +} + # Whether to use `minikube cache` to pass image to the k8s cluster. # After we build vector docker image, instead of pushing to the remote repo, # we'll be using `minikube cache` to make image available to the cluster. # This effectively eliminates the requirement to have a docker registry, but # it requires that we run against minikube cluster. is_minikube_cache_enabled() { - [[ "${USE_MINIKUBE_CACHE:-"false"}" == "true" ]] + local MODE="${USE_MINIKUBE_CACHE:-"auto"}" + if [[ "$MODE" == "auto" ]]; then + if is_kubectl_context_minikube; then + echo "Note: detected minikube kubectl context, using minikube cache" >&2 + return 0 + else + echo "Note: detected non-minikube kubectl context, docker repo is required" >&2 + return 1 + fi + else + [[ "$MODE" == "true" ]] + fi } # Build a docker image if it wasn't provided. From ec3f9c5308b1ccffb2867ca1d983f0e3358f01cd Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 17:24:40 +0300 Subject: [PATCH 64/69] Document USE_MINIKUBE_CACHE=auto mode Signed-off-by: MOZGIII --- CONTRIBUTING.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 71e72617ad619..b0719af8236df 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -678,6 +678,9 @@ You can also pass additional parameters to adjust the behavior of the test: a registry to run tests. When `USE_MINIKUBE_CACHE=true` is set, we provide a default value for the `CONTAINER_IMAGE_REPO` so it can be omitted. + Can be set to `auto` (default) to automatically detect whether to use + `minikube cache` or not, based on the current `kubectl` context. To opt-out, + set `USE_MINIKUBE_CACHE=false`. * `CONTAINER_IMAGE=/vector-test:tag` - completely skip the step of building the Vector docker image, and use the specified image instead. 
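As a rough sketch of how the documented modes might be combined (the repo name below is a placeholder, not one from this series):

```shell
# Let the script auto-detect: with a minikube kubectl context it uses `minikube cache`.
QUICK_BUILD=true make test-e2e-kubernetes

# Opt out of minikube cache explicitly and push to a registry instead.
USE_MINIKUBE_CACHE=false CONTAINER_IMAGE_REPO=docker.io/example-user/vector-test make test-e2e-kubernetes
```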
From c418e3a5c98b81f356e4ceec321ec03cea898dac Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 18:07:26 +0300 Subject: [PATCH 65/69] Add a note on minikube bug to CONTRIBUTING.md Signed-off-by: MOZGIII --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b0719af8236df..b7f97c8597b16 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -652,6 +652,10 @@ E2E (end-to-end) tests. Vector release artifacts are prepared for E2E tests, so the ability to do that is required too, see Vector [docs](https://vector.dev) for more details. +> Note: `minikube` has a bug in the latest versions that affects our test +> process - see https://github.com/kubernetes/minikube/issues/8799. +> Use version `1.11.0` for now. + ##### Running the E2E tests To run the E2E tests, use the following command: From 7f16dce909a2d3f694132519b6539d040e5901a6 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 20:01:12 +0300 Subject: [PATCH 66/69] Add a note on minikube on ZFS to CONTRIBUTING.md Signed-off-by: MOZGIII --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b7f97c8597b16..39644112c0642 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -656,6 +656,10 @@ is required too, see Vector [docs](https://vector.dev) for more details. > process - see https://github.com/kubernetes/minikube/issues/8799. > Use version `1.11.0` for now. +> Note: `minikube` has troubles running on ZFS systems. If you're using ZFS, we +> suggest using a cloud cluster or [`minik8s`](https://microk8s.io/) with local +> registry. + ##### Running the E2E tests To run the E2E tests, use the following command: From 1e702babebdbc5d54f17ecb47c0eab242d0e1c2e Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 20:24:46 +0300 Subject: [PATCH 67/69] Fix the doc comment at scripts/deploy-kubernetes-test.sh Signed-off-by: MOZGIII --- scripts/deploy-kubernetes-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/deploy-kubernetes-test.sh b/scripts/deploy-kubernetes-test.sh index 9cb4cf85dfef1..190a1b87b7b86 100755 --- a/scripts/deploy-kubernetes-test.sh +++ b/scripts/deploy-kubernetes-test.sh @@ -8,7 +8,7 @@ set -euo pipefail # Deploys Vector into Kubernetes for testing purposes. # Uses the same installation method our users would use. # -# This script impements cli interface required by the kubernetes integration +# This script implements cli interface required by the kubernetes E2E # tests. # # USAGE From ae93ca3799b0e9382d290c4a0886de777a4ec14f Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 20:45:07 +0300 Subject: [PATCH 68/69] Apply a workaround for kubectl from snap Signed-off-by: MOZGIII --- scripts/deploy-kubernetes-test.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/deploy-kubernetes-test.sh b/scripts/deploy-kubernetes-test.sh index 190a1b87b7b86..854604a80c472 100755 --- a/scripts/deploy-kubernetes-test.sh +++ b/scripts/deploy-kubernetes-test.sh @@ -60,7 +60,8 @@ up() { } down() { - $VECTOR_TEST_KUBECTL delete --namespace "$NAMESPACE" -f - < "distribution/kubernetes/vector-namespaced.yaml" + # A workaround for `kubectl` from a `snap` package. 
+ cat < "distribution/kubernetes/vector-namespaced.yaml" | $VECTOR_TEST_KUBECTL delete --namespace "$NAMESPACE" -f - if [[ -n "$CUSTOM_RESOURCE_CONIFGS_FILE" ]]; then $VECTOR_TEST_KUBECTL delete --namespace "$NAMESPACE" -f "$CUSTOM_RESOURCE_CONIFGS_FILE" From ce7284c47ad8b4829e9f149e580844c2e7c60185 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Tue, 28 Jul 2020 22:04:31 +0300 Subject: [PATCH 69/69] Extract and reuse scripts/skaffold-dockerignore.sh Signed-off-by: MOZGIII --- scripts/skaffold-dockerignore.sh | 15 +++++++++++++++ scripts/skaffold.sh | 5 +---- scripts/test-e2e-kubernetes.sh | 1 + 3 files changed, 17 insertions(+), 4 deletions(-) create mode 100755 scripts/skaffold-dockerignore.sh diff --git a/scripts/skaffold-dockerignore.sh b/scripts/skaffold-dockerignore.sh new file mode 100755 index 0000000000000..a82f73af27c66 --- /dev/null +++ b/scripts/skaffold-dockerignore.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -euo pipefail +cd "$(dirname "${BASH_SOURCE[0]}")/.." + +# skaffold-dockerignore.sh +# +# SUMMARY +# +# Prepare .dockerignore for skaffold docker image build so we don't send the +# whole `target/debug` dir to the docker as the context. + +cat <target/debug/.dockerignore +**/* +!vector +EOF diff --git a/scripts/skaffold.sh b/scripts/skaffold.sh index dd16c70603a7d..41e359fb19c93 100755 --- a/scripts/skaffold.sh +++ b/scripts/skaffold.sh @@ -7,10 +7,7 @@ cargo build # Prepare .dockerignore so we don't send the whole dir to the docker as the # context. -cat <target/debug/.dockerignore -**/* -!vector -EOF +scripts/skaffold-dockerignore.sh # Watch for changes in he background and rebuild the vector binary. cargo watch -x build & diff --git a/scripts/test-e2e-kubernetes.sh b/scripts/test-e2e-kubernetes.sh index cb7632cc7ebdf..ea29395b14e85 100755 --- a/scripts/test-e2e-kubernetes.sh +++ b/scripts/test-e2e-kubernetes.sh @@ -84,6 +84,7 @@ if [[ -z "${CONTAINER_IMAGE:-}" ]]; then CONTAINER_IMAGE="$CONTAINER_IMAGE_REPO:$VERSION_TAG-debug" # Build docker image. + scripts/skaffold-dockerignore.sh docker build --tag "$CONTAINER_IMAGE" -f skaffold/docker/Dockerfile target/debug else # Package a .deb file to build a docker container, unless skipped.