diff --git a/README.md b/README.md index c60a9a5a3..79b53533c 100644 --- a/README.md +++ b/README.md @@ -268,6 +268,11 @@ Scale down your Fluentd instances to 0. $ oc scale dc/logging-fluentd --replicas=0 +Or, if your Fluentd is deployed using the daemonset controller, unlabel all of +your nodes. + + $ oc label nodes --all logging-infra-fluentd- + Wait until they have properly terminated, this gives them time to properly flush their current buffer and send any logs they were processing to Elasticsearch. This helps prevent loss of data. @@ -287,8 +292,17 @@ Once your ES pods are confirmed to be terminated we can now pull in the latest EFK images to use as described [here](https://docs.openshift.org/latest/install_config/upgrading/manual_upgrades.html#importing-the-latest-images), replacing the default namespace with the namespace where logging was installed. -With the latest images in your repository we can now begin to scale back up. -We want to scale ES back up incrementally so that the cluster has time to rebuild. +With the latest images in your repository, we can now rerun the deployer to generate +any missing or changed objects. + +Be sure to delete your OAuth client first: + + $ oc delete oauthclient --selector logging-infra=support + +Then follow the same steps as before to run the deployer. +After the deployer completes, re-attach the persistent volumes you were using +previously. Next, we want to scale ES back up incrementally so that the cluster +has time to rebuild. $ oc scale dc/logging-es-{unique_name} --replicas=1 @@ -304,4 +318,26 @@ recovered. We can now scale Kibana and Fluentd back up to their previous state. Since Fluentd was shut down and allowed to push its remaining records to ES in the previous steps it can now pick back up from where it left off with no loss of logs -- so long -as the log files that were not read in are still available on the node. +as the log files that were not read in are still available on the node. + +Note: +If your previous deployment did not use a daemonset to schedule Fluentd pods, you +will now need to label the nodes on which Fluentd should be deployed. + + $ oc label nodes <node-name> logging-infra-fluentd=true + +Or, to deploy Fluentd to all of your nodes: + + $ oc label nodes --all logging-infra-fluentd=true + +With this latest version, Kibana displays indices differently in order +to prevent users from being able to access the logs of previously created +projects that have since been deleted. + +Due to this change, your old logs will not appear automatically. To migrate your +old indices to the new format, rerun the deployer with `-v MODE=migrate` in addition +to your prior flags. This should be run while your ES cluster is running, as the +migration script needs to connect to it to make changes. +Note: This only impacts non-operations logs; operations logs will appear the +same as in previous versions. There should be minimal performance impact on ES +while the migration runs, and it does not perform an install. diff --git a/deployment/README.md b/deployment/README.md index a4573e829..2624a5ae9 100644 --- a/deployment/README.md +++ b/deployment/README.md @@ -65,6 +65,14 @@ For examples in this document we will assume the `logging` project. You can use the `default` or another project if you want. This implementation has no need to run in any specific project. 
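Note: the upgrade flow added to the README above can be scripted end to end. The following is only a minimal sketch; it assumes the default object names, and the `logging-es-abc123` deployment config name, the replica counts, and the commented-out steps are illustrative placeholders that will vary per installation.

    # Minimal sketch of the upgrade sequence described in the README changes above.
    # "abc123" stands in for the unique suffix of your ES deployment config.
    oc scale dc/logging-fluentd --replicas=0        # or: oc label nodes --all logging-infra-fluentd-
    oc scale dc/logging-kibana --replicas=0
    oc scale dc/logging-es-abc123 --replicas=0      # repeat for each ES deployment config
    # ...import the latest EFK images, then remove the old oauth client...
    oc delete oauthclient --selector logging-infra=support
    # ...rerun the deployer as during the original install and re-attach any PVs...
    oc scale dc/logging-es-abc123 --replicas=1      # bring ES back one node at a time
    oc scale dc/logging-kibana --replicas=1
    oc label nodes --all logging-infra-fluentd=true # or label individual nodes
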
+## Create missing templates + +If your installation did not create templates in the `openshift` +namespace, the `logging-deployer-template` and `logging-deployer-account-template` +templates may not exist. In that case you can create them with the following: + + $ oc create -n openshift -f https://raw.githubusercontent.com/openshift/origin-aggregated-logging/v0.2/deployment/deployer.yaml ... + ## Create the Deployer Secret Security parameters for the logging infrastructure @@ -98,20 +106,14 @@ An invocation supplying a properly signed Kibana cert might be: ## Create Supporting ServiceAccounts The deployer must run under a service account defined as follows: +(Note: change `:logging:` below to match the project name.) - $ oc create -f - < $dir/ca.serial.txt - else - openshift admin ca create-signer-cert \ - --key="${dir}/ca.key" \ - --cert="${dir}/ca.crt" \ - --serial="${dir}/ca.serial.txt" \ - --name="logging-signer-$(date +%Y%m%d%H%M%S)" - fi - - # use or generate Kibana proxy certs - if [ -n "${KIBANA_KEY}" ]; then - echo "${KIBANA_KEY}" | base64 -d > $dir/kibana.key - echo "${KIBANA_CERT}" | base64 -d > $dir/kibana.crt - elif [ -s /secret/kibana.crt ]; then - # use files from secret if present - cp {/secret,$dir}/kibana.key - cp {/secret,$dir}/kibana.crt - else #fallback to creating one - openshift admin ca create-server-cert \ - --key=$dir/kibana.key \ - --cert=$dir/kibana.crt \ - --hostnames=kibana,${hostname},${ops_hostname} \ - --signer-cert="$dir/ca.crt" --signer-key="$dir/ca.key" --signer-serial="$dir/ca.serial.txt" - fi - if [ -s /secret/kibana-ops.crt ]; then - # use files from secret if present - cp {/secret,$dir}/kibana-ops.key - cp {/secret,$dir}/kibana-ops.crt - else # just reuse the regular kibana cert - cp $dir/kibana{,-ops}.key - cp $dir/kibana{,-ops}.crt - fi - - echo 03 > $dir/ca.serial.txt # otherwise openssl chokes on the file - echo Generating signing configuration file - cat - conf/signing.conf > $dir/signing.conf < $dir/server-tls.json - elif [ -s /secret/server-tls.json ]; then - cp /secret/server-tls.json $dir - else - cp conf/server-tls.json $dir - fi - - # generate client certs for accessing ES - cat /dev/null > $dir/ca.db - cat /dev/null > $dir/ca.crt.srl - fluentd_user='system.logging.fluentd' - kibana_user='system.logging.kibana' - curator_user='system.logging.curator' - sh scripts/generatePEMCert.sh "$fluentd_user" - sh scripts/generatePEMCert.sh "$kibana_user" - sh scripts/generatePEMCert.sh "$curator_user" - - # generate java store/trust for the ES SearchGuard plugin - sh scripts/generateJKSChain.sh logging-es "$(join , logging-es{,-ops}{,-cluster}{,.${project}.svc.cluster.local})" - # generate common node key for the SearchGuard plugin - openssl rand 16 | openssl enc -aes-128-cbc -nosalt -out $dir/searchguard_node_key.key -pass pass:pass - - # generate proxy session - cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 200 | head -n 1 > "$dir/session-secret" - # generate oauth client secret - cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1 > "$dir/oauth-secret" - - # (re)generate secrets - echo "Deleting existing secrets" - oc delete secret logging-fluentd logging-elasticsearch logging-kibana logging-kibana-proxy logging-kibana-ops-proxy logging-curator logging-curator-ops || : - - echo "Creating secrets" - oc secrets new logging-elasticsearch \ - key=$dir/keystore.jks truststore=$dir/truststore.jks \ - searchguard.key=$dir/searchguard_node_key.key - oc secrets new logging-kibana \ - ca=$dir/ca.crt \ - key=$dir/${kibana_user}.key 
cert=$dir/${kibana_user}.crt - oc secrets new logging-kibana-proxy \ - oauth-secret=$dir/oauth-secret \ - session-secret=$dir/session-secret \ - server-key=$dir/kibana.key \ - server-cert=$dir/kibana.crt \ - server-tls.json=$dir/server-tls.json - oc secrets new logging-kibana-ops-proxy \ - oauth-secret=$dir/oauth-secret \ - session-secret=$dir/session-secret \ - server-key=$dir/kibana-ops.key \ - server-cert=$dir/kibana-ops.crt \ - server-tls.json=$dir/server-tls.json - oc secrets new logging-fluentd \ - ca=$dir/ca.crt \ - key=$dir/${fluentd_user}.key cert=$dir/${fluentd_user}.crt - oc secrets new logging-curator \ - ca=$dir/ca.crt \ - key=$dir/${curator_user}.key cert=$dir/${curator_user}.crt - oc secrets new logging-curator-ops \ - ca=$dir/ca.crt \ - key=$dir/${curator_user}.key cert=$dir/${curator_user}.crt - -fi # supporting infrastructure - -# (re)generate templates needed -echo "Creating templates" -oc delete template --selector logging-infra=curator -oc delete template --selector logging-infra=kibana -oc delete template --selector logging-infra=fluentd -oc delete template --selector logging-infra=elasticsearch - -es_params=$(join , \ - ES_CLUSTER_NAME=es \ - ES_INSTANCE_RAM=${es_instance_ram} \ - ES_NODE_QUORUM=${es_node_quorum} \ - ES_RECOVER_AFTER_NODES=${es_recover_after_nodes} \ - ES_RECOVER_EXPECTED_NODES=${es_recover_expected_nodes} \ - ES_RECOVER_AFTER_TIME=${es_recover_after_time} \ - ) - -es_ops_params=$(join , \ - ES_CLUSTER_NAME=es-ops \ - ES_INSTANCE_RAM=${es_ops_instance_ram} \ - ES_NODE_QUORUM=${es_ops_node_quorum} \ - ES_RECOVER_AFTER_NODES=${es_ops_recover_after_nodes} \ - ES_RECOVER_EXPECTED_NODES=${es_ops_recover_expected_nodes} \ - ES_RECOVER_AFTER_TIME=${es_ops_recover_after_time} \ - ) - -if [[ -n "${ES_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${es_nodeselector}" templates/es.yaml | oc process -v "${es_params}" -f - | oc create -f - -else - oc process -f templates/es.yaml -v "${es_params}" | oc create -f - -fi - -es_host=logging-es -es_ops_host=${es_host} - -if [[ -n "${KIBANA_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${kibana_nodeselector}" templates/kibana.yaml | oc process -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url}" -f - | oc create -f - -else - oc process -f templates/kibana.yaml -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url}" | oc create -f - -fi - -if [[ -n "${CURATOR_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${curator_nodeselector}" templates/curator.yaml | oc process -v "ES_HOST=${es_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator" -f - | oc create -f - -else - oc process -f templates/curator.yaml -v "ES_HOST=${es_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator"| oc create -f - -fi - -if [ "${ENABLE_OPS_CLUSTER}" == true ]; then - - if [[ -n "${ES_OPS_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${es_ops_nodeselector}" templates/es.yaml | oc process -v "${es_ops_params}" -f - | oc create -f - - else - oc process -f templates/es.yaml -v "${es_ops_params}" | oc create -f - - fi - - es_ops_host=logging-es-ops - - if [[ -n "${KIBANA_OPS_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${kibana_ops_nodeselector}" templates/kibana.yaml | oc process -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url},KIBANA_DEPLOY_NAME=kibana-ops,ES_HOST=${es_ops_host}" -f - | oc create -f - - else - oc process -f templates/kibana.yaml -v 
"OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url},KIBANA_DEPLOY_NAME=kibana-ops,ES_HOST=logging-es-ops" | oc create -f - - fi - - if [[ -n "${CURATOR_OPS_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${curator_ops_nodeselector}" templates/curator.yaml | oc process -v "ES_HOST=${es_ops_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator-ops" -f - | oc create -f - - else - oc process -f templates/curator.yaml -v "ES_HOST=${es_ops_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator-ops"| oc create -f - - fi - -fi - -if [[ -n "${FLUENTD_NODESELECTOR}" ]]; then - sed "/serviceAccountName/ i\ -\ ${fluentd_nodeselector}" templates/fluentd.yaml | oc process -v "ES_HOST=${es_host},OPS_HOST=${es_ops_host},MASTER_URL=${master_url},IMAGE_PREFIX=${image_prefix},IMAGE_VERSION=${image_version}" -f - | oc create -f - -else - oc process -f templates/fluentd.yaml -v "ES_HOST=${es_host},OPS_HOST=${es_ops_host},MASTER_URL=${master_url},IMAGE_PREFIX=${image_prefix},IMAGE_VERSION=${image_version}"| oc create -f - -fi - -if [ "${KEEP_SUPPORT}" != true ]; then - oc delete template --selector logging-infra=support - oc process -f templates/support.yaml -v "OAUTH_SECRET=$(cat $dir/oauth-secret),KIBANA_HOSTNAME=${hostname},KIBANA_OPS_HOSTNAME=${ops_hostname},IMAGE_PREFIX=${image_prefix},IMAGE_VERSION=${image_version}" | oc create -f - - oc process -f templates/support-pre.yaml | oc create -f - -fi - -echo "(Re-)Creating deployed objects" -if [ "${KEEP_SUPPORT}" != true ]; then - oc delete sa,service --selector logging-infra=support - oc process logging-support-pre-template | oc create -f - -fi - -oc delete dc,rc,pod --selector logging-infra=curator -oc delete dc,rc,pod --selector logging-infra=kibana -oc delete dc,rc,pod --selector logging-infra=fluentd -oc delete dc,rc,pod --selector logging-infra=elasticsearch -for ((n=0;n<${es_cluster_size};n++)); do - oc process logging-es-template | oc create -f - -done -oc process logging-fluentd-template | oc create -f - -oc process logging-kibana-template | oc create -f - -oc process logging-curator-template | oc create -f - -if [ "${ENABLE_OPS_CLUSTER}" == true ]; then - for ((n=0;n<${es_ops_cluster_size};n++)); do - oc process logging-es-ops-template | oc create -f - - done - oc process logging-kibana-ops-template | oc create -f - - oc process logging-curator-ops-template | oc create -f - -fi - -set +x -echo 'Success!' -saf="system:serviceaccount:${project}:aggregated-logging-fluentd" -fns=${FLUENTD_NODESELECTOR:-logging-infra-fluentd=true} -support_section='' -if [ "${KEEP_SUPPORT}" != true ]; then - support_section=" -If you are replacing a previous deployment, delete the previous objects: - oc delete route,is,oauthclient --selector logging-infra=support - -Create the supporting definitions: - oc process logging-support-template | oc create -f - - -Enable fluentd service account - run the following - oadm policy add-scc-to-user hostmount-anyuid $saf - -Give the account access to read pod metadata: - openshift admin policy add-cluster-role-to-user cluster-reader $saf -" -fi -ops_cluster_section="" -if [ "${ENABLE_OPS_CLUSTER}" == true ]; then - ops_cluster_section=" -Operations logs: ----------------- -You chose to split ops logs to their own ops cluster, which includes an -ElasticSearch cluster and its own deployment of Kibana. The deployments -are set apart by '-ops' in the name. The comments above about configuring -ES and scaling Kibana apply equally to the ops cluster. 
-" -fi - -cat < ${fns} - -Kibana: --------------- -You may scale the Kibana deployment for redundancy: - - oc scale dc/logging-kibana --replicas=2 - oc scale rc/logging-kibana-1 --replicas=2 -${ops_cluster_section} -EOF +case "${mode}" in + install) + scripts/install.sh + ;; + migrate) + scripts/uuid_migrate.sh + ;; + *) + echo "Invalid mode provided. One of ['install'|'migrate'] was expected"; + exit 1 + ;; +esac diff --git a/deployment/scripts/install.sh b/deployment/scripts/install.sh new file mode 100755 index 000000000..0a3603957 --- /dev/null +++ b/deployment/scripts/install.sh @@ -0,0 +1,347 @@ +#!/bin/bash +set -ex +dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets +project=${PROJECT:-default} +image_prefix=${IMAGE_PREFIX:-openshift/} +image_version=${IMAGE_VERSION:-latest} +hostname=${KIBANA_HOSTNAME:-kibana.example.com} +ops_hostname=${KIBANA_OPS_HOSTNAME:-kibana-ops.example.com} +public_master_url=${PUBLIC_MASTER_URL:-https://kubernetes.default.svc.cluster.local:443} +master_url=${MASTER_URL:-https://kubernetes.default.svc.cluster.local:443} +# ES cluster parameters: +es_instance_ram=${ES_INSTANCE_RAM:-512M} +es_cluster_size=${ES_CLUSTER_SIZE:-1} +es_node_quorum=${ES_NODE_QUORUM:-$((es_cluster_size/2+1))} +es_recover_after_nodes=${ES_RECOVER_AFTER_NODES:-$((es_cluster_size-1))} +es_recover_expected_nodes=${ES_RECOVER_EXPECTED_NODES:-$es_cluster_size} +es_recover_after_time=${ES_RECOVER_AFTER_TIME:-5m} +es_ops_instance_ram=${ES_OPS_INSTANCE_RAM:-512M} +es_ops_cluster_size=${ES_OPS_CLUSTER_SIZE:-$es_cluster_size} +es_ops_node_quorum=${ES_OPS_NODE_QUORUM:-$((es_ops_cluster_size/2+1))} +es_ops_recover_after_nodes=${ES_OPS_RECOVER_AFTER_NODES:-$((es_ops_cluster_size-1))} +es_ops_recover_expected_nodes=${ES_OPS_RECOVER_EXPECTED_NODES:-$es_ops_cluster_size} +es_ops_recover_after_time=${ES_OPS_RECOVER_AFTER_TIME:-5m} + +# other env vars used: +# WRITE_KUBECONFIG, KEEP_SUPPORT, ENABLE_OPS_CLUSTER +# other env vars used (expect base64 encoding): +# KIBANA_KEY, KIBANA_CERT, SERVER_TLS_JSON + +function join { local IFS="$1"; shift; echo "$*"; } + +function extract_nodeselector() { + local inputstring="${1//\"/}" # remove any errant double quotes in the inputs + local selectors=() + + for keyvalstr in ${inputstring//\,/ }; do + + keyval=( ${keyvalstr//=/ } ) + + if [[ -n "${keyval[0]}" && -n "${keyval[1]}" ]]; then + selectors=( "${selectors[@]}" "\"${keyval[0]}\": \"${keyval[1]}\"") + else + echo "Could not make a node selector label from '${keyval[*]}'" + exit 255 + fi + done + + if [[ "${#selectors[*]}" -gt 0 ]]; then + echo nodeSelector: "{" $(join , "${selectors[@]}") "}" + fi +} + +# node selectors +fluentd_nodeselector=$(extract_nodeselector $FLUENTD_NODESELECTOR) +es_nodeselector=$(extract_nodeselector $ES_NODESELECTOR) +es_ops_nodeselector=$(extract_nodeselector $ES_OPS_NODESELECTOR) +kibana_nodeselector=$(extract_nodeselector $KIBANA_NODESELECTOR) +kibana_ops_nodeselector=$(extract_nodeselector $KIBANA_OPS_NODESELECTOR) +curator_nodeselector=$(extract_nodeselector $CURATOR_NODESELECTOR) +curator_ops_nodeselector=$(extract_nodeselector $CURATOR_OPS_NODESELECTOR) + +###################################### +# +# generate secret contents and secrets +# +if [ "${KEEP_SUPPORT}" != true ]; then + # this fails in the container, but it's useful for dev + rm -rf $dir && mkdir -p $dir && chmod 700 $dir || : + + # cp/generate CA + if [ -s /secret/ca.key ]; then + cp {/secret,$dir}/ca.key + cp {/secret,$dir}/ca.crt + echo "01" > $dir/ca.serial.txt + else + openshift admin ca 
create-signer-cert \ + --key="${dir}/ca.key" \ + --cert="${dir}/ca.crt" \ + --serial="${dir}/ca.serial.txt" \ + --name="logging-signer-$(date +%Y%m%d%H%M%S)" + fi + + # use or generate Kibana proxy certs + if [ -n "${KIBANA_KEY}" ]; then + echo "${KIBANA_KEY}" | base64 -d > $dir/kibana.key + echo "${KIBANA_CERT}" | base64 -d > $dir/kibana.crt + elif [ -s /secret/kibana.crt ]; then + # use files from secret if present + cp {/secret,$dir}/kibana.key + cp {/secret,$dir}/kibana.crt + else #fallback to creating one + openshift admin ca create-server-cert \ + --key=$dir/kibana.key \ + --cert=$dir/kibana.crt \ + --hostnames=kibana,${hostname},${ops_hostname} \ + --signer-cert="$dir/ca.crt" --signer-key="$dir/ca.key" --signer-serial="$dir/ca.serial.txt" + fi + if [ -s /secret/kibana-ops.crt ]; then + # use files from secret if present + cp {/secret,$dir}/kibana-ops.key + cp {/secret,$dir}/kibana-ops.crt + else # just reuse the regular kibana cert + cp $dir/kibana{,-ops}.key + cp $dir/kibana{,-ops}.crt + fi + + echo 03 > $dir/ca.serial.txt # otherwise openssl chokes on the file + echo Generating signing configuration file + cat - conf/signing.conf > $dir/signing.conf < $dir/server-tls.json + elif [ -s /secret/server-tls.json ]; then + cp /secret/server-tls.json $dir + else + cp conf/server-tls.json $dir + fi + + # generate client certs for accessing ES + cat /dev/null > $dir/ca.db + cat /dev/null > $dir/ca.crt.srl + fluentd_user='system.logging.fluentd' + kibana_user='system.logging.kibana' + curator_user='system.logging.curator' + admin_user='system.admin' + scripts/generatePEMCert.sh "$fluentd_user" + scripts/generatePEMCert.sh "$kibana_user" + scripts/generatePEMCert.sh "$curator_user" + scripts/generatePEMCert.sh "$admin_user" + + # generate java store/trust for the ES SearchGuard plugin + scripts/generateJKSChain.sh logging-es "$(join , logging-es{,-ops}{,-cluster}{,.${project}.svc.cluster.local})" + # generate common node key for the SearchGuard plugin + openssl rand 16 | openssl enc -aes-128-cbc -nosalt -out $dir/searchguard_node_key.key -pass pass:pass + + # generate proxy session + cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 200 | head -n 1 > "$dir/session-secret" + # generate oauth client secret + cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1 > "$dir/oauth-secret" + + # (re)generate secrets + echo "Deleting existing secrets" + oc delete secret logging-fluentd logging-elasticsearch logging-kibana logging-kibana-proxy logging-kibana-ops-proxy logging-curator logging-curator-ops || : + + echo "Creating secrets" + oc secrets new logging-elasticsearch \ + key=$dir/keystore.jks truststore=$dir/truststore.jks \ + searchguard.key=$dir/searchguard_node_key.key \ + admin-key=$dir/${admin_user}.key admin-cert=$dir/${admin_user}.crt \ + admin-ca=$dir/ca.crt + oc secrets new logging-kibana \ + ca=$dir/ca.crt \ + key=$dir/${kibana_user}.key cert=$dir/${kibana_user}.crt + oc secrets new logging-kibana-proxy \ + oauth-secret=$dir/oauth-secret \ + session-secret=$dir/session-secret \ + server-key=$dir/kibana.key \ + server-cert=$dir/kibana.crt \ + server-tls.json=$dir/server-tls.json + oc secrets new logging-kibana-ops-proxy \ + oauth-secret=$dir/oauth-secret \ + session-secret=$dir/session-secret \ + server-key=$dir/kibana-ops.key \ + server-cert=$dir/kibana-ops.crt \ + server-tls.json=$dir/server-tls.json + oc secrets new logging-fluentd \ + ca=$dir/ca.crt \ + key=$dir/${fluentd_user}.key cert=$dir/${fluentd_user}.crt + oc secrets new logging-curator \ + ca=$dir/ca.crt \ + 
key=$dir/${curator_user}.key cert=$dir/${curator_user}.crt + oc secrets new logging-curator-ops \ + ca=$dir/ca.crt \ + key=$dir/${curator_user}.key cert=$dir/${curator_user}.crt + +fi # supporting infrastructure + +###################################### +# +# (re)generate templates needed +# +echo "(Re-)Creating templates" +oc delete template --selector logging-infra=curator +oc delete template --selector logging-infra=kibana +oc delete template --selector logging-infra=fluentd +oc delete template --selector logging-infra=elasticsearch + +es_params=$(join , \ + ES_CLUSTER_NAME=es \ + ES_INSTANCE_RAM=${es_instance_ram} \ + ES_NODE_QUORUM=${es_node_quorum} \ + ES_RECOVER_AFTER_NODES=${es_recover_after_nodes} \ + ES_RECOVER_EXPECTED_NODES=${es_recover_expected_nodes} \ + ES_RECOVER_AFTER_TIME=${es_recover_after_time} \ + ) + +es_ops_params=$(join , \ + ES_CLUSTER_NAME=es-ops \ + ES_INSTANCE_RAM=${es_ops_instance_ram} \ + ES_NODE_QUORUM=${es_ops_node_quorum} \ + ES_RECOVER_AFTER_NODES=${es_ops_recover_after_nodes} \ + ES_RECOVER_EXPECTED_NODES=${es_ops_recover_expected_nodes} \ + ES_RECOVER_AFTER_TIME=${es_ops_recover_after_time} \ + ) + +if [[ -n "${ES_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ ${es_nodeselector}" templates/es.yaml | oc process -v "${es_params}" -f - | oc create -f - +else + oc process -f templates/es.yaml -v "${es_params}" | oc create -f - +fi + +es_host=logging-es +es_ops_host=${es_host} + +if [[ -n "${KIBANA_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ ${kibana_nodeselector}" templates/kibana.yaml | oc process -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url}" -f - | oc create -f - +else + oc process -f templates/kibana.yaml -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url}" | oc create -f - +fi + +if [[ -n "${CURATOR_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ ${curator_nodeselector}" templates/curator.yaml | oc process -v "ES_HOST=${es_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator" -f - | oc create -f - +else + oc process -f templates/curator.yaml -v "ES_HOST=${es_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator"| oc create -f - +fi + +if [ "${ENABLE_OPS_CLUSTER}" == true ]; then + + if [[ -n "${ES_OPS_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ ${es_ops_nodeselector}" templates/es.yaml | oc process -v "${es_ops_params}" -f - | oc create -f - + else + oc process -f templates/es.yaml -v "${es_ops_params}" | oc create -f - + fi + + es_ops_host=logging-es-ops + + if [[ -n "${KIBANA_OPS_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ ${kibana_ops_nodeselector}" templates/kibana.yaml | oc process -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url},KIBANA_DEPLOY_NAME=kibana-ops,ES_HOST=${es_ops_host}" -f - | oc create -f - + else + oc process -f templates/kibana.yaml -v "OAP_PUBLIC_MASTER_URL=${public_master_url},OAP_MASTER_URL=${master_url},KIBANA_DEPLOY_NAME=kibana-ops,ES_HOST=logging-es-ops" | oc create -f - + fi + + if [[ -n "${CURATOR_OPS_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ ${curator_ops_nodeselector}" templates/curator.yaml | oc process -v "ES_HOST=${es_ops_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator-ops" -f - | oc create -f - + else + oc process -f templates/curator.yaml -v "ES_HOST=${es_ops_host},MASTER_URL=${master_url},CURATOR_DEPLOY_NAME=curator-ops"| oc create -f - + fi + +fi + +if [[ -n "${FLUENTD_NODESELECTOR}" ]]; then + sed "/serviceAccountName/ i\ +\ 
${fluentd_nodeselector}" templates/fluentd.yaml | oc process -v "ES_HOST=${es_host},OPS_HOST=${es_ops_host},MASTER_URL=${master_url},IMAGE_PREFIX=${image_prefix},IMAGE_VERSION=${image_version}" -f - | oc create -f - +else + oc process -f templates/fluentd.yaml -v "ES_HOST=${es_host},OPS_HOST=${es_ops_host},MASTER_URL=${master_url},IMAGE_PREFIX=${image_prefix},IMAGE_VERSION=${image_version}"| oc create -f - +fi + +if [ "${KEEP_SUPPORT}" != true ]; then + oc delete template --selector logging-infra=support + oc process -f templates/support.yaml -v "OAUTH_SECRET=$(cat $dir/oauth-secret),KIBANA_HOSTNAME=${hostname},KIBANA_OPS_HOSTNAME=${ops_hostname},IMAGE_PREFIX=${image_prefix},IMAGE_VERSION=${image_version}" | oc create -f - +fi + +###################################### +# +# Create "things", mostly from templates +# +echo "(Re-)Creating deployed objects" +if [ "${KEEP_SUPPORT}" != true ]; then + oc process logging-support-template | oc delete -f - || : + oc delete serviceaccount,service,route --selector logging-infra=support + oc process logging-support-template | oc create -f - + oc create route passthrough --service="logging-kibana" --hostname="${hostname}" + oc create route passthrough --service="logging-kibana-ops" --hostname="${ops_hostname}" +fi +oc process logging-imagestream-template | oc create -f - || : # these may fail if already created; that's ok + +oc delete dc,rc,pod --selector logging-infra=curator +oc delete dc,rc,pod --selector logging-infra=kibana +oc delete dc,rc,pod,daemonset --selector logging-infra=fluentd +oc delete dc,rc,pod --selector logging-infra=elasticsearch + +for ((n=0;n<${es_cluster_size};n++)); do + oc process logging-es-template | oc create -f - +done +oc process logging-fluentd-template | oc create -f - +oc process logging-kibana-template | oc create -f - +oc process logging-curator-template | oc create -f - +if [ "${ENABLE_OPS_CLUSTER}" == true ]; then + for ((n=0;n<${es_ops_cluster_size};n++)); do + oc process logging-es-ops-template | oc create -f - + done + oc process logging-kibana-ops-template | oc create -f - + oc process logging-curator-ops-template | oc create -f - +fi + +set +x +echo 'Success!' +fns=${FLUENTD_NODESELECTOR:-logging-infra-fluentd=true} +ops_cluster_section="" +if [ "${ENABLE_OPS_CLUSTER}" == true ]; then + ops_cluster_section=" +Operations logs: +---------------- +You chose to split ops logs to their own ops cluster, which includes an +ElasticSearch cluster and its own deployment of Kibana. The deployments +are set apart by '-ops' in the name. The comments above about configuring +ES apply equally to the ops cluster. +" +fi + +cat < ${fns} + +To label all nodes at once: + oc label nodes --all ${fns} +${ops_cluster_section} +EOF diff --git a/deployment/scripts/uuid_migrate.sh b/deployment/scripts/uuid_migrate.sh new file mode 100755 index 000000000..65d01bbe3 --- /dev/null +++ b/deployment/scripts/uuid_migrate.sh @@ -0,0 +1,85 @@ +#! 
/bin/bash + +set -exuo pipefail + +dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets +project=${PROJECT:-default} +OPS_PROJECTS=("default" "openshift" "openshift-infra") +CA=$dir/admin-ca.crt +KEY=$dir/admin-key.key +CERT=$dir/admin-cert.crt + +es_host=${ES_HOST:-logging-es} +es_port=${ES_PORT:-9200} +project=${PROJECT:-default} + +function create_alias() { + output=`curl -s --cacert $CA --key $KEY --cert $CERT -XPOST "https://$es_host:$es_port/_aliases" -d "{ \"actions\": [ { \"add\": { \"index\": \"${1}.*\", \"alias\": \"${1}.${2}.reference\"}} ] }"` + + echo Migration for project $1: $output +} + +function create_context() { + +# there's no good way for oc to filter the list of secrets; and there can be several token secrets per SA. + # following template prints all tokens for aggregated-logging-fluentd; --sort-by will order them earliest to latest, we will use the last. + local sa_token_secret_template='{{range .items}}{{if eq .type "kubernetes.io/service-account-token"}}{{if eq "aggregated-logging-fluentd" (index .metadata.annotations "kubernetes.io/service-account.name")}}{{.data.token}} +{{end}}{{end}}{{end}}' + local failure="false" + local nodes_active="false" + local output="" + + # check that the aggregated-logging-fluentd SA exists and we can get its token + output=$(oc get secret --namespace="${project}" --sort-by=metadata.resourceVersion --template="$sa_token_secret_template" 2>&1) + local token=$(echo -e "$output" | tail -1 | base64 -d) + + # set up a config context using the aggregated-logging-fluentd account and most recent token + oc config set-credentials aggregated-logging-fluentd-account \ + --token="$token" >& /dev/null + oc config set-context aggregated-logging-fluentd-context \ + --cluster=master \ + --user=aggregated-logging-fluentd-account \ + --namespace="${project}" >& /dev/null + + oc config use-context aggregated-logging-fluentd-context +} + +function recreate_admin_certs(){ + +# note: following mess is because we want the error output from the first failure, not a pipeline + secret_ca=$(oc get secret/logging-elasticsearch --template='{{index .data "admin-ca"}}' 2>&1) + secret_ca=$(echo -e "$secret_ca" | base64 -d 2>&1) + + secret_cert=$(oc get secret/logging-elasticsearch --template='{{index .data "admin-cert"}}' 2>&1) + secret_cert=$(echo -e "$secret_cert" | base64 -d 2>&1) + + secret_key=$(oc get secret/logging-elasticsearch --template='{{index .data "admin-key"}}' 2>&1) + secret_key=$(echo -e "$secret_key" | base64 -d 2>&1) + + echo -e "$secret_key" > $dir/admin-key.key + echo -e "$secret_cert" > $dir/admin-cert.crt + echo -e "$secret_ca" > $dir/admin-ca.crt + +} + +recreate_admin_certs +create_context + +PROJECTS=(`oc get project -o jsonpath='{.items[*].metadata.name}'`) +ES_PODS=$(oc get pods -l component=es | awk -e 'es ~ sel && $3 == "Running" {print $1}') +ES_POD=`echo $ES_PODS | cut -d' ' -f 1` + +if [[ -z "$ES_POD" ]]; then + echo "No Elasticsearch pods found running. Cannot migrate." + echo "Scale up ES prior to running with MODE=migrate" + exit 1 +fi + +for index in "${PROJECTS[@]}"; do + + if [[ ! 
( ${OPS_PROJECTS[@]} =~ $index ) ]]; then + uid=$(oc get project "$index" -o jsonpath='{.metadata.uid}') + create_alias $index $uid + fi + +done diff --git a/deployment/templates/images.yaml b/deployment/templates/images.yaml new file mode 100644 index 000000000..fdd44befd --- /dev/null +++ b/deployment/templates/images.yaml @@ -0,0 +1,76 @@ +apiVersion: "v1" +kind: "Template" +metadata: + name: logging-images-template-maker + annotations: + description: "Template to create template for deploying logging support entities" + tags: "infrastructure" +objects: +- apiVersion: "v1" + kind: "Template" + metadata: + name: logging-images-template + annotations: + description: "Template for deploying logging support entities: imagestreams." + tags: "infrastructure" + labels: + logging-infra: support + labels: + logging-infra: support + provider: openshift + component: support + objects: + - + apiVersion: v1 + kind: ImageStream + metadata: + annotations: + openshift.io/image.insecureRepository: "true" + name: logging-auth-proxy + spec: + dockerImageRepository: ${IMAGE_PREFIX}logging-auth-proxy:${IMAGE_VERSION} + - + apiVersion: v1 + kind: ImageStream + metadata: + annotations: + openshift.io/image.insecureRepository: "true" + name: logging-elasticsearch + spec: + dockerImageRepository: ${IMAGE_PREFIX}logging-elasticsearch:${IMAGE_VERSION} + - + apiVersion: v1 + kind: ImageStream + metadata: + annotations: + openshift.io/image.insecureRepository: "true" + name: logging-fluentd + spec: + dockerImageRepository: ${IMAGE_PREFIX}logging-fluentd:${IMAGE_VERSION} + - + apiVersion: v1 + kind: ImageStream + metadata: + annotations: + openshift.io/image.insecureRepository: "true" + name: logging-kibana + spec: + dockerImageRepository: ${IMAGE_PREFIX}logging-kibana:${IMAGE_VERSION} + - + apiVersion: v1 + kind: ImageStream + metadata: + annotations: + openshift.io/image.insecureRepository: "true" + name: logging-curator + spec: + dockerImageRepository: ${IMAGE_PREFIX}logging-curator:${IMAGE_VERSION} +parameters: +- + description: 'Specify prefix for logging component images; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "docker.io/openshift/origin-" +- + description: 'Specify version for logging component images; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "latest" diff --git a/deployment/templates/support-pre.yaml b/deployment/templates/support-pre.yaml deleted file mode 100644 index 457719707..000000000 --- a/deployment/templates/support-pre.yaml +++ /dev/null @@ -1,130 +0,0 @@ -apiVersion: "v1" -kind: "Template" -metadata: - name: logging-support-pre-template-maker - annotations: - description: "Template to create template for creating logging service accounts" - tags: "infrastructure" -objects: -- apiVersion: "v1" - kind: "Template" - metadata: - name: logging-support-pre-template - annotations: - description: "Template for deploying logging services and service accounts." 
- tags: "infrastructure" - labels: - logging-infra: support - labels: - logging-infra: support - provider: openshift - component: support - objects: - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - secrets: - - name: logging-kibana - - name: logging-kibana-proxy - - name: logging-kibana-ops-proxy - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - secrets: - - name: logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - secrets: - - name: logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - secrets: - - name: logging-curator - - - apiVersion: "v1" - kind: "Service" - metadata: - name: "logging-es" - spec: - ports: - - - port: 9200 - targetPort: restapi - selector: - provider: "openshift" - component: "es" - - - apiVersion: "v1" - kind: "Service" - metadata: - name: "logging-es-cluster" - spec: - portalIP: "None" - ports: - - - port: 9300 - selector: - provider: "openshift" - component: "es" - - - apiVersion: "v1" - kind: "Service" - metadata: - name: "logging-es-ops" - spec: - ports: - - - port: 9200 - targetPort: restapi - selector: - provider: "openshift" - component: "es-ops" - - - apiVersion: "v1" - kind: "Service" - metadata: - name: "logging-es-ops-cluster" - spec: - portalIP: "None" - ports: - - - port: 9300 - selector: - provider: "openshift" - component: "es-ops" - - - apiVersion: "v1" - kind: "Service" - metadata: - name: "logging-kibana" - spec: - ports: - - - port: 443 - targetPort: "oaproxy" - selector: - provider: openshift - component: "kibana" - - - apiVersion: "v1" - kind: "Service" - metadata: - name: "logging-kibana-ops" - spec: - ports: - - - port: 443 - targetPort: "oaproxy" - selector: - provider: openshift - component: "kibana-ops" diff --git a/deployment/templates/support.yaml b/deployment/templates/support.yaml index 21f106c47..389dc8086 100644 --- a/deployment/templates/support.yaml +++ b/deployment/templates/support.yaml @@ -5,13 +5,34 @@ metadata: annotations: description: "Template to create template for deploying logging support entities" tags: "infrastructure" +parameters: +- + description: "A shared secret for the authentication proxy oauth client in front of Kibana" + name: OAUTH_SECRET + required: true +- + description: "Hostname at which users will visit Kibana and be authenticated." + name: KIBANA_HOSTNAME + required: true +- + description: "Hostname at which admins will visit the ops Kibana." + name: KIBANA_OPS_HOSTNAME + value: kibana-ops.example.com +- + description: 'Specify prefix for logging component images; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "docker.io/openshift/origin-" +- + description: 'Specify version for logging component images; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "latest" objects: - apiVersion: "v1" kind: "Template" metadata: name: logging-support-template annotations: - description: "Template for deploying logging support entities: oauth, service accounts, services, routes, imagestreams." + description: "Template for deploying logging services, service accounts, and oauthclient." 
tags: "infrastructure" labels: logging-infra: support @@ -22,37 +43,135 @@ objects: objects: - apiVersion: v1 - kind: OAuthClient + kind: ServiceAccount metadata: - name: kibana-proxy - secret: ${OAUTH_SECRET} - redirectURIs: - - https://${KIBANA_HOSTNAME} - - https://${KIBANA_OPS_HOSTNAME} + name: aggregated-logging-kibana + secrets: + - name: logging-kibana + - name: logging-kibana-proxy + - name: logging-kibana-ops-proxy - apiVersion: v1 - kind: Route + kind: ServiceAccount metadata: - name: kibana - spec: - host: ${KIBANA_HOSTNAME} - to: - kind: Service - name: logging-kibana - tls: - termination: passthrough + name: aggregated-logging-elasticsearch + secrets: + - name: logging-elasticsearch - apiVersion: v1 - kind: Route + kind: ServiceAccount + metadata: + name: aggregated-logging-fluentd + secrets: + - name: logging-fluentd + - + apiVersion: v1 + kind: ServiceAccount + metadata: + name: aggregated-logging-curator + secrets: + - name: logging-curator + - + apiVersion: "v1" + kind: "Service" + metadata: + name: "logging-es" + spec: + ports: + - + port: 9200 + targetPort: restapi + selector: + provider: "openshift" + component: "es" + - + apiVersion: "v1" + kind: "Service" + metadata: + name: "logging-es-cluster" + spec: + portalIP: "None" + ports: + - + port: 9300 + selector: + provider: "openshift" + component: "es" + - + apiVersion: "v1" + kind: "Service" + metadata: + name: "logging-es-ops" + spec: + ports: + - + port: 9200 + targetPort: restapi + selector: + provider: "openshift" + component: "es-ops" + - + apiVersion: "v1" + kind: "Service" + metadata: + name: "logging-es-ops-cluster" + spec: + portalIP: "None" + ports: + - + port: 9300 + selector: + provider: "openshift" + component: "es-ops" + - + apiVersion: "v1" + kind: "Service" metadata: - name: kibana-ops + name: "logging-kibana" spec: - host: ${KIBANA_OPS_HOSTNAME} - to: - kind: Service - name: logging-kibana-ops - tls: - termination: passthrough + ports: + - + port: 443 + targetPort: "oaproxy" + selector: + provider: openshift + component: "kibana" + - + apiVersion: "v1" + kind: "Service" + metadata: + name: "logging-kibana-ops" + spec: + ports: + - + port: 443 + targetPort: "oaproxy" + selector: + provider: openshift + component: "kibana-ops" + - + apiVersion: v1 + kind: OAuthClient + metadata: + name: kibana-proxy + secret: ${OAUTH_SECRET} + redirectURIs: + - https://${KIBANA_HOSTNAME} + - https://${KIBANA_OPS_HOSTNAME} +- apiVersion: "v1" + kind: "Template" + metadata: + name: logging-imagestream-template + annotations: + description: "Template for deploying logging support entities: imagestreams." + tags: "infrastructure" + labels: + logging-infra: support + labels: + logging-infra: support + provider: openshift + component: support + objects: - apiVersion: v1 kind: ImageStream @@ -98,25 +217,4 @@ objects: name: logging-curator spec: dockerImageRepository: ${IMAGE_PREFIX}logging-curator:${IMAGE_VERSION} -parameters: -- - description: "A shared secret for the authentication proxy oauth client in front of Kibana" - name: OAUTH_SECRET - required: true -- - description: "Hostname at which users will visit Kibana and be authenticated." - name: KIBANA_HOSTNAME - required: true -- - description: "Hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com -- - description: 'Specify prefix for logging component images; e.g. 
for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "docker.io/openshift/origin-" -- - description: 'Specify version for logging component images; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "latest" diff --git a/elasticsearch/elasticsearch.yml b/elasticsearch/elasticsearch.yml index 435e1e7a9..2f78a8fdb 100644 --- a/elasticsearch/elasticsearch.yml +++ b/elasticsearch/elasticsearch.yml @@ -19,7 +19,7 @@ gateway: expected_nodes: ${RECOVER_EXPECTED_NODES} recover_after_time: ${RECOVER_AFTER_TIME} -io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator"] +io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] cloud: k8s: @@ -50,7 +50,7 @@ searchguard: authorization: settingsdb: roles: - admin: ["root"] + admin: ["admin"] fluentd: ["fluentd"] kibana: ["kibana"] curator: ["curator"] @@ -65,7 +65,7 @@ searchguard: truststore_filepath: /etc/elasticsearch/keys/truststore truststore_password: tspass actionrequestfilter: - names: ["readonly", "fluentd", "kibana", "curator"] + names: ["readonly", "fluentd", "kibana", "curator", "admin"] readonly: allowed_actions: ["indices:data/read/*", "*monitor*"] forbidden_actions: ["cluster:*", "indices:admin*"] @@ -79,7 +79,7 @@ searchguard: openshift: acl: users: - names: ["system.logging.fluentd", "system.logging.kibana", "system.logging.curator"] + names: ["system.logging.fluentd", "system.logging.kibana", "system.logging.curator", "system.admin"] system.logging.fluentd: execute: ["actionrequestfilter.fluentd"] actionrequestfilter.fluentd.comment: "Fluentd can only write" @@ -92,3 +92,6 @@ openshift: system.logging.curator: execute: ["actionrequestfilter.curator"] actionrequestfilter.curator.comment: "Curator can list all indices and delete them" + system.admin: + bypass: ["*"] + system.admin.*.comment: "Admin user can do anything" diff --git a/elasticsearch/install.sh b/elasticsearch/install.sh index 515cf07bb..d90c6393e 100755 --- a/elasticsearch/install.sh +++ b/elasticsearch/install.sh @@ -14,8 +14,8 @@ export CLUSTER_NAME=placeholder mkdir -p ${HOME} ln -s /usr/share/elasticsearch /usr/share/java/elasticsearch -/usr/share/elasticsearch/bin/plugin -i com.floragunn/search-guard/0.5.1 -url https://github.com/lukas-vlcek/origin-aggregated-logging/releases/download/v0.1/search-guard-0.5.1.zip -/usr/share/elasticsearch/bin/plugin -i io.fabric8.elasticsearch/openshift-elasticsearch-plugin/0.11 +/usr/share/elasticsearch/bin/plugin -i com.floragunn/search-guard/0.5.1 -url https://github.com/lukas-vlcek/origin-aggregated-logging/releases/download/v0.1/search-guard-0.5.1.zip +/usr/share/elasticsearch/bin/plugin -i io.fabric8.elasticsearch/openshift-elasticsearch-plugin/0.13 /usr/share/elasticsearch/bin/plugin -i io.fabric8/elasticsearch-cloud-kubernetes/1.3.0 mkdir /elasticsearch chmod -R og+w /usr/share/java/elasticsearch ${HOME} /elasticsearch diff --git a/fluentd/configs.d/filter/k8s_meta.conf b/fluentd/configs.d/filter/k8s_meta.conf index 0f9a4faab..6c76e4f10 100644 --- a/fluentd/configs.d/filter/k8s_meta.conf +++ b/fluentd/configs.d/filter/k8s_meta.conf @@ -3,4 +3,5 @@ kubernetes_url "#{ENV['K8S_HOST_URL']}" bearer_token_file /var/run/secrets/kubernetes.io/serviceaccount/token ca_file /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + include_namespace_id true diff --git 
a/fluentd/configs.d/filter/k8s_record_transform.conf b/fluentd/configs.d/filter/k8s_record_transform.conf index 52e3ab113..dcf146f69 100644 --- a/fluentd/configs.d/filter/k8s_record_transform.conf +++ b/fluentd/configs.d/filter/k8s_record_transform.conf @@ -4,7 +4,7 @@ hostname ${(kubernetes_host rescue nil) || File.open('/etc/docker-hostname') { |f| f.readline }.rstrip} message ${log} - version 1.0.6 + version 1.1.4 remove_keys log,stream diff --git a/fluentd/configs.d/filter/syslog_record_transform.conf b/fluentd/configs.d/filter/syslog_record_transform.conf index 6bdddc778..f86e4288e 100644 --- a/fluentd/configs.d/filter/syslog_record_transform.conf +++ b/fluentd/configs.d/filter/syslog_record_transform.conf @@ -14,7 +14,7 @@ time ${ Time.at(time) > Time.now ? (temp_time = Time.parse(Time.at(time).to_s.gsub(Time.at(time).year.to_s, (tag_parts[3].nil? ? Time.at(time).year.to_s : tag_parts[3][9,4]) )).to_datetime.to_s; Time.parse(temp_time) > Time.now ? Time.parse(temp_time.gsub(Time.parse(temp_time).year.to_s, (Time.parse(temp_time).year - 1).to_s )).to_datetime.to_s : Time.parse(temp_time).to_datetime.to_s ) : Time.at(time).to_datetime.to_s } #tag ${tag}_.operations_log - version 1.0.6 + version 1.1.4 remove_keys host diff --git a/fluentd/configs.d/output/fluentd_es_config.conf b/fluentd/configs.d/output/fluentd_es_config.conf index cb0de4e1d..24f37ae7e 100644 --- a/fluentd/configs.d/output/fluentd_es_config.conf +++ b/fluentd/configs.d/output/fluentd_es_config.conf @@ -3,7 +3,7 @@ host "#{ENV['ES_HOST']}" port "#{ENV['ES_PORT']}" scheme https - index_name ${record['kubernetes_namespace_name']}.${Time.at(time).getutc.strftime(@logstash_dateformat)} + index_name ${record['kubernetes_namespace_name']}.${record['kubernetes_namespace_id']}.${Time.at(time).getutc.strftime(@logstash_dateformat)} user fluentd password changeme diff --git a/fluentd/fluentd_es_copy_config.conf b/fluentd/fluentd_es_copy_config.conf index 7562fea1f..6756f3bc2 100644 --- a/fluentd/fluentd_es_copy_config.conf +++ b/fluentd/fluentd_es_copy_config.conf @@ -3,7 +3,7 @@ host "#{ENV['ES_COPY_HOST']}" port "#{ENV['ES_COPY_PORT']}" scheme "#{ENV['ES_COPY_SCHEME']}" - index_name ${record['kubernetes_namespace_name']}.${Time.at(time).getutc.strftime(@logstash_dateformat)} + index_name ${record['kubernetes_namespace_name']}.${record['kubernetes_namespace_id']}.${Time.at(time).getutc.strftime(@logstash_dateformat)} user "#{ENV['ES_COPY_USERNAME']}" password "#{ENV['ES_COPY_PASSWORD']}" diff --git a/hack/testing/logging.sh b/hack/testing/logging.sh index 80885aa49..ff28f95e8 100755 --- a/hack/testing/logging.sh +++ b/hack/testing/logging.sh @@ -165,14 +165,11 @@ masterurlhack=",MASTER_URL=https://172.30.0.1:443" OS_O_A_L_DIR=${OS_O_A_L_DIR:-$OS_ROOT/test/extended/origin-aggregated-logging} os::cmd::expect_success "oc new-project logging" os::cmd::expect_success "oc secrets new logging-deployer nothing=/dev/null" -os::cmd::expect_success "echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: logging-deployer -secrets: -- name: logging-deployer -' | oc create -f -" -os::cmd::expect_success "oadm policy add-cluster-role-to-user cluster-admin system:serviceaccount:logging:logging-deployer" +os::cmd::expect_success "oc create -f $OS_O_A_L_DIR/deployment/deployer.yaml" +os::cmd::expect_success "oc process logging-deployer-account-template | oc create -f -" +os::cmd::expect_success "oc policy add-role-to-user edit system:serviceaccount:logging:logging-deployer" +os::cmd::expect_success "oc policy add-role-to-user 
daemonset-admin system:serviceaccount:logging:logging-deployer" +os::cmd::expect_success "oadm policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer" if [ -n "$USE_LOGGING_DEPLOYER" ] ; then imageprefix="docker.io/openshift/origin-" elif [ -n "$USE_LOGGING_DEPLOYER_SCRIPT" ] ; then @@ -227,7 +224,7 @@ os::cmd::expect_success "oadm policy add-cluster-role-to-user cluster-reader \ sleep 5 if [ ! -n "$USE_LOGGING_DEPLOYER_SCRIPT" ] ; then os::cmd::expect_success "oc process \ - -f $OS_O_A_L_DIR/deployment/deployer.yaml \ + logging-deployer-template \ -v ENABLE_OPS_CLUSTER=$ENABLE_OPS_CLUSTER,IMAGE_PREFIX=$imageprefix,KIBANA_HOSTNAME=kibana.example.com,ES_CLUSTER_SIZE=1,PUBLIC_MASTER_URL=https://localhost:8443${masterurlhack} \ | oc create -f -" os::cmd::try_until_text "oc describe bc logging-deployment | awk '/^logging-deployment-/ {print \$2}'" "complete" @@ -243,8 +240,6 @@ if [ "$ENABLE_OPS_CLUSTER" = "true" ] ; then os::cmd::try_until_text "oc get pods -l component=curator-ops" "Running" "$(( 3 * TIME_MIN ))" fi -# this fails because the imagestreams already exist -os::cmd::expect_failure_and_text "oc process logging-support-template | oc create -f -" "already exists" if [ -n "$ES_VOLUME" ] ; then if [ ! -d $ES_VOLUME ] ; then sudo mkdir -p $ES_VOLUME @@ -357,6 +352,28 @@ for test in test-*.sh ; do ./$test $USE_CLUSTER fi done + +#run a migration here to ensure that it is able to work +# delete old deployer pod +os::cmd::expect_success "oc delete pods -l component=deployer" + +#this does not do what i think it does... +#os::cmd::try_until_text "oc get pods -l component=deployer -o jsonpath='{.items[*].metadata.name}'" "" "$(( 3 * TIME_MIN ))" + +if [ ! -n "$USE_LOGGING_DEPLOYER_SCRIPT" ] ; then + os::cmd::expect_success "oc process \ + logging-deployer-template \ + -v MODE=migrate,IMAGE_PREFIX=$imageprefix,KIBANA_HOSTNAME=kibana.example.com,ES_CLUSTER_SIZE=1,PUBLIC_MASTER_URL=https://localhost:8443${masterurlhack} \ + | oc create -f -" + os::cmd::try_until_text "oc describe bc logging-deployment | awk '/^logging-deployment-/ {print \$2}'" "complete" + + MIGRATE_POD=$(oc get pod -l component=deployer -o jsonpath='{.items[*].metadata.name}') + # adding grep to cut down on log output noise + os::cmd::try_until_text "oc logs $MIGRATE_POD | grep 'Migration for project'" "Migration for project test: {\"acknowledged\":true}" "$(( 3 * TIME_MIN ))" + os::cmd::try_until_text "oc logs $MIGRATE_POD | grep 'Migration for project'" "Migration for project logging: {\"acknowledged\":true}" "$(( 3 * TIME_MIN ))" + os::cmd::try_until_text "oc get pods -l component=deployer" "Completed" "$(( 3 * TIME_MIN ))" +fi + popd ### finished logging e2e tests ###
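Note: after a `MODE=migrate` run such as the one exercised above, each non-ops project should end up with an alias of the form `<project>.<uid>.reference` covering its old `<project>.*` indices, consistent with the new `namespace_name.namespace_id.date` index name format that Fluentd now writes. The following is a minimal way to spot-check one project by hand; it assumes the admin CA, certificate, and key have been extracted from the `logging-elasticsearch` secret into the current directory, that `logging-es:9200` is reachable from where the command runs, and that `myproject` is a hypothetical project name.

    # Hypothetical spot check: confirm the migration created the expected alias.
    uid=$(oc get project myproject -o jsonpath='{.metadata.uid}')
    curl -s --cacert admin-ca.crt --key admin-key.key --cert admin-cert.crt \
        "https://logging-es:9200/_aliases?pretty" | grep "myproject.${uid}.reference"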