Contexts for 8.13.3
This commit was created by the elastic-dockerfiles-publisher.
elasticmachine committed May 2, 2024
1 parent d6d8365 · commit 4e109d7
Showing 11 changed files with 367 additions and 168 deletions.
25 changes: 14 additions & 11 deletions elasticsearch/Dockerfile
@@ -5,9 +5,10 @@
################################################################################

################################################################################
# Build stage 0 `builder`:
# Build stage 1 `builder`:
# Extract Elasticsearch artifact
################################################################################

FROM ubuntu:20.04 AS builder

# Install required packages to extract the Elasticsearch distribution
@@ -42,7 +43,7 @@ RUN set -eux ; \
RUN mkdir /usr/share/elasticsearch
WORKDIR /usr/share/elasticsearch

RUN curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-7.17.20-linux-$(arch).tar.gz
RUN curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-8.13.3-linux-$(arch).tar.gz

RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1

@@ -71,9 +72,9 @@ RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elas
find config -type f -exec chmod 0664 {} +

################################################################################
# Build stage 1 (the actual Elasticsearch image):
# Build stage 2 (the actual Elasticsearch image):
#
# Copy elasticsearch from stage 0
# Copy elasticsearch from stage 1
# Add entrypoint
################################################################################

@@ -86,7 +87,7 @@ RUN yes no | dpkg-reconfigure dash && \
apt-get update && \
apt-get upgrade -y && \
apt-get install -y --no-install-recommends \
ca-certificates curl netcat p11-kit unzip zip && \
ca-certificates curl netcat p11-kit unzip zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
exit_code=0 && break || \
@@ -134,32 +135,34 @@ RUN /etc/ca-certificates/update.d/docker-openjdk

EXPOSE 9200 9300

LABEL org.label-schema.build-date="2024-04-08T08:34:31.070382898Z" \
LABEL org.label-schema.build-date="2024-04-29T22:05:16.051731935Z" \
org.label-schema.license="Elastic-License-2.0" \
org.label-schema.name="Elasticsearch" \
org.label-schema.schema-version="1.0" \
org.label-schema.url="https://www.elastic.co/products/elasticsearch" \
org.label-schema.usage="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \
org.label-schema.vcs-ref="b26557f585b7d95c71a5549e571a6bcd2667697d" \
org.label-schema.vcs-ref="617f7b76c4ebcb5a7f1e70d409a99c437c896aea" \
org.label-schema.vcs-url="https://github.com/elastic/elasticsearch" \
org.label-schema.vendor="Elastic" \
org.label-schema.version="7.17.20" \
org.opencontainers.image.created="2024-04-08T08:34:31.070382898Z" \
org.label-schema.version="8.13.3" \
org.opencontainers.image.created="2024-04-29T22:05:16.051731935Z" \
org.opencontainers.image.documentation="https://www.elastic.co/guide/en/elasticsearch/reference/index.html" \
org.opencontainers.image.licenses="Elastic-License-2.0" \
org.opencontainers.image.revision="b26557f585b7d95c71a5549e571a6bcd2667697d" \
org.opencontainers.image.revision="617f7b76c4ebcb5a7f1e70d409a99c437c896aea" \
org.opencontainers.image.source="https://github.com/elastic/elasticsearch" \
org.opencontainers.image.title="Elasticsearch" \
org.opencontainers.image.url="https://www.elastic.co/products/elasticsearch" \
org.opencontainers.image.vendor="Elastic" \
org.opencontainers.image.version="7.17.20"
org.opencontainers.image.version="8.13.3"

# Our actual entrypoint is `tini`, a minimal but functional init program. It
# calls the entrypoint we provide, while correctly forwarding signals.
ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"]
# Dummy overridable parameter parsed by entrypoint
CMD ["eswrapper"]

USER 1000:0

################################################################################
# End of multi-stage Dockerfile
################################################################################
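For reference, a minimal smoke test of an image built from this Dockerfile might look like the following. The docker.elastic.co/elasticsearch/elasticsearch:8.13.3 tag is assumed to match the published release, and discovery.type=single-node plus the ELASTIC_PASSWORD value are illustrative settings, not part of this commit.

# Pull the published 8.13.3 image (tag assumed)
docker pull docker.elastic.co/elasticsearch/elasticsearch:8.13.3

# Start a throwaway single-node container; the password value is a placeholder
docker run --rm -p 9200:9200 \
  -e discovery.type=single-node \
  -e ELASTIC_PASSWORD=changeme \
  docker.elastic.co/elasticsearch/elasticsearch:8.13.3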
69 changes: 26 additions & 43 deletions elasticsearch/bin/docker-entrypoint.sh
@@ -4,38 +4,22 @@ set -e
# Files created by Elasticsearch should always be group writable too
umask 0002

run_as_other_user_if_needed() {
if [[ "$(id -u)" == "0" ]]; then
# If running as root, drop to specified UID and run command
exec chroot --userspec=1000:0 / "${@}"
else
# Either we are running in Openshift with random uid and are a member of the root group
# or with a custom --user
exec "${@}"
fi
}

# Allow user specify custom CMD, maybe bin/elasticsearch itself
# for example to directly specify `-E` style parameters for elasticsearch on k8s
# or simply to run /bin/bash to check the image
if [[ "$1" != "eswrapper" ]]; then
if [[ "$(id -u)" == "0" && $(basename "$1") == "elasticsearch" ]]; then
# centos:7 chroot doesn't have the `--skip-chdir` option and
# changes our CWD.
# Rewrite CMD args to replace $1 with `elasticsearch` explicitly,
# so that we are backwards compatible with the docs
# from the previous Elasticsearch versions<6
# and configuration option D:
# https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink
# Without this, user could specify `elasticsearch -E x.y=z` but
# `bin/elasticsearch -E x.y=z` would not work.
set -- "elasticsearch" "${@:2}"
# Use chroot to switch to UID 1000 / GID 0
exec chroot --userspec=1000:0 / "$@"
else
# User probably wants to run something else, like /bin/bash, with another uid forced (Openshift?)
exec "$@"
fi
if [[ "$1" == "eswrapper" || $(basename "$1") == "elasticsearch" ]]; then
# Rewrite CMD args to remove the explicit command,
# so that we are backwards compatible with the docs
# from the previous Elasticsearch versions < 6
# and configuration option:
# https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink
# Without this, user could specify `elasticsearch -E x.y=z` but
# `bin/elasticsearch -E x.y=z` would not work. In any case,
# we want to continue through this script, and not exec early.
set -- "${@:2}"
else
# Run whatever command the user wanted
exec "$@"
fi

# Allow environment variables to be set by creating a file with the
@@ -56,30 +40,23 @@ if [[ -f bin/elasticsearch-users ]]; then
# enabled, but we have no way of knowing which node we are yet. We'll just
# honor the variable if it's present.
if [[ -n "$ELASTIC_PASSWORD" ]]; then
[[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (run_as_other_user_if_needed elasticsearch-keystore create)
if ! (run_as_other_user_if_needed elasticsearch-keystore has-passwd --silent) ; then
[[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (elasticsearch-keystore create)
if ! (elasticsearch-keystore has-passwd --silent) ; then
# keystore is unencrypted
if ! (run_as_other_user_if_needed elasticsearch-keystore list | grep -q '^bootstrap.password$'); then
(run_as_other_user_if_needed echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password')
if ! (elasticsearch-keystore list | grep -q '^bootstrap.password$'); then
(echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password')
fi
else
# keystore requires password
if ! (run_as_other_user_if_needed echo "$KEYSTORE_PASSWORD" \
if ! (echo "$KEYSTORE_PASSWORD" \
| elasticsearch-keystore list | grep -q '^bootstrap.password$') ; then
COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$ELASTIC_PASSWORD")"
(run_as_other_user_if_needed echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password')
(echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password')
fi
fi
fi
fi

if [[ "$(id -u)" == "0" ]]; then
# If requested and running as root, mutate the ownership of bind-mounts
if [[ -n "$TAKE_FILE_OWNERSHIP" ]]; then
chown -R 1000:0 /usr/share/elasticsearch/{data,logs}
fi
fi

if [[ -n "$ES_LOG_STYLE" ]]; then
case "$ES_LOG_STYLE" in
console)
@@ -96,6 +73,12 @@ if [[ -n "$ES_LOG_STYLE" ]]; then
esac
fi

if [[ -n "$ENROLLMENT_TOKEN" ]]; then
POSITIONAL_PARAMETERS="--enrollment-token $ENROLLMENT_TOKEN"
else
POSITIONAL_PARAMETERS=""
fi

# Signal forwarding and child reaping is handled by `tini`, which is the
# actual entrypoint of the container
run_as_other_user_if_needed /usr/share/elasticsearch/bin/elasticsearch <<<"$KEYSTORE_PASSWORD"
exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD"
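A short sketch of how the simplified entrypoint behaves under the two supported invocation styles; the image tag is assumed as above, and cluster.name=demo plus the enrollment-token value are illustrative only.

# Arguments after the image name are passed straight to bin/elasticsearch,
# so `-E`-style settings keep working as documented
docker run --rm docker.elastic.co/elasticsearch/elasticsearch:8.13.3 \
  elasticsearch -E cluster.name=demo

# When ENROLLMENT_TOKEN is set, the entrypoint appends --enrollment-token <value>
docker run --rm \
  -e ENROLLMENT_TOKEN="<token generated on an existing node>" \
  docker.elastic.co/elasticsearch/elasticsearch:8.13.3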
62 changes: 48 additions & 14 deletions elasticsearch/config/log4j2.properties
@@ -3,8 +3,8 @@ status = error
######## Server JSON ############################
appender.rolling.type = Console
appender.rolling.name = rolling
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server
appender.rolling.layout.type = ECSJsonLayout
appender.rolling.layout.dataset = elasticsearch.server

################################################

@@ -16,16 +16,15 @@ rootLogger.appenderRef.rolling.ref = rolling
######## Deprecation JSON #######################
appender.deprecation_rolling.type = Console
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.layout.type = ESJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation.elasticsearch
appender.deprecation_rolling.layout.esmessagefields=x-opaque-id,key,category,elasticsearch.elastic_product_origin
appender.deprecation_rolling.layout.type = ECSJsonLayout
# Intentionally follows a different pattern to above
appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter

appender.header_warning.type = HeaderWarningAppender
appender.header_warning.name = header_warning
#################################################

#################################################
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = WARN
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
@@ -35,9 +34,8 @@ logger.deprecation.additivity = false
######## Search slowlog JSON ####################
appender.index_search_slowlog_rolling.type = Console
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog

#################################################

@@ -50,11 +48,8 @@ logger.index_search_slowlog_rolling.additivity = false
######## Indexing slowlog JSON ##################
appender.index_indexing_slowlog_rolling.type = Console
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source

#################################################
appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog

#################################################

@@ -63,12 +58,41 @@ logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false

logger.org_apache_pdfbox.name = org.apache.pdfbox
logger.org_apache_pdfbox.level = off

logger.org_apache_poi.name = org.apache.poi
logger.org_apache_poi.level = off

logger.org_apache_fontbox.name = org.apache.fontbox
logger.org_apache_fontbox.level = off

logger.org_apache_xmlbeans.name = org.apache.xmlbeans
logger.org_apache_xmlbeans.level = off

logger.com_amazonaws.name = com.amazonaws
logger.com_amazonaws.level = warn

logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport
logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error

logger.com_amazonaws_metrics_AwsSdkMetrics.name = com.amazonaws.metrics.AwsSdkMetrics
logger.com_amazonaws_metrics_AwsSdkMetrics.level = error

logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader
logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error

logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver
logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error

appender.audit_rolling.type = Console
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
"type":"audit", \
"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
%varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\
%varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\
%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
@@ -80,16 +104,21 @@ appender.audit_rolling.layout.pattern = {\
%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
%varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\
%varsNotEmpty{, "user.roles":%map{user.roles}}\
%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
%varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\
%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
%varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\
%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
@@ -120,16 +149,21 @@ appender.audit_rolling.layout.pattern = {\
# "user.run_by.name" the original authenticated subject name that is impersonating another one.
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
# "user.realm" the name of the realm that authenticated "user.name"
# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain
# "user.roles" the roles array of the user; these are the roles that are granting privileges
# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster
# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
# "realm_domain" if "realm" is under a domain, this is the name of the domain
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
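Because the layouts above move from ESJsonLayout to ECSJsonLayout, anyone overriding the logging configuration needs a file in the new format. A hedged bind-mount sketch, with the local path as an example:

# Replace the image's default log4j2.properties with a customized copy
docker run --rm \
  -v "$PWD/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro" \
  docker.elastic.co/elasticsearch/elasticsearch:8.13.3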
16 changes: 8 additions & 8 deletions kibana/Dockerfile
@@ -16,7 +16,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y curl
RUN cd /tmp && \
curl --retry 8 -s -L \
--output kibana.tar.gz \
https://artifacts.elastic.co/downloads/kibana/kibana-7.17.20-linux-$(arch).tar.gz && \
https://artifacts.elastic.co/downloads/kibana/kibana-8.13.3-linux-$(arch).tar.gz && \
cd -


@@ -104,25 +104,25 @@ RUN groupadd --gid 1000 kibana && \
--home-dir /usr/share/kibana --no-create-home \
kibana

LABEL org.label-schema.build-date="2024-04-08T11:05:28.782Z" \
LABEL org.label-schema.build-date="2024-04-30T02:06:44.402Z" \
org.label-schema.license="Elastic License" \
org.label-schema.name="Kibana" \
org.label-schema.schema-version="1.0" \
org.label-schema.url="https://www.elastic.co/products/kibana" \
org.label-schema.usage="https://www.elastic.co/guide/en/kibana/reference/index.html" \
org.label-schema.vcs-ref="03253d4922979c94747a2f108370c00ad99df6d1" \
org.label-schema.vcs-ref="003e4a42946390c2eb93dfb3586498ce7520a530" \
org.label-schema.vcs-url="https://github.com/elastic/kibana" \
org.label-schema.vendor="Elastic" \
org.label-schema.version="7.17.20" \
org.opencontainers.image.created="2024-04-08T11:05:28.782Z" \
org.label-schema.version="8.13.3" \
org.opencontainers.image.created="2024-04-30T02:06:44.402Z" \
org.opencontainers.image.documentation="https://www.elastic.co/guide/en/kibana/reference/index.html" \
org.opencontainers.image.licenses="Elastic License" \
org.opencontainers.image.revision="03253d4922979c94747a2f108370c00ad99df6d1" \
org.opencontainers.image.revision="003e4a42946390c2eb93dfb3586498ce7520a530" \
org.opencontainers.image.source="https://github.com/elastic/kibana" \
org.opencontainers.image.title="Kibana" \
org.opencontainers.image.url="https://www.elastic.co/products/kibana" \
org.opencontainers.image.vendor="Elastic" \
org.opencontainers.image.version="7.17.20"
org.opencontainers.image.version="8.13.3"


ENTRYPOINT ["/bin/tini", "--"]
@@ -131,4 +131,4 @@ ENTRYPOINT ["/bin/tini", "--"]
CMD ["/usr/local/bin/kibana-docker"]


USER kibana
USER 1000
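The Kibana image now runs as UID 1000 rather than the named kibana user. A hedged example of starting it against an Elasticsearch container; the network name, hostname, and ELASTICSEARCH_HOSTS value are placeholders, and the docker.elastic.co/kibana/kibana:8.13.3 tag is assumed to match the published release.

# Attach to the same Docker network as the Elasticsearch container (names are examples)
docker run --rm -p 5601:5601 \
  --network elastic \
  -e ELASTICSEARCH_HOSTS=http://es01:9200 \
  docker.elastic.co/kibana/kibana:8.13.3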
(Remaining 7 changed files not shown.)
