-
Notifications
You must be signed in to change notification settings - Fork 92
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[Bug] Semantic Equality Check Error in elasticstack_fleet_integration_policy #820
Comments
@dejongm are you able to share the full resource definition causing this error? |
Hello @tobio ! Thanks for looking into this! Below are the code snippets. Out of caution, I can provide the content of # main.tf
module "fleet_integration_policy" {
source = "./modules/fleet_integration_policy"
for_each = local.fleet_package_policies
agent_policy_id = each.value.agent_policy_id
inputs = each.value.inputs
name = each.value.name
namespace = each.value.namespace
package_name = each.value.package_name
package_version = each.value.package_version
description = try(each.value.description, null)
enabled = try(each.value.enabled, null)
force = try(each.value.force, null)
policy_id = try(each.value.policy_id, null)
vars = try(each.value.vars, null)
} # modules/fleet_integration_policy/vars.tf
variable "agent_policy_id" {
type = string
}
variable "inputs" {
type = list(object({
input_id = string
enabled = optional(bool)
streams = optional(string)
vars = optional(string)
}))
}
variable "name" {
type = string
}
variable "namespace" {
type = string
default = "default"
}
variable "package_name" {
type = string
}
variable "package_version" {
type = string
}
variable "description" {
type = string
default = null
}
variable "enabled" {
type = bool
default = null
}
variable "force" {
type = bool
default = null
}
variable "policy_id" {
type = string
default = null
}
variable "vars" {
type = string
default = null
} # modules/fleet_integration_policy/main.tf
resource "elasticstack_fleet_integration_policy" "sample" {
agent_policy_id = var.agent_policy_id
dynamic input {
for_each = local.inputs
content {
enabled = input.value.enabled
input_id = input.value.input_id
streams_json = input.value.streams_json
vars_json = input.value.vars_json
}
}
name = var.name
namespace = var.namespace
integration_name = var.package_name
integration_version = var.package_version
description = var.description
enabled = var.enabled
force = var.force
policy_id = var.policy_id
vars_json = local.vars_json
} |
+1, for the time being reverted elastic/elasticstack to "0.11.4" to mitigate the issue |
I can kind of force this error artificially, but not with a real config. @dejongm if you're able to send through the value of |
So I've just upgraded. Prior to the upgrade, I was getting a consistent flapping behaviour on every apply. Edit: I've managed to create the simplest possible reproducer for this here: https://github.com/fatmcgav/tf-provider-elasticstack-820-reproducer Leaving the below in for posterity ;) I'm creating the policy by loading a yaml file, as such: # This is generated dynamically in our code, but have simplified here
locals {
agent_integration_policies = {
"Dev AWS EKS/EKS" = {
agent_policy = "Dev AWS EKS"
description = <<-EOT
Kubernetes integration for capturing logs and metrics from Dev EKS cluster
EOT
integration = "EKS"
integration_name = "Dev AWS EKS/EKS"
integration_type = "kubernetes"
namespace = "dev"
}
"UAT AWS EKS/EKS" = {
agent_policy = "UAT AWS EKS"
description = <<-EOT
Kubernetes integration for capturing logs and metrics from UAT EKS cluster
EOT
integration = "EKS"
integration_name = "UAT AWS EKS/EKS"
integration_type = "kubernetes"
namespace = "uat"
}
}
fleet_agent_policies_map = {
"Dev AWS EKS" = {
integrations = [
{
description = <<-EOT
Kubernetes integration for capturing logs and metrics from Dev EKS cluster
EOT
name = "EKS"
type = "kubernetes"
},
]
name = "Dev AWS EKS"
namespace = "dev"
}
"UAT AWS EKS" = {
integrations = [
{
description = <<-EOT
Kubernetes integration for capturing logs and metrics from UAT EKS cluster
EOT
name = "EKS"
type = "kubernetes"
},
]
name = "UAT AWS EKS"
namespace = "uat"
}
}
fleet_integrations_map = {
kubernetes = {
name = "kubernetes"
version = "1.67.0"
}
}
}
# Integrations
resource "elasticstack_fleet_integration" "integrations" {
for_each = local.fleet_integrations_map
name = each.key
version = each.value.version
force = true
}
# Create an agent policy per environment
resource "elasticstack_fleet_agent_policy" "agent_policies" {
for_each = local.fleet_agent_policies_map
name = format("%s Agent Policy", title(each.key))
namespace = try(each.value.namespace, "default")
description = try(each.value.description, null)
monitor_logs = try(each.value.monitor_logs, true)
monitor_metrics = try(each.value.monitor_metrics, true)
skip_destroy = try(each.value.skip_destroy, false)
}
# Create agent integration policy
resource "elasticstack_fleet_integration_policy" "agent_integration_policies" {
for_each = local.agent_integration_policies
name = each.value.integration_name
namespace = each.value.namespace
description = each.value.description
agent_policy_id = elasticstack_fleet_agent_policy.agent_policies[each.value.agent_policy].policy_id
integration_name = elasticstack_fleet_integration.integrations[each.value.integration_type].name
integration_version = elasticstack_fleet_integration.integrations[each.value.integration_type].version
dynamic "input" {
for_each = yamldecode(file(format("config/policies/integrations/%s/policy.yaml", each.value.integration_type))).inputs
content {
input_id = input.key
streams_json = jsonencode(input.value.streams)
}
}
} The contents of inputs:
kubelet-kubernetes/metrics:
enabled: true
streams:
kubernetes.container:
enabled: true
vars:
add_metadata: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://${env.NODE_NAME}:10250'
period: 10s
ssl.verification_mode: none
ssl.certificate_authorities: []
kubernetes.node:
enabled: true
vars:
add_metadata: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://${env.NODE_NAME}:10250'
period: 10s
ssl.verification_mode: none
ssl.certificate_authorities: []
kubernetes.pod:
enabled: true
vars:
add_metadata: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://${env.NODE_NAME}:10250'
period: 10s
ssl.verification_mode: none
ssl.certificate_authorities: []
kubernetes.system:
enabled: true
vars:
add_metadata: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://${env.NODE_NAME}:10250'
period: 10s
ssl.verification_mode: none
ssl.certificate_authorities: []
kubernetes.volume:
enabled: true
vars:
add_metadata: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://${env.NODE_NAME}:10250'
period: 10s
ssl.verification_mode: none
ssl.certificate_authorities: []
kube-state-metrics-kubernetes/metrics:
enabled: true
streams:
kubernetes.state_container:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_cronjob:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_daemonset:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_deployment:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_job:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_namespace:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_node:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_persistentvolume:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_persistentvolumeclaim:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_pod:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_replicaset:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_resourcequota:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_service:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_statefulset:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kubernetes.state_storageclass:
enabled: true
vars:
add_metadata: true
hosts:
- 'kube-state-metrics.kube-system.svc.cluster.local:8080'
leaderelection: true
period: 10s
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
ssl.certificate_authorities: []
kube-apiserver-kubernetes/metrics:
enabled: true
streams:
kubernetes.apiserver:
enabled: true
vars:
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- >-
https://${env.KUBERNETES_SERVICE_HOST}:${env.KUBERNETES_SERVICE_PORT}
leaderelection: true
period: 30s
ssl.certificate_authorities:
- /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
kube-proxy-kubernetes/metrics:
enabled: true
streams:
kubernetes.proxy:
enabled: true
vars:
hosts:
- 'localhost:10249'
period: 10s
kube-scheduler-kubernetes/metrics:
enabled: false
streams:
kubernetes.scheduler:
enabled: false
vars:
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://0.0.0.0:10259'
period: 10s
ssl.verification_mode: none
scheduler_label_key: component
scheduler_label_value: kube-scheduler
kube-controller-manager-kubernetes/metrics:
enabled: false
streams:
kubernetes.controllermanager:
enabled: false
vars:
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
hosts:
- 'https://0.0.0.0:10257'
period: 10s
ssl.verification_mode: none
controller_manager_label_key: component
controller_manager_label_value: kube-controller-manager
events-kubernetes/metrics:
enabled: true
streams:
kubernetes.event:
enabled: true
vars:
period: 10s
add_metadata: true
skip_older: true
leaderelection: true
container-logs-filestream:
enabled: true
streams:
kubernetes.container_logs:
enabled: true
vars:
paths:
- '/var/log/containers/*${kubernetes.container.id}.log'
symlinks: true
data_stream.dataset: kubernetes.container_logs
containerParserStream: all
containerParserFormat: auto
additionalParsersConfig: |
- ndjson:
target: json
ignore_decoding_error: true
# - multiline:
# type: pattern
# pattern: '^\['
# negate: true
# match: after
custom: ''
processors: |
- drop_event:
when:
or:
- regexp:
message: '.*(?i)(healt|livez|ping|status).*'
- regexp:
json.message: '(?i).*(?i)(healt|livez|ping|status).*'
- regexp:
json.msg: '.*(?i)(healt|livez|ping|status).*'
audit-logs-filestream:
enabled: false
streams:
kubernetes.audit_logs:
enabled: false
vars:
paths:
- /var/log/kubernetes/kube-apiserver-audit.log |
This commit adds a simple reproducer for elastic/terraform-provider-elasticstack#820. Also includes a Terraform trace log file from an apply attempt.
We tried to update to v0.11.9 as well, since we wanted to try the new fix for the handling of secret values. If I execute
The respective Terraform config looks like this (simplified and redacted): locals {
fip_system_inputs = [
{
input_id = "system-winlog"
enabled = false
streams_json = jsonencode({
"system.application" : {
"enabled" : false,
"vars" : {
"custom" : "# Winlog configuration example\n#batch_read_size: 100",
"ignore_older" : "72h",
"language" : 0,
"preserve_original_event" : false,
"tags" : []
}
},
"system.security" : {
"enabled" : false,
"vars" : {
"custom" : "# Winlog configuration example\n#batch_read_size: 100",
"ignore_older" : "72h",
"language" : 0,
"preserve_original_event" : false,
"tags" : []
}
},
"system.system" : {
"enabled" : false,
"vars" : {
"custom" : "# Winlog configuration example\n#batch_read_size: 100",
"ignore_older" : "72h",
"language" : 0,
"preserve_original_event" : false,
"tags" : []
}
}
})
vars_json = null
},
{
input_id = "system-system/metrics"
enabled = true
streams_json = jsonencode({
"system.core" : {
"enabled" : false,
"vars" : {
"core.metrics" : [
"percentages"
],
"period" : "10s",
"tags" : []
}
},
"system.cpu" : {
"enabled" : true,
"vars" : {
"cpu.metrics" : [
"percentages",
"normalized_percentages"
],
"period" : "10s",
"tags" : []
}
},
"system.diskio" : {
"enabled" : true,
"vars" : {
"diskio.include_devices" : [],
"period" : "10s",
"tags" : []
}
},
"system.filesystem" : {
"enabled" : true,
"vars" : {
"filesystem.ignore_types" : [],
"period" : "1m",
"processors" : "- drop_event.when.regexp:\n system.filesystem.mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)\n",
"tags" : []
}
},
"system.fsstat" : {
"enabled" : true,
"vars" : {
"period" : "1m",
"processors" : "- drop_event.when.regexp:\n system.fsstat.mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)\n",
"tags" : []
}
},
"system.load" : {
"enabled" : true,
"vars" : {
"period" : "10s",
"tags" : []
}
},
"system.memory" : {
"enabled" : true,
"vars" : {
"period" : "10s",
"tags" : []
}
},
"system.network" : {
"enabled" : true,
"vars" : {
"network.interfaces" : [],
"period" : "10s",
"tags" : []
}
},
"system.process" : {
"enabled" : true,
"vars" : {
"period" : "10s",
"process.cgroups.enabled" : false,
"process.cmdline.cache.enabled" : true,
"process.env.whitelist" : [],
"process.include_cpu_ticks" : false,
"process.include_top_n.by_cpu" : 5,
"process.include_top_n.by_memory" : 5,
"processes" : [
".*"
],
"tags" : []
}
},
"system.process.summary" : {
"enabled" : true,
"vars" : {
"period" : "10s",
"tags" : []
}
},
"system.socket_summary" : {
"enabled" : true,
"vars" : {
"period" : "10s",
"tags" : []
}
},
"system.uptime" : {
"enabled" : true,
"vars" : {
"period" : "10s",
"tags" : []
}
}
})
vars_json = jsonencode({})
},
{
input_id = "system-httpjson"
enabled = false
streams_json = jsonencode({
"system.application" : {
"enabled" : false,
"vars" : {
"interval" : "10s",
"search" : "search sourcetype=\"XmlWinEventLog:Application\"",
"tags" : [
"forwarded"
]
}
},
"system.security" : {
"enabled" : false,
"vars" : {
"interval" : "10s",
"search" : "search sourcetype=\"XmlWinEventLog:Security\"",
"tags" : [
"forwarded"
]
}
},
"system.system" : {
"enabled" : false,
"vars" : {
"interval" : "10s",
"search" : "search sourcetype=\"XmlWinEventLog:System\"",
"tags" : [
"forwarded"
]
}
}
})
vars_json = jsonencode({
"preserve_original_event" : false,
"ssl" : "#certificate_authorities:\n# - |\n# -----BEGIN CERTIFICATE-----\n# <redacted>\n# -----END CERTIFICATE-----\n",
"url" : "https://server.example.com:8089"
})
},
{
input_id = "system-logfile"
enabled = false
streams_json = jsonencode({
"system.auth" : {
"enabled" : false,
"vars" : {
"ignore_older" : "72h",
"paths" : [
"/var/log/auth.log*",
"/var/log/secure*"
],
"preserve_original_event" : false,
"tags" : [
"system-auth"
]
}
},
"system.syslog" : {
"enabled" : false,
"vars" : {
"ignore_older" : "72h",
"paths" : [
"/var/log/messages*",
"/var/log/syslog*",
"/var/log/system*"
],
"preserve_original_event" : false,
"tags" : [],
"exclude_files" : ["\\.gz$"]
}
}
})
vars_json = null
}
]
}
resource "elasticstack_fleet_integration_policy" "system" {
name = "<redacted>"
namespace = "<redacted>"
description = ""
agent_policy_id = elasticstack_fleet_agent_policy.somepolicy.policy_id
integration_name = "system"
integration_version = "1.60.5"
dynamic "input" {
for_each = local.fip_system_inputs
content {
input_id = input.value.input_id
enabled = input.value.enabled
vars_json = input.value.vars_json
streams_json = input.value.streams_json
}
}
} I hope, this helps to figure out the issue. |
Describe the bug
Upon upgrading to version 0.11.8, we started to get the following error. The issue seems to be in the "elasticstack_fleet_integration_policy". Pinning the version to 0.11.7 resolves the issue.
│ Error: Semantic Equality Check Error
│
│ with module.fleet_integration_policy["aws_servers_pingfed_policy_system-apf-1"].elasticstack_fleet_integration_policy.sample,
│ on modules/fleet_package_policy/main.tf line 1, in resource "elasticstack_fleet_integration_policy" "sample":
│ 1: resource "elasticstack_fleet_integration_policy" "sample" {
│
│ An unexpected error occurred while performing semantic equality checks.
│ Please report this to the provider developers.
│
│ Error: EOF
╵
To Reproduce
Steps to reproduce the behavior:
Expected behavior
Return with a clean plan.
Debug output
Run
terraform
command withTF_LOG=trace
and provide extended information on TF operations. Please ensure you redact any base64 encoded credentials from your output.eg
Screenshots
If applicable, add screenshots to help explain your problem.
Versions (please complete the following information):
Additional context
Add any other context about the problem here.
The text was updated successfully, but these errors were encountered: