Skip to content

Commit

Permalink
Merge pull request #7 from kabisa/renovate/configure
Browse files Browse the repository at this point in the history
Configure Renovate
  • Loading branch information
obeleh authored Jul 22, 2022
2 parents ad5dfcd + 5915ab7 commit 8034c06
Show file tree
Hide file tree
Showing 27 changed files with 374 additions and 342 deletions.
29 changes: 29 additions & 0 deletions .github/workflows/documentation.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# Workflow: run pre-commit (terraform fmt/validate/tflint/docs) on every
# branch push, auto-commit any files the hooks fixed, and fail only if a
# second pre-commit run still reports problems.
#
# NOTE(review): the scraped source had all YAML indentation stripped
# (every key at column 0 — invalid YAML); structure reconstructed below
# from standard GitHub Actions workflow schema. Verify against the
# original commit 8034c06.
name: Generate terraform docs

on:
  push:
    # don't run when we push a tag
    tags-ignore:
      - '*'
    # don't run when we merge to main
    # the action should have run already
    branches-ignore:
      - 'main'

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: terraform-linters/setup-tflint@v2
        name: Setup TFLint
        with:
          tflint_version: v0.38.1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      # NOTE(review): the page scrape rendered this ref as
      # "pre-commit/[email protected]" (email-obfuscation artifact).
      # Restored as pre-commit/action@v3.0.0 (current release at the
      # commit date, Jul 2022) — TODO confirm against the original file.
      - uses: pre-commit/action@v3.0.0
        # pre-commit fails if it changed files
        # we want to go on
        continue-on-error: true
      # second run must succeed: proves the hooks are clean after the
      # auto-fixes applied by the first run
      - uses: pre-commit/action@v3.0.0
      # commit any files the hooks modified back to the branch
      - uses: EndBug/add-and-commit@v9
        with:
          default_author: github_actions
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,4 @@ override.tf.json

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
examples/.terraform.lock.hcl
5 changes: 2 additions & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,9 @@ repos:
- id: terraform-fmt
- id: terraform-validate
- id: tflint
- repo: git@github.com:kabisa/terraform-datadog-pre-commit-hook.git
rev: "1.2.2"
- repo: https://github.com/kabisa/terraform-datadog-pre-commit-hook
rev: "1.3.6"
hooks:
- id: terraform-datadog-docs
exclude: ^README.md$
args:
- "."
34 changes: 18 additions & 16 deletions .terraform.lock.hcl

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

592 changes: 298 additions & 294 deletions README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion bytesin-high.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "bytesin_high" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "BytesIn unusually high"
query = "avg(${var.bytesin_high_evaluation_period}):avg:kafka.net.bytes_in.rate{${local.bytesin_high_filter}} by {host} > ${var.bytesin_high_critical}"
Expand Down
2 changes: 1 addition & 1 deletion bytesout_high.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "bytesout_high" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "BytesOut unusually high"
query = "avg(${var.bytesout_high_evaluation_period}):avg:kafka.net.bytes_out.rate{${local.bytesout_high_filter}} by {host} > ${var.bytesout_high_critical}"
Expand Down
10 changes: 0 additions & 10 deletions examples/.terraform.lock.hcl

This file was deleted.

1 change: 1 addition & 0 deletions examples/example.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# tflint-ignore: terraform_module_version
module "kafka" {
source = "kabisa/kafka/datadog"

Expand Down
2 changes: 1 addition & 1 deletion fetch_purgatory_size.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "fetch_purgatory_size" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Fetch Purgatory Size"
query = "avg(${var.fetch_purgatory_size_evaluation_period}):max:kafka.request.fetch_request_purgatory.size{${local.fetch_purgatory_size_filter}} by {host} > ${var.fetch_purgatory_size_critical}"
Expand Down
2 changes: 1 addition & 1 deletion in_sync_nodes_dropped.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "in_sync_nodes_dropped" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "In Sync Nodes dropped"
query = "avg(${var.in_sync_nodes_dropped_evaluation_period}):max:kafka.replication.isr_shrinks.rate{${local.in_sync_nodes_dropped_filter}} by {aiven-service} - max:kafka.replication.isr_expands.rate{${local.in_sync_nodes_dropped_filter}} by {aiven-service} > ${var.in_sync_nodes_dropped_critical}"
Expand Down
2 changes: 1 addition & 1 deletion leader_election_occurring.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "leader_election_occurring" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Leader Election occurring"
query = "max(${var.leader_election_occurring_evaluation_period}):avg:kafka.replication.leader_elections.rate{${local.leader_election_occurring_filter}} by {aiven-service} > ${var.leader_election_occurring_critical}"
Expand Down
2 changes: 1 addition & 1 deletion main.tf
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
module "system" {
source = "kabisa/system/datadog"
version = "1.2.1"
version = "2.0.1"

locked = var.locked
additional_tags = var.additional_tags
Expand Down
2 changes: 1 addition & 1 deletion multiple_active_controllers.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "multiple_active_controllers" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Multiple Active controllers"
query = "avg(${var.multiple_active_controllers_evaluation_period}):max:kafka.replication.active_controller_count{${local.multiple_active_controllers_filter}} by {aiven-project} > ${var.multiple_active_controllers_critical}"
Expand Down
2 changes: 1 addition & 1 deletion no_active_controllers.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "no_active_controllers" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "No Active controllers"
query = "avg(${var.no_active_controllers_evaluation_period}):max:kafka.replication.active_controller_count{${local.no_active_controllers_filter}} by {aiven-project} < ${var.no_active_controllers_critical}"
Expand Down
2 changes: 1 addition & 1 deletion offline_partitions.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "offline_partitions" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Offline Partitions"
query = "avg(${var.offline_partitions_evaluation_period}):max:kafka.replication.offline_partitions_count{${local.offline_partitions_filter}} > ${var.offline_partitions_critical}"
Expand Down
2 changes: 1 addition & 1 deletion produce_purgatory_size.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "produce_purgatory_size" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Produce Purgatory Size"
query = "avg(${var.produce_purgatory_size_evaluation_period}):max:kafka.request.producer_request_purgatory.size{${local.produce_purgatory_size_filter}} > ${var.produce_purgatory_size_critical}"
Expand Down
2 changes: 1 addition & 1 deletion provider.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ terraform {
required_providers {
datadog = {
source = "DataDog/datadog"
version = "~> 3.4"
version = "~> 3.12"
}
}
}
6 changes: 6 additions & 0 deletions renovate.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base"
]
}
2 changes: 1 addition & 1 deletion unclean_leader_election.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "unclean_leader_election" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Unclean Leader Election"
query = "avg(${var.unclean_leader_election_evaluation_period}):max:kafka.replication.unclean_leader_elections.rate{${local.unclean_leader_election_filter}} by {aiven-project} > ${var.unclean_leader_election_critical}"
Expand Down
2 changes: 1 addition & 1 deletion under_replicated_partitions.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "under_replicated_partitions" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "UnderReplicated Partitions"
query = "avg(${var.under_replicated_partitions_evaluation_period}):avg:kafka.replication.under_replicated_partitions{${local.under_replicated_partitions_filter}} by {aiven-service} > ${var.under_replicated_partitions_critical}"
Expand Down
2 changes: 1 addition & 1 deletion unusual_consumer_fetch_time.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "unusual_consumer_fetch_time" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Unusual Consumer Fetch Time"
query = "avg(${var.unusual_consumer_fetch_time_evaluation_period}):avg:kafka.request.fetch_consumer.time.avg{${local.unusual_consumer_fetch_time_filter}} > ${var.unusual_consumer_fetch_time_critical}"
Expand Down
2 changes: 1 addition & 1 deletion unusual_fetch_failures.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "unusual_fetch_failures" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Unusual Fetch Failures"
query = "avg(${var.unusual_fetch_failures_evaluation_period}):avg:kafka.request.fetch.failed.rate{${local.unusual_fetch_failures_filter}} > ${var.unusual_fetch_failures_critical}"
Expand Down
2 changes: 1 addition & 1 deletion unusual_follower_fetch_time.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "unusual_follower_fetch_time" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Unusual Follower Fetch Time"
query = "avg(${var.unusual_follower_fetch_time_evaluation_period}):avg:kafka.request.fetch_follower.time.avg{${local.unusual_follower_fetch_time_filter}} > ${var.unusual_follower_fetch_time_critical}"
Expand Down
2 changes: 1 addition & 1 deletion unusual_produce_failures.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "unusual_produce_failures" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Unusual Produce Failures"
query = "avg(${var.unusual_produce_failures_evaluation_period}):avg:kafka.request.produce.failed.rate{${local.unusual_produce_failures_filter}} > ${var.unusual_produce_failures_critical}"
Expand Down
2 changes: 1 addition & 1 deletion unusual_produce_time.tf
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ locals {

module "unusual_produce_time" {
source = "kabisa/generic-monitor/datadog"
version = "0.7.5"
version = "1.0.0"

name = "Unusual Produce Time"
query = "avg(${var.unusual_produce_time_evaluation_period}):avg:kafka.request.produce.time.avg{${local.unusual_produce_time_filter}} > ${var.unusual_produce_time_critical}"
Expand Down
2 changes: 1 addition & 1 deletion variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -50,4 +50,4 @@ variable "name_suffix" {
variable "priority_offset" {
description = "For non production workloads we can +1 on the priorities"
default = 0
}
}

0 comments on commit 8034c06

Please sign in to comment.