diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..131d3a1
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,22 @@
+[run]
+branch = True
+source =
+ fireconfig
+omit =
+ fireconfig/k8s/*
+
+[report]
+exclude_lines =
+ # Have to re-enable the standard pragma
+ \#\s*pragma: no cover
+
+ # Don't complain if tests don't hit defensive assertion code:
+ ^\s*raise AssertionError\b
+ ^\s*raise NotImplementedError\b
+ ^\s*return NotImplemented\b
+ ^\s*raise$
+
+ # Don't complain if non-runnable code isn't run:
+ ^if __name__ == ['"]__main__['"]:$
+
+# vim:ft=dosini
diff --git a/.flake8 b/.flake8
index ee81225..a2d91e5 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,6 @@
[flake8]
max-line-length = 121
-ignore = E121,E123,E126,E226,E24,E704,W503,W504,E702,E703,E741,W605
+extend-ignore = E702,E703,E741,W605,E124,E128
+extend-exclude = fireconfig/k8s/*
+
+# vim:ft=dosini
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..619968d
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,25 @@
+name: Run tests
+
+on:
+ push:
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out master
+ uses: actions/checkout@v4
+
+ - name: Install Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+
+ - name: Run tests
+ run: |
+ poetry install
+ make test
diff --git a/.gitignore b/.gitignore
index 49bf90a..1a9cae8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
__pycache__
.*sw[op]
+.coverage
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c0366ae..810e9bd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,11 +6,14 @@ repos:
hooks:
- id: end-of-file-fixer
- id: check-yaml
+ args: ["--allow-multiple-documents"]
- id: trailing-whitespace
- - repo: https://github.com/asottile/reorder-python-imports
- rev: v3.10.0
+ - repo: https://github.com/pycqa/isort
+ rev: 5.13.2
hooks:
- - id: reorder-python-imports
+ - id: isort
+ args:
+ - --sl
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
hooks:
@@ -19,4 +22,4 @@ repos:
rev: v1.4.1
hooks:
- id: mypy
- additional_dependencies: [cdk8s]
+ additional_dependencies: [cdk8s, types-simplejson, types-pyyaml]
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..c369762
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,6 @@
+.PHONY: test
+
+test:
+ poetry run coverage erase
+ poetry run coverage run -m pytest -svv itests
+ poetry run coverage report --show-missing
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..7f86bfc
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,12 @@
+# 🔥Config Examples
+
+## Workflows
+
+The `workflows` directory contains a set of GitHub Actions workflows that you can use to have 🔥Config automatically
+compute the mermaid DAG and diff of changes to your Kubernetes objects, and then leave a comment on the PR with the
+results. You _should_ just be able to copy these into your `.github/workflows` directory. You'll need to set up a
+personal access token (PAT) with read access to your actions and read and write access to pull requests, and inject
+this PAT into your workflows as a GitHub secret (a sketch of how the secret is referenced is shown below the links).
+
+- [Managing your Personal Access Tokens](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens)
+- [Using secrets in GitHub Actions](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions)
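+
+As a rough sketch (the secret name `PR_COMMENT_TOKEN` is simply what the example workflows here assume; use whatever
+name you configure), the token is passed to the actions like this:
+
+```yaml
+      - name: Comment on PR
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          token: ${{ secrets.PR_COMMENT_TOKEN }}
+```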
diff --git a/examples/workflows/k8s_plan.yml b/examples/workflows/k8s_plan.yml
new file mode 100644
index 0000000..f0c7b86
--- /dev/null
+++ b/examples/workflows/k8s_plan.yml
@@ -0,0 +1,49 @@
+name: Compute k8s plan
+
+on:
+ pull_request:
+ paths:
+ - 'k8s/**'
+
+jobs:
+ plan:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out master
+ uses: actions/checkout@v4
+ with:
+ ref: master
+ submodules: recursive
+
+ - name: Install Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+
+ - name: Compile k8s charts
+ run: make k8s
+
+ - name: Check out PR
+ uses: actions/checkout@v4
+ with:
+ clean: false
+
+ - name: Compute dag/diff
+ run: make k8s
+
+ - name: Save artifacts
+ run: |
+ mkdir -p ./artifacts
+ echo ${{ github.event.number }} > ./artifacts/PR
+ mv .build/dag.mermaid ./artifacts/dag.mermaid
+ mv .build/k8s.df ./artifacts/k8s.df
+
+ - name: Upload artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: k8s-plan-artifacts
+ path: artifacts/
diff --git a/examples/workflows/pr_comment_finished.yml b/examples/workflows/pr_comment_finished.yml
new file mode 100644
index 0000000..10d3641
--- /dev/null
+++ b/examples/workflows/pr_comment_finished.yml
@@ -0,0 +1,58 @@
+name: Comment on the PR
+
+on:
+ workflow_run:
+ workflows: ["Compute k8s plan"]
+ types:
+ - completed
+
+jobs:
+ pr-comment:
+ runs-on: ubuntu-latest
+ if: >
+ github.event.workflow_run.event == 'pull_request' &&
+ github.event.workflow_run.conclusion == 'success'
+
+ steps:
+ - name: Download artifact
+ uses: actions/download-artifact@v4
+ with:
+ name: k8s-plan-artifacts
+ github-token: ${{ secrets.PR_COMMENT_TOKEN }}
+ run-id: ${{ github.event.workflow_run.id }}
+ path: k8s-plan-artifacts
+
+ - name: Get PR number
+ uses: mathiasvr/command-output@v2.0.0
+ id: pr
+ with:
+ run: cat k8s-plan-artifacts/PR
+
+ - name: Find previous comment ID
+ uses: peter-evans/find-comment@v2
+ id: fc
+ with:
+ token: ${{ secrets.PR_COMMENT_TOKEN }}
+ issue-number: ${{ steps.pr.outputs.stdout }}
+ body-includes: ""
+
+ - name: Render Comment Template
+ run: |
+ echo "" > fireconfig-comment.md
+ echo "## Kubernetes Object DAG" >> fireconfig-comment.md
+ cat k8s-plan-artifacts/dag.mermaid >> fireconfig-comment.md
+ echo ' New object' >> fireconfig-comment.md
+ echo ' Deleted object' >> fireconfig-comment.md
+ echo ' Updated object' >> fireconfig-comment.md
+ echo ' Updated object (causes pod recreation)' >> fireconfig-comment.md
+ echo "## Detailed Diff" >> fireconfig-comment.md
+ cat k8s-plan-artifacts/k8s.df >> fireconfig-comment.md
+
+ - name: Comment on PR
+ uses: peter-evans/create-or-update-comment@v3
+ with:
+ token: ${{ secrets.PR_COMMENT_TOKEN }}
+ comment-id: ${{ steps.fc.outputs.comment-id }}
+ issue-number: ${{ steps.pr.outputs.stdout }}
+ body-path: fireconfig-comment.md
+ edit-mode: replace
diff --git a/examples/workflows/pr_comment_starting.yml b/examples/workflows/pr_comment_starting.yml
new file mode 100644
index 0000000..ed2bf41
--- /dev/null
+++ b/examples/workflows/pr_comment_starting.yml
@@ -0,0 +1,44 @@
+name: Update the PR Comment
+
+on:
+ #######################################################################################
+ # WARNING: DO NOT CHANGE THIS ACTION TO CHECK OUT OR EXECUTE ANY CODE!!!!! #
+ # #
+ # This can allow an attacker to gain write access to code in the repository or read #
+ # any repository secrets! This should _only_ be used to update or add a PR comment. #
+ # #
+ # See https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ #
+ # for more details. #
+ #######################################################################################
+ pull_request_target:
+ paths:
+ - 'k8s/**'
+
+jobs:
+ pr-comment:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Find previous comment ID
+ uses: peter-evans/find-comment@v3
+ id: fc
+ with:
+ token: ${{ secrets.PR_COMMENT_TOKEN }}
+ issue-number: ${{ github.event.pull_request.number }}
+ body-includes: ""
+
+ - name: Render Comment Template
+ run: |
+ echo
+
+ - name: Comment on PR
+ uses: peter-evans/create-or-update-comment@v3
+ with:
+ token: ${{ secrets.PR_COMMENT_TOKEN }}
+ issue-number: ${{ github.event.pull_request.number }}
+ comment-id: ${{ steps.fc.outputs.comment-id }}
+ body: |
+
+ ## Updating Kubernetes DAG...
+ Please wait until the job has finished.
+ edit-mode: replace
diff --git a/fireconfig/__init__.py b/fireconfig/__init__.py
index 5ff22cb..7113d4f 100644
--- a/fireconfig/__init__.py
+++ b/fireconfig/__init__.py
@@ -1,7 +1,27 @@
-from .container import ContainerBuilder
-from .deployment import DeploymentBuilder
-from .env import EnvBuilder
-from .volume import VolumesBuilder
+import typing as T
+from abc import ABCMeta
+from abc import abstractmethod
+from collections import defaultdict
+
+from cdk8s import App
+from cdk8s import Chart
+from cdk8s import DependencyGraph
+from constructs import Construct
+
+from fireconfig.container import ContainerBuilder
+from fireconfig.deployment import DeploymentBuilder
+from fireconfig.env import EnvBuilder
+from fireconfig.namespace import add_missing_namespace
+from fireconfig.output import format_diff
+from fireconfig.output import format_mermaid_graph
+from fireconfig.plan import GLOBAL_CHART_NAME
+from fireconfig.plan import compute_diff
+from fireconfig.plan import find_deleted_nodes
+from fireconfig.plan import get_resource_changes
+from fireconfig.plan import walk_dep_graph
+from fireconfig.subgraph import ChartSubgraph
+from fireconfig.util import fix_cluster_scoped_objects
+from fireconfig.volume import VolumesBuilder
__all__ = [
'ContainerBuilder',
@@ -9,3 +29,89 @@
'EnvBuilder',
'VolumesBuilder',
]
+
+
+class AppPackage(metaclass=ABCMeta):
+ """
+    Users should subclass AppPackage to define each deployable unit that gets passed into fireconfig's compile.
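+
+    A minimal sketch of an implementation (the class name and id below are illustrative only):
+
+        class MyPackage(AppPackage):
+            @property
+            def id(self):
+                return "my-package"
+
+            def compile(self, chart: Construct):
+                ...  # build your Kubernetes objects against `chart` here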
+ """
+
+ @property
+ @abstractmethod
+ def id(self):
+ ...
+
+ @abstractmethod
+ def compile(self, app: Construct):
+ ...
+
+
+def compile(
+ pkgs: T.Dict[str, T.List[AppPackage]],
+ dag_filename: T.Optional[str] = None,
+ cdk8s_outdir: T.Optional[str] = None,
+ dry_run: bool = False,
+) -> T.Tuple[str, str]:
+ """
+ `compile` takes a list of "packages" and generates Kubernetes manifests from them. It
+ also generates a Markdown-ified "diff" and a mermaid graph representing the Kubernetes
+ manifest structure and changes.
+
+    :param pkgs: a mapping from namespace name to the list of packages to compile into that namespace
+ :param dag_filename: the location of a previous DAG, for use in generating diffs
+ :param cdk8s_outdir: where to save the generated Kubernetes manifests
+    :param dry_run: if True, compute the DAG and diff but skip writing the manifests to disk
+
+ :returns: the mermaid DAG and markdown-ified diff as a tuple of strings
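+
+    Example (a rough sketch; `MyPackage` is a user-defined `AppPackage`, and the paths are illustrative):
+
+        dag, diff = fireconfig.compile(
+            {"my-namespace": [MyPackage()]},
+            dag_filename=".build/dag.mermaid",
+            cdk8s_outdir=".build/manifests",
+        )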
+ """
+
+ app = App(outdir=cdk8s_outdir)
+
+ # Anything that is a "global" dependency (e.g., namespaces) that should be generated before
+ # everything else, or that should only be generated once, belongs in the global chart
+ gl = Chart(app, GLOBAL_CHART_NAME, disable_resource_name_hashes=True)
+
+ # For each cdk8s chart, we generate a sub-DAG (stored in `subgraphs`) and then we connect
+ # all the subgraphs together via the `subgraph_dag`
+ subgraph_dag = defaultdict(list)
+ subgraphs = {}
+ subgraphs[GLOBAL_CHART_NAME] = ChartSubgraph(GLOBAL_CHART_NAME)
+
+ for ns, pkglist in pkgs.items():
+ add_missing_namespace(gl, ns)
+ for pkg in pkglist:
+ chart = Chart(app, pkg.id, namespace=ns, disable_resource_name_hashes=True)
+ chart.add_dependency(gl)
+ pkg.compile(chart)
+
+ fix_cluster_scoped_objects(chart)
+ subgraphs[pkg.id] = ChartSubgraph(pkg.id)
+ subgraph_dag[gl.node.id].append(pkg.id)
+
+ # cdk8s doesn't compute the full dependency graph until you call `synth`, and there's no
+ # public access to it at that point, which is annoying. Until that point, the dependency
+ # graph only includes the dependencies that you've explicitly added. The format is
+ #
+ # [root (empty node)] ---> leaf nodes of created objects ---> tree in reverse
+ # |
+ # -----> [list of chart objects]
+ #
+    # The consequence is that we need to start at the root node, walk forwards, look at all the things
+    # that have "chart" fields, and then from there walk in reverse. It's somewhat annoying.
+ for obj in DependencyGraph(app.node).root.outbound:
+ walk_dep_graph(obj, subgraphs)
+ diff, kinds = compute_diff(app)
+ resource_changes = get_resource_changes(diff, kinds)
+
+ try:
+ find_deleted_nodes(subgraphs, resource_changes, dag_filename)
+ except Exception as e:
+ print(f"WARNING: {e}\nCould not read old DAG file, graph may be missing deleted nodes")
+
+ graph_str = format_mermaid_graph(subgraph_dag, subgraphs, dag_filename, resource_changes)
+ diff_str = format_diff(resource_changes)
+
+ if not dry_run:
+ app.synth()
+
+ return graph_str, diff_str
diff --git a/fireconfig/container.py b/fireconfig/container.py
index cd7cd96..a09cb59 100644
--- a/fireconfig/container.py
+++ b/fireconfig/container.py
@@ -1,69 +1,62 @@
-from typing import Any
-from typing import Dict
-from typing import Mapping
-from typing import Optional
-from typing import Sequence
-from typing import Set
+import typing as T
+
+from cdk8s import Chart
from fireconfig import k8s
from fireconfig.env import EnvBuilder
from fireconfig.resources import Resources
from fireconfig.types import Capability
+from fireconfig.volume import VolumeDefsWithObject
from fireconfig.volume import VolumesBuilder
class ContainerBuilder:
- def __init__(self, name: str, image: str, command: Optional[str] = None, args: Optional[Sequence[str]] = None):
+ def __init__(self, name: str, image: str, command: T.Optional[str] = None, args: T.Optional[T.Sequence[str]] = None):
self._name = name
self._image = image
self._args = args
self._command = command
- self._env: Optional[EnvBuilder] = None
- self._env_names: Optional[Sequence[str]] = None
- self._resources: Optional[Resources] = None
- self._ports: Sequence[int] = []
- self._volumes: Optional[VolumesBuilder] = None
- self._volume_names: Optional[Sequence[str]] = None
- self._capabilities: Set[Capability] = set()
-
- def get_ports(self) -> Sequence[int]:
- return self._ports
-
- def get_volumes(self) -> Mapping[str, Mapping[str, Any]]:
- if self._volumes is None:
- return dict()
+ self._env: T.Optional[EnvBuilder] = None
+ self._env_names: T.Optional[T.Sequence[str]] = None
+ self._resources: T.Optional[Resources] = None
+ self._ports: T.Sequence[int] = []
+ self._volumes: T.Optional[VolumesBuilder] = None
+ self._volume_names: T.Optional[T.Sequence[str]] = None
+ self._capabilities: T.Set[Capability] = set()
- return self._volumes.build_volumes(self._volume_names)
+ @property
+ def ports(self) -> T.Sequence[int]:
+ return self._ports
- def with_env(self, env: EnvBuilder, names: Optional[Sequence[str]] = None) -> 'ContainerBuilder':
+ def with_env(self, env: EnvBuilder, names: T.Optional[T.Sequence[str]] = None) -> T.Self:
self._env = env
self._env_names = names
return self
def with_resources(
self, *,
- requests: Optional[Mapping[str, Any]] = None,
- limits: Optional[Mapping[str, Any]] = None,
- ) -> 'ContainerBuilder':
+ requests: T.Optional[T.Mapping[str, T.Any]] = None,
+ limits: T.Optional[T.Mapping[str, T.Any]] = None,
+ ) -> T.Self:
self._resources = Resources(requests, limits)
return self
- def with_ports(self, *ports: int) -> 'ContainerBuilder':
+ def with_ports(self, *ports: int) -> T.Self:
self._ports = ports
return self
- def with_security_context(self, capability: Capability) -> 'ContainerBuilder':
+ def with_security_context(self, capability: Capability) -> T.Self:
self._capabilities.add(capability)
return self
- def with_volumes(self, volumes: VolumesBuilder, names: Optional[Sequence[str]] = None) -> 'ContainerBuilder':
+ def with_volumes(self, volumes: VolumesBuilder, names: T.Optional[T.Sequence[str]] = None) -> T.Self:
self._volumes = volumes
self._volume_names = names
return self
def build(self) -> k8s.Container:
- optional: Dict[str, Any] = {}
+ optional: T.MutableMapping[str, T.Any] = {}
if self._command:
optional["command"] = [self._command]
if self._args:
@@ -93,3 +86,9 @@ def build(self) -> k8s.Container:
image=self._image,
**optional,
)
+
+ def build_volumes(self, chart: Chart) -> VolumeDefsWithObject:
+ if self._volumes is None:
+ return dict()
+
+ return self._volumes.build_volumes(chart, self._volume_names)
diff --git a/fireconfig/deployment.py b/fireconfig/deployment.py
index 453199e..3d5f2d4 100644
--- a/fireconfig/deployment.py
+++ b/fireconfig/deployment.py
@@ -1,66 +1,44 @@
-from typing import Any
-from typing import List
-from typing import Mapping
-from typing import MutableMapping
-from typing import Optional
-from typing import Tuple
-from typing import Union
-
-from cdk8s import ApiObject
+import typing as T
+
from cdk8s import Chart
from cdk8s import JsonPatch
from fireconfig import k8s
from fireconfig.container import ContainerBuilder
-from fireconfig.errors import FireConfigError
+from fireconfig.object import ObjectBuilder
from fireconfig.types import TaintEffect
+from fireconfig.volume import VolumeDefsWithObject
from fireconfig.volume import VolumesBuilder
+_APP_LABEL_KEY = "fireconfig.io/app"
-_STANDARD_NAMESPACES = [
- 'default',
- 'kube-node-lease',
- 'kube-public',
- 'kube-system',
-]
+class DeploymentBuilder(ObjectBuilder):
+ def __init__(self, *, app_label: str, tag: T.Optional[str] = None):
+ super().__init__()
-class DeploymentBuilder:
- def __init__(self, *, namespace: str, selector: Mapping[str, str]):
- self._namespace = namespace
- self._annotations: MutableMapping[str, str] = {}
- self._labels: MutableMapping[str, str] = {}
- self._replicas: Union[int, Tuple[int, int]] = 1
- self._selector = selector
+ self._replicas: T.Union[int, T.Tuple[int, int]] = 1
+ self._app_label = app_label
+ self._selector = {_APP_LABEL_KEY: app_label}
+ self._tag = "" if tag is None else f"{tag}-"
- self._pod_annotations: MutableMapping[str, str] = {}
- self._pod_labels: MutableMapping[str, str] = dict(selector)
- self._containers: List[ContainerBuilder] = []
- self._node_selector: Optional[Mapping[str, str]] = None
- self._service_account_role: Optional[str] = None
+ self._pod_annotations: T.MutableMapping[str, str] = {}
+ self._pod_labels: T.MutableMapping[str, str] = dict(self._selector)
+ self._containers: T.List[ContainerBuilder] = []
+ self._node_selector: T.Optional[T.Mapping[str, str]] = None
+ self._service_account_role: T.Optional[str] = None
self._service_account_role_is_cluster_role: bool = False
self._service: bool = False
- self._service_ports: Optional[List[int]] = None
- self._service_object: Optional[k8s.KubeService] = None
- self._tolerations: List[Tuple[str, str, TaintEffect]] = []
- self._volumes: Optional[VolumesBuilder] = None
- self._deps: List[ApiObject] = []
-
- def get_service_address(self) -> str:
- if not self._service_object:
- raise FireConfigError('No service object; have you called `build()`?')
-
- return f'{self._service_object.name}.{self._namespace}'
-
- def with_annotation(self, key: str, value: str) -> 'DeploymentBuilder':
- self._annotations[key] = value
- return self
+ self._service_name: str = f"{self._app_label}-svc"
+ self._service_ports: T.Optional[T.List[int]] = None
+ self._tolerations: T.List[T.Tuple[str, str, TaintEffect]] = []
+ self._volumes: T.Optional[VolumesBuilder] = None
- def with_label(self, key: str, value: str) -> 'DeploymentBuilder':
- self._labels[key] = value
- return self
+ @property
+ def service_name(self) -> str:
+ return self._service_name
- def with_replicas(self, min_replicas: int, max_replicas: Optional[int] = None) -> 'DeploymentBuilder':
+ def with_replicas(self, min_replicas: int, max_replicas: T.Optional[int] = None) -> T.Self:
if max_replicas is not None:
if min_replicas > max_replicas:
raise ValueError(f'min_replicas cannot be larger than max_replicas: {min_replicas} > {max_replicas}')
@@ -69,72 +47,50 @@ def with_replicas(self, min_replicas: int, max_replicas: Optional[int] = None) -
self._replicas = min_replicas
return self
- def with_pod_annotation(self, key: str, value: str) -> 'DeploymentBuilder':
+ def with_pod_annotation(self, key: str, value: str) -> T.Self:
self._pod_annotations[key] = value
return self
- def with_pod_label(self, key: str, value: str) -> 'DeploymentBuilder':
+ def with_pod_label(self, key: str, value: str) -> T.Self:
self._pod_labels[key] = value
return self
- def with_containers(self, *containers: ContainerBuilder) -> 'DeploymentBuilder':
+ def with_containers(self, *containers: ContainerBuilder) -> T.Self:
self._containers.extend(containers)
return self
- def with_node_selector(self, key: str, value: str) -> 'DeploymentBuilder':
+ def with_node_selector(self, key: str, value: str) -> T.Self:
self._node_selector = {key: value}
return self
- def with_service(self, ports: Optional[List[int]] = None) -> 'DeploymentBuilder':
+ def with_service(self, ports: T.Optional[T.List[int]] = None) -> T.Self:
self._service = True
self._service_ports = ports
return self
- def with_service_account_and_role_binding(
- self,
- role_name: str,
- is_cluster_role: bool = False,
- ) -> 'DeploymentBuilder':
+ def with_service_account_and_role_binding(self, role_name: str, is_cluster_role: bool = False) -> T.Self:
self._service_account_role = role_name
self._service_account_role_is_cluster_role = is_cluster_role
return self
- def with_toleration(
- self,
- key: str,
- value: str = "",
- effect: TaintEffect = TaintEffect.NoExecute,
- ) -> 'DeploymentBuilder':
+ def with_toleration(self, key: str, value: str = "", effect: TaintEffect = TaintEffect.NoExecute) -> T.Self:
self._tolerations.append((key, value, effect))
return self
- def with_dependencies(self, *deps: ApiObject) -> 'DeploymentBuilder':
- self._deps.extend(deps)
- return self
-
- def build(self, chart: Chart) -> k8s.KubeDeployment:
- if self._namespace not in _STANDARD_NAMESPACES:
- ns = k8s.KubeNamespace(chart, "ns", metadata={"name": self._namespace})
- self._deps.insert(0, ns)
-
- meta: MutableMapping[str, Any] = {"namespace": self._namespace}
- if self._annotations:
- meta["annotations"] = self._annotations
- if self._labels:
- meta["labels"] = self._labels
-
- pod_meta: MutableMapping[str, Any] = {"namespace": self._namespace}
+ def _build(self, meta: k8s.ObjectMeta, chart: Chart) -> k8s.KubeDeployment:
+ # TODO auto-add app key
+ pod_meta: T.MutableMapping[str, T.Any] = {}
if self._pod_annotations:
pod_meta["annotations"] = self._pod_annotations
pod_meta["labels"] = self._pod_labels
if type(self._replicas) is tuple:
- replicas: Optional[int] = None
+ replicas: T.Optional[int] = None
raise NotImplementedError("No support for HPA currently")
else:
replicas = self._replicas # type: ignore
- optional: MutableMapping[str, Any] = {}
+ optional: T.MutableMapping[str, T.Any] = {}
if self._node_selector is not None:
optional["node_selector"] = self._node_selector
@@ -153,25 +109,28 @@ def build(self, chart: Chart) -> k8s.KubeDeployment:
if self._service_ports is None:
self._service_ports = []
for c in self._containers:
- self._service_ports.extend(c.get_ports())
- self._service_object = self._build_service(chart)
- self._deps.append(self._service_object)
+ self._service_ports.extend(c.ports)
+ self._build_service(chart)
if len(self._tolerations) > 0:
optional["tolerations"] = [
{"key": t[0], "value": t[1], "effect": t[2]} for t in self._tolerations
]
- vols: Mapping[str, Mapping[str, Any]] = dict()
+ vols: VolumeDefsWithObject = dict()
for c in self._containers:
- vols = {**vols, **c.get_volumes()}
+ vols = {**vols, **c.build_volumes(chart)}
if vols:
- optional["volumes"] = list(vols.values())
+ optional["volumes"] = []
+ for (defn, obj) in vols.values():
+ optional["volumes"].append(defn)
+ if obj is not None:
+ self._deps.append(obj)
depl = k8s.KubeDeployment(
- chart, "deployment",
- metadata=k8s.ObjectMeta(**meta),
+ chart, f"{self._tag}depl",
+ metadata=meta,
spec=k8s.DeploymentSpec(
selector=k8s.LabelSelector(match_labels=self._selector),
replicas=replicas,
@@ -191,22 +150,17 @@ def build(self, chart: Chart) -> k8s.KubeDeployment:
{"name": "POD_OWNER", "value": depl.name},
))
- for d in self._deps:
- depl.add_dependency(d)
-
return depl
+ # TODO maybe move these into separate files at some point?
def _build_service_account(self, chart: Chart) -> k8s.KubeServiceAccount:
- return k8s.KubeServiceAccount(
- chart, "service-account",
- metadata={"namespace": self._namespace},
- )
+ return k8s.KubeServiceAccount(chart, f"{self._tag}sa")
def _build_service(self, chart: Chart) -> k8s.KubeService:
assert self._service_ports
return k8s.KubeService(
chart, "service",
- metadata={"namespace": self._namespace},
+ metadata={"name": self._service_name},
spec=k8s.ServiceSpec(
ports=[
k8s.ServicePort(port=p, target_port=k8s.IntOrString.from_number(p))
@@ -222,12 +176,12 @@ def _build_role_binding_for_service_account(
service_account: k8s.KubeServiceAccount,
role_name: str,
is_cluster_role: bool,
- ) -> Union[k8s.KubeClusterRoleBinding, k8s.KubeRoleBinding]:
+ ) -> T.Union[k8s.KubeClusterRoleBinding, k8s.KubeRoleBinding]:
subjects = [k8s.Subject(
kind="ServiceAccount",
name=service_account.name,
- namespace=self._namespace,
+ namespace=chart.namespace,
)]
role_ref = k8s.RoleRef(
api_group="rbac.authorization.k8s.io",
@@ -236,6 +190,6 @@ def _build_role_binding_for_service_account(
)
if is_cluster_role:
- return k8s.KubeClusterRoleBinding(chart, "cluster-role-binding", subjects=subjects, role_ref=role_ref)
+ return k8s.KubeClusterRoleBinding(chart, f"{self._tag}crb", subjects=subjects, role_ref=role_ref)
else:
- return k8s.KubeRoleBinding(chart, "role-binding", subjects=subjects, role_ref=role_ref)
+ return k8s.KubeRoleBinding(chart, f"{self._tag}rb", subjects=subjects, role_ref=role_ref)
diff --git a/fireconfig/env.py b/fireconfig/env.py
index 44a4d22..9d20d80 100644
--- a/fireconfig/env.py
+++ b/fireconfig/env.py
@@ -1,19 +1,15 @@
-from typing import KeysView
-from typing import Mapping
-from typing import MutableMapping
-from typing import Optional
-from typing import Sequence
-from typing import Tuple
-from typing import Union
+import typing as T
from fireconfig.types import DownwardAPIField
class EnvBuilder:
- def __init__(self, env: Mapping[str, str] = dict()):
- self._env: MutableMapping[str, Tuple[str, Union[str, Mapping]]] = {k: ("value", v) for (k, v) in env.items()}
+ def __init__(self, env: T.Mapping[str, str] = dict()):
+ self._env: T.MutableMapping[str, T.Tuple[str, T.Union[str, T.Mapping]]] = {
+ k: ("value", v) for (k, v) in env.items()
+ }
- def with_field_ref(self, name: str, field: DownwardAPIField, key: Optional[str] = None) -> 'EnvBuilder':
+ def with_field_ref(self, name: str, field: DownwardAPIField, key: T.Optional[str] = None) -> T.Self:
field_str = str(field)
if field in (DownwardAPIField.ANNOTATION, DownwardAPIField.LABEL):
field_str = field_str.format(key)
@@ -21,7 +17,7 @@ def with_field_ref(self, name: str, field: DownwardAPIField, key: Optional[str]
self._env[name] = ("valueFrom", {"fieldRef": {"fieldPath": field_str}})
return self
- def build(self, names: Optional[Union[Sequence[str], KeysView[str]]] = None) -> Sequence[Mapping]:
+ def build(self, names: T.Optional[T.Union[T.Sequence[str], T.KeysView[str]]] = None) -> T.Sequence[T.Mapping]:
if names is None:
names = self._env.keys()
diff --git a/fireconfig/errors.py b/fireconfig/errors.py
deleted file mode 100644
index 126a18a..0000000
--- a/fireconfig/errors.py
+++ /dev/null
@@ -1,2 +0,0 @@
-class FireConfigError(Exception):
- pass
diff --git a/fireconfig/namespace.py b/fireconfig/namespace.py
new file mode 100644
index 0000000..8b03250
--- /dev/null
+++ b/fireconfig/namespace.py
@@ -0,0 +1,18 @@
+from cdk8s import Chart
+
+from fireconfig import k8s
+
+_STANDARD_NAMESPACES = [
+ 'default',
+ 'kube-node-lease',
+ 'kube-public',
+ 'kube-system',
+]
+
+
+def add_missing_namespace(gl: Chart, ns: str):
+ parent = gl.node.id
+ if ns not in _STANDARD_NAMESPACES:
+ k8s.KubeNamespace(gl, ns, metadata={"name": ns})
+ parent = f"{gl.node.id}/{ns}"
+ return parent
diff --git a/fireconfig/object.py b/fireconfig/object.py
new file mode 100644
index 0000000..5ca2b1c
--- /dev/null
+++ b/fireconfig/object.py
@@ -0,0 +1,45 @@
+import typing as T
+from abc import ABCMeta
+from abc import abstractmethod
+
+from cdk8s import ApiObject
+from cdk8s import Chart
+
+from fireconfig import k8s
+
+
+class ObjectBuilder(metaclass=ABCMeta):
+ def __init__(self: T.Self):
+ self._annotations: T.MutableMapping[str, str] = {}
+ self._labels: T.MutableMapping[str, str] = {}
+ self._deps: T.List[ApiObject] = []
+
+ def with_annotation(self, key: str, value: str) -> T.Self:
+ self._annotations[key] = value
+ return self
+
+ def with_label(self, key: str, value: str) -> T.Self:
+ self._labels[key] = value
+ return self
+
+ def with_dependencies(self, *deps: ApiObject) -> T.Self:
+ self._deps.extend(deps)
+ return self
+
+ def build(self, chart: Chart):
+ meta: T.MutableMapping[str, T.Any] = {}
+ if self._annotations:
+ meta["annotations"] = self._annotations
+ if self._labels:
+ meta["labels"] = self._labels
+
+ obj = self._build(k8s.ObjectMeta(**meta), chart)
+
+ for d in self._deps:
+ obj.add_dependency(d)
+
+ return obj
+
+ @abstractmethod
+ def _build(self, meta: k8s.ObjectMeta, chart: Chart):
+ ...
diff --git a/fireconfig/output.py b/fireconfig/output.py
new file mode 100644
index 0000000..7b2bc82
--- /dev/null
+++ b/fireconfig/output.py
@@ -0,0 +1,74 @@
+import typing as T
+
+import simplejson as json
+from deepdiff.helper import notpresent # type: ignore
+
+from fireconfig.plan import DELETED_OBJS_END
+from fireconfig.plan import DELETED_OBJS_START
+from fireconfig.plan import STYLE_DEFS_END
+from fireconfig.plan import STYLE_DEFS_START
+from fireconfig.plan import ResourceChanges
+from fireconfig.plan import ResourceState
+from fireconfig.subgraph import ChartSubgraph
+
+
+def _format_node_label(node: str, kind: str) -> str:
+ name = node.split("/")[-1]
+    return f"  {node}[{kind}<br>{name}]\n"
+
+
+def format_mermaid_graph(
+ subgraph_dag: T.Mapping[str, T.List[str]],
+ subgraphs: T.Mapping[str, ChartSubgraph],
+ old_dag_filename: T.Optional[str],
+ resource_changes: T.Mapping[str, ResourceChanges],
+) -> str:
+
+ mermaid = "```mermaid\n"
+ mermaid += "%%{init: {'themeVariables': {'mainBkg': '#ddd'}}}%%\n"
+ mermaid += "graph LR\n\n"
+
+ # Colors taken from https://personal.sron.nl/~pault/#sec:qualitative
+ mermaid += "classDef default color:#000\n"
+
+ for chart, sg in subgraphs.items():
+ mermaid += f"subgraph {chart}\n"
+ mermaid += " direction LR\n"
+ for n, k in sg.nodes():
+ mermaid += _format_node_label(n, k)
+
+ for s, e in sg.edges():
+ mermaid += f" {s}--->{e}\n"
+
+ mermaid += f"{DELETED_OBJS_START}\n"
+ for del_line in sg.deleted_lines():
+ mermaid += del_line
+ mermaid += f"{DELETED_OBJS_END}\n"
+ mermaid += "end\n\n"
+
+ for sg1, edges in subgraph_dag.items():
+ for sg2 in edges:
+ mermaid += f"{sg1}--->{sg2}\n"
+
+ mermaid += f"\n{STYLE_DEFS_START}\n"
+ for res, changes in resource_changes.items():
+ if changes.state != ResourceState.Unchanged:
+ mermaid += f" style {res} fill:{changes.state.value}\n"
+ mermaid += f"{STYLE_DEFS_END}\n"
+ mermaid += "```\n\n"
+
+ return mermaid
+
+
+def format_diff(resource_changes: T.Mapping[str, ResourceChanges]) -> str:
+ diff_details = ""
+
+ for res, c in sorted(resource_changes.items()):
+        diff_details += f"\n\n#### {res}: {c.state.name}\n\n<details>\n\n"
+ for path, r1, r2 in c.changes:
+ r1_str = json.dumps(r1, indent=' ') if r1 != notpresent else r1
+ r2_str = json.dumps(r2, indent=' ') if r2 != notpresent else r2
+ diff_details += f"```\n{path}:\n{r1_str} --> {r2_str}\n```\n\n"
+        diff_details += "</details>\n"
+
+ return diff_details
diff --git a/fireconfig/plan.py b/fireconfig/plan.py
new file mode 100644
index 0000000..7bf1342
--- /dev/null
+++ b/fireconfig/plan.py
@@ -0,0 +1,200 @@
+"""
+The bulk of the logic for computing the plan (a la terraform plan) lives in this file.
+The rough outline of the steps taken is:
+
+1. For each chart, walk the dependency graph to construct a DAG (`walk_dep_graph`)
+2. Compute a diff between the newly-generated manifests and the old ones (`compute_diff`)
+3. Turn that diff into a list of per-resource changes (`get_resource_changes`)
+4. Find out what's been deleted since the last run and add that into the graph (`find_deleted_nodes`)
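+
+These steps are tied together by `fireconfig.compile`; roughly (mirroring fireconfig/__init__.py):
+
+    for obj in DependencyGraph(app.node).root.outbound:
+        walk_dep_graph(obj, subgraphs)
+    diff, kinds = compute_diff(app)
+    resource_changes = get_resource_changes(diff, kinds)
+    find_deleted_nodes(subgraphs, resource_changes, dag_filename)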
+"""
+
+import re
+import typing as T
+from collections import defaultdict
+from enum import Enum
+from glob import glob
+
+import yaml
+from cdk8s import App
+from cdk8s import DependencyVertex
+from deepdiff import DeepDiff # type: ignore
+from deepdiff.helper import notpresent # type: ignore
+
+from fireconfig.subgraph import ChartSubgraph
+from fireconfig.util import owned_name
+from fireconfig.util import owned_name_from_dict
+
+GLOBAL_CHART_NAME = "global"
+DELETED_OBJS_START = "%% DELETED OBJECTS START"
+DELETED_OBJS_END = "%% DELETED OBJECTS END"
+STYLE_DEFS_START = "%% STYLE DEFINITIONS START"
+STYLE_DEFS_END = "%% STYLE DEFINITIONS END"
+
+ChangeTuple = T.Tuple[str, T.Union[T.Mapping, notpresent], T.Union[T.Mapping, notpresent]]
+
+
+# Colors taken from https://personal.sron.nl/~pault/#sec:qualitative
+class ResourceState(Enum):
+ Unchanged = ""
+ Changed = "#6ce"
+ ChangedWithPodRecreate = "#cb4"
+ Added = "#283"
+ Removed = "#e67"
+ Unknown = "#f00"
+
+
+class ResourceChanges:
+ def __init__(self) -> None:
+ self._state: ResourceState = ResourceState.Unchanged
+ self._changes: T.List[ChangeTuple] = []
+
+ @property
+ def state(self) -> ResourceState:
+ return self._state
+
+ @property
+ def changes(self) -> T.List[ChangeTuple]:
+ return self._changes
+
+ def update_state(self, change_type: str, path: str, kind: T.Optional[str]):
+ """
+ Given a particular resource, update the state (added, removed, changed, etc) for
+ that resource. We use the list of "change types" from DeepDiff defined here:
+
+ https://github.com/seperman/deepdiff/blob/89c5cc227c48b63be4a0e1ad4af59d3c1b0272d7/deepdiff/serialization.py#L388
+
+        Since these are Kubernetes objects, we expect the root object to be a dictionary; if it's
+        not, something has gone horribly wrong. If the root object itself was added or removed, we mark the
+        entire object as added or removed; otherwise, if some sub-dictionary was added or removed,
+        the root object was just "changed".
+
+        We use the `kind` field to determine whether pod recreation needs to happen. This entire
+        function is currently very hacky and incomplete; it would be good to make it more robust at some point.
+ """
+ if self._state in {ResourceState.Added, ResourceState.Removed}:
+ return
+
+ if path == "root":
+ if change_type == "dictionary_item_removed":
+ self._state = ResourceState.Removed
+ elif change_type == "dictionary_item_added":
+ self._state = ResourceState.Added
+ else:
+ self._state = ResourceState.Unknown
+ elif self._state == ResourceState.ChangedWithPodRecreate:
+ return
+ elif kind == "Deployment":
+            # TODO: this is obviously incomplete; it will not detect all cases
+            # where pod recreation happens
+ if (
+ path.startswith("root['spec']['template']['spec']")
+ or path.startswith("root['spec']['selector']")
+ ):
+ self._state = ResourceState.ChangedWithPodRecreate
+ else:
+ self._state = ResourceState.Changed
+ else:
+ self._state = ResourceState.Changed
+
+ def add_change(self, path: str, r1: T.Union[T.Mapping, notpresent], r2: T.Union[T.Mapping, notpresent]):
+ self._changes.append((path, r1, r2))
+
+
+def compute_diff(app: App) -> T.Tuple[T.Mapping[str, T.Any], T.Mapping[str, str]]:
+ """
+    To compute a diff, we look at the old YAML files that were written out "last time", and
+    compare them to the YAML generated by cdk8s "this time".
+ """
+
+ kinds = {}
+ old_defs = {}
+ for filename in glob(f"{app.outdir}/*{app.output_file_extension}"):
+ with open(filename) as f:
+ parsed_filename = re.match(app.outdir + r"\/(\d{4}-)?(.*)" + app.output_file_extension, filename)
+ old_chart = "UNKNOWN"
+ if parsed_filename:
+ old_chart = parsed_filename.group(2)
+ for old_obj in yaml.safe_load_all(f):
+ node_id = owned_name_from_dict(old_obj, old_chart)
+ old_defs[node_id] = old_obj
+
+ new_defs = {}
+ for chart in app.charts:
+ for new_obj in chart.api_objects:
+ node_id = owned_name(new_obj)
+ new_defs[node_id] = new_obj.to_json()
+ kinds[node_id] = new_obj.kind
+
+ return DeepDiff(old_defs, new_defs, view="tree"), kinds
+
+
+def walk_dep_graph(v: DependencyVertex, subgraphs: T.Mapping[str, ChartSubgraph]):
+ assert v.value
+ if not hasattr(v.value, "chart"):
+ return
+
+ chart = v.value.chart.node.id # type: ignore
+ subgraphs[chart].add_node(v)
+
+ for dep in v.outbound:
+ assert dep.value
+        if not hasattr(dep.value, "chart"):
+            continue
+
+ # Note that cdk8s does things backwards, so instead of adding the edge from v->dep,
+ # we add an edge from dep->v
+ subgraphs[chart].add_edge(dep, v)
+ walk_dep_graph(dep, subgraphs)
+
+
+def get_resource_changes(diff: T.Mapping[str, T.Any], kinds: T.Mapping[str, str]) -> T.Mapping[str, ResourceChanges]:
+ resource_changes: T.MutableMapping[str, ResourceChanges] = defaultdict(lambda: ResourceChanges())
+ for change_type, items in diff.items():
+ for i in items:
+ root_item = i.path(output_format="list")[0]
+ path = re.sub(r"\[" + f"'{root_item}'" + r"\]", "", i.path())
+ resource_changes[root_item].update_state(change_type, path, kinds.get(root_item))
+ resource_changes[root_item].add_change(path, i.t1, i.t2)
+
+ return resource_changes
+
+
+def find_deleted_nodes(
+ subgraphs: T.Mapping[str, ChartSubgraph],
+ resource_changes: T.Mapping[str, ResourceChanges],
+ old_dag_filename: T.Optional[str],
+):
+ """
+ To determine the location and connections of deleted nodes in the DAG,
+ we just look at the old DAG file and copy out the edge lines that contain the
+ removed objects. The DAG file is split into subgraphs, so we parse it line-by-line
+ and only copy entries that are a) inside a subgraph block, and b) weren't marked as
+ deleted "last time". We use special comment markers in the DAG file to tell which
+ things were deleted "last time".
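+
+    For example, a subgraph block in the old DAG file looks roughly like this (see
+    itests/output/dag.mermaid for a complete example):
+
+        subgraph some-chart
+          some-chart/a--->some-chart/b
+        %% DELETED OBJECTS START
+        %% DELETED OBJECTS END
+        end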
+ """
+ if not old_dag_filename:
+ return
+
+ old_dag_lines = []
+ with open(old_dag_filename) as f:
+ current_chart = None
+ del_lines = False
+
+ for l in f.readlines():
+ chart_match = re.match("^\s*subgraph (.*)", l)
+ if chart_match:
+ current_chart = chart_match.group(1)
+ elif re.match("^\s*end$", l):
+ current_chart = None
+ if l.startswith(DELETED_OBJS_START):
+ del_lines = True
+ elif l.startswith(DELETED_OBJS_END):
+ del_lines = False
+ elif current_chart is not None and not del_lines:
+ old_dag_lines.append((current_chart, l))
+
+ for res, changes in resource_changes.items():
+ if changes.state == ResourceState.Removed:
+ for chart, l in old_dag_lines:
+ if res in l:
+ subgraphs[chart].add_deleted_line(l)
diff --git a/fireconfig/resources.py b/fireconfig/resources.py
index 1d51548..dfc7d67 100644
--- a/fireconfig/resources.py
+++ b/fireconfig/resources.py
@@ -1,13 +1,10 @@
-from typing import Mapping
-from typing import MutableMapping
-from typing import Optional
-from typing import Union
+import typing as T
from fireconfig import k8s
-ResourceMap = Mapping[str, Union[int, str]]
-QuantityMap = Mapping[str, k8s.Quantity]
-MutableQuantityMap = MutableMapping[str, k8s.Quantity]
+ResourceMap = T.Mapping[str, T.Union[int, str]]
+QuantityMap = T.Mapping[str, k8s.Quantity]
+MutableQuantityMap = T.MutableMapping[str, k8s.Quantity]
def parse_resource_map(m: ResourceMap) -> QuantityMap:
@@ -22,9 +19,9 @@ def parse_resource_map(m: ResourceMap) -> QuantityMap:
class Resources:
- def __init__(self, requests: Optional[ResourceMap], limits: Optional[ResourceMap]) -> None:
- self.requests: Optional[QuantityMap] = None
- self.limits: Optional[QuantityMap] = None
+ def __init__(self, requests: T.Optional[ResourceMap], limits: T.Optional[ResourceMap]):
+ self.requests: T.Optional[QuantityMap] = None
+ self.limits: T.Optional[QuantityMap] = None
if requests is not None:
self.requests = parse_resource_map(requests)
diff --git a/fireconfig/subgraph.py b/fireconfig/subgraph.py
new file mode 100644
index 0000000..55e16b9
--- /dev/null
+++ b/fireconfig/subgraph.py
@@ -0,0 +1,39 @@
+import typing as T
+from collections import defaultdict
+
+from cdk8s import ApiObject
+from cdk8s import DependencyVertex
+
+from fireconfig.util import owned_name
+
+
+class ChartSubgraph:
+ def __init__(self, name: str) -> None:
+ self._name = name
+ self._dag: T.MutableMapping[str, T.List[str]] = defaultdict(list)
+ self._kinds: T.MutableMapping[str, str] = {}
+ self._deleted_lines: T.Set[str] = set()
+
+ def add_node(self, v: DependencyVertex) -> str:
+ obj = T.cast(ApiObject, v.value)
+ name = owned_name(obj)
+ self._kinds[name] = obj.kind
+        self._dag[name]  # touch the defaultdict so nodes with no edges still show up in the graph
+ return name
+
+ def add_edge(self, s: DependencyVertex, t: DependencyVertex):
+ s_name = self.add_node(s)
+ t_name = self.add_node(t)
+ self._dag[s_name].append(t_name)
+
+ def add_deleted_line(self, l: str):
+ self._deleted_lines.add(l)
+
+ def nodes(self) -> T.List[T.Tuple[str, str]]:
+ return [(n, self._kinds[n]) for n in self._dag.keys()]
+
+ def edges(self) -> T.List[T.Tuple[str, str]]:
+ return [(s, e) for s, l in self._dag.items() for e in l]
+
+ def deleted_lines(self) -> T.Iterable[str]:
+ return self._deleted_lines
diff --git a/fireconfig/util.py b/fireconfig/util.py
new file mode 100644
index 0000000..414bbfe
--- /dev/null
+++ b/fireconfig/util.py
@@ -0,0 +1,57 @@
+import typing as T
+
+from cdk8s import ApiObject
+from cdk8s import Chart
+from cdk8s import JsonPatch
+
+
+# cdk8s incorrectly adds namespaces to cluster-scoped objects, so this function corrects for that
+# (see https://github.com/cdk8s-team/cdk8s/issues/1618 and https://github.com/cdk8s-team/cdk8s/issues/1558)
+def fix_cluster_scoped_objects(chart: Chart):
+ for obj in chart.api_objects:
+ if is_cluster_scoped(obj.kind):
+ obj.add_json_patch(JsonPatch.remove("/metadata/namespace"))
+
+
+def is_cluster_scoped(kind: str) -> bool:
+ # Taken from a vanilla 1.27 kind cluster
+ return kind in {
+ "APIService",
+ "CertificateSigningRequest",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "ComponentStatus",
+ "CSIDriver",
+ "CSINode",
+ "CustomResourceDefinition",
+ "FlowSchema",
+ "IngressClass",
+ "MutatingWebhookConfiguration",
+ "Namespace",
+ "Node",
+ "PriorityClass",
+ "PriorityLevelConfiguration",
+ "RuntimeClass",
+        "SelfSubjectAccessReview",
+ "SelfSubjectRulesReview",
+ "StorageClass",
+ "SubjectAccessReview",
+ "TokenReview",
+        "PersistentVolume",
+ "ValidatingWebhookConfiguration",
+ "VolumeAttachment",
+ }
+
+
+def owned_name_from_dict(obj: T.Mapping[str, T.Any], chart: str) -> str:
+ prefix = obj["metadata"].get("namespace")
+ if prefix is None or is_cluster_scoped(obj["kind"]):
+ prefix = chart
+ return prefix + "/" + obj["metadata"]["name"]
+
+
+def owned_name(obj: ApiObject) -> str:
+ prefix = obj.metadata.namespace
+ if prefix is None or is_cluster_scoped(obj.kind):
+ prefix = obj.chart.node.id
+ return prefix + "/" + obj.name
diff --git a/fireconfig/volume.py b/fireconfig/volume.py
index 11db365..92a2054 100644
--- a/fireconfig/volume.py
+++ b/fireconfig/volume.py
@@ -1,45 +1,53 @@
-from typing import Any
-from typing import Iterable
-from typing import Mapping
-from typing import MutableMapping
-from typing import Optional
-from typing import Sequence
+import typing as T
+
+from cdk8s import ApiObject
+from cdk8s import Chart
from fireconfig import k8s
+VolumeDefsWithObject = T.Mapping[str, T.Tuple[T.Mapping[str, T.Any], T.Optional[ApiObject]]]
+
class VolumesBuilder:
def __init__(self) -> None:
- self._volumes: MutableMapping[str, Any] = {}
- self._volume_mounts: MutableMapping[str, str] = {}
-
- def with_config_map(self, name: str, mount_path: str, config_map: k8s.KubeConfigMap) -> 'VolumesBuilder':
- self._volumes[name] = ("configMap", {
- "name": config_map.name,
- "items": [{
- "key": cm_entry,
- "path": cm_entry,
- } for cm_entry in config_map.to_json()["data"]],
- })
-
- self._volume_mounts[name] = mount_path
+ self._volume_mounts: T.MutableMapping[str, str] = {}
+ self._config_map_data: T.MutableMapping[str, T.Mapping[str, str]] = {}
+
+ def with_config_map(self, vol_name: str, mount_path: str, data: T.Mapping[str, str]) -> T.Self:
+ self._config_map_data[vol_name] = data
+ self._volume_mounts[vol_name] = mount_path
return self
- def get_path_to(self, name: str) -> str:
- path = self._volume_mounts[name] + '/'
- match self._volumes[name]:
- case ("configMap", data):
- path += data["items"][0]["path"]
+ def get_path_to_config_map(self, vol_name: str, path_name: str) -> str:
+ assert vol_name in self._config_map_data and path_name in self._config_map_data[vol_name]
+ path = self._volume_mounts[vol_name] + '/' + path_name
return path
- def build_mounts(self, names: Optional[Iterable[str]] = None) -> Sequence[Mapping]:
+ def build_mounts(self, names: T.Optional[T.Iterable[str]] = None) -> T.Sequence[T.Mapping]:
if names is None:
names = self._volume_mounts.keys()
return [{"name": name, "mountPath": self._volume_mounts[name]} for name in names]
- def build_volumes(self, names: Optional[Iterable[str]] = None) -> Mapping[str, Mapping[str, Any]]:
+ def build_volumes(self, chart: Chart, names: T.Optional[T.Iterable[str]] = None) -> VolumeDefsWithObject:
if names is None:
names = self._volume_mounts.keys()
- return {name: {"name": name, self._volumes[name][0]: self._volumes[name][1]} for name in names}
+ volumes = {}
+ for vol_name, data in self._config_map_data.items():
+ if vol_name not in names:
+ continue
+
+ cm = k8s.KubeConfigMap(chart, vol_name, data=data)
+ volumes[vol_name] = ({
+ "name": vol_name,
+ "configMap": {
+ "name": cm.name,
+ "items": [{
+ "key": cm_entry,
+ "path": cm_entry,
+ } for cm_entry in data],
+ }
+ }, cm)
+
+ return volumes
diff --git a/itests/output/0000-global.k8s.yaml b/itests/output/0000-global.k8s.yaml
new file mode 100644
index 0000000..19102b3
--- /dev/null
+++ b/itests/output/0000-global.k8s.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: the-namespace
diff --git a/itests/output/0001-fc-test-package.k8s.yaml b/itests/output/0001-fc-test-package.k8s.yaml
new file mode 100644
index 0000000..5197f57
--- /dev/null
+++ b/itests/output/0001-fc-test-package.k8s.yaml
@@ -0,0 +1,97 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: deployment1-svc
+ namespace: the-namespace
+spec:
+ ports:
+ - port: 8086
+ targetPort: 8086
+ selector:
+ fireconfig.io/app: deployment1
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: fc-test-package-sa
+ namespace: the-namespace
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: fc-test-package-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: fc-test-package-sa
+ namespace: the-namespace
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: fc-test-package-the-volume-name
+ namespace: the-namespace
+data:
+ foo.yml: bar
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: fc-test-package-other_name
+ namespace: the-namespace
+data:
+ bar.yml: asdf
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: fc-test-package-depl
+ namespace: the-namespace
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ fireconfig.io/app: deployment1
+ template:
+ metadata:
+ labels:
+ fireconfig.io/app: deployment1
+ spec:
+ containers:
+ - args:
+ - /run.sh
+ env:
+ - name: POD_OWNER
+ value: fc-test-package-depl
+ image: test:latest
+ name: container1
+ ports:
+ - containerPort: 8086
+ securityContext:
+ capabilities:
+ add:
+ - SYS_PTRACE
+ volumeMounts:
+ - mountPath: /mount/path
+ name: the-volume-name
+ - mountPath: /mount/path
+ name: other_name
+ nodeSelector:
+ type: kind-worker
+ serviceAccountName: fc-test-package-sa
+ volumes:
+ - configMap:
+ items:
+ - key: foo.yml
+ path: foo.yml
+ name: fc-test-package-the-volume-name
+ name: the-volume-name
+ - configMap:
+ items:
+ - key: bar.yml
+ path: bar.yml
+ name: fc-test-package-other_name
+ name: other_name
diff --git a/itests/output/dag.mermaid b/itests/output/dag.mermaid
new file mode 100644
index 0000000..c27c406
--- /dev/null
+++ b/itests/output/dag.mermaid
@@ -0,0 +1,33 @@
+```mermaid
+%%{init: {'themeVariables': {'mainBkg': '#ddd'}}}%%
+graph LR
+
+classDef default color:#000
+subgraph global
+ direction LR
+  global/the-namespace[Namespace<br>the-namespace]
+%% DELETED OBJECTS START
+%% DELETED OBJECTS END
+end
+
+subgraph fc-test-package
+ direction LR
+  the-namespace/deployment1-svc[Service<br>deployment1-svc]
+  the-namespace/fc-test-package-depl[Deployment<br>fc-test-package-depl]
+  the-namespace/fc-test-package-sa[ServiceAccount<br>fc-test-package-sa]
+  fc-test-package/fc-test-package-crb[ClusterRoleBinding<br>fc-test-package-crb]
+  the-namespace/fc-test-package-the-volume-name[ConfigMap<br>fc-test-package-the-volume-name]
+  the-namespace/fc-test-package-other_name[ConfigMap<br>fc-test-package-other_name]
+ the-namespace/fc-test-package-sa--->the-namespace/fc-test-package-depl
+ fc-test-package/fc-test-package-crb--->the-namespace/fc-test-package-depl
+ the-namespace/fc-test-package-the-volume-name--->the-namespace/fc-test-package-depl
+ the-namespace/fc-test-package-other_name--->the-namespace/fc-test-package-depl
+%% DELETED OBJECTS START
+%% DELETED OBJECTS END
+end
+
+global--->fc-test-package
+
+%% STYLE DEFINITIONS START
+%% STYLE DEFINITIONS END
+```
diff --git a/itests/test_deployment.py b/itests/test_deployment.py
new file mode 100644
index 0000000..3051e35
--- /dev/null
+++ b/itests/test_deployment.py
@@ -0,0 +1,54 @@
+from constructs import Construct
+
+import fireconfig as fire
+from fireconfig.types import Capability
+
+GRPC_PORT = 8086
+OUTPUT_DIR = "itests/output"
+
+
+def _make_deployment():
+ volumes = (fire.VolumesBuilder()
+ .with_config_map("the-volume-name", "/mount/path", {"foo.yml": "bar"})
+ .with_config_map("other_name", "/mount/path", {"bar.yml": "asdf"})
+ )
+
+ container = fire.ContainerBuilder(
+ name="container1",
+ image="test:latest",
+ args=["/run.sh"],
+ ).with_ports(GRPC_PORT).with_security_context(Capability.DEBUG).with_volumes(volumes)
+
+ return (fire.DeploymentBuilder(app_label="deployment1")
+ .with_containers(container)
+ .with_service()
+ .with_service_account_and_role_binding("cluster-admin", True)
+ .with_node_selector("type", "kind-worker")
+ )
+
+
+class FCTestPackage(fire.AppPackage):
+ def __init__(self):
+ self._depl = _make_deployment()
+
+ def compile(self, chart: Construct):
+ self._depl.build(chart)
+
+ @property
+ def id(self):
+ return "fc-test-package"
+
+
+def test_deployment():
+ old_dag_filename = f"{OUTPUT_DIR}/dag.mermaid"
+ dag, diff = fire.compile(
+ {"the-namespace": [FCTestPackage()]},
+ dag_filename=old_dag_filename,
+ cdk8s_outdir=OUTPUT_DIR
+ )
+
+ assert diff == ""
+ with open(old_dag_filename) as f:
+    # the generated dag string ends with an extra trailing newline which pre-commit strips from the
+    # checked-in file, so compare everything except for that very last character
+ assert dag[:-1] == f.read()
diff --git a/poetry.lock b/poetry.lock
index 7f46592..8d7f65e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -59,6 +59,17 @@ jsii = ">=1.94.0,<2.0.0"
publication = ">=0.0.3"
typeguard = ">=2.13.3,<2.14.0"
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
[[package]]
name = "constructs"
version = "10.3.0"
@@ -75,6 +86,88 @@ jsii = ">=1.90.0,<2.0.0"
publication = ">=0.0.3"
typeguard = ">=2.13.3,<2.14.0"
+[[package]]
+name = "coverage"
+version = "7.4.1"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "coverage-7.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7"},
+ {file = "coverage-7.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61"},
+ {file = "coverage-7.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee"},
+ {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25"},
+ {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19"},
+ {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630"},
+ {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c"},
+ {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b"},
+ {file = "coverage-7.4.1-cp310-cp310-win32.whl", hash = "sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016"},
+ {file = "coverage-7.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018"},
+ {file = "coverage-7.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295"},
+ {file = "coverage-7.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c"},
+ {file = "coverage-7.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676"},
+ {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd"},
+ {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011"},
+ {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74"},
+ {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1"},
+ {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6"},
+ {file = "coverage-7.4.1-cp311-cp311-win32.whl", hash = "sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5"},
+ {file = "coverage-7.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968"},
+ {file = "coverage-7.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581"},
+ {file = "coverage-7.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6"},
+ {file = "coverage-7.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66"},
+ {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156"},
+ {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3"},
+ {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1"},
+ {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1"},
+ {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc"},
+ {file = "coverage-7.4.1-cp312-cp312-win32.whl", hash = "sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74"},
+ {file = "coverage-7.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448"},
+ {file = "coverage-7.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218"},
+ {file = "coverage-7.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45"},
+ {file = "coverage-7.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d"},
+ {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06"},
+ {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766"},
+ {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75"},
+ {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60"},
+ {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad"},
+ {file = "coverage-7.4.1-cp38-cp38-win32.whl", hash = "sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042"},
+ {file = "coverage-7.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d"},
+ {file = "coverage-7.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54"},
+ {file = "coverage-7.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70"},
+ {file = "coverage-7.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628"},
+ {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950"},
+ {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1"},
+ {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7"},
+ {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756"},
+ {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35"},
+ {file = "coverage-7.4.1-cp39-cp39-win32.whl", hash = "sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c"},
+ {file = "coverage-7.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a"},
+ {file = "coverage-7.4.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166"},
+ {file = "coverage-7.4.1.tar.gz", hash = "sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04"},
+]
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "deepdiff"
+version = "6.7.1"
+description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "deepdiff-6.7.1-py3-none-any.whl", hash = "sha256:58396bb7a863cbb4ed5193f548c56f18218060362311aa1dc36397b2f25108bd"},
+ {file = "deepdiff-6.7.1.tar.gz", hash = "sha256:b367e6fa6caac1c9f500adc79ada1b5b1242c50d5f716a1a4362030197847d30"},
+]
+
+[package.dependencies]
+ordered-set = ">=4.0.2,<4.2.0"
+
+[package.extras]
+cli = ["click (==8.1.3)", "pyyaml (==6.0.1)"]
+optimize = ["orjson"]
+
[[package]]
name = "flake8"
version = "7.0.0"
@@ -106,6 +199,17 @@ files = [
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"]
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
[[package]]
name = "jsii"
version = "1.94.0"
@@ -194,6 +298,46 @@ files = [
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]

+[[package]]
+name = "ordered-set"
+version = "4.1.0"
+description = "An OrderedSet is a custom MutableSet that remembers its order, so that every"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ordered-set-4.1.0.tar.gz", hash = "sha256:694a8e44c87657c59292ede72891eb91d34131f6531463aab3009191c77364a8"},
+ {file = "ordered_set-4.1.0-py3-none-any.whl", hash = "sha256:046e1132c71fcf3330438a539928932caf51ddbc582496833e23de611de14562"},
+]
+
+[package.extras]
+dev = ["black", "mypy", "pytest"]
+
+[[package]]
+name = "packaging"
+version = "23.2"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
+]
+
+[[package]]
+name = "pluggy"
+version = "1.4.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"},
+ {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
[[package]]
name = "publication"
version = "0.0.3"
@@ -227,6 +371,26 @@ files = [
{file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
]

+[[package]]
+name = "pytest"
+version = "7.4.4"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
+ {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
[[package]]
name = "python-dateutil"
version = "2.8.2"
@@ -241,6 +405,162 @@ files = [
[package.dependencies]
six = ">=1.5"
+[[package]]
+name = "pyyaml"
+version = "6.0.1"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+ {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+]
+
+[[package]]
+name = "simplejson"
+version = "3.19.2"
+description = "Simple, fast, extensible JSON encoder/decoder for Python"
+optional = false
+python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "simplejson-3.19.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3471e95110dcaf901db16063b2e40fb394f8a9e99b3fe9ee3acc6f6ef72183a2"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3194cd0d2c959062b94094c0a9f8780ffd38417a5322450a0db0ca1a23e7fbd2"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:8a390e56a7963e3946ff2049ee1eb218380e87c8a0e7608f7f8790ba19390867"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1537b3dd62d8aae644f3518c407aa8469e3fd0f179cdf86c5992792713ed717a"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a8617625369d2d03766413bff9e64310feafc9fc4f0ad2b902136f1a5cd8c6b0"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:2c433a412e96afb9a3ce36fa96c8e61a757af53e9c9192c97392f72871e18e69"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f1c70249b15e4ce1a7d5340c97670a95f305ca79f376887759b43bb33288c973"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:287e39ba24e141b046812c880f4619d0ca9e617235d74abc27267194fc0c7835"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6f0a0b41dd05eefab547576bed0cf066595f3b20b083956b1405a6f17d1be6ad"},
+ {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f98d918f7f3aaf4b91f2b08c0c92b1774aea113334f7cde4fe40e777114dbe6"},
+ {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d74beca677623481810c7052926365d5f07393c72cbf62d6cce29991b676402"},
+ {file = "simplejson-3.19.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f2398361508c560d0bf1773af19e9fe644e218f2a814a02210ac2c97ad70db0"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ad331349b0b9ca6da86064a3599c425c7a21cd41616e175ddba0866da32df48"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:332c848f02d71a649272b3f1feccacb7e4f7e6de4a2e6dc70a32645326f3d428"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25785d038281cd106c0d91a68b9930049b6464288cea59ba95b35ee37c2d23a5"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18955c1da6fc39d957adfa346f75226246b6569e096ac9e40f67d102278c3bcb"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:11cc3afd8160d44582543838b7e4f9aa5e97865322844b75d51bf4e0e413bb3e"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b01fda3e95d07a6148702a641e5e293b6da7863f8bc9b967f62db9461330562c"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:778331444917108fa8441f59af45886270d33ce8a23bfc4f9b192c0b2ecef1b3"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9eb117db8d7ed733a7317c4215c35993b815bf6aeab67523f1f11e108c040672"},
+ {file = "simplejson-3.19.2-cp310-cp310-win32.whl", hash = "sha256:39b6d79f5cbfa3eb63a869639cfacf7c41d753c64f7801efc72692c1b2637ac7"},
+ {file = "simplejson-3.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:5675e9d8eeef0aa06093c1ff898413ade042d73dc920a03e8cea2fb68f62445a"},
+ {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed628c1431100b0b65387419551e822987396bee3c088a15d68446d92f554e0c"},
+ {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:adcb3332979cbc941b8fff07181f06d2b608625edc0a4d8bc3ffc0be414ad0c4"},
+ {file = "simplejson-3.19.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08889f2f597ae965284d7b52a5c3928653a9406d88c93e3161180f0abc2433ba"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef7938a78447174e2616be223f496ddccdbf7854f7bf2ce716dbccd958cc7d13"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a970a2e6d5281d56cacf3dc82081c95c1f4da5a559e52469287457811db6a79b"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554313db34d63eac3b3f42986aa9efddd1a481169c12b7be1e7512edebff8eaf"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d36081c0b1c12ea0ed62c202046dca11438bee48dd5240b7c8de8da62c620e9"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3cd18e03b0ee54ea4319cdcce48357719ea487b53f92a469ba8ca8e39df285e"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:66e5dc13bfb17cd6ee764fc96ccafd6e405daa846a42baab81f4c60e15650414"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:972a7833d4a1fcf7a711c939e315721a88b988553fc770a5b6a5a64bd6ebeba3"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3e74355cb47e0cd399ead3477e29e2f50e1540952c22fb3504dda0184fc9819f"},
+ {file = "simplejson-3.19.2-cp311-cp311-win32.whl", hash = "sha256:1dd4f692304854352c3e396e9b5f0a9c9e666868dd0bdc784e2ac4c93092d87b"},
+ {file = "simplejson-3.19.2-cp311-cp311-win_amd64.whl", hash = "sha256:9300aee2a8b5992d0f4293d88deb59c218989833e3396c824b69ba330d04a589"},
+ {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b8d940fd28eb34a7084877747a60873956893e377f15a32ad445fe66c972c3b8"},
+ {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4969d974d9db826a2c07671273e6b27bc48e940738d768fa8f33b577f0978378"},
+ {file = "simplejson-3.19.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c594642d6b13d225e10df5c16ee15b3398e21a35ecd6aee824f107a625690374"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2f5a398b5e77bb01b23d92872255e1bcb3c0c719a3be40b8df146570fe7781a"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176a1b524a3bd3314ed47029a86d02d5a95cc0bee15bd3063a1e1ec62b947de6"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3c7363a8cb8c5238878ec96c5eb0fc5ca2cb11fc0c7d2379863d342c6ee367a"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:346820ae96aa90c7d52653539a57766f10f33dd4be609206c001432b59ddf89f"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de9a2792612ec6def556d1dc621fd6b2073aff015d64fba9f3e53349ad292734"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1c768e7584c45094dca4b334af361e43b0aaa4844c04945ac7d43379eeda9bc2"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:9652e59c022e62a5b58a6f9948b104e5bb96d3b06940c6482588176f40f4914b"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9c1a4393242e321e344213a90a1e3bf35d2f624aa8b8f6174d43e3c6b0e8f6eb"},
+ {file = "simplejson-3.19.2-cp312-cp312-win32.whl", hash = "sha256:7cb98be113911cb0ad09e5523d0e2a926c09a465c9abb0784c9269efe4f95917"},
+ {file = "simplejson-3.19.2-cp312-cp312-win_amd64.whl", hash = "sha256:6779105d2fcb7fcf794a6a2a233787f6bbd4731227333a072d8513b252ed374f"},
+ {file = "simplejson-3.19.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:061e81ea2d62671fa9dea2c2bfbc1eec2617ae7651e366c7b4a2baf0a8c72cae"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4280e460e51f86ad76dc456acdbfa9513bdf329556ffc8c49e0200878ca57816"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11c39fbc4280d7420684494373b7c5904fa72a2b48ef543a56c2d412999c9e5d"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bccb3e88ec26ffa90f72229f983d3a5d1155e41a1171190fa723d4135523585b"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bb5b50dc6dd671eb46a605a3e2eb98deb4a9af787a08fcdddabe5d824bb9664"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d94245caa3c61f760c4ce4953cfa76e7739b6f2cbfc94cc46fff6c050c2390c5"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d0e5ffc763678d48ecc8da836f2ae2dd1b6eb2d27a48671066f91694e575173c"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d222a9ed082cd9f38b58923775152003765016342a12f08f8c123bf893461f28"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8434dcdd347459f9fd9c526117c01fe7ca7b016b6008dddc3c13471098f4f0dc"},
+ {file = "simplejson-3.19.2-cp36-cp36m-win32.whl", hash = "sha256:c9ac1c2678abf9270e7228133e5b77c6c3c930ad33a3c1dfbdd76ff2c33b7b50"},
+ {file = "simplejson-3.19.2-cp36-cp36m-win_amd64.whl", hash = "sha256:92c4a4a2b1f4846cd4364855cbac83efc48ff5a7d7c06ba014c792dd96483f6f"},
+ {file = "simplejson-3.19.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0d551dc931638e2102b8549836a1632e6e7cf620af3d093a7456aa642bff601d"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73a8a4653f2e809049999d63530180d7b5a344b23a793502413ad1ecea9a0290"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40847f617287a38623507d08cbcb75d51cf9d4f9551dd6321df40215128325a3"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be893258d5b68dd3a8cba8deb35dc6411db844a9d35268a8d3793b9d9a256f80"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9eb3cff1b7d71aa50c89a0536f469cb8d6dcdd585d8f14fb8500d822f3bdee4"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d0f402e787e6e7ee7876c8b05e2fe6464820d9f35ba3f172e95b5f8b699f6c7f"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fbbcc6b0639aa09b9649f36f1bcb347b19403fe44109948392fbb5ea69e48c3e"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:2fc697be37585eded0c8581c4788fcfac0e3f84ca635b73a5bf360e28c8ea1a2"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b0a3eb6dd39cce23801a50c01a0976971498da49bc8a0590ce311492b82c44b"},
+ {file = "simplejson-3.19.2-cp37-cp37m-win32.whl", hash = "sha256:49f9da0d6cd17b600a178439d7d2d57c5ef01f816b1e0e875e8e8b3b42db2693"},
+ {file = "simplejson-3.19.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c87c22bd6a987aca976e3d3e23806d17f65426191db36d40da4ae16a6a494cbc"},
+ {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e4c166f743bb42c5fcc60760fb1c3623e8fda94f6619534217b083e08644b46"},
+ {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0a48679310e1dd5c9f03481799311a65d343748fe86850b7fb41df4e2c00c087"},
+ {file = "simplejson-3.19.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0521e0f07cb56415fdb3aae0bbd8701eb31a9dfef47bb57206075a0584ab2a2"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d2d5119b1d7a1ed286b8af37357116072fc96700bce3bec5bb81b2e7057ab41"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c1467d939932901a97ba4f979e8f2642415fcf02ea12f53a4e3206c9c03bc17"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49aaf4546f6023c44d7e7136be84a03a4237f0b2b5fb2b17c3e3770a758fc1a0"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60848ab779195b72382841fc3fa4f71698a98d9589b0a081a9399904487b5832"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0436a70d8eb42bea4fe1a1c32d371d9bb3b62c637969cb33970ad624d5a3336a"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:49e0e3faf3070abdf71a5c80a97c1afc059b4f45a5aa62de0c2ca0444b51669b"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ff836cd4041e16003549449cc0a5e372f6b6f871eb89007ab0ee18fb2800fded"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3848427b65e31bea2c11f521b6fc7a3145d6e501a1038529da2391aff5970f2f"},
+ {file = "simplejson-3.19.2-cp38-cp38-win32.whl", hash = "sha256:3f39bb1f6e620f3e158c8b2eaf1b3e3e54408baca96a02fe891794705e788637"},
+ {file = "simplejson-3.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:0405984f3ec1d3f8777c4adc33eac7ab7a3e629f3b1c05fdded63acc7cf01137"},
+ {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:445a96543948c011a3a47c8e0f9d61e9785df2544ea5be5ab3bc2be4bd8a2565"},
+ {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a8c3cc4f9dfc33220246760358c8265dad6e1104f25f0077bbca692d616d358"},
+ {file = "simplejson-3.19.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af9c7e6669c4d0ad7362f79cb2ab6784d71147503e62b57e3d95c4a0f222c01c"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:064300a4ea17d1cd9ea1706aa0590dcb3be81112aac30233823ee494f02cb78a"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9453419ea2ab9b21d925d0fd7e3a132a178a191881fab4169b6f96e118cc25bb"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e038c615b3906df4c3be8db16b3e24821d26c55177638ea47b3f8f73615111c"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16ca9c90da4b1f50f089e14485db8c20cbfff2d55424062791a7392b5a9b3ff9"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1018bd0d70ce85f165185d2227c71e3b1e446186f9fa9f971b69eee223e1e3cd"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e8dd53a8706b15bc0e34f00e6150fbefb35d2fd9235d095b4f83b3c5ed4fa11d"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:2d022b14d7758bfb98405672953fe5c202ea8a9ccf9f6713c5bd0718eba286fd"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:febffa5b1eda6622d44b245b0685aff6fb555ce0ed734e2d7b1c3acd018a2cff"},
+ {file = "simplejson-3.19.2-cp39-cp39-win32.whl", hash = "sha256:4edcd0bf70087b244ba77038db23cd98a1ace2f91b4a3ecef22036314d77ac23"},
+ {file = "simplejson-3.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:aad7405c033d32c751d98d3a65801e2797ae77fac284a539f6c3a3e13005edc4"},
+ {file = "simplejson-3.19.2-py3-none-any.whl", hash = "sha256:bcedf4cae0d47839fee7de344f96b5694ca53c786f28b5f773d4f0b265a159eb"},
+ {file = "simplejson-3.19.2.tar.gz", hash = "sha256:9eb442a2442ce417801c912df68e1f6ccfcd41577ae7274953ab3ad24ef7d82c"},
+]
+
[[package]]
name = "six"
version = "1.16.0"
@@ -267,6 +587,28 @@ files = [
doc = ["sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["mypy", "pytest", "typing-extensions"]
+[[package]]
+name = "types-pyyaml"
+version = "6.0.12.12"
+description = "Typing stubs for PyYAML"
+optional = false
+python-versions = "*"
+files = [
+ {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"},
+ {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"},
+]
+
+[[package]]
+name = "types-simplejson"
+version = "3.19.0.2"
+description = "Typing stubs for simplejson"
+optional = false
+python-versions = "*"
+files = [
+ {file = "types-simplejson-3.19.0.2.tar.gz", hash = "sha256:ebc81f886f89d99d6b80c726518aa2228bc77c26438f18fd81455e4f79f8ee1b"},
+ {file = "types_simplejson-3.19.0.2-py3-none-any.whl", hash = "sha256:8ba093dc7884f59b3e62aed217144085e675a269debc32678fd80e0b43b2b86f"},
+]
+
[[package]]
name = "typing-extensions"
version = "4.9.0"
@@ -281,4 +623,4 @@ files = [
[metadata]
lock-version = "2.0"
python-versions = "^3.11"
-content-hash = "14570e7a3fb14796b962e2f6a8b36f5a34facf9a65c214638ae4adb1d92adf4a"
+content-hash = "bcd18208624df9aac1469d8f5dad1c21271e7ad626005cffcb4b3a2a9811fb44"
diff --git a/pyproject.toml b/pyproject.toml
index ad2ed57..582eeeb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,11 +8,18 @@ readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11"
-cdk8s = "^2"
+cdk8s = "^2.68"
+deepdiff = "^6.7.1"
+pyyaml = "^6.0.1"
+simplejson = "^3.19.2"

[tool.poetry.group.dev.dependencies]
mypy = "^1"
flake8 = "*"
+types-simplejson = "^3.19.0.2"
+types-pyyaml = "^6.0.12.12"
+pytest = "^7.4.4"
+coverage = "^7.4.1"

[build-system]
requires = ["poetry-core"]