Commit 8708852

Merge branch 'master' of github.com:ClubCedille/Plateforme-Cedille
SonOfLope committed May 24, 2024
2 parents 418b6ae + f1061b9

Showing 6 changed files with 220 additions and 1 deletion.
2 changes: 1 addition & 1 deletion apps/argo-apps/kustomization.yaml
@@ -26,7 +26,7 @@ resources:
 - ../acets/grav/
 - ../ets-demo/grav/
 - ../raconteurs/grav/
-- ../hackqc2024/
+# - ../hackqc2024/
 - ../integrale/grav/
 - ../applets/hello/
 # ... other apps ...
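
Commenting the entry out drops hackqc2024 from the rendered output, so if automated pruning is enabled on the parent app, Argo CD will remove that application on the next sync. A quick local check — a minimal sketch assuming the kustomize CLI is installed:

# Should print nothing once the entry is commented out.
kustomize build apps/argo-apps | grep hackqc2024
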
4 changes: 4 additions & 0 deletions apps/raconteurs/grav/prod/kustomization.yaml
@@ -6,6 +6,10 @@ namePrefix: raconteurs-
 resources:
 - ../../../../bases/grav
 
+images:
+- name: "ghcr.io/clubcedille/grav:0.0.8"
+  newTag: "1.7.45.1"
+
 patches:
 - path: vault-patch.yaml
 - target:
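
The images transformer rewrites matching container images at build time, so the shared grav base keeps its original tag while prod pins 1.7.45.1. Note that kustomize matches on the image name and this name field includes a tag, so it is worth confirming the rewrite actually lands — a sketch assuming the kustomize CLI is installed:

# Expect the grav image to show tag 1.7.45.1 in the rendered manifests.
kustomize build apps/raconteurs/grav/prod | grep "image:"
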
31 changes: 31 additions & 0 deletions system/oneuptime/argo-app.yaml
@@ -0,0 +1,31 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: oneuptime
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  destination:
    namespace: oneuptime
    server: https://kubernetes.default.svc
  sources:
    - chart: oneuptime
      helm:
        releaseName: oneuptime
        valueFiles:
          - $values/system/oneuptime/helm/values.yaml
      repoURL: https://helm-chart.oneuptime.com
      targetRevision: 7.0.1978
    - repoURL: https://github.com/ClubCedille/Plateforme-Cedille.git
      targetRevision: HEAD
      ref: values
    - repoURL: https://github.com/ClubCedille/Plateforme-Cedille.git
      path: system/oneuptime/configs
      targetRevision: HEAD
  syncPolicy:
    automated:
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
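
This uses Argo CD's multi-source Application pattern: the first source pulls the chart from the OneUptime Helm repository, the $values/ prefix in valueFiles resolves against the second source (the ref: values checkout of this repo), and the third source applies the raw manifests under system/oneuptime/configs. A sketch for inspecting the result, assuming the argocd CLI is installed and logged in:

# All three sources should resolve; selfHeal keeps the app converged.
argocd app get oneuptime
argocd app history oneuptime
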
25 changes: 25 additions & 0 deletions system/oneuptime/configs/ingress.yaml
@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: oneuptime
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    ingress.kubernetes.io/force-ssl-redirect: "true"
    kubernetes.io/tls-acme: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - status.omni.cedille.club
      secretName: status-https-cert
  rules:
    - host: status.omni.cedille.club
      http:
        paths:
          - path: /
            backend:
              service:
                name: oneuptime-nginx
                port:
                  number: 80
            pathType: Prefix
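
The cert-manager.io/cluster-issuer annotation tells cert-manager to request a certificate for the tls block and store it in status-https-cert. A sketch for verifying issuance once the Ingress is applied, assuming the manifests land in the oneuptime namespace:

kubectl get certificate -n oneuptime
kubectl describe certificate status-https-cert -n oneuptime
# Once the certificate reports Ready, the status page should answer over HTTPS:
curl -I https://status.omni.cedille.club
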
155 changes: 155 additions & 0 deletions system/oneuptime/helm/values.yaml
@@ -0,0 +1,155 @@
global:
  storageClass: mayastor
  clusterDomain: &global-cluster-domain cluster.local
  # Please change this to the domain name / IP where the OneUptime server is hosted.
  host: status.omni.cedille.club
  httpProtocol: https
# (Optional): You usually do not need to set this if you're self-hosting. If you do set it, set it to a long random value.
oneuptimeSecret:
encryptionSecret:
# (Optional): You usually do not need to set this if you're self-hosting.
openTelemetryCollectorHost:
fluentdHost:
deployment:
  replicaCount: 1
metalLb:
  enabled: false
  ipAdddressPool:
    enabled: false
    addresses:
      # - 51.158.55.153/32 # List of IP addresses of all the servers in the cluster.
ingress:
  service:
    type: ClusterIP
    externalIPs:
      # - 51.158.55.153 # Please make sure this is the same as the one in metalLb.ipAdddressPool.addresses
postgresql:
  clusterDomain: *global-cluster-domain
  auth:
    username: postgres
    database: oneuptimedb
  architecture: standalone
  primary:
    service:
      ports:
        postgresql: "5432"
    terminationGracePeriodSeconds: 0 # We do this because we do not want to wait for the pod to terminate in case of node failure. https://medium.com/tailwinds-navigator/kubernetes-tip-how-statefulsets-behave-differently-than-deployments-when-node-fails-d29e36bca7d5
    persistence:
      size: 25Gi
  readReplicas:
    terminationGracePeriodSeconds: 0 # We do this because we do not want to wait for the pod to terminate in case of node failure. https://medium.com/tailwinds-navigator/kubernetes-tip-how-statefulsets-behave-differently-than-deployments-when-node-fails-d29e36bca7d5
    persistence:
      size: 25Gi
clickhouse:
  clusterDomain: *global-cluster-domain
  service:
    ports:
      http: "8123"
  shards: 1
  replicaCount: 1
  terminationGracePeriodSeconds: 0 # We do this because we do not want to wait for the pod to terminate in case of node failure. https://medium.com/tailwinds-navigator/kubernetes-tip-how-statefulsets-behave-differently-than-deployments-when-node-fails-d29e36bca7d5
  zookeeper:
    enabled: false
  persistence:
    size: 25Gi
  auth:
    username: oneuptime
  initdbScripts:
    db-init.sql: |
      CREATE DATABASE oneuptime;
redis:
  clusterDomain: *global-cluster-domain
  architecture: standalone
  auth:
    enabled: true
  master:
    persistence:
      enabled: false # We don't need redis persistence, because we don't do anything with it.
  replica:
    persistence:
      enabled: false # We don't need redis persistence, because we don't do anything with it.
image:
  registry: docker.io
  repository: oneuptime
  pullPolicy: Always
  tag: release
  restartPolicy: Always
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  targetMemoryUtilizationPercentage: 80
nodeEnvironment: production
billing:
  enabled: false
  publicKey:
  privateKey:
  smsDefaultValueInCents:
  callDefaultValueInCentsPerMinute:
  smsHighRiskValueInCents:
  callHighRiskValueInCentsPerMinute:
subscriptionPlan:
  basic:
  growth:
  scale:
  enterprise:
analytics:
  host:
  key:
internalSmtp:
  sendingDomain:
  dkimPrivateKey:
  dkimPublicKey:
  email:
  name:
incidents:
  disableAutomaticCreation: false
statusPage:
  cnameRecord:
probes:
  one:
    name: "Probe"
    description: "Probe"
    monitoringWorkers: 3
    monitorFetchLimit: 10
    key:
    replicaCount: 1
  # two:
  #   name: "Probe 2"
  #   description: "Probe 2"
  #   monitoringWorkers: 3
  #   monitorFetchLimit: 10
  #   key:
  #   replicaCount: 1
port:
  app: 3002
  ingestor: 3400
  testServer: 3800
  accounts: 3003
  statusPage: 3105
  dashboard: 3009
  adminDashboard: 3158
  nginx: 80
  haraka: 2525
  probe: 3500
  otelCollectorGrpc: 4317
  otelCollectorHttp: 4318
  isolatedVM: 4572
testServer:
  enabled: false
openTelemetryExporter:
  endpoint:
    server:
    client:
  headers:
    app:
    dashboard:
    accounts:
    statusPage:
    probe:
    adminDashboard:
containerSecurityContext:
podSecurityContext:
# This can be one of the following: DEBUG, INFO, WARN, ERROR
logLevel: ERROR
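
Since Argo CD only consumes this file at sync time, rendering the chart locally is a cheap way to catch indentation or schema mistakes first. A minimal sketch assuming helm and kubectl are installed, pinned to the same chart version as argo-app.yaml:

helm repo add oneuptime https://helm-chart.oneuptime.com
helm template oneuptime oneuptime/oneuptime --version 7.0.1978 \
  -f system/oneuptime/helm/values.yaml | kubectl apply --dry-run=client -f -
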
4 changes: 4 additions & 0 deletions system/oneuptime/kustomization.yaml
@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- argo-app.yaml
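
This Kustomization wraps only the Application manifest, so a parent overlay can pull OneUptime in with one entry. A hypothetical parent reference (the system-level kustomization path is an assumption, not part of this commit):

# e.g. in a system-wide kustomization.yaml
resources:
- ./oneuptime/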
