chore: switch Nautobot to a broken-out chart
Instead of dumping the templated chart into a single file, switch to helm's
--output-dir option to get a broken-out set of resource configs, and
document the process.
cardoe committed Feb 4, 2024
1 parent 82dbc9c commit e21889e
Showing 11 changed files with 664 additions and 665 deletions.
1 change: 1 addition & 0 deletions components/09-nautobot/.gitignore
@@ -0,0 +1 @@
base/nautobot/templates/secret.yaml
26 changes: 26 additions & 0 deletions components/09-nautobot/README.md
@@ -0,0 +1,26 @@
# nautobot

The helm chart for nautobot doesn't actually support taking secrets in by reference.
When a secret is supplied by reference, the chart reads it from the active cluster,
which the rendering environment may not have access to.
<https://github.com/nautobot/helm-charts/pull/356> has been opened to address this,
but until it is merged, kustomize's helm support can't consume the chart directly,
so the chart is templated out and committed here.
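
For context, if the chart accepted secrets by reference, kustomize's built-in Helm
support could render it directly from a `helmCharts` stanza like the sketch below.
This is illustrative only; the repo URL and field values are assumptions, not part
of this commit:

```yaml
# Hypothetical kustomization.yaml fragment -- usable once
# https://github.com/nautobot/helm-charts/pull/356 (or equivalent) is merged.
helmCharts:
  - name: nautobot
    repo: https://nautobot.github.io/helm-charts/  # assumed chart repo URL
    version: 2.0.5
    releaseName: nautobot
    namespace: nautobot
    valuesFile: values.yaml
```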

The following commands were run using <https://github.com/cardoe/helm-charts/tree/password-ref>
from the top level of this repo:

```bash
helm template \
  --namespace nautobot \
  nautobot \
  /path/to/nautobot/helm-charts/charts/nautobot \
  --skip-tests \
  -f components/09-nautobot/values.yaml \
  --output-dir components/09-nautobot/base
# we do secrets separately
rm -f components/09-nautobot/base/nautobot/templates/secret.yaml
cd components/09-nautobot/base
kustomize create --autodetect --recursive
cd ../../..
```
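
Because `secret.yaml` is pruned from the rendered output (and gitignored), the
`nautobot-env` Secret that the deployments reference must be supplied separately.
One way to do that is a kustomize `secretGenerator` in an overlay; a minimal
sketch, where the overlay layout and the secret keys are illustrative assumptions:

```yaml
# Hypothetical overlay kustomization.yaml consuming the committed base.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: nautobot
resources:
  - ../base
generatorOptions:
  disableNameSuffixHash: true  # deployments expect the fixed name "nautobot-env"
secretGenerator:
  - name: nautobot-env
    literals:
      - NAUTOBOT_SECRET_KEY=change-me  # placeholder; never commit real values
```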
9 changes: 9 additions & 0 deletions components/09-nautobot/base/kustomization.yaml
@@ -0,0 +1,9 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- nautobot/templates/celery-deployment.yaml
- nautobot/templates/configmap.yaml
- nautobot/templates/nautobot-deployment.yaml
- nautobot/templates/nginx-configmap.yaml
- nautobot/templates/service-account.yaml
- nautobot/templates/service.yaml
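
As a quick sanity check that the generated base assembles cleanly, it can be
built with kustomize from the top level of the repo:

```bash
kustomize build components/09-nautobot/base
```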
217 changes: 217 additions & 0 deletions components/09-nautobot/base/nautobot/templates/celery-deployment.yaml
@@ -0,0 +1,217 @@
---
# Source: nautobot/templates/celery-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nautobot-celery-beat
  namespace: "nautobot"
  labels:
    app.kubernetes.io/name: nautobot
    helm.sh/chart: nautobot-2.0.5
    app.kubernetes.io/instance: nautobot
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.5"
    app.kubernetes.io/component: nautobot-celery-beat
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: nautobot
      app.kubernetes.io/instance: nautobot
      app.kubernetes.io/component: nautobot-celery-beat
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nautobot
        helm.sh/chart: nautobot-2.0.5
        app.kubernetes.io/instance: nautobot
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "2.0.5"
        app.kubernetes.io/component: nautobot-celery-beat
    spec:
      serviceAccountName: nautobot
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: nautobot
                    app.kubernetes.io/name: nautobot
                    app.kubernetes.io/component: nautobot-celery-beat
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      securityContext:
        fsGroup: 999
        seccompProfile:
          type: RuntimeDefault
      initContainers:
      containers:
        - name: nautobot-celery
          tty: true
          image: ghcr.io/nautobot/nautobot:2.1.1-py3.11
          imagePullPolicy: Always
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: false
            runAsUser: 999
          command:
            - nautobot-server
            - celery
            - beat
            - --loglevel
            - $(NAUTOBOT_LOG_LEVEL)
          env:
            - name: "NAUTOBOT_K8S_COMPONENT"
              value: "nautobot-celery-beat"
            - name: NAUTOBOT_DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nautobot-pguser-nautobot
                  key: password
            - name: NAUTOBOT_REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nautobot-redis
                  key: redis-password
          envFrom:
            - configMapRef:
                name: nautobot-env
            - secretRef:
                name: nautobot-env
          resources:
            limits:
              cpu: 3328m
              memory: 6656M
            requests:
              cpu: 5m
              memory: 256M
          volumeMounts:
            - name: "git-repos"
              mountPath: "/opt/nautobot/git"
      terminationGracePeriodSeconds: 30
      volumes:
        - name: "git-repos"
          emptyDir: {}
---
# Source: nautobot/templates/celery-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nautobot-celery-default
  namespace: "nautobot"
  labels:
    app.kubernetes.io/name: nautobot
    helm.sh/chart: nautobot-2.0.5
    app.kubernetes.io/instance: nautobot
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.5"
    app.kubernetes.io/component: nautobot-celery-default
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: nautobot
      app.kubernetes.io/instance: nautobot
      app.kubernetes.io/component: nautobot-celery-default
  template:
    metadata:
      labels:
        app.kubernetes.io/name: nautobot
        helm.sh/chart: nautobot-2.0.5
        app.kubernetes.io/instance: nautobot
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/version: "2.0.5"
        app.kubernetes.io/component: nautobot-celery-default
    spec:
      serviceAccountName: nautobot
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: nautobot
                    app.kubernetes.io/name: nautobot
                    app.kubernetes.io/component: nautobot-celery-default
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      securityContext:
        fsGroup: 999
        seccompProfile:
          type: RuntimeDefault
      initContainers:
      containers:
        - name: nautobot-celery
          tty: true
          image: ghcr.io/nautobot/nautobot:2.1.1-py3.11
          imagePullPolicy: Always
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: false
            runAsUser: 999
          command:
            - nautobot-server
            - celery
            - worker
            - --loglevel
            - $(NAUTOBOT_LOG_LEVEL)
            - --queues
            - $(CELERY_TASK_QUEUES)
            - --events
          env:
            - name: "NAUTOBOT_K8S_COMPONENT"
              value: "nautobot-celery-default"
            - name: "CELERY_TASK_QUEUES"
              value: "default"
            - name: NAUTOBOT_DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nautobot-pguser-nautobot
                  key: password
            - name: NAUTOBOT_REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: nautobot-redis
                  key: redis-password
          envFrom:
            - configMapRef:
                name: nautobot-env
            - secretRef:
                name: nautobot-env
          resources:
            limits:
              cpu: 3328m
              memory: 6656M
            requests:
              cpu: 400m
              memory: 1G
          volumeMounts:
            - name: "git-repos"
              mountPath: "/opt/nautobot/git"
      terminationGracePeriodSeconds: 30
      volumes:
        - name: "git-repos"
          emptyDir: {}
113 changes: 113 additions & 0 deletions components/09-nautobot/base/nautobot/templates/configmap.yaml
@@ -0,0 +1,113 @@
---
# Source: nautobot/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nautobot-env
  namespace: "nautobot"
  labels:
    app.kubernetes.io/name: nautobot
    helm.sh/chart: nautobot-2.0.5
    app.kubernetes.io/instance: nautobot
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.5"
data:
  NAUTOBOT_ALLOWED_HOSTS: "*"
  NAUTOBOT_CREATE_SUPERUSER: "true"
  NAUTOBOT_DB_ENGINE: "django.db.backends.postgresql"
  NAUTOBOT_DB_HOST: "nautobot-primary.nautobot.svc"
  NAUTOBOT_DB_NAME: "nautobot"
  NAUTOBOT_DB_PORT: "5432"
  NAUTOBOT_DB_TIMEOUT: "300"
  NAUTOBOT_DB_USER: "nautobot"
  NAUTOBOT_DEBUG: "False"
  NAUTOBOT_LOG_LEVEL: "INFO"
  NAUTOBOT_METRICS_ENABLED: "True"
  NAUTOBOT_REDIS_HOST: "nautobot-redis-master"
  NAUTOBOT_REDIS_PORT: "6379"
  NAUTOBOT_REDIS_USERNAME: ""
  NAUTOBOT_REDIS_SSL: "False"
  NAUTOBOT_SUPERUSER_EMAIL: "[email protected]"
  NAUTOBOT_SUPERUSER_NAME: "admin"
---
# Source: nautobot/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nautobot-config
  namespace: "nautobot"
  labels:
    app.kubernetes.io/name: nautobot
    helm.sh/chart: nautobot-2.0.5
    app.kubernetes.io/instance: nautobot
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/version: "2.0.5"
data:
  uwsgi.ini: |
    [uwsgi]
    ; The IP address (typically localhost) and port that the WSGI process should listen on
    http = 0.0.0.0:8080
    https = 0.0.0.0:8443,/opt/nautobot/nautobot.crt,/opt/nautobot/nautobot.key
    ; Fail to start if any parameter in the configuration file isn't explicitly understood by uWSGI
    strict = true
    ; Enable master process to gracefully re-spawn and pre-fork workers
    master = true
    ; Allow Python app-generated threads to run
    enable-threads = true
    ; Try to remove all of the generated files/sockets during shutdown
    vacuum = true
    ; Do not use multiple interpreters, allowing only Nautobot to run
    single-interpreter = true
    ; Shutdown when receiving SIGTERM (default is respawn)
    die-on-term = true
    ; Prevents uWSGI from starting if it is unable to load Nautobot (usually due to errors)
    need-app = true
    ; By default, uWSGI has rather verbose logging that can be noisy
    disable-logging = true
    ; Assert that critical 4xx and 5xx errors are still logged
    log-4xx = true
    log-5xx = true
    ; Enable HTTP 1.1 keepalive support
    http-keepalive = 1
    ;
    ; Advanced settings (disabled by default)
    ; Customize these for your environment if and only if you need them.
    ; Ref: https://uwsgi-docs.readthedocs.io/en/latest/Options.html
    ;
    ; Number of uWSGI workers to spawn. This should typically be 2n+1, where n is the number of CPU cores present. Default 3 as n will be >= 1
    processes = 3
    ; Number of uWSGI threads each worker will be pre-forked into before starting
    threads = 2
    ; Set the socket listen queue size; in production the suggested value is 1024, however RHEL-based kernels have a max of 128 by default
    ; You may need to increase the somaxconn parameter in your kernel
    listen = 128
    ; If using subdirectory hosting e.g. example.com/nautobot, you must uncomment this line. Otherwise you'll get double paths e.g. example.com/nautobot/nautobot/.
    ; See: https://uwsgi-docs.readthedocs.io/en/latest/Changelog-2.0.11.html#fixpathinfo-routing-action
    ; route-run = fixpathinfo:
    ; If hosted behind a load balancer uncomment these lines; the harakiri timeout should be greater than your load balancer timeout.
    ; Ref: https://uwsgi-docs.readthedocs.io/en/latest/HTTP.html?highlight=keepalive#http-keep-alive
    ; harakiri = 65
    ; add-header = Connection: Keep-Alive
    ; http-keepalive = 1
    ; For larger installations, certain API calls (example: Relationships, GraphQL) can have a length of query parameters that goes over the uWSGI default limit.
    ; Setting the buffer size larger than the default (4096) can have an impact on memory utilization, but it can be set as high as the header limit of 65535.
    buffer-size = 4096