diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index a2eaeed0b9..3553c3572f 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -89,3 +89,8 @@ dependencies: version: 1.0.0 condition: atspectrograph-sim.enabled repository: file://../../charts/csc +- name: csc + alias: pmd1 + version: 1.0.0 + condition: pmd1.enabled + repository: file://../../charts/csc diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index c63b022184..987d479538 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -29,6 +29,7 @@ Deployment for the Auxiliary Telescope CSCs | atspectrograph-sim.enabled | bool | `false` | Enable the ATSpectograph simulator CSC | | atspectrograph.enabled | bool | `false` | Enable the ATSpectrograph CSC | | hexapod-sim.enabled | bool | `false` | Enable the hexapod controller simulator | +| pmd1.enabled | bool | `false` | Enable the PMD1 CSC | | hexapod-sim.affinity | object | `{}` | This specifies the scheduling constraints of the pod | | hexapod-sim.image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | | hexapod-sim.image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | diff --git a/applications/auxtel/values-summit.yaml b/applications/auxtel/values-summit.yaml new file mode 100644 index 0000000000..374d8722e3 --- /dev/null +++ b/applications/auxtel/values-summit.yaml @@ -0,0 +1,251 @@ +ataos: + image: + repository: ts-dockerhub.lsst.org/ataos + pullPolicy: Always + resources: + limits: + cpu: 450m + memory: 600Mi + requests: + cpu: 45m + memory: 200Mi + +atbuilding: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atbuilding + pullPolicy: Always + resources: + limits: + cpu: 120m + memory: 300Mi + requests: + cpu: 12m + memory: 100Mi + +atdome: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atdome + pullPolicy: Always + resources: + limits: + cpu: 170m + memory: 300Mi + requests: + cpu: 17m + memory: 100Mi + +atdometrajectory: + image: + repository: ts-dockerhub.lsst.org/atdometrajectory + pullPolicy: Always + resources: + limits: + cpu: 350m + memory: 300Mi + requests: + cpu: 35m + memory: 100Mi + +atheaderservice: + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance cp + CAMERA: at + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 800m + memory: 700Mi + requests: + cpu: 80m + memory: 250Mi + +athexapod: + enabled: true + image: + repository: ts-dockerhub.lsst.org/athexapod + pullPolicy: Always + resources: + limits: + cpu: 90m + memory: 300Mi + requests: + cpu: 9m + memory: 95Mi + +atmcs: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atmcs_sim + pullPolicy: Always + resources: + limits: + cpu: 600m + memory: 1200Mi + requests: + cpu: 60m + memory: 400Mi + +atoods: + image: + repository: ts-dockerhub.lsst.org/atoods + pullPolicy: Always + env: + CTRL_OODS_CONFIG_FILE: /etc/atoods.yaml + butlerSecret: + containerPath: &bs-cp /home/saluser/.lsst + dbUser: oods + secretFilename: &bs-fn postgres-credentials.txt + secretFixup: + containerPath: *bs-cp + filenames: + - *bs-fn + nfsMountpoint: + - 
name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-oods-data + containerPath: /data + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 325Mi + configfile: + path: /etc + filename: atoods.yaml + content: | + defaultInterval: &interval + days: 0 + hours: 0 + minutes: 0 + seconds: 0 + + ingesterClass: + ingesterType: lsst.ctrl.oods.fileIngester + ingesterName: FileIngester + ingester: + imageStagingDirectory: /data/staging/auxtel/oods + butlers: + - butler: + instrument: lsst.obs.lsst.Latiss + class: + import: lsst.ctrl.oods.fileAttendant + name: FileAttendant + stagingDirectory: /data/lsstdata/base/auxtel/oods/gen3butler/raw + badFileDirectory: /data/lsstdata/base/auxtel/oods/gen3butler/badfiles + repoDirectory: /repo/LATISS + collections: + - LATISS/raw/all + cleanCollections: + - collection: LATISS/raw/all + filesOlderThan: + <<: *interval + days: 30 + - collection: LATISS/runs/quickLook + filesOlderThan: + <<: *interval + days: 7 + scanInterval: + <<: *interval + hours: 1 + + batchSize: 20 + scanInterval: + <<: *interval + seconds: 2 + + cacheCleaner: + # ONLY clean out empty directories here, never files + clearEmptyDirectories: + - /data/lsstdata/base/auxtel/oods/gen3butler/raw + - /data/repo/LATISS/LATISS/runs/quickLook + # clean out empty directories and old files from these directories + clearEmptyDirectoriesAndOldFiles: + - /data/lsstdata/base/auxtel/oods/gen3butler/badfiles + - /data/staging/auxtel/oods + - /data/staging/auxtel/forwarder + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 30 + directoriesEmptyForMoreThan: + <<: *interval + days: 2 + +atpneumatics: + enabled: true + image: + repository: ts-dockerhub.lsst.org/at_pneumatics_sim + pullPolicy: Always + resources: + limits: + cpu: 120m + memory: 300Mi + requests: + cpu: 12m + memory: 90Mi + +atptg: + image: + repository: ts-dockerhub.lsst.org/ptkernel + pullPolicy: Always + env: + TELESCOPE: AT + LSST_KAFKA_TLM_FLUSH_MS: 0 + LSST_KAFKA_CMDEVT_FLUSH_MS: 0 + resources: + limits: + cpu: 900m + requests: + cpu: 90m + memory: 265Mi + +atspectrograph: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atspec + pullPolicy: Always + resources: + limits: + cpu: 120m + memory: 300Mi + requests: + cpu: 12m + memory: 95Mi + +pmd1: + enabled: true + image: + repository: ts-dockerhub.lsst.org/pmd + pullPolicy: Always + env: + RUN_ARG: 1 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 10m + memory: 95Mi diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml index 84e4145386..47adc9a27a 100644 --- a/applications/auxtel/values.yaml +++ b/applications/auxtel/values.yaml @@ -46,6 +46,10 @@ atspectrograph-sim: # -- Enable the ATSpectograph simulator CSC enabled: false +pmd1: + # -- Enable the PMD1 CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files.
global: diff --git a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml index c5a01fd800..6ea5c655f2 100644 --- a/applications/calsys/Chart.yaml +++ b/applications/calsys/Chart.yaml @@ -6,6 +6,16 @@ dependencies: - name: csc_shared version: 1.0.0 repository: file://../../charts/csc_shared +- name: csc + alias: atmonochromator + version: 1.0.0 + condition: atmonochromator.enabled + repository: file://../../charts/csc +- name: csc + alias: atwhitelight + version: 1.0.0 + condition: atwhitelight.enabled + repository: file://../../charts/csc - name: csc alias: cbp version: 1.0.0 diff --git a/applications/calsys/README.md b/applications/calsys/README.md index 07c7dfc676..ab3f3d0ba5 100644 --- a/applications/calsys/README.md +++ b/applications/calsys/README.md @@ -6,9 +6,14 @@ Deployment for the Calibration System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| atmonochromator.enabled | bool | `false` | Enable the ATMonochromator CSC | +| atwhitelight.enabled | bool | `false` | Enable the ATWhitelight CSC | | cbp.enabled | bool | `false` | Enable the CBP:0 CSC | +| electrometer101-sim.enabled | bool | `false` | Enable the Electrometer:101 simulator CSC | | electrometer101.enabled | bool | `false` | Enable the Electrometer:101 CSC | +| electrometer102-sim.enabled | bool | `false` | Enable the Electrometer:102 simulator CSC | | electrometer102.enabled | bool | `false` | Enable the Electrometer:102 CSC | +| electrometer201-sim.enabled | bool | `false` | Enable the Electrometer:201 simulator CSC | | electrometer201.enabled | bool | `false` | Enable the Electrometer:201 CSC | | gcheaderservice1.enabled | bool | `false` | Enable the GCHeaderService:1 CSC | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | @@ -22,10 +27,16 @@ Deployment for the Calibration System CSCs | global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| ledprojector-sim.enabled | bool | `false` | Enabled the LedProjector:0 simulator CSC | | ledprojector.enabled | bool | `false` | Enabled the LedProjector:0 CSC | +| linearstage101-sim.enabled | bool | `false` | Enable the LinearStage:101 simulator CSC | | linearstage101.enabled | bool | `false` | Enable the LinearStage:101 CSC | +| linearstage102-sim.enabled | bool | `false` | Enable the LinearStage:102 simulator CSC | | linearstage102.enabled | bool | `false` | Enable the LinearStage:102 CSC | +| linearstage103-sim.enabled | bool | `false` | Enable the LinearStage:103 simulator CSC | | linearstage103.enabled | bool | `false` | Enable the LinearStage:103 CSC | +| linearstage104-sim.enabled | bool | `false` | Enable the LinearStage:104 simulator CSC | | linearstage104.enabled | bool | `false` | Enable the LinearStage:104 CSC | | simulation-gencam.enabled | bool | `false` | Enabled the GenericCamera:1 CSC | +| tunablelaser-sim.enabled | bool | `false` | Enabled the TunableLaser:0 simulator CSC | | tunablelaser.enabled | bool | `false` | Enabled the TunableLaser:0 CSC | diff --git a/applications/calsys/values-summit.yaml b/applications/calsys/values-summit.yaml new file mode 100644 index 0000000000..59a8ae06ca --- /dev/null +++ b/applications/calsys/values-summit.yaml @@ -0,0 +1,225 @@ +atmonochromator: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atmonochromator + pullPolicy: Always +
resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 10m + memory: 97Mi + +atwhitelight: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atwhitelight + pullPolicy: Always + resources: + limits: + cpu: 80m + memory: 384Mi + requests: + cpu: 8m + memory: 128Mi + +cbp: + enabled: true + image: + repository: ts-dockerhub.lsst.org/cbp + pullPolicy: Always + resources: + limits: + cpu: 110m + memory: 300Mi + requests: + cpu: 11m + memory: 100Mi + +electrometer101: + enabled: true + image: + repository: ts-dockerhub.lsst.org/electrometer + pullPolicy: Always + env: + RUN_ARG: 101 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 100m + memory: 330Mi + requests: + cpu: 10m + memory: 110Mi + +electrometer102: + enabled: true + image: + repository: ts-dockerhub.lsst.org/electrometer + pullPolicy: Always + env: + RUN_ARG: 102 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 100m + memory: 350Mi + requests: + cpu: 10m + memory: 115Mi + +electrometer201: + enabled: true + image: + repository: ts-dockerhub.lsst.org/electrometer + pullPolicy: Always + env: + RUN_ARG: 201 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 100m + memory: 330Mi + requests: + cpu: 10m + memory: 110Mi + +ledprojector: + enabled: true + image: + repository: ts-dockerhub.lsst.org/ledprojector + pullPolicy: Always + resources: + limits: + cpu: 100m + memory: 330Mi + requests: + cpu: 10m + memory: 110Mi + +linearstage101: + enabled: true + image: + repository: ts-dockerhub.lsst.org/linearstage + pullPolicy: Always + env: + RUN_ARG: 101 + resources: + limits: + cpu: 100m + memory: 330Mi + requests: + cpu: 10m + memory: 130Mi + +linearstage102: + enabled: true + image: + repository: ts-dockerhub.lsst.org/linearstage + pullPolicy: Always + env: + RUN_ARG: 102 + resources: + limits: + cpu: 150m + memory: 330Mi + requests: + cpu: 15m + memory: 130Mi + +linearstage103: + enabled: true + image: + repository: ts-dockerhub.lsst.org/linearstage + pullPolicy: Always + env: + RUN_ARG: 103 + resources: + limits: + cpu: 100m + memory: 330Mi + requests: + cpu: 10m + memory: 130Mi + +linearstage104: + enabled: true + image: + repository: ts-dockerhub.lsst.org/linearstage + pullPolicy: Always + env: + RUN_ARG: 104 + resources: + limits: + cpu: 100m + memory: 330Mi + requests: + cpu: 10m + memory: 130Mi + +simulation-gencam: + enabled: true + classifier: genericcamera1 + image: + repository: ts-dockerhub.lsst.org/genericcamera + pullPolicy: Always + env: + RUN_ARG: 1 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + service: + enabled: true + port: 5013 + type: LoadBalancer + resources: + limits: + cpu: 120m + memory: 360Mi + requests: + cpu: 12m + memory: 120Mi + +tunablelaser: + enabled: true + image: + 
repository: ts-dockerhub.lsst.org/tunablelaser + pullPolicy: Always + resources: + limits: + cpu: 120m + memory: 300Mi + requests: + cpu: 12m + memory: 100Mi diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml index 1ac27eea40..0f8a18fead 100644 --- a/applications/calsys/values.yaml +++ b/applications/calsys/values.yaml @@ -6,14 +6,26 @@ electrometer101: # -- Enable the Electrometer:101 CSC enabled: false +electrometer101-sim: + # -- Enable the Electrometer:101 simulator CSC + enabled: false + electrometer102: # -- Enable the Electrometer:102 CSC enabled: false +electrometer102-sim: + # -- Enable the Electrometer:102 simulator CSC + enabled: false + electrometer201: # -- Enable the Electrometer:201 CSC enabled: false +electrometer201-sim: + # -- Enable the Electrometer:201 simulator CSC + enabled: false + gcheaderservice1: # -- Enable the GCHeaderService:1 CSC enabled: false @@ -22,22 +34,42 @@ ledprojector: # -- Enabled the LedProjector:0 CSC enabled: false +ledprojector-sim: + # -- Enabled the LedProjector:0 simulator CSC + enabled: false + linearstage101: # -- Enable the LinearStage:101 CSC enabled: false +linearstage101-sim: + # -- Enable the LinearStage:101 simulator CSC + enabled: false + linearstage102: # -- Enable the LinearStage:102 CSC enabled: false +linearstage102-sim: + # -- Enable the LinearStage:102 simulator CSC + enabled: false + linearstage103: # -- Enable the LinearStage:103 CSC enabled: false +linearstage103-sim: + # -- Enable the LinearStage:103 simulator CSC + enabled: false + linearstage104: # -- Enable the LinearStage:104 CSC enabled: false +linearstage104-sim: + # -- Enable the LinearStage:104 simulator CSC + enabled: false + simulation-gencam: # -- Enabled the GenericCamera:1 CSC enabled: false @@ -46,6 +78,18 @@ tunablelaser: # -- Enabled the TunableLaser:0 CSC enabled: false +tunablelaser-sim: + # -- Enabled the TunableLaser:0 simulator CSC + enabled: false + +atmonochromator: + # -- Enable the ATMonochromator CSC + enabled: false + +atwhitelight: + # -- Enable the ATWhitelight CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files.
global: diff --git a/applications/control-system-test/values-summit.yaml b/applications/control-system-test/values-summit.yaml new file mode 100644 index 0000000000..4d3435e934 --- /dev/null +++ b/applications/control-system-test/values-summit.yaml @@ -0,0 +1,13 @@ +test42: + image: + repository: ts-dockerhub.lsst.org/test + pullPolicy: Always + env: + RUN_ARG: 42 + resources: + limits: + cpu: 150m + memory: 300Mi + requests: + cpu: 15m + memory: 95Mi diff --git a/applications/envsys/Chart.yaml b/applications/envsys/Chart.yaml index 9a619c247f..a594cd78a8 100644 --- a/applications/envsys/Chart.yaml +++ b/applications/envsys/Chart.yaml @@ -126,6 +126,11 @@ dependencies: version: 1.0.0 condition: dsm2-sim.enabled repository: file://../../charts/csc +- name: csc + alias: earthquake-ess302 + version: 1.0.0 + condition: earthquake-ess302.enabled + repository: file://../../charts/csc - name: csc alias: eas version: 1.0.0 @@ -146,6 +151,16 @@ dependencies: version: 1.0.0 condition: epm1-sim.enabled repository: file://../../charts/csc +- name: csc + alias: epm301 + version: 1.0.0 + condition: epm301.enabled + repository: file://../../charts/csc +- name: csc + alias: hvac + version: 1.0.0 + condition: hvac.enabled + repository: file://../../charts/csc - name: csc alias: m1m3-ess113 version: 1.0.0 diff --git a/applications/envsys/README.md b/applications/envsys/README.md index 7cfbfccd6b..e0c2bec176 100644 --- a/applications/envsys/README.md +++ b/applications/envsys/README.md @@ -30,10 +30,12 @@ Deployment for the Environmental Awareness Systems CSCs | dsm1.enabled | bool | `false` | Enable the DSM:1 CSC | | dsm2-sim.enabled | bool | `false` | Enable the DSM:2 simulator CSC | | dsm2.enabled | bool | `false` | Enable the DSM:2 CSC | +| earthquake-ess302.enabled | bool | `false` | Enable ESS:302 CSC | | eas-sim.enabled | bool | `false` | Enable the EAS simulator CSC | | eas.enabled | bool | `false` | Enable the EAS CSC | | epm1-sim.enabled | bool | `false` | Enable the EPM:1 simulator CSC | | epm1.enabled | bool | `false` | Enable the EPM:1 CSC | +| epm301.enabled | bool | `false` | Enable the EPM:301 CSC | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | | global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -45,6 +47,7 @@ Deployment for the Environmental Awareness Systems CSCs | global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| hvac.enabled | bool | `false` | Enable the HVAC CSC | | m1m3-ess113-sim.enabled | bool | `false` | Enable the ESS:113 simulator CSC | | m1m3-ess113.enabled | bool | `false` | Enable the ESS:113 CSC | | m2-ess106-sim.enabled | bool | `false` | Enable the ESS:106 simulator CSC | @@ -63,3 +66,4 @@ Deployment for the Environmental Awareness Systems CSCs | tma-ess105.enabled | bool | `false` | Enable the ESS:105 CSC | | tma-ess110-sim.enabled | bool | `false` | Enable the ESS:110 simulator CSC | | tma-ess110.enabled | bool | `false` | Enable the ESS:110 CSC | +| weatherforecast.enabled | bool | `false` | Enable the WeatherForecast CSC | diff --git a/applications/envsys/values-summit.yaml b/applications/envsys/values-summit.yaml new file mode 100644 index 
0000000000..ca31f1f328 --- /dev/null +++ b/applications/envsys/values-summit.yaml @@ -0,0 +1,375 @@ +auxtel-ess201: + enabled: true + classifier: ess201 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 201 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +auxtel-ess202: + enabled: true + classifier: ess202 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 202 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +auxtel-ess203: + enabled: true + classifier: ess203 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 203 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +auxtel-ess204: + enabled: true + classifier: ess204 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 204 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +calibhill-ess301: + enabled: true + classifier: ess301 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 301 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +camera-ess111: + enabled: true + classifier: ess111 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 111 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +cleanroom-ess109: + enabled: true + classifier: ess109 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 109 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +dimm1: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dimm + pullPolicy: Always + env: + RUN_ARG: 1 + resources: + requests: + cpu: 23m + memory: 107Mi + limits: + cpu: 230m + memory: 1070Mi + +dimm2: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dimm + pullPolicy: Always + env: + RUN_ARG: 2 + resources: + requests: + cpu: 23m + memory: 107Mi + limits: + cpu: 230m + memory: 1070Mi + +dream: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dream + pullPolicy: Always + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 10m + memory: 96Mi + +earthquake-ess302: + enabled: true + classifier: ess302 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 302 + resources: + requests: + cpu: 4m + memory: 103Mi + limits: + cpu: 40m + memory: 310Mi + +eas: + enabled: true + image: + repository: ts-dockerhub.lsst.org/eas + pullPolicy: Always + resources: + limits: + cpu: 150m + memory: 375Mi + requests: + cpu: 15m + memory: 125Mi + +epm301: + enabled: true + classifier: epm301 + image: + repository: ts-dockerhub.lsst.org/epm + pullPolicy: Always + env: + RUN_ARG: 301 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +hvac: + enabled: true + image: + repository: ts-dockerhub.lsst.org/hvac + pullPolicy: Always + env: + RUN_ARG: --state enabled + resources: + requests: + cpu: 15m + memory: 120Mi + limits: + cpu: 150m + memory: 30000Mi + +m1m3-ess113: + enabled: true + classifier: ess113 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 113 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +m2-ess106: + enabled: true + classifier: ess106 + image: + repository: 
ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 106 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +m2-ess112: + enabled: true + classifier: ess112 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 112 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +mtdome-ess107: + enabled: true + classifier: ess107 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 107 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +mtdome-ess108: + enabled: true + classifier: ess108 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 108 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +tma-ess001: + enabled: true + classifier: ess1 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 1 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +tma-ess104: + enabled: true + classifier: ess104 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 104 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +tma-ess105: + enabled: true + classifier: ess105 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 105 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +tma-ess110: + enabled: true + classifier: ess110 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 110 + resources: + requests: + cpu: 19m + memory: 90Mi + limits: + cpu: 190m + memory: 900Mi + +weatherforecast: + image: + repository: ts-dockerhub.lsst.org/weatherforecast + pullPolicy: Always + env: + RUN_ARG: --state enabled + envSecrets: + - name: METEOBLUE_API_KEY + key: meteoblue-api-key + resources: + requests: + cpu: 9m + memory: 95Mi + limits: + cpu: 90m + memory: 950Mi diff --git a/applications/envsys/values.yaml b/applications/envsys/values.yaml index 1e8ce54018..a6629a3c7c 100644 --- a/applications/envsys/values.yaml +++ b/applications/envsys/values.yaml @@ -94,6 +94,10 @@ dsm2-sim: # -- Enable the DSM:2 simulator CSC enabled: false +earthquake-ess302: + # -- Enable ESS:302 CSC + enabled: false + eas: # -- Enable the EAS CSC enabled: false @@ -110,6 +114,14 @@ epm1-sim: # -- Enable the EPM:1 simulator CSC enabled: false +epm301: + # -- Enable the EPM:301 CSC + enabled: false + +hvac: + # -- Enable the HVAC CSC + enabled: false + m1m3-ess113: # -- Enable the ESS:113 CSC enabled: false @@ -182,6 +194,10 @@ tma-ess110-sim: # -- Enable the ESS:110 simulator CSC enabled: false +weatherforecast: + # -- Enable the WeatherForecast CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
global: diff --git a/applications/love/Chart.yaml b/applications/love/Chart.yaml index d0c1b0a640..22bd9a78eb 100644 --- a/applications/love/Chart.yaml +++ b/applications/love/Chart.yaml @@ -10,6 +10,8 @@ dependencies: alias: love-commander version: 1.0.0 repository: file://../../charts/csc +- name: audio-broadcaster + version: 1.0.0 - name: love-manager version: 1.0.0 - name: love-nginx diff --git a/applications/love/README.md b/applications/love/README.md index b070b65f19..560e3650a0 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -17,7 +17,29 @@ Deployment for the LSST Operators Visualization Environment | global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| love-manager.manager | object | `{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"ji
ra-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | +| audio-broadcaster.affinity | object | `{}` | Affinity rules for the ts_audio_broadcaster pods | +| audio-broadcaster.env | object | `{"WEBSERVER_PORT":8888}` | This section holds a set of key, value pairs for environmental variables | +| audio-broadcaster.fullnameOverride | string | `""` | Specify the deployed application name specifically. Overrides all other names. | +| audio-broadcaster.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the ts_audio_broadcaster image | +| audio-broadcaster.image.repository | string | `"lsstts/audio_broadcaster"` | The ts_audio_broadcaster image to use | +| audio-broadcaster.image.revision | int | `nil` | The cycle revision to add to the image tag | +| audio-broadcaster.ingress.annotations | object | `{}` | Annotations for the ts_audio_broadcaster ingress | +| audio-broadcaster.ingress.className | string | `"nginx"` | Assign the Ingress class name | +| audio-broadcaster.ingress.hostname | string | `"audio-broadcaster.local"` | Hostname for the ts_audio_broadcaster ingress | +| audio-broadcaster.ingress.httpPath | string | `"/"` | Path name associated with the ts_audio_broadcaster ingress | +| audio-broadcaster.ingress.pathType | string | `"Exact"` | Set the Kubernetes path type for the ts_audio_broadcaster ingress | +| audio-broadcaster.microphones | list | `[]` | This sections sets the list of producers to use. The microphones should be specified like: _name_: _host_: _Microphone host_ _port_: _Microphone port_ Example: auxtel1: host: localhost port: 4444 | +| audio-broadcaster.nameOverride | string | `""` | Adds an extra string to the release name. | +| audio-broadcaster.namespace | string | `"love"` | The overall namespace for the ts_audio_broadcaster | +| audio-broadcaster.nodeSelector | object | `{}` | Node selection rules for the ts_audio_broadcaster pods | +| audio-broadcaster.podAnnotations | object | `{}` | This allows the specification of pod annotations. 
| +| audio-broadcaster.ports.container | int | `80` | Container port for the ts_audio_broadcaster service | +| audio-broadcaster.ports.node | int | `30000` | Node port for the ts_audio_broadcaster service | +| audio-broadcaster.replicaCount | int | `1` | Set the replica count for the ts_audio_broadcasters | +| audio-broadcaster.resources | object | `{}` | Resource specifications for the ts_audio_broadcaster pods | +| audio-broadcaster.serviceType | string | `"ClusterIP"` | Service type specification | +| audio-broadcaster.tolerations | list | `[]` | Toleration specifications for the ts_audio_broadcaster pods | +| love-manager.manager | object | `{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","NIGHTREPORT_MAIL_ADDRESS":"rubin-night-log@lists.lsst.org","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","SMTP_USER":"loveapplication","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","SMTP_PASSWORD":"smtp-email-password","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"}
,"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | | love-manager.manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | | love-manager.manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | love-manager.manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | @@ -40,12 +62,14 @@ Deployment for the LSST Operators Visualization Environment | love-manager.manager.frontend.env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | | love-manager.manager.frontend.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | | love-manager.manager.frontend.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.frontend.env.NIGHTREPORT_MAIL_ADDRESS | string | `"rubin-night-log@lists.lsst.org"` | The mail address to forward the nightly report to | | love-manager.manager.frontend.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | | love-manager.manager.frontend.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | | love-manager.manager.frontend.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | | love-manager.manager.frontend.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | | love-manager.manager.frontend.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | | love-manager.manager.frontend.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.frontend.env.SMTP_USER | string | `"loveapplication"` | The SMTP user for the LOVE manager frontend | | love-manager.manager.frontend.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | | love-manager.manager.frontend.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager frontend admin user password secret key name | | love-manager.manager.frontend.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager frontend authlist_user password secret 
key name | @@ -56,6 +80,7 @@ Deployment for the LSST Operators Visualization Environment | love-manager.manager.frontend.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager frontend process connection password secret key name | | love-manager.manager.frontend.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | | love-manager.manager.frontend.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager frontend secret secret key name | +| love-manager.manager.frontend.envSecrets.SMTP_PASSWORD | string | `"smtp-email-password"` | The LOVE manager smtp email password secret key name | | love-manager.manager.frontend.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager frontend user user password secret key name | | love-manager.manager.frontend.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | | love-manager.manager.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager frontend image | diff --git a/applications/love/charts/audio-broadcaster/Chart.yaml b/applications/love/charts/audio-broadcaster/Chart.yaml new file mode 100644 index 0000000000..391a29720c --- /dev/null +++ b/applications/love/charts/audio-broadcaster/Chart.yaml @@ -0,0 +1,4 @@ +name: audio-broadcaster +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the audio-broadcaster. diff --git a/applications/love/charts/audio-broadcaster/README.md b/applications/love/charts/audio-broadcaster/README.md new file mode 100644 index 0000000000..6cc289c6e4 --- /dev/null +++ b/applications/love/charts/audio-broadcaster/README.md @@ -0,0 +1,30 @@ +# audio-broadcaster + +Helm chart for the audio-broadcaster. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the ts_audio_broadcaster pods | +| env | object | `{"WEBSERVER_PORT":8888}` | This section holds a set of key, value pairs for environmental variables | +| fullnameOverride | string | `""` | Specify the deployed application name specifically. Overrides all other names. | +| image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the ts_audio_broadcaster image | +| image.repository | string | `"lsstts/audio_broadcaster"` | The ts_audio_broadcaster image to use | +| image.revision | int | `nil` | The cycle revision to add to the image tag | +| ingress.annotations | object | `{}` | Annotations for the ts_audio_broadcaster ingress | +| ingress.className | string | `"nginx"` | Assign the Ingress class name | +| ingress.hostname | string | `"audio-broadcaster.local"` | Hostname for the ts_audio_broadcaster ingress | +| ingress.httpPath | string | `"/"` | Path name associated with the ts_audio_broadcaster ingress | +| ingress.pathType | string | `"Exact"` | Set the Kubernetes path type for the ts_audio_broadcaster ingress | +| microphones | list | `[]` | This sections sets the list of producers to use. The microphones should be specified like: _name_: _host_: _Microphone host_ _port_: _Microphone port_ Example: auxtel1: host: localhost port: 4444 | +| nameOverride | string | `""` | Adds an extra string to the release name. 
| +| namespace | string | `"love"` | The overall namespace for the ts_audio_broadcaster | +| nodeSelector | object | `{}` | Node selection rules for the ts_audio_broadcaster pods | +| podAnnotations | object | `{}` | This allows the specification of pod annotations. | +| ports.container | int | `80` | Container port for the ts_audio_broadcaster service | +| ports.node | int | `30000` | Node port for the ts_audio_broadcaster service | +| replicaCount | int | `1` | Set the replica count for the ts_audio_broadcasters | +| resources | object | `{}` | Resource specifications for the ts_audio_broadcaster pods | +| serviceType | string | `"ClusterIP"` | Service type specification | +| tolerations | list | `[]` | Toleration specifications for the ts_audio_broadcaster pods | diff --git a/applications/love/charts/audio-broadcaster/templates/_helpers.tpl b/applications/love/charts/audio-broadcaster/templates/_helpers.tpl new file mode 100644 index 0000000000..694390599f --- /dev/null +++ b/applications/love/charts/audio-broadcaster/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "audio-broadcaster.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "audio-broadcaster.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create image name from information +*/}} +{{- define "helpers.makeImage" -}} +{{- if kindIs "float64" .rev }} +{{- $rev := int .rev -}} +{{- printf "%s:%s.%03d" .repo .tag $rev }} +{{- else }} +{{- printf "%s:%s" .repo .tag }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "audio-broadcaster.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "audio-broadcaster.labels" -}} +helm.sh/chart: {{ include "audio-broadcaster.chart" . }} +{{ include "audio-broadcaster.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "audio-broadcaster.selectorLabels" -}} +app.kubernetes.io/name: {{ include "audio-broadcaster.name" . 
}} +{{- end }} diff --git a/applications/love/charts/audio-broadcaster/templates/deployment.yaml b/applications/love/charts/audio-broadcaster/templates/deployment.yaml new file mode 100644 index 0000000000..ee16707ee9 --- /dev/null +++ b/applications/love/charts/audio-broadcaster/templates/deployment.yaml @@ -0,0 +1,60 @@ +{{- range $index, $mic := .Values.microphones }} +{{ $appName := printf "audio-broadcaster-%d" (add $index 1) | trunc 63 | trimSuffix "-" }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $appName }} + labels: + {{- include "audio-broadcaster.labels" $ | nindent 4 }} +spec: + replicas: {{ $.Values.replicaCount }} + selector: + matchLabels: + {{- include "audio-broadcaster.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/instance: {{ include "audio-broadcaster.fullname" $ }}-{{ add $index 1 }} + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "audio-broadcaster.selectorLabels" $ | nindent 8 }} + app.kubernetes.io/instance: {{ include "audio-broadcaster.fullname" $ }}-{{ add $index 1 }} + spec: + containers: + - name: {{ $appName }} + {{- $image := dict "repo" $.Values.image.repository "tag" $.Values.global.controlSystem.imageTag "rev" $.Values.image.revision }} + image: {{ include "helpers.makeImage" $image }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + ports: + - containerPort: {{ $.Values.env.WEBSERVER_PORT }} + env: + - name: MICROPHONE_SERVER_HOST + value: {{ $mic.host | quote }} + - name: MICROPHONE_SERVER_PORT + value: {{ $mic.port | quote }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- with $.Values.resources }} + resources: + {{- toYaml $.Values.resources | nindent 12 }} + {{- end }} + imagePullSecrets: + - name: pull-secret + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/audio-broadcaster/templates/ingress.yaml b/applications/love/charts/audio-broadcaster/templates/ingress.yaml new file mode 100644 index 0000000000..5ebc9488ed --- /dev/null +++ b/applications/love/charts/audio-broadcaster/templates/ingress.yaml @@ -0,0 +1,29 @@ +--- +{{- if eq .Values.serviceType "ClusterIP" }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Release.Name }}-ingress + namespace: {{ .Values.namespace }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml .
| nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + - host: {{ required "ingress.hostname must be set" .Values.ingress.hostname | quote }} + http: + paths: + {{- range $index, $mic := .Values.microphones }} + - path: {{ $.Values.ingress.httpPath }}/{{ add $index 1 }} + pathType: {{ default "Prefix" $.Values.ingress.pathType }} + backend: + service: + name: {{ include "audio-broadcaster.fullname" $ }}-service-{{ add $index 1 }} + port: + number: {{ $.Values.ports.container }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/audio-broadcaster/templates/service.yaml b/applications/love/charts/audio-broadcaster/templates/service.yaml new file mode 100644 index 0000000000..baa2b24c4a --- /dev/null +++ b/applications/love/charts/audio-broadcaster/templates/service.yaml @@ -0,0 +1,20 @@ +{{- range $index, $mic := .Values.microphones }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "audio-broadcaster.fullname" $ }}-service-{{ add $index 1 }} + namespace: {{ $.Values.namespace }} + labels: + {{- include "audio-broadcaster.labels" $ | nindent 4 }} +spec: + selector: + app.kubernetes.io/instance: {{ include "audio-broadcaster.fullname" $ }}-{{ add $index 1 }} + type: {{ $.Values.serviceType }} + ports: + - port: {{ $.Values.ports.container }} + targetPort: {{ $.Values.env.WEBSERVER_PORT }} + {{- if ne $.Values.serviceType "ClusterIP" }} + nodePort: {{ $.Values.ports.node }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/audio-broadcaster/values.yaml b/applications/love/charts/audio-broadcaster/values.yaml new file mode 100644 index 0000000000..44322e2968 --- /dev/null +++ b/applications/love/charts/audio-broadcaster/values.yaml @@ -0,0 +1,55 @@ +# -- Set the replica count for the ts_audio_broadcasters +replicaCount: 1 +# -- The overall namespace for the ts_audio_broadcaster +namespace: love +image: + # -- The ts_audio_broadcaster image to use + repository: lsstts/audio_broadcaster + # -- (int) The cycle revision to add to the image tag + revision: + # -- The pull policy on the ts_audio_broadcaster image + pullPolicy: IfNotPresent +# -- Service type specification +serviceType: ClusterIP +ports: + # -- Container port for the ts_audio_broadcaster service + container: 80 + # -- Node port for the ts_audio_broadcaster service + node: 30000 +ingress: + # -- Hostname for the ts_audio_broadcaster ingress + hostname: audio-broadcaster.local + # -- Path name associated with the ts_audio_broadcaster ingress + httpPath: / + # -- Set the Kubernetes path type for the ts_audio_broadcaster ingress + pathType: Exact + # -- Assign the Ingress class name + className: nginx + # -- Annotations for the ts_audio_broadcaster ingress + annotations: {} +# -- This section holds a set of key, value pairs for environmental variables +env: + WEBSERVER_PORT: 8888 +# -- This section sets the list of producers to use. +# The microphones should be specified like: +# _name_: +# _host_: _Microphone host_ +# _port_: _Microphone port_ +# Example: auxtel1: +# host: localhost +# port: 4444 +microphones: [] +# -- This allows the specification of pod annotations.
+podAnnotations: {} +# -- Resource specifications for the ts_audio_broadcaster pods +resources: {} +# -- Node selection rules for the ts_audio_broadcaster pods +nodeSelector: {} +# -- Toleration specifications for the ts_audio_broadcaster pods +tolerations: [] +# -- Affinity rules for the ts_audio_broadcaster pods +affinity: {} +# -- Adds an extra string to the release name. +nameOverride: "" +# -- Specify the deployed application name specifically. Overrides all other names. +fullnameOverride: "" diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index febd95a7a4..18af98fd5f 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -6,7 +6,7 @@ Helm chart for the LOVE manager service. | Key | Type | Default | Description | |-----|------|---------|-------------| -| manager | object | `{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CON
NECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | +| manager | object | `{"frontend":{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMANDER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_PRODUCER_WEBSOCKET_HOST":"love-service/manager/ws/subscription","LOVE_SITE":"local","NIGHTREPORT_MAIL_ADDRESS":"rubin-night-log@lists.lsst.org","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","SMTP_USER":"loveapplication","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","SMTP_PASSWORD":"smtp-email-password","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]},"producers":[{"affinity":{},"autoscaling":{"enabled":true,"maxReplicas":100,"minReplicas":1,"scaleDownPolicy":{},"scaleUpPolicy":{},"targetCPUUtilizationPercentage":80,"targetMemoryUtilizationPercentage":""},"env":{"AUTH_LDAP_1_SERVER_URI":"ldap://ipa1.lsst.local","AUTH_LDAP_2_SERVER_URI":"ldap://ipa2.lsst.local","AUTH_LDAP_3_SERVER_URI":"ldap://ipa3.lsst.local","COMMANDER_HOSTNAME":"love-commander-service","COMMAND
ER_PORT":5000,"DB_ENGINE":"postgresql","DB_HOST":"love-manager-database-service","DB_NAME":"love","DB_PORT":5432,"DB_USER":"love","HEARTBEAT_QUERY_COMMANDER":false,"JIRA_API_HOSTNAME":"rubinobs.atlassian.net","JIRA_PROJECT_ID":10063,"LOVE_SITE":"local","OLE_API_HOSTNAME":"site.lsst.local","REDIS_CONFIG_CAPACITY":5000,"REDIS_CONFIG_EXPIRY":5,"REDIS_HOST":"love-manager-redis-service","REMOTE_STORAGE":true,"SERVER_URL":"love.lsst.local","URL_SUBPATH":"/love"},"envSecrets":{"ADMIN_USER_PASS":"admin-user-pass","AUTHLIST_USER_PASS":"authlist-user-pass","AUTH_LDAP_BIND_PASSWORD":"auth-ldap-bind-password","CMD_USER_PASS":"cmd-user-pass","DB_PASS":"db-pass","JIRA_API_TOKEN":"jira-api-token","PROCESS_CONNECTION_PASS":"process-connection-pass","REDIS_PASS":"redis-pass","SECRET_KEY":"manager-secret-key","USER_USER_PASS":"user-user-pass"},"image":{"nexus3":"","pullPolicy":"IfNotPresent","repository":"lsstts/love-manager","revision":null},"name":"example-producer","nodeSelector":{},"ports":{"container":8000,"node":30000},"readinessProbe":{},"replicas":1,"resources":{},"tolerations":[]}],"producers_ports":{"container":8000,"node":30000}}` | Configuration for the different manager instances. This is divided into two sessions; frontend and producers. _frontend_ Configuration for the manager frontend. The frontend session defines the configuration for the so-called frontend managers. These serves the frontend artifacts as well as handles the data piping from the system to the frontend. Every time a user opens a view in LOVE the page will connect to the frontend manager and will receive the telemetry data from the system. Once a connection is established between a frontend and the manager it is kept alive. As more connections come in, the autoscaler will scale up the number of frontend managers and new connections should be redirected to them. The redirect is handled by the manager-frontend-service ClusterIP. _producers_ Configurations for the manger producers. This is basically a list of managers (with the same structure as the frontend, but in a list). These defines services that the LOVE-producers connect to, to feed data from the control system. | | manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | | manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | @@ -29,12 +29,14 @@ Helm chart for the LOVE manager service. 
| manager.frontend.env.JIRA_PROJECT_ID | int | `10063` | Set the Jira project ID | | manager.frontend.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | | manager.frontend.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| manager.frontend.env.NIGHTREPORT_MAIL_ADDRESS | string | `"rubin-night-log@lists.lsst.org"` | The mail address to forward the nightly report to | | manager.frontend.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | | manager.frontend.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | | manager.frontend.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | | manager.frontend.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | | manager.frontend.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | | manager.frontend.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| manager.frontend.env.SMTP_USER | string | `"loveapplication"` | The SMTP user for the LOVE manager frontend | | manager.frontend.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | | manager.frontend.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager frontend admin user password secret key name | | manager.frontend.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager frontend authlist_user password secret key name | @@ -45,6 +47,7 @@ Helm chart for the LOVE manager service. | manager.frontend.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager frontend process connection password secret key name | | manager.frontend.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | | manager.frontend.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager frontend secret secret key name | +| manager.frontend.envSecrets.SMTP_PASSWORD | string | `"smtp-email-password"` | The LOVE manager smtp email password secret key name | | manager.frontend.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager frontend user user password secret key name | | manager.frontend.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | | manager.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager frontend image | diff --git a/applications/love/charts/love-manager/templates/redis-deployment.yaml b/applications/love/charts/love-manager/templates/redis-deployment.yaml index bceb0eb50e..dfbfac3d59 100644 --- a/applications/love/charts/love-manager/templates/redis-deployment.yaml +++ b/applications/love/charts/love-manager/templates/redis-deployment.yaml @@ -28,7 +28,7 @@ spec: {{- $data := dict "env" .Values.redis.envSecrets "secret" true }} {{- include "helpers.envFromList" $data | indent 10 }} volumeMounts: - - mountPath: /data/redis.conf + - mountPath: /data readOnly: true name: redis-conf {{- with $.Values.redis.resources }} diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index 2e7162495d..e33adab93d 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -77,6 +77,10 @@ manager: REDIS_CONFIG_EXPIRY: 5 # -- The connection capacity for the redis service REDIS_CONFIG_CAPACITY: 5000 + # -- The SMTP user for the LOVE manager frontend + SMTP_USER: loveapplication + # -- The mail address to forward the nightly report to + NIGHTREPORT_MAIL_ADDRESS: rubin-night-log@lists.lsst.org envSecrets: # -- The LOVE manager frontend secret secret key name SECRET_KEY: manager-secret-key @@ -100,6 +104,8 @@ manager: REDIS_PASS: redis-pass # -- The LOVE manager jira API token secret key name JIRA_API_TOKEN: jira-api-token + # -- The LOVE manager smtp email password secret key name + SMTP_PASSWORD: smtp-email-password # -- Set the default number of LOVE manager frontend pod replicas replicas: 1 autoscaling: diff --git a/applications/love/values-base.yaml b/applications/love/values-base.yaml index b67cae06c2..e022dcf78f 100644 --- a/applications/love/values-base.yaml +++ b/applications/love/values-base.yaml @@ -39,6 +39,7 @@ love-manager: AUTH_LDAP_3_SERVER_URI: ldap://ipa3.ls.lsst.org DB_HOST: postgresdb01.ls.lsst.org LOVE_SITE: base + NIGHTREPORT_MAIL_ADDRESS: saranda@lsst.org autoscaling: enabled: true minReplicas: 2 diff --git a/applications/love/values-summit.yaml b/applications/love/values-summit.yaml new file mode 100644 index 0000000000..f1c42bdde4 --- /dev/null +++ b/applications/love/values-summit.yaml @@ -0,0 +1,756 @@ +audio-broadcaster: + image: + repository: ts-dockerhub.lsst.org/audio_broadcaster + pullPolicy: Always + ingress: + hostname: summit-lsp.lsst.codes + httpPath: /audio-broadcaster + pathType: Exact + className: nginx + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: 'true' + nginx.ingress.kubernetes.io/rewrite-target: /audio_feed + env: + WEBSERVER_PORT: 8888 + microphones: + - name: auxtel1 + host: auxtel-rpi-audio01.cp.lsst.org + port: 4444 + - name: dynalene1 + host: dynalene-rpi-audio01.cp.lsst.org + port: 4444 + - name: mtdome1 + host: 
mtdome-rpi-audio01.cp.lsst.org + port: 4444 + - name: tma1 + host: tma-rpi-audio01.cp.lsst.org + port: 4444 + resources: + requests: + cpu: 5m + memory: 100Mi + limits: + cpu: 50m + memory: 500Mi + +love-commander: + image: + repository: ts-dockerhub.lsst.org/love-commander + pullPolicy: Always + env: + S3_INSTANCE: cp + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + service: + enabled: true + port: 5000 + type: ClusterIP + resources: + requests: + cpu: 50m + memory: 230Mi + limits: + cpu: 500m + memory: 800Mi + +love-manager: + manager: + frontend: + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + SERVER_URL: summit-lsp.lsst.codes + OLE_API_HOSTNAME: summit-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.cp.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.cp.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.cp.lsst.org + DB_HOST: postgresdb01.cp.lsst.org + LOVE_SITE: summit + autoscaling: + enabled: true + minReplicas: 10 + maxReplicas: 50 + targetCPUUtilizationPercentage: 80 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + producers: + - name: general + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + LOVE_SITE: summit + SERVER_URL: summit-lsp.lsst.codes + OLE_API_HOSTNAME: summit-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.cp.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.cp.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.cp.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: postgresdb01.ls.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 10 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + - name: queue + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + LOVE_SITE: summit + SERVER_URL: summit-lsp.lsst.codes + OLE_API_HOSTNAME: summit-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.cp.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.cp.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.cp.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: 
postgresdb01.cp.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 3 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + - name: m1m3 + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + LOVE_SITE: summit + SERVER_URL: summit-lsp.lsst.codes + OLE_API_HOSTNAME: summit-lsp.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.cp.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.cp.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.cp.lsst.org + COMMANDER_HOSTNAME: love-commander-service + COMMANDER_PORT: 5000 + DB_HOST: postgresdb01.cp.lsst.org + DB_ENGINE: postgresql + DB_NAME: love + DB_PORT: 5432 + DB_USER: love + HEARTBEAT_QUERY_COMMANDER: false + JIRA_API_HOSTNAME: rubinobs.atlassian.net + JIRA_PROJECT_ID: 10063 + REDIS_CONFIG_CAPACITY: 5000 + REDIS_CONFIG_EXPIRY: 5 + REDIS_HOST: love-manager-redis-service + REMOTE_STORAGE: true + URL_SUBPATH: /love + envSecrets: + SECRET_KEY: manager-secret-key + PROCESS_CONNECTION_PASS: process-connection-pass + ADMIN_USER_PASS: admin-user-pass + USER_USER_PASS: user-user-pass + CMD_USER_PASS: cmd-user-pass + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + DB_PASS: db-pass + REDIS_PASS: redis-pass + replicas: 1 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + redis: + image: + repository: redis + tag: '7' + pullPolicy: IfNotPresent + config: | + timeout 60 + save "" + resources: + requests: + cpu: 250m + memory: 300Mi + limits: + cpu: 2500m + memory: 1000Mi + viewBackup: + enabled: true + image: + repository: ts-dockerhub.lsst.org/love-view-backup + pullPolicy: Always + schedule: 0 12 * * * + resources: + requests: + cpu: 5m + memory: 100Mi + limits: + cpu: 100m + memory: 500Mi + +love-nginx: + image: + repository: nginx + tag: 1.25.1 + pullPolicy: Always + ingress: + hostname: summit-lsp.lsst.codes + httpPath: /love + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + initContainers: + frontend: + image: + repository: ts-dockerhub.lsst.org/love-frontend-k8s + pullPolicy: Always + manager: + image: + repository: ts-dockerhub.lsst.org/love-manager-static + pullPolicy: Always + command: + - /bin/sh + - -c 
+ - mkdir -p /usr/src/love-manager; cp -Rv /usr/src/love/manager/media /usr/src/love-manager; cp -Rv /usr/src/love/manager/static /usr/src/love-manager + staticStore: + name: love-nginx-static + storageClass: rook-ceph-block + accessMode: ReadWriteOnce + claimSize: 2Gi + resources: + requests: + cpu: 50m + memory: 70Mi + limits: + cpu: 500m + memory: 300Mi + nginxConfig: | + server { + listen 80; + server_name localhost; + location /love { + root /usr/src/love-frontend; + try_files $uri$args $uri$args/ $uri/ /love/index.html; + } + location /love/manager { + client_max_body_size 5M; + proxy_pass http://love-manager-frontend-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/producers { + proxy_pass http://love-manager-producer-general-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/m1m3 { + proxy_pass http://love-manager-producer-m1m3-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/queue { + proxy_pass http://love-manager-producer-queue-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/media { + alias /usr/src/love-manager/media; + } + location /love/manager/static { + alias /usr/src/love-manager/static; + } + location /love/manager/media { + alias /usr/src/love-manager/media; + } + location /love/startrackera { + proxy_pass http://azar03.cp.lsst.org:5101; + } + location /love/startrackerb { + proxy_pass http://azar03.cp.lsst.org:5102; + } + location /love/startrackerc { + proxy_pass http://azar03.cp.lsst.org:5103; + } + } + loveConfig: | + { + "alarms": { + "minSeveritySound": "serious", + "minSeverityNotification": "warning" + }, + "camFeeds": { + "starTrackerA": "/love/startrackera", + "starTrackerB": "/love/startrackerb", + "starTrackerC": "/love/startrackerc" + }, + "efd": { + "defaultEfdInstance": "summit_efd", + "urlStatus": "https://summit-lsp.lsst.codes/influxdb/health" + }, + "sal": { + "urlStatus": "https://summit-lsp.lsst.codes/sasquatch-rest-proxy/brokers", + "expectedBrokerList": [6, 7, 8] + } + } + +love-producer: + image: + repository: ts-dockerhub.lsst.org/love-producer + pullPolicy: Always + resources: + requests: + cpu: 10m + memory: 100Mi + limits: + cpu: 100m + memory: 300Mi + producers: + - name: ataos + csc: ATAOS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atbuilding + csc: ATBuilding:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atcamera + csc: ATCamera:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atdome + csc: ATDome:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atdometrajectory + csc: ATDomeTrajectory:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atheaderservice + csc: ATHeaderService:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + 
- name: athexapod + csc: ATHexapod:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atmcs + csc: ATMCS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atmonochromator + csc: ATMonochromator:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atocps + csc: OCPS:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atoods + csc: ATOODS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atpneumatics + csc: ATPneumatics:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atptg + csc: ATPtg:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atscheduler + csc: Scheduler:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atscriptqueue + csc: ScriptQueue:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription + resources: + requests: + cpu: 50m + memory: 100Mi + limits: + cpu: 500m + memory: 500Mi + - name: atspectrograph + csc: ATSpectrograph:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: atwhitelight + csc: ATWhiteLight:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: auxteless201 + csc: ESS:201 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: auxteless202 + csc: ESS:202 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: auxteless203 + csc: ESS:203 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: auxteless204 + csc: ESS:204 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: calibhilless301 + csc: ESS:301 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: cameraess111 + csc: ESS:111 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: camerahexapod + csc: MTHexapod:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: cccamera + csc: CCCamera:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: ccheaderservice + csc: CCHeaderService:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: ccocps + csc: OCPS:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: ccoods + csc: CCOODS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: cbp + csc: CBP:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: dream + csc: DREAM:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: cleanroomess109 + csc: ESS:109 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: dimm1 + csc: DIMM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: dimm2 + csc: DIMM:2 --log-level 10 + WEBSOCKET_HOST: 
love-nginx-service/love/manager/producers/ws/subscription + - name: dsm1 + csc: DSM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: dsm2 + csc: DSM:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: earthquakeess302 + csc: ESS:302 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: eas + csc: EAS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: electrometer101 + csc: Electrometer:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: electrometer102 + csc: Electrometer:102 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: electrometer201 + csc: Electrometer:201 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: epm1 + csc: EPM:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: epm301 + csc: EPM:301 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: fiberspectrograph3 + csc: FiberSpectrograph:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: gcheaderservice1 + csc: GCHeaderService:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: gcheaderservice101 + csc: GCHeaderService:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: gcheaderservice102 + csc: GCHeaderService:102 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: gcheaderservice103 + csc: GCHeaderService:103 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: genericcamera1 + csc: GenericCamera:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: genericcamera101 + csc: GenericCamera:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: genericcamera102 + csc: GenericCamera:102 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: genericcamera103 + csc: GenericCamera:103 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: gis + csc: GIS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: hvac + csc: HVAC:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: lasertracker1 + csc: LaserTracker:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: ledprojector + csc: LEDProjector:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: linearstage101 + csc: LinearStage:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: linearstage102 + csc: LinearStage:102 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: linearstage103 + csc: LinearStage:103 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: linearstage104 + csc: LinearStage:104 --log-level 10 + WEBSOCKET_HOST: 
love-nginx-service/love/manager/producers/ws/subscription + - name: love + csc: LOVE:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: m1m3ess113 + csc: ESS:113 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: m2ess106 + csc: ESS:106 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: m2ess112 + csc: ESS:112 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtheaderservice + csc: MTHeaderService:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: m2hexapod + csc: MTHexapod:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtaircompressor1 + csc: MTAirCompressor:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtaircompressor2 + csc: MTAirCompressor:2 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtaos + csc: MTAOS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtcamera + csc: MTCamera:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtdome + csc: MTDome:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtdomeess107 + csc: ESS:107 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtdomeess108 + csc: ESS:108 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtdometrajectory + csc: MTDomeTrajectory:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtm1m3 + csc: MTM1M3:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/m1m3/ws/subscription + resources: + requests: + cpu: 100m + memory: 220Mi + limits: + cpu: 500m + memory: 600Mi + - name: mtm1m3ts + csc: MTM1M3TS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtm2 + csc: MTM2:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtmount + csc: MTMount:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtoods + csc: MTOODS:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtptg + csc: MTPtg:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtrotator + csc: MTRotator:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtscheduler + csc: Scheduler:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: mtscriptqueue + csc: ScriptQueue:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription + resources: + requests: + cpu: 50m + memory: 100Mi + limits: + cpu: 500m + memory: 500Mi + - name: ocsscheduler + csc: Scheduler:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: ocsscriptqueue + csc: ScriptQueue:3 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/queue/ws/subscription + resources: + requests: + cpu: 50m + memory: 100Mi + limits: + cpu: 500m + memory: 
500Mi + - name: pmd1 + csc: PMD:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: raocps + csc: OCPS:101 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: tmaess001 + csc: ESS:1 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: tmaess104 + csc: ESS:104 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: tmaess105 + csc: ESS:105 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: tmaess110 + csc: ESS:110 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: tunablelaser + csc: TunableLaser:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: watcher + csc: Watcher:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + - name: weatherforecast + csc: WeatherForecast:0 --log-level 10 + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml index e9bcd2b636..6eca84c28d 100644 --- a/applications/obssys/Chart.yaml +++ b/applications/obssys/Chart.yaml @@ -14,6 +14,10 @@ dependencies: alias: atscheduler version: 1.0.0 repository: file://../../charts/csc +- name: csc + alias: gis + version: 1.0.0 + repository: file://../../charts/csc - name: csc alias: mtqueue version: 1.0.0 diff --git a/applications/obssys/values-summit.yaml b/applications/obssys/values-summit.yaml new file mode 100644 index 0000000000..8e2cbd52c3 --- /dev/null +++ b/applications/obssys/values-summit.yaml @@ -0,0 +1,397 @@ +x-butler-secret: + &butler-secret + butlerSecret: + containerPath: &bs-cp /home/saluser/.lsst + dbUser: oods + secretFilename: &bs-fn postgres-credentials.txt + secretFixup: + containerPath: *bs-cp + filenames: + - *bs-fn + +atqueue: + classifier: scriptqueue2 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 2 --state enabled + USER_USERNAME: user + IMAGE_SERVER_URL: http://ccs.lsst.org + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + <<: *butler-secret + nfsMountpoint: + - name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/base/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata/base/auxtel + - name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata + - name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam + - name: comcam-gen3-data-temp + containerPath: /data/lsstdata/base/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata/base/comcam + - name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata + - name: project-shared + containerPath: /project + readOnly: false + server: nfs1.cp.lsst.org 
+ serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.cp.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" + resources: + limits: + cpu: 5000m + memory: 10Gi + requests: + cpu: 1000m + memory: 580Mi + +atscheduler: + classifier: scheduler2 + image: + repository: ts-dockerhub.lsst.org/scheduler + pullPolicy: Always + env: + INDEX: 2 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.cp.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" + resources: + limits: + cpu: 2500m + memory: 10000Mi + requests: + cpu: 250m + memory: 3100Mi + +gis: + image: + repository: ts-dockerhub.lsst.org/gis + pullPolicy: Always + secretFixup: + containerPath: /home/saluser/.store + filenames: + - id_gis_bastion + - id_gis_bastion.pub + specialInstructions: >- + chmod 0644 /secrets/*.* + resources: + limits: + cpu: 300m + memory: 300Mi + requests: + cpu: 30m + memory: 97Mi + +mtqueue: + classifier: scriptqueue1 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 1 --state enabled + USER_USERNAME: user + IMAGE_SERVER_URL: http://ccs.lsst.org + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + <<: *butler-secret + nfsMountpoint: + - name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/base/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata/base/auxtel + - name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata + - name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam + - name: comcam-gen3-data-temp + containerPath: /data/lsstdata/base/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata/base/comcam + - name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata + - name: project-shared + containerPath: /project + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.cp.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" + resources: + 
limits: + cpu: 2500m + memory: 10Gi + requests: + cpu: 500m + memory: 430Mi + +mtscheduler: + classifier: scheduler1 + image: + repository: ts-dockerhub.lsst.org/scheduler + pullPolicy: Always + env: + INDEX: 1 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.cp.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" + resources: + limits: + cpu: 2700m + memory: 7000Mi + requests: + cpu: 270m + memory: 2100Mi + +ocsqueue: + classifier: scriptqueue3 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 3 --state enabled + IMAGE_SERVER_URL: http://ccs.lsst.org + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + <<: *butler-secret + nfsMountpoint: + - name: auxtel-gen3-data + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-gen3-data-temp + containerPath: /data/lsstdata/base/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata/base/auxtel + - name: auxtel-data + containerPath: /readonly/lsstdata/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata + - name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam + - name: comcam-gen3-data-temp + containerPath: /data/lsstdata/base/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata/base/comcam + - name: comcam-data + containerPath: /readonly/lsstdata/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata + - name: project-shared + containerPath: /project + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.cp.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" + resources: + limits: + cpu: 2500m + memory: 10Gi + requests: + cpu: 500m + memory: 430Mi + +ocsscheduler: + classifier: scheduler3 + image: + repository: ts-dockerhub.lsst.org/scheduler + pullPolicy: Always + env: + INDEX: 3 + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: 
nfs-obsenv.cp.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" + resources: + limits: + cpu: 2600m + memory: 7000Mi + requests: + cpu: 260m + memory: 2100Mi + +watcher: + image: + repository: ts-dockerhub.lsst.org/watcher + pullPolicy: Always + envSecrets: + - name: ESCALATION_KEY + key: squadcast-escalation-key + resources: + limits: + cpu: 2400m + memory: 1500Mi + requests: + cpu: 240m + memory: 445Mi diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml index 1f8f9d3cd8..15ad1b5448 100644 --- a/applications/simonyitel/Chart.yaml +++ b/applications/simonyitel/Chart.yaml @@ -11,11 +11,31 @@ dependencies: version: 1.0.0 condition: ccheaderservice.enabled repository: file://../../charts/csc +- name: csc + alias: ccheaderservice-sim + version: 1.0.0 + condition: ccheaderservice-sim.enabled + repository: file://../../charts/csc - name: csc alias: ccoods version: 1.0.0 condition: ccoods.enabled repository: file://../../charts/csc +- name: csc + alias: gcheaderservice101 + version: 1.0.0 + condition: gcheaderservice101.enabled + repository: file://../../charts/csc +- name: csc + alias: gcheaderservice102 + version: 1.0.0 + condition: gcheaderservice102.enabled + repository: file://../../charts/csc +- name: csc + alias: gcheaderservice103 + version: 1.0.0 + condition: gcheaderservice103.enabled + repository: file://../../charts/csc - name: csc alias: lasertracker1 version: 1.0.0 @@ -89,11 +109,6 @@ dependencies: version: 1.0.0 condition: mtm1m3-sim.enabled repository: file://../../charts/csc -- name: csc - alias: mtm1m3ts-sim - version: 1.0.0 - condition: mtm1m3ts-sim.enabled - repository: file://../../charts/csc - name: csc alias: mtm2 version: 1.0.0 @@ -119,6 +134,11 @@ dependencies: version: 1.0.0 condition: mtmount.enabled repository: file://../../charts/csc +- name: csc + alias: mtmount-ccw-only + version: 1.0.0 + condition: mtmount-ccw-only.enabled + repository: file://../../charts/csc - name: csc alias: mtmount-sim version: 1.0.0 diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md index 42e237ac69..5a7c318657 100644 --- a/applications/simonyitel/README.md +++ b/applications/simonyitel/README.md @@ -21,8 +21,6 @@ Deployment for the Simonyi Survey Telescope CSCs | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | lasertracker1-sim.enabled | bool | `false` | Enable the LaserTracker:1 simulator CSC | | lasertracker1.enabled | bool | `false` | Enable the LaserTracker:1 CSC | -| m1m3-sim.enabled | bool | `false` | Enable the MTM1M3 simulator CSC | -| m1m3.enabled | bool | `false` | Enable the MTM1M3 hardware simulator CSC | | m1m3ts-sim.enabled | bool | `false` | Enable the MTM1M3TS simulator CSC | | mtaircompressor1-sim.enabled | bool | `false` | Enable the MTAirCompressor:1 simulator CSC | | mtaircompressor1.enabled | bool | `false` | Enable the MTAirCompressor:1 CSC | @@ -33,12 +31,15 @@ Deployment for the Simonyi Survey Telescope CSCs | mtdome-sim.enabled | bool | `false` | Enable the MTDome simulator CSC | | mtdome.enabled | bool | `false` | Enable the MTDome CSC | | mtheaderservice.enabled | bool | `false` | Enable the MTHeaderService CSC | +| mtm1m3-sim.enabled | bool | `false` | Enable the MTM1M3 simulator CSC | +| mtm1m3.enabled | bool | `false` | Enable the MTM1M3 hardware simulator CSC | | mtm2-sim.enabled | bool | `false` | Enable 
the MTM2 simulator CSC | | mtm2.enabled | bool | `false` | Enable the MTM2 CSC | | mtm2hexapod-sim.enabled | bool | `false` | Enable the MTHexapod:2 simulator CSC | | mtm2hexapod.enabled | bool | `false` | Enable the MTHexapod:2 CSC | | mtmount-sim.enabled | bool | `false` | Enable the MTMount simulator CSC | | mtmount.enabled | bool | `false` | Enable the MTMount CSC | +| mtoods.enabled | bool | `false` | Enable the MTOODS simulator CSC | | mtrotator-sim.enabled | bool | `false` | Enable the MTRotator simulator CSC | | mtrotator.enabled | bool | `false` | Enable the MTRotator CSC | | mtvms-m1m3-sim.enabled | bool | `false` | Enable the MTVMS:1 simulator CSC | diff --git a/applications/simonyitel/values-summit.yaml b/applications/simonyitel/values-summit.yaml new file mode 100644 index 0000000000..cefa4a6fc1 --- /dev/null +++ b/applications/simonyitel/values-summit.yaml @@ -0,0 +1,647 @@ +x-butler-secret: + &butler-secret + butlerSecret: + containerPath: &bs-cp /home/saluser/.lsst + dbUser: oods + secretFilename: &bs-fn postgres-credentials.txt + secretFixup: + containerPath: *bs-cp + filenames: + - *bs-fn + +ccheaderservice: + enabled: true + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance cp + CAMERA: cc + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 1000m + memory: 1500Mi + requests: + cpu: 100m + memory: 300Mi + +ccheaderservice-sim: + enabled: false + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance cp --playback + CAMERA: cc + HEADERSERVICE_PLAYLIST_DIR: /header_service + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + nfsMountpoint: + - name: ccs-data + containerPath: /header_service + readOnly: true + server: nfs1.cp.lsst.org + serverPath: /scratch/header_service + resources: + limits: + cpu: 1000m + memory: 1500Mi + requests: + cpu: 100m + memory: 300Mi + +ccoods: + enabled: true + image: + repository: ts-dockerhub.lsst.org/ccoods + pullPolicy: Always + env: + CTRL_OODS_CONFIG_FILE: /etc/ccoods.yaml + <<: *butler-secret + nfsMountpoint: + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam + - name: comcam-oods-data + containerPath: /data + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam + resources: + limits: + cpu: 100m + memory: 4000Mi + requests: + cpu: 1m + memory: 800Mi + configfile: + path: /etc + filename: ccoods.yaml + content: | + defaultInterval: &interval + days: 0 + hours: 0 + minutes: 0 + seconds: 0 + + ingester: + imageStagingDirectory: /data/staging/comcam/oods + butlers: + - butler: + instrument: lsst.obs.lsst.LsstComCam + class: + import : lsst.ctrl.oods.gen3ButlerIngester + name : Gen3ButlerIngester + stagingDirectory : /data/lsstdata/base/comcam/oods/gen3butler/raw + badFileDirectory: /data/lsstdata/base/comcam/oods/gen3butler/badfiles + repoDirectory : /repo/LSSTComCam + collections: + - LSSTComCam/raw/all + - LSSTComCamSim/raw/all + cleanCollections: + - LSSTComCam/raw/all + - LSSTComCamSim/raw/all + - 
LSSTComCamSim/quickLook + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 90 + batchSize: 20 + scanInterval: + <<: *interval + seconds: 2 + + cacheCleaner: + # ONLY clean out empty directories here, never files + clearEmptyDirectories: + - /data/lsstdata/base/comcam/oods/gen3butler/raw + - /data/repo/LSSTComCam/LSSTComCamSim/quickLook + # clean out empty directories and old files from these directories + clearEmptyDirectoriesAndOldFiles: + - /data/lsstdata/base/comcam/oods/gen3butler/badfiles + - /data/staging/comcam/oods + - /data/staging/comcam/forwarder + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 31 + directoriesEmptyForMoreThan: + <<: *interval + days: 2 + +gcheaderservice101: + enabled: true + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + CAMERA: gc101 + URL_SPEC: --lfa_mode s3 --s3instance cp + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 2000m + memory: 700Mi + requests: + cpu: 200m + memory: 232Mi + +gcheaderservice102: + enabled: true + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + CAMERA: gc102 + URL_SPEC: --lfa_mode s3 --s3instance cp + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 2000m + memory: 700Mi + requests: + cpu: 200m + memory: 236Mi + +gcheaderservice103: + enabled: true + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + CAMERA: gc103 + URL_SPEC: --lfa_mode s3 --s3instance cp + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 2000m + memory: 700Mi + requests: + cpu: 200m + memory: 225Mi + +lasertracker1: + enabled: true + image: + repository: ts-dockerhub.lsst.org/lasertracker + pullPolicy: Always + env: + RUN_ARG: 1 + resources: + limits: + cpu: 200m + memory: 360Mi + requests: + cpu: 20m + memory: 120Mi + +mtaircompressor1: + enabled: true + image: + repository: ts-dockerhub.lsst.org/mtaircompressor + pullPolicy: Always + env: + RUN_ARG: 1 --state disabled + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 10m + memory: 100Mi + +mtaircompressor2: + enabled: true + image: + repository: ts-dockerhub.lsst.org/mtaircompressor + pullPolicy: Always + env: + RUN_ARG: 2 --state disabled + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 10m + memory: 100Mi + +mtaos: + image: + repository: ts-dockerhub.lsst.org/mtaos + pullPolicy: Always + butlerSecret: + containerPath: /home/saluser/.lsst + dbUser: oods + secretFilename: postgres-credentials.txt + nfsMountpoint: + - name: comcam-gen3-data + containerPath: /repo/LSSTComCam + readOnly: false + server: nfs3.cp.lsst.org + serverPath: /comcam/repo/LSSTComCam + - name: comcam-gen3-data-temp + containerPath: /data/lsstdata/base/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata/base/comcam + - name: comcam-data + containerPath: 
/readonly/lsstdata/comcam + readOnly: true + server: nfs3.cp.lsst.org + serverPath: /comcam/lsstdata + - name: project-shared + containerPath: /project + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /project + resources: + limits: + cpu: 1000m + memory: 1500Mi + requests: + cpu: 30m + memory: 430Mi + +mtcamhexapod: + enabled: true + classifier: mthexapod1 + image: + repository: ts-dockerhub.lsst.org/mthexapod + pullPolicy: Always + env: + RUN_ARG: 1 + resources: + limits: + cpu: 500m + memory: 400Mi + requests: + cpu: 50m + memory: 125Mi + +mtcamhexapod-sim: + enabled: false + classifier: mthexapod1 + image: + repository: ts-dockerhub.lsst.org/mthexapod + pullPolicy: Always + env: + RUN_ARG: --simulate 1 + resources: + limits: + cpu: 500m + memory: 400Mi + requests: + cpu: 50m + memory: 125Mi + +mtdome: + enabled: true + image: + repository: ts-dockerhub.lsst.org/mtdome + pullPolicy: Always + resources: + limits: + cpu: 900m + memory: 300Mi + requests: + cpu: 90m + memory: 100Mi + +mtdome-sim: + enabled: false + image: + repository: ts-dockerhub.lsst.org/mtdome + pullPolicy: Always + env: + RUN_ARG: --simulate 1 + resources: + limits: + cpu: 900m + memory: 300Mi + requests: + cpu: 90m + memory: 100Mi + +mtdometrajectory: + image: + repository: ts-dockerhub.lsst.org/mtdometrajectory + pullPolicy: Always + resources: + limits: + cpu: 250m + memory: 400Mi + requests: + cpu: 25m + memory: 115Mi + +mtheaderservice: + enabled: true + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance cp + TSTAND_HEADERSERVICE: LEVEL3_CLEANROOM + CAMERA: mt + envSecrets: + - name: AWS_ACCESS_KEY_ID + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + key: aws-secret-access-key + - name: MYS3_ACCESS_KEY + key: aws-access-key-id + - name: MYS3_SECRET_KEY + key: aws-secret-access-key + resources: + limits: + cpu: 2000m + memory: 2000Mi + requests: + cpu: 200m + memory: 500Mi + +mtm1m3-sim: + enabled: false + image: + repository: ts-dockerhub.lsst.org/mtm1m3_sim + pullPolicy: Always + env: + LSST_KAFKA_TLM_FLUSH_MS: 0 + LSST_KAFKA_CMDEVT_FLUSH_MS: 0 + resources: + limits: + cpu: 1000m + requests: + cpu: 100m + memory: 1000Mi + +mtm2: + enabled: true + image: + repository: ts-dockerhub.lsst.org/m2 + pullPolicy: Always + resources: + limits: + cpu: 850m + memory: 1500Mi + requests: + cpu: 85m + memory: 420Mi + +mtm2-sim: + enabled: false + image: + repository: ts-dockerhub.lsst.org/m2 + pullPolicy: Always + env: + RUN_ARG: --simulate + resources: + limits: + cpu: 850m + memory: 1500Mi + requests: + cpu: 85m + memory: 420Mi + +mtm2hexapod: + enabled: true + classifier: mthexapod2 + image: + repository: ts-dockerhub.lsst.org/mthexapod + pullPolicy: Always + env: + RUN_ARG: 2 + resources: + limits: + cpu: 400m + memory: 400Mi + requests: + cpu: 40m + memory: 125Mi + +mtm2hexapod-sim: + enabled: false + classifier: mthexapod2 + image: + repository: ts-dockerhub.lsst.org/mthexapod + pullPolicy: Always + env: + RUN_ARG: --simulate 2 + resources: + limits: + cpu: 400m + memory: 400Mi + requests: + cpu: 40m + memory: 125Mi + +mtmount: + enabled: true + image: + repository: ts-dockerhub.lsst.org/mtmount + pullPolicy: Always + resources: + limits: + cpu: 500m + memory: 1000Mi + requests: + cpu: 50m + memory: 300Mi + +mtmount-ccw-only: + enabled: false + image: + repository: ts-dockerhub.lsst.org/mtmount + pullPolicy: Always + env: + CCW_ONLY: yes + resources: + limits: + cpu: 500m + memory: 1000Mi + requests: + cpu: 50m + memory: 300Mi + 
+mtmount-sim:
+  enabled: false
+  image:
+    repository: ts-dockerhub.lsst.org/mtmount
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate
+  resources:
+    limits:
+      cpu: 500m
+      memory: 1000Mi
+    requests:
+      cpu: 50m
+      memory: 300Mi
+
+mtoods:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtoods
+    pullPolicy: Always
+  env:
+    CTRL_OODS_CONFIG_FILE: /etc/mtoods.yaml
+  <<: *butler-secret
+  resources:
+    limits:
+      cpu: 1000m
+      memory: 700Mi
+    requests:
+      cpu: 10m
+      memory: 345Mi
+  nfsMountpoint:
+    - name: lsstcam-gen3-butler
+      containerPath: /repo/LSSTCam
+      readOnly: false
+      server: nfs3.cp.lsst.org
+      serverPath: /lsstcam/repo/LSSTCam
+    - name: lsstcam-oods-data
+      containerPath: /data
+      readOnly: false
+      server: nfs3.cp.lsst.org
+      serverPath: /lsstcam
+  configfile:
+    path: /etc
+    filename: mtoods.yaml
+    content: |
+      defaultInterval: &interval
+        days: 0
+        hours: 0
+        minutes: 0
+        seconds: 0
+
+      ingester:
+        imageStagingDirectory: /data/staging/maintel/oods
+        butlers:
+          - butler:
+              instrument: lsst.obs.lsst.LsstCam
+              class:
+                import: lsst.ctrl.oods.gen3ButlerIngester
+                name: Gen3ButlerIngester
+              stagingDirectory: /data/lsstdata/base/maintel/oods/gen3butler/raw
+              badFileDirectory: /data/lsstdata/base/maintel/oods/gen3butler/badfiles
+              repoDirectory: /repo/LSSTCam
+              collections:
+                - LSSTCam/raw/all
+              scanInterval:
+                <<: *interval
+                hours: 1
+              filesOlderThan:
+                <<: *interval
+                days: 2
+        batchSize: 20
+        scanInterval:
+          <<: *interval
+          seconds: 2
+
+      cacheCleaner:
+        # ONLY clean out empty directories here, never files
+        clearEmptyDirectories:
+          - /data/lsstdata/base/maintel/oods/gen3butler/raw
+          - /data/staging/maintel/camera
+        # clean out empty directories and old files from these directories
+        clearEmptyDirectoriesAndOldFiles:
+          - /data/lsstdata/base/maintel/oods/gen3butler/badfiles
+          - /data/staging/maintel/oods
+        scanInterval:
+          <<: *interval
+          hours: 1
+        filesOlderThan:
+          <<: *interval
+          days: 31
+        directoriesEmptyForMoreThan:
+          <<: *interval
+          days: 2
+
+mtptg:
+  image:
+    repository: ts-dockerhub.lsst.org/ptkernel
+    pullPolicy: Always
+  env:
+    TELESCOPE: MT
+    LSST_KAFKA_TLM_FLUSH_MS: 0
+    LSST_KAFKA_CMDEVT_FLUSH_MS: 0
+  resources:
+    limits:
+      cpu: 1200m
+    requests:
+      cpu: 120m
+      memory: 1500Mi
+
+mtrotator:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtrotator
+    pullPolicy: Always
+  resources:
+    limits:
+      cpu: 400m
+      memory: 600Mi
+    requests:
+      cpu: 40m
+      memory: 150Mi
+
+mtrotator-sim:
+  enabled: false
+  image:
+    repository: ts-dockerhub.lsst.org/mtrotator
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate
+  resources:
+    limits:
+      cpu: 400m
+      memory: 600Mi
+    requests:
+      cpu: 40m
+      memory: 150Mi
diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml
index 7018b0c1f8..5ffae23c27 100644
--- a/applications/simonyitel/values.yaml
+++ b/applications/simonyitel/values.yaml
@@ -50,11 +50,11 @@ mtheaderservice:
   # -- Enable the MTHeaderService CSC
   enabled: false
 
-m1m3:
+mtm1m3:
   # -- Enable the MTM1M3 hardware simulator CSC
   enabled: false
 
-m1m3-sim:
+mtm1m3-sim:
   # -- Enable the MTM1M3 simulator CSC
   enabled: false
 
@@ -86,6 +86,10 @@ mtmount-sim:
   # -- Enable the MTMount simulator CSC
   enabled: false
 
+mtoods:
+  # -- Enable the MTOODS CSC
+  enabled: false
+
 mtrotator:
   # -- Enable the MTRotator CSC
   enabled: false
diff --git a/applications/uws/Chart.yaml b/applications/uws/Chart.yaml
index 9351e0db76..b4b892e846 100644
--- a/applications/uws/Chart.yaml
+++ b/applications/uws/Chart.yaml
@@ -16,6 +16,11 @@ dependencies:
   version: 1.0.0
   condition: ccocps.enabled
   repository: file://../../charts/csc
+- name: csc
+  alias: raocps
+  version: 1.0.0
+  condition: raocps.enabled
+  repository: file://../../charts/csc
 - name: csc
   alias: mtocps
   version: 1.0.0
diff --git a/applications/uws/README.md b/applications/uws/README.md
index 4ec810c701..5be54396ba 100644
--- a/applications/uws/README.md
+++ b/applications/uws/README.md
@@ -20,6 +20,7 @@ Deployment for the UWS and DM OCPS CSCs
 | atocps.enabled | bool | `false` | Enable the OCPS:1 CSC |
 | ccocps.enabled | bool | `false` | Enable the OCPS:2 CSC |
 | mtocps.enabled | bool | `false` | Enable the OCPS:3 CSC |
+| raocps.enabled | bool | `false` | Enable the OCPS:101 CSC |
 | uws-api-server.basePath | string | `"uws-server"` | The base path for the client ingress |
 | uws-api-server.butlerPg | object | `{}` | Configuration for Postgres backed butlers The object must have the following attributes defined: _secretName_ (A label that points to the VaultSecret for the postgres credentials) _containerPath_ (The directory location in the container for the Butler secret) _dbUser_ (The database user name for butler access) |
 | uws-api-server.client.enabled | bool | `false` | Turn on the UWS client system if desired |
diff --git a/applications/uws/values-summit.yaml b/applications/uws/values-summit.yaml
index e3da835dab..ab0ec32905 100644
--- a/applications/uws/values-summit.yaml
+++ b/applications/uws/values-summit.yaml
@@ -1,5 +1,3 @@
-csc_shared:
-  secretsOnly: true
 uws-api-server:
   targetCluster: "summit"
   hostname: summit-lsp.lsst.codes
@@ -54,3 +52,56 @@ uws-api-server:
       exportPath: "/comcam/lsstdata/base/comcam"
       subPath: ""
       readOnly: true
+
+atocps:
+  enabled: true
+  classifier: ocps1
+  image:
+    repository: ts-dockerhub.lsst.org/dmocps
+    pullPolicy: Always
+  env:
+    RUN_ARG: 1
+  resources:
+    limits:
+      cpu: 100m
+      memory: 300Mi
+    requests:
+      cpu: 10m
+      memory: 100Mi
+
+ccocps:
+  enabled: true
+  classifier: ocps2
+  image:
+    repository: ts-dockerhub.lsst.org/dmocps
+    pullPolicy: Always
+  env:
+    RUN_ARG: 2
+  resources:
+    limits:
+      cpu: 100m
+      memory: 500Mi
+    requests:
+      cpu: 1m
+      memory: 100Mi
+
+raocps:
+  enabled: true
+  classifier: ocps101
+  image:
+    repository: ts-dockerhub.lsst.org/dmocps
+    pullPolicy: Always
+  env:
+    RUN_ARG: 101
+    REDIS_HOST: redis-service.rapid-analysis.svc.cluster.local
+    REDIS_PORT: 6379
+  envSecrets:
+    - name: REDIS_PASSWORD
+      key: redis-password
+  resources:
+    limits:
+      cpu: 100m
+      memory: 500Mi
+    requests:
+      cpu: 1m
+      memory: 100Mi
diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml
index 504b87ef2d..13634e8ab5 100644
--- a/applications/uws/values.yaml
+++ b/applications/uws/values.yaml
@@ -10,6 +10,10 @@ mtocps:
   # -- Enable the OCPS:3 CSC
   enabled: false
 
+raocps:
+  # -- Enable the OCPS:101 CSC
+  enabled: false
+
 # The following will be set by parameters injected by Argo CD and should not
 # be set in the individual environment values files.
 global:
diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml
index 838471061d..b03d498a6c 100644
--- a/environments/values-summit.yaml
+++ b/environments/values-summit.yaml
@@ -7,21 +7,33 @@ onepassword:
   vaultPathPrefix: "secret/phalanx/summit"
 
 applications:
+  auxtel: true
+  calsys: true
   consdb: true
+  control-system-test: true
+  envsys: true
   exposurelog: true
+  love: true
   mobu: true
   narrativelog: true
   nightreport: true
   nublado: true
   obsenv-management: true
+  obssys: true
   portal: true
   rapid-analysis: true
   rubintv: true
   rubintv-dev: true
   sasquatch: true
   sasquatch-backpack: true
+  simonyitel: true
   squareone: true
   strimzi: true
   telegraf: true
   telegraf-ds: true
   uws: true
+
+controlSystem:
+  imageTag: "k0003"
+  siteTag: "summit"
+  s3EndpointUrl: "https://s3.cp.lsst.org"