From 455300111ab4c48328ae569cb445b1d5c8caa678 Mon Sep 17 00:00:00 2001 From: Moshe Immermam Date: Sun, 14 Jul 2024 23:38:00 +0300 Subject: [PATCH] chore: doc updates --- .../docs/reference/1-alert-manager.mdx | 5 +- .../docs/reference/1-aws-cloudwatch.mdx | 3 +- canary-checker/docs/reference/1-catalog.mdx | 5 +- canary-checker/docs/reference/1-folder.mdx | 71 ++-- .../docs/reference/1-s3-protocol.mdx | 2 +- .../docs/reference/3-gcs-database-backup.mdx | 2 +- common/src/components/Helm.jsx | 8 +- common/src/components/Icon.jsx | 29 +- common/src/components/Registry.jsx | 0 .../{architecture.md => architecture.mdx} | 6 +- .../docs/config-db/concepts/health_status.md | 57 +-- mission-control/docs/config-db/index.mdx | 7 +- .../docs/config-db/scrapers/aws.md | 3 +- .../docs/config-db/scrapers/kubernetes.md | 14 +- mission-control/docs/how-it-works.mdx | 80 +++++ mission-control/docs/index.mdx | 9 +- .../docs/installation/_aws_iam.mdx | 329 ++++++++++-------- mission-control/docs/installation/helm.md | 19 + .../docs/installation/local-testing.md | 1 + .../docs/installation/saas/eks.mdx | 35 +- .../installation/saas/getting-started.mdx | 12 +- .../docs/installation/self-hosted/eks.mdx | 3 +- .../self-hosted/getting-started.mdx | 12 +- .../docs/partials/_envVarTypes.mdx | 4 + .../Actions/azure_devops_pipeline.mdx | 7 +- .../docs/playbooks/Actions/gitops.mdx | 3 +- mission-control/docs/playbooks/index.mdx | 7 +- .../docs/playbooks/quick-start.mdx | 40 +-- .../connections/{index.md => index.mdx} | 2 + .../reference/{env-var.md => env-var.mdx} | 0 mission-control/sidebars.js | 29 +- .../static/img/architecture-flow.svg | 3 + mission-control/tailwind.config.js | 20 ++ 33 files changed, 479 insertions(+), 348 deletions(-) create mode 100644 common/src/components/Registry.jsx rename mission-control/docs/{architecture.md => architecture.mdx} (82%) create mode 100644 mission-control/docs/how-it-works.mdx create mode 100644 mission-control/docs/installation/helm.md create mode 100644 mission-control/docs/partials/_envVarTypes.mdx rename mission-control/docs/reference/connections/{index.md => index.mdx} (99%) rename mission-control/docs/reference/{env-var.md => env-var.mdx} (100%) create mode 100644 mission-control/static/img/architecture-flow.svg diff --git a/canary-checker/docs/reference/1-alert-manager.mdx b/canary-checker/docs/reference/1-alert-manager.mdx index 86541791..42e3bf9a 100644 --- a/canary-checker/docs/reference/1-alert-manager.mdx +++ b/canary-checker/docs/reference/1-alert-manager.mdx @@ -1,9 +1,10 @@ --- title: Alertmanager +hide_title: true sidebar_class_name: popular --- -# Alertmanager +# Alertmanager Checks [Prometheus AlertManager](https://prometheus.io/docs/alerting/latest/alertmanager/) for any firing alerts. @@ -59,6 +60,7 @@ spec: } ]}/> + ## Relationships + ## Inserting checks into different namespaces You can specify different namespaces for checks using the `namespace` field. 
This is helpful when checks are dynamically generated via transformation diff --git a/canary-checker/docs/reference/1-aws-cloudwatch.mdx b/canary-checker/docs/reference/1-aws-cloudwatch.mdx index 4ef86ab4..cefe48cb 100644 --- a/canary-checker/docs/reference/1-aws-cloudwatch.mdx +++ b/canary-checker/docs/reference/1-aws-cloudwatch.mdx @@ -1,8 +1,9 @@ --- title: AWS Cloud Watch +hide_title: true --- -# CloudWatch +# CloudWatch Cloudwatch checks for all active alarms diff --git a/canary-checker/docs/reference/1-catalog.mdx b/canary-checker/docs/reference/1-catalog.mdx index cc343ae7..31982cb3 100644 --- a/canary-checker/docs/reference/1-catalog.mdx +++ b/canary-checker/docs/reference/1-catalog.mdx @@ -1,8 +1,11 @@ --- -title: Flanksource Catalog +title: Config DB +hide_title: true sidebar_class_name: popular --- +# Config DB + Runs a [config-db](https://github.com/flanksource/config-db) query. ```yaml title="catalog.yaml" file=../../../modules/canary-checker/fixtures/external/catalog.yaml diff --git a/canary-checker/docs/reference/1-folder.mdx b/canary-checker/docs/reference/1-folder.mdx index 945885a9..2e11455b 100644 --- a/canary-checker/docs/reference/1-folder.mdx +++ b/canary-checker/docs/reference/1-folder.mdx @@ -6,12 +6,12 @@ title: Folder Checks the contents of a folder for size, age and count. Folder based checks are useful in a number of scenarios: -* Verifying that backups have been uploaded and are the appropriate size -* Checking that logs or other temporary files are being cleaned up -* For batch processes: - * Checking if files are being processed (and/or produced) - * Checking the size of queue processing backlog - * Checking if any error (`.err` or `.log`) files have been produced. +- Verifying that backups have been uploaded and are the appropriate size +- Checking that logs or other temporary files are being cleaned up +- For batch processes: + - Checking if files are being processed (and/or produced) + - Checking the size of queue processing backlog + - Checking if any error (`.err` or `.log`) files have been produced. ```yaml title="folder-check.yaml" apiVersion: canaries.flanksource.com/v1 @@ -27,8 +27,6 @@ spec: minCount: 10 ``` - - - ## FolderFilter | Field | Description | Scheme | Required | | --------- | ----------------------------------------------------------- | ---------------------------------------------------- | -------- | -| `maxAge` | MaxAge the latest object should be younger than defined age | [Duration](/reference/types#duration) | | -| `maxSize` | MaxSize of the files inside the searchPath | [Size](/reference/types#size) | | -| `minAge` | MinAge the latest object should be older than defined age | [Duration](/reference/types#duration) | | -| `minSize` | MinSize of the files inside the searchPath | [Size](/reference/types#size) | | -| `regex` | Filter files based on regular expression | *[regex](https://github.com/google/re2/wiki/Syntax)* | | +| `maxAge` | MaxAge the latest object should be younger than defined age | [Duration](/reference/types#duration) | | +| `maxSize` | MaxSize of the files inside the searchPath | [Size](/reference/types#size) | | +| `minAge` | MinAge the latest object should be older than defined age | [Duration](/reference/types#duration) | | +| `minSize` | MinSize of the files inside the searchPath | [Size](/reference/types#size) | | +| `regex` | Filter files based on regular expression | _[regex](https://github.com/google/re2/wiki/Syntax)_ | | e.g. 
to verify that database backups are being performed @@ -72,7 +69,7 @@ spec: folder: - path: /data/backups filter: - regex: "pg-backups-.*.zip" + regex: 'pg-backups-.*.zip' maxAge: 1d # require a daily backup minSize: 10mb # the backup should be at least 10mb ``` @@ -81,34 +78,33 @@ spec: The following fields are available in `test`, `display` and `transform` [expressions](../concepts/expressions) -| Field | Scheme | -| --------------------- | -------------------------------------------------- | -| `Oldest` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | -| `Newest` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | -| `MinSize` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | -| `MaxSize` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | -| `SupportsTotalSize` (Only true for SMB folders) | bool | +| Field | Scheme | +| --------------------------------------------------- | -------------------------------------------------- | +| `Oldest` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | +| `Newest` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | +| `MinSize` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | +| `MaxSize` | [os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | +| `SupportsTotalSize` (Only true for SMB folders) | bool | | `SupportsAvailableSize` (Only true for SMB folders) | bool | -| `TotalSize` | int64 | -| `AvailableSize` | int64 | -| `Files` | [[]os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | +| `TotalSize` | int64 | +| `AvailableSize` | int64 | +| `Files` | [[]os.FileInfo](https://pkg.go.dev/io/fs#FileInfo) | ### FolderFilter -| Field | Description | Scheme | Required | -| --------- | ----------- | ----------------------- | -------- | +| Field | Description | Scheme | Required | +| --------- | ----------- | --------------------------------------- | -------- | | `minAge` | | [`Duration`](/reference/types#duration) | | | `maxAge` | | [`Duration`](/reference/types#duration) | | | `minSize` | | [`Size`](/reference/types#size) | | | `maxSize` | | [`Size`](/reference/types#size) | | -| `regex` | | `string` | | - +| `regex` | | `string` | | ## Connection Types ### SFTP - +
@@ -118,12 +114,9 @@ The following fields are available in `test`, `display` and `transform` [express
- - ### S3 - - + ```yaml title="s3-check.yaml" # ... @@ -145,10 +138,9 @@ spec: - ### SMB - + ```yaml title="smb-check.yaml" # ... @@ -161,7 +153,7 @@ spec: #... ``` - +
@@ -172,10 +164,9 @@ spec:
- ### GCS - + ```yaml title="gcs-check.yaml" # ... @@ -187,6 +178,7 @@ spec: gcpConnection: # ... ``` +
@@ -195,4 +187,3 @@ spec:
- diff --git a/canary-checker/docs/reference/1-s3-protocol.mdx b/canary-checker/docs/reference/1-s3-protocol.mdx index 817dc499..d55c905d 100644 --- a/canary-checker/docs/reference/1-s3-protocol.mdx +++ b/canary-checker/docs/reference/1-s3-protocol.mdx @@ -5,7 +5,7 @@ tags: - enterprise --- -# S3 Protocol +# S3 Protocol Checks if S3 compatible endpoints (like Minio, EMC ECS) are functioning correctly, diff --git a/canary-checker/docs/reference/3-gcs-database-backup.mdx b/canary-checker/docs/reference/3-gcs-database-backup.mdx index b0afa3bb..aa2a7821 100644 --- a/canary-checker/docs/reference/3-gcs-database-backup.mdx +++ b/canary-checker/docs/reference/3-gcs-database-backup.mdx @@ -3,7 +3,7 @@ title: GCS Database Backup sidebar_class_name: beta --- -# Google Cloud SQL Backups +# Google Cloud SQL Backups Checks if a Google Cloud SQL instance has been successfully backed up recently. diff --git a/common/src/components/Helm.jsx b/common/src/components/Helm.jsx index 0c8591f7..7d2e64f8 100644 --- a/common/src/components/Helm.jsx +++ b/common/src/components/Helm.jsx @@ -1,11 +1,8 @@ import CodeBlock from '@theme/CodeBlock' -import CopyButton from '@theme/CodeBlock/CopyButton' import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import Link from '@docusaurus/Link'; import { useState, useRef } from "react" -import Details from '@theme/Details'; -import Properties from '@site/docs/installation/_properties.mdx' function generateCli( @@ -48,7 +45,6 @@ function generateCli( if (wait) { s += " --wait \n" } - console.log('generate', s, values) return s } @@ -72,8 +68,6 @@ export default function Helm({ const [cli, setCli] = useState(generateCli( repo, repoName, chart, namespace, createNamespace, createRepo, wait, state, valueFile, args)) - console.log(values) - return <> {/*
@@ -130,7 +124,7 @@ export default function Helm({ - + {cli} diff --git a/common/src/components/Icon.jsx b/common/src/components/Icon.jsx index 60c03f77..4eb1d7d8 100644 --- a/common/src/components/Icon.jsx +++ b/common/src/components/Icon.jsx @@ -1,13 +1,28 @@ + import React from "react"; -export default function Icon({ name, height = "32px", className, url }) { - let img = ; +import { IconMap as Icons } from "@flanksource/icons/mi"; +import clsx from "clsx" + +export default function Icon({ name, height = 22, className, url, children }) { + name = name + .replaceAll("--", "-") + .replaceAll("::", "-") + .toLowerCase() + .replaceAll("k8-", "k8s-") + .replaceAll("kubernetes-", "k8s-"); + + let IconSVG = Icons[name]; + + if (!IconSVG) { + return {name} + } + let img = + {children} + if (url != null) { - return {img}; + return {img} } return img } + diff --git a/common/src/components/Registry.jsx b/common/src/components/Registry.jsx new file mode 100644 index 00000000..e69de29b diff --git a/mission-control/docs/architecture.md b/mission-control/docs/architecture.mdx similarity index 82% rename from mission-control/docs/architecture.md rename to mission-control/docs/architecture.mdx index a8a4b39c..e6ea0fb5 100644 --- a/mission-control/docs/architecture.md +++ b/mission-control/docs/architecture.mdx @@ -6,6 +6,9 @@ title: Architecture Mission Control has a micro-service architecture with a shared data source with multiple deployment models. +import EnvVarTypes from './partials/_envVarTypes.mdx' + + 1. CLI 2. Kubernetes (Helm Chart) @@ -31,8 +34,5 @@ All services use a shared database and model via the [duty](https://github.com/f * Library updates happen automatically using dependabot -## Kubernetes & Gitops -Mission control is kubernetes-native with all configuration being possible by Custom Resource Definition (CRD's) -The single source of truth is still the database, the operators only function is to synchronize CRD's into the database and update the CRD status periodically. diff --git a/mission-control/docs/config-db/concepts/health_status.md b/mission-control/docs/config-db/concepts/health_status.md index b8a3d388..7c5855e3 100644 --- a/mission-control/docs/config-db/concepts/health_status.md +++ b/mission-control/docs/config-db/concepts/health_status.md @@ -2,55 +2,14 @@ title: Health & Status --- -A config item has two key attributes associated with it: health and status. These attributes are assigned by the scrapers by evaluating the config. +Each config item has attributes used to quickly determine the health and status of an item. -The health attribute represents the overall condition or well-being of the config item. It can have one of the following values: healthy, unhealthy, unknown, or warning. This attribute provides a high-level indication of whether the config item is functioning as expected or if there are any issues that need attention. +- `health` can be one of `healthy`, `unhealthy`,`unknown`, `warning` and correspondes with RAG (Red, Amber, Green) status that will be associated with an item +- `status` is a config type specific short description of the current running state e.g. `Running`, `Terminating`, `in-use` +- `ready` indicates whether an item is still progressing -The status attribute, on the other hand, provides more granular information about the current state or phase of the config item. The possible values for the status attribute can vary widely depending on the type of config item being monitored. 
For example, a Kubernetes Pod config could have statuses like "Running", "Pending", "Terminating", and so on. +:::note Health is orthagonal to readiness +A config item could have a state of `heath: unhealthy, status: failed, ready: true` - this indicates that the item will unlikely change its state, while an item of `health: health, status: rolling-out, ready: false` indicates a healthy item that has not yet finished rolling out. +::: -To illustrate with an example, consider an AWS EC2 instance that is currently in the process of shutting down. In this case, the scrapers would assign an "Unknown" health and a "Deleting" status to this config item. - -## Health - -- healthy -- unhealthy -- unknown -- warning - -## Status - -Here's a list of all the possible statuses (subject to change): - -- Completed -- CrashLoopBackOff -- Creating -- Degraded -- Deleted -- Deleting -- Error -- Healthy -- Inaccesible -- Info -- Maintenance -- Missing -- Pending -- Progressing -- Restarting -- Rolling Out -- Rollout Failed -- Running -- Scaled to Zero -- Scaling -- Scaling Down -- Scaling Up -- Starting -- Stopped -- Stopping -- Suspended -- Terminating -- Unhealthy -- Unknown -- Unschedulable -- Updating -- UpgradeFailed -- Warning +The [github.com/flanksource/is-healthy](https://github.com/flanksource/is-healthy) is used to derive these conditions. diff --git a/mission-control/docs/config-db/index.mdx b/mission-control/docs/config-db/index.mdx index befb4494..9b2684b9 100644 --- a/mission-control/docs/config-db/index.mdx +++ b/mission-control/docs/config-db/index.mdx @@ -8,11 +8,16 @@ pagination_prev: sidebar_position: 1 --- - +## Catalog The Mission Control Catalog is a centralized repository that automatically discovers, transforms and tracks infrastructure, applications and configuration across many environments. + + + + + Some key benefits include: * **Automatic Discovery** - Automatically discovers resources by scraping external systems like Kubernetes, AWS, Git, SQL, etc diff --git a/mission-control/docs/config-db/scrapers/aws.md b/mission-control/docs/config-db/scrapers/aws.md index 0a5c692e..655b65fc 100644 --- a/mission-control/docs/config-db/scrapers/aws.md +++ b/mission-control/docs/config-db/scrapers/aws.md @@ -59,10 +59,11 @@ The registry has an [AWS](/registry/aws) Helm chart that provides a pre-configur - CloudTrail - Config Rules - Cost & Usage Reporting - - EBS - EC2 - ECR +- ECS +- Lambda - EFS - EKS - IAM diff --git a/mission-control/docs/config-db/scrapers/kubernetes.md b/mission-control/docs/config-db/scrapers/kubernetes.md index a2ef4058..1224e74b 100644 --- a/mission-control/docs/config-db/scrapers/kubernetes.md +++ b/mission-control/docs/config-db/scrapers/kubernetes.md @@ -113,30 +113,30 @@ There are 3 different ways to specify which value to use when finding related co | `label` | Get the value from a label | `string` | | -## Special annotations +## Annotations -Kubernetes resources can be annotated with some special annotations that can direct the scraper to certain behaviors. +Kubernetes resources can be annotated with annotations that can direct the scraper to certain behaviors. +
Config Item
+
A physical item with some underlying configuration, e.g. `Pod`, `VM`, `File`, `Database`
+
Changes
+
Changes in the underlying configuration of a config item, or external events that occur on it
+
Insights
+
Recommendations and analysis provided by third parties through scanning and AI
+
Relationships
+
Links between config items based upon how they are logically created and categorized
+ +
Component
+
A logical entity that often corresponds one-to-one with a config item, but not always, e.g. `Pod`, `Finance Application`
+ +
Health
+
The RAG status of a component or config item, e.g. Healthy, Unhealthy, Warning
+
Scrape
+
Connecting to an external API and ingesting data from it
+
Playbook
+
A sequence of automated steps performed on a config item or component
+
Canary
+
A definition for how to perform a health check; Canaries produce 1 or more Checks (see the sketch after this glossary)
+ +
Check
+ + +
The output of a Canary (Health Check) that includes Health, Latency and other runtime details
+
Health, Topology, Playbook, Webhook, Custom Resource Definition (CRD)
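To make the Canary and Check terms above concrete, here is a minimal sketch modeled on the folder example earlier in this patch; the path, interval and thresholds are placeholders:

```yaml
# Sketch only: a single Canary whose one folder entry produces one Check
apiVersion: canaries.flanksource.com/v1
kind: Canary
metadata:
  name: backup-folder
  namespace: default
spec:
  interval: 300 # evaluate every 300 seconds
  folder:
    - path: /data/backups # each list entry becomes one Check
      maxAge: 1d # the newest file must be younger than a day
      minSize: 10mb # and at least 10mb
```

Every time the Canary runs, the resulting Check records its health, latency and other runtime details, which is what the Check entry above refers to.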
+ + + + + + +## Kubernetes & Gitops + +Mission control is kubernetes-native with all configuration being possible by Custom Resource Definition (CRD's) + +The single source of truth is still the database, the operators only function is to synchronize CRD's into the database and update the CRD status periodically. + + + + + + + + + + +Mission Control uses Kubernetes Custom Resources Definitions as the underlying configuration method. The CRD's include: + + +| Resource | Description | Output | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ------ | +| ScrapeConfig | Configures scrapers to run periodically | Config items, analysis, change, cost | +| Canary | Runs health checks using HTTP, SQL, S3, kubectl, etc. | 1 or more checks (Health) | +| Notification | Sends notifications based on events | Email, Slack, etc | +| Playbook | Executable playbooks | Pull Request, CLI, Webhook | + Connection | Re-usable settings with secure external lookups | URL, username, password, etc | + Topology | Combines configs, health checks and lookups to form cards | Component | + diff --git a/mission-control/docs/index.mdx b/mission-control/docs/index.mdx index cb2813a8..2cd5590e 100644 --- a/mission-control/docs/index.mdx +++ b/mission-control/docs/index.mdx @@ -63,10 +63,13 @@ With Mission Control you can:
-## Getting Started +## Next Steps -See [Deployment Models](/installation/deployment-models) to get started with either self-hosted, SaaS or hybrid models. +1. See [How it Works](./how-it-works) to learn how to configure Mission Control using Kubernetes CRD's +1. See [Local Testing](./installation/local-testing) to setup a Mission Control instance in < 5 minutes on Kind or Minukube +1. See [Architecture](./architecture) to understand the components and services that make up a Mission Control Installation +1. See [Deployment Models](/installation/deployment-models) for the different ways of deploying mission control (Self-Hosted, SaaS, Hybrid) ## Getting Help @@ -75,7 +78,7 @@ If you have any questions about canary checker: * Invite yourself to the [CNCF community slack](https://slack.cncf.io/) and join the [#canary-checker](https://cloud-native.slack.com/messages/canary-checker/) channel. * Check out the [Youtube Playlist](https://www.youtube.com/playlist?list=PLz4F_KggvA58D6krlw433TNr8qMbu1aIU). -* File an [issue](https://github.com/flanksource/mission-control/issues/new) - (We do provide user support via Github Issues, so don't worry if your issue a real bug or not) +* File an [issue](https://github.com/flanksource/mission-control/issues/new) - (We do provide user support via Github Issues, so don't worry if your issue is a "real" bug or not) * Email us at [hello@flanksource.com](mailto:hello@flanksource.com) Your feedback is always welcome! diff --git a/mission-control/docs/installation/_aws_iam.mdx b/mission-control/docs/installation/_aws_iam.mdx index 3453fa2d..d768e98b 100644 --- a/mission-control/docs/installation/_aws_iam.mdx +++ b/mission-control/docs/installation/_aws_iam.mdx @@ -1,7 +1,6 @@ - import Domain from '../partials/_domain.mdx' - +## Create an IAM Role Depending on how you want to use Mission Control you need to create an IAM role for mission control to use: @@ -67,15 +66,13 @@ You can also create a new policy with just the permissions required by Mission C ] } ``` + +## Configure IAM Roles for Mission Control - - - - @@ -91,188 +88,209 @@ You can also create a new policy with just the permissions required by Mission C
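Whichever option below is used, the end state is the same: the Mission Control service accounts are able to assume an IAM role that carries `ReadOnlyAccess` (or the custom policy above). For the IRSA option this is expressed as an annotation on each service account, which the Helm chart adds through its `serviceAccount.annotations` value; a minimal sketch, with a placeholder account ID:

```yaml
# Sketch only: the shape of an IRSA-annotated service account
# 123456789012 is a placeholder AWS account ID
apiVersion: v1
kind: ServiceAccount
metadata:
  name: config-db-sa
  namespace: mission-control
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/ConfigDBRole
```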

1. Enable [EKS IAM Roles for Service Accounts](https://eksctl.io/usage/iamserviceaccounts/) - ```bash - eksctl utils associate-iam-oidc-provider --cluster=$CLUSTER - ``` -

-2. Create the IAM Role mappings + ```bash + eksctl utils associate-iam-oidc-provider --cluster=$CLUSTER + ``` - ```yaml title="eksctl.yaml" - iam: - withOIDC: true - serviceAccounts: - - metadata: - name: mission-control-sa - namespace: mission-control - roleName: MissionControlRole - roleOnly: true - attachPolicyARNs: - - "arn:aws:iam::aws:policy/ReadOnlyAccess" - - metadata: - name: canary-checker-sa - namespace: mission-control - roleName: CanaryCheckerRole - roleOnly: true - attachPolicyARNs: - - "arn:aws:iam::aws:policy/ReadOnlyAccess" - - metadata: - name: config-db-sa - namespace: mission-control - roleName: ConfigDBRole - roleOnly: true - attachPolicyARNs: - - "arn:aws:iam::aws:policy/ReadOnlyAccess" +

- ``` - ```bash - eksctl create iamserviceaccount --cluster $CLUSTER -c eksctl.yaml - ``` - -1. - -1. Install Mission Control +2. Create the IAM Role mappings - + +4. Install Mission Control + + + serviceAccount: + annotations: + # used to scrape AWS resources, change history via AWS CloudTrail and cost via Athena + eks.amazonaws.com/role-arn: arn:aws:iam::$ACCOUNT:role/ConfigDBRole`} /> - - - - - 1. Ensure the [AWS Pod Identity Agent](https://docs.aws.amazon.com/eks/latest/userguide/pod-id-agent-setup.html) is configured and running 1. Create a mapping file for `eksctl` - ```yaml title="eksctl.yaml" - podIdentityAssociations: - - namespace: mission-control - serviceAccountName: mission-control-sa - permissionPolicyARNs: arn:aws:iam::aws:policy/ReadOnlyAccess - - - namespace: mission-control - serviceAccountName: config-db-sa - permissionPolicyARNs: arn:aws:iam::aws:policy/ReadOnlyAccess - - - namespace: mission-control - serviceAccountName: canary-checker-sa - permissionPolicyARNs: arn:aws:iam::aws:policy/ReadOnlyAccess - iam: - # note withOIDC is not required for Pod Identity - serviceAccounts: - # used by mission control for notifications / playbooks - - metadata: - name: mission-control-sa - namespace: mission-control - attachPolicyARNs: - - "arn:aws:iam::aws:policy/ReadOnlyAccess" - # used for cloudwatch, S3 and other AWS health checks - - metadata: - name: canary-checker-sa - namespace: mission-control - attachPolicyARNs: - - "arn:aws:iam::aws:policy/ReadOnlyAccess" - # used to scrape resources, AWS CloudTrail and AWS Cost & Usage Reports - - metadata: - name: config-db-sa - namespace: mission-control - attachPolicyARNs: - - "arn:aws:iam::aws:policy/ReadOnlyAccess" + podIdentityAssociations: + - namespace: mission-control + serviceAccountName: mission-control-sa + permissionPolicyARNs: arn:aws:iam::aws:policy/ReadOnlyAccess + + - namespace: mission-control + serviceAccountName: config-db-sa + permissionPolicyARNs: arn:aws:iam::aws:policy/ReadOnlyAccess + + - namespace: mission-control + serviceAccountName: canary-checker-sa + permissionPolicyARNs: arn:aws:iam::aws:policy/ReadOnlyAccess + iam: + # note withOIDC is not required for Pod Identity + serviceAccounts: + # used by mission control for notifications / playbooks + - metadata: + name: mission-control-sa + namespace: mission-control + attachPolicyARNs: + - "arn:aws:iam::aws:policy/ReadOnlyAccess" + # used for cloudwatch, S3 and other AWS health checks + - metadata: + name: canary-checker-sa + namespace: mission-control + attachPolicyARNs: + - "arn:aws:iam::aws:policy/ReadOnlyAccess" + # used to scrape resources, AWS CloudTrail and AWS Cost & Usage Reports + - metadata: + name: config-db-sa + namespace: mission-control + attachPolicyARNs: + - "arn:aws:iam::aws:policy/ReadOnlyAccess" ``` -

+

If you are using a pre-existing IAM role when creating a pod identity association, you must configure the role to trust the newly introduced EKS service principal (`pods.eks.amazonaws.com`)

+ ```json title="iam-trust-policy.json" { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "pods.eks.amazonaws.com" - }, - "Action": [ - "sts:AssumeRole", - "sts:TagSession" - ] - } - ] + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "pods.eks.amazonaws.com" + }, + "Action": ["sts:AssumeRole", "sts:TagSession"] + } + ] } ``` +
1. Apply the Pod Identities using `eksctl` - ```bash - eksctl create podidentityassociation -c eksctl.yaml - ``` -

-1. + ```bash + eksctl create podidentityassociation -c eksctl.yaml + ``` + +

+ +1. 1. Install Mission Control - + 1. Ensure the [AWS Pod Identity Agent](https://docs.aws.amazon.com/eks/latest/userguide/pod-id-agent-setup.html) is configured and running 2. Create `main.tf` - ```hcl title="main.tf" file=../partials/_pod_identity.tf - ``` -2. Apply the terraform - ```bash - TF_VAR_role=$CLUSTER terraform apply - ``` -3. Install Mission Control - + ```hcl title="main.tf" file=../partials/_pod_identity.tf + ``` +

+3. Apply the terraform + ```bash + TF_VAR_role=$CLUSTER terraform apply + ``` +

+1. + +4. Install Mission Control + + -1. Associate Pod Identities - ```yaml title="mission-control-iam-cloudformation.yaml" file=../partials/_pod_identity.yaml +1. Setup variables + ```bash + # The name of the EKS cluster mission control is being deployed to + export CLUSTER= + # the default namespace the mission-control helm chart uses + export NAMESPACE=mission-control ``` -3. Install Mission Control - +

+ +1. Create a cloudformation template + + ```yaml title="mission-control-iam-cloudformation.yaml" file=../partials/_pod_identity.yaml + ``` +

+ +2. Create a new stack + ```bash + aws cloudformation deploy \ + --stack-name mission-control-roles \ + --template-file file://mission-control-iam-cloudformation.yaml \ + --parameter-overrides Cluster==$CLUSTER Namespace=$NAMESPACE + ``` +

+ +1. +2. Install Mission Control + - @@ -281,35 +299,42 @@ You can also create a new policy with just the permissions required by Mission C Using Access Keys and Secrets is not recommended from a security perspective ::: +First we create a secret called `aws` containing the access key and secret. -First we create a secret called `aws` containing the access key and secret, This secret is not s - -1. Create a new IAM User and Access Key +1. - ```bash - USER_NAME="mission-control-sa" +2. Install Mission Control + - aws iam create-user --user-name $USER_NAME +1. Create a new IAM User and Access Key - aws iam attach-user-policy --user-name $USER_NAME --policy-arn arn:aws:iam::aws:policy/ReadOnlyAccess + ```bash + USER_NAME="mission-control-sa" - key=$(aws iam create-access-key --user-name $USER_NAME) - ``` + aws iam create-user --user-name $USER_NAME + aws iam attach-user-policy \ + --user-name $USER_NAME \ + --policy-arn arn:aws:iam::aws:policy/ReadOnlyAccess + key=$(aws iam create-access-key --user-name $USER_NAME) + ``` +

2. Create a new secret `aws` containing the access and secret key - ```bash - kubectl create secret generic aws \ - --from-literal=AWS_ACCESS_KEY_ID=$(echo $key | jq -r '.AccessKey.AccessKeyId') \ - --from-literal=AWS_SECRET_ACCESS_KEY=$(echo $key | jq -r '.AccessKey.SecretAccessKey') - ``` + ```bash + kubectl create secret generic aws \ + --from-literal=AWS_ACCESS_KEY_ID=$(echo $key | jq -r '.AccessKey.AccessKeyId') \ + --from-literal=AWS_SECRET_ACCESS_KEY=$(echo $key | jq -r '.AccessKey.SecretAccessKey') + ``` +

+3. Create a new [connection](/reference/connections) -3. Create a new connection to use bundles ```yaml title="aws-connection.yaml" apiVersion: mission-control.flanksource.com/v1 kind: Connection metadata: name: aws + namespace: mission-control spec: region: eu-west-1 accessKey: @@ -323,15 +348,11 @@ First we create a secret called `aws` containing the access key and secret, This name: aws key: AWS_ACCESS_KEY_ID - ``` - - ```bash - kubectl create secret generic aws \ - --from-literal=AWS_ACCESS_KEY_ID=$(echo $key | jq -r '.AccessKey.AccessKeyId') \ - --from-literal=AWS_SECRET_ACCESS_KEY=$(echo $key | jq -r '.AccessKey.SecretAccessKey') ``` +

- +1. When creating Scrapers / Registry bundles you can now refer to `connection://mission-control/aws` + - + diff --git a/mission-control/docs/installation/helm.md b/mission-control/docs/installation/helm.md new file mode 100644 index 00000000..959bc557 --- /dev/null +++ b/mission-control/docs/installation/helm.md @@ -0,0 +1,19 @@ +--- +title: Helm +--- + + +## values.yaml + + + + +## Agent + + +## Self Hosted + + +## Agent + +## Registry diff --git a/mission-control/docs/installation/local-testing.md b/mission-control/docs/installation/local-testing.md index 8aa11df5..62a5c38e 100644 --- a/mission-control/docs/installation/local-testing.md +++ b/mission-control/docs/installation/local-testing.md @@ -12,6 +12,7 @@ import TabItem from '@theme/TabItem'; - kubectl - [helm](https://helm.sh/docs/intro/install/) v3+ - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) or [minikube](https://minikube.sigs.k8s.io/docs/start/) +- 20GB+ free space for docker volumes ::: diff --git a/mission-control/docs/installation/saas/eks.mdx b/mission-control/docs/installation/saas/eks.mdx index c0ee8f57..215bbedb 100644 --- a/mission-control/docs/installation/saas/eks.mdx +++ b/mission-control/docs/installation/saas/eks.mdx @@ -3,29 +3,30 @@ title: AWS EKS slug: installation/eks --- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import AwsIam from "../_aws_iam.mdx" -import Helm from "@site/src/components/Helm" +import Tabs from '@theme/Tabs' +import TabItem from '@theme/TabItem' +import AwsIam from '../_aws_iam.mdx' +import Helm from '@site/src/components/Helm' -# Install Mission Control on AWS EKS cluster +# Install Mission Control Agent on an AWS EKS cluster :::info Prerequisites To install and run Mission Control you need to have the following prerequisites: -- EKS 1.26+ with an Ingress Controller -- 500-1000m of CPU and 2GB of Memory +- EKS 1.28+ with an Ingress Controller +- 500-1000m of CPU and 4GB of Memory - Persistent Volumes with 20GB+ of storage or an external postgres database like RDS -- (Optional) SMTP Server (For sending notifications and invites) -::: - - - + ::: + + ## Next Steps diff --git a/mission-control/docs/installation/saas/getting-started.mdx b/mission-control/docs/installation/saas/getting-started.mdx index efb41b1d..6bbf148e 100644 --- a/mission-control/docs/installation/saas/getting-started.mdx +++ b/mission-control/docs/installation/saas/getting-started.mdx @@ -38,13 +38,19 @@ The agent based approach has the following benefits: See [Deployment Models](/installation/deployment-models) for [Self Hosted](/installation/self-hosted/getting-started) and [Fully Hosted](/installation/saas/fully-hosted) options -## Signup -1. Goto [app.flanksource.com/signup](https://accounts.flanksource.com/sign-up) to signup +--- +### Signup +1. Goto [accounts.flanksource.com/sign-up](https://accounts.flanksource.com/sign-up) to signup 2. Choose an organisation name and logo
3. Invite your team members - > Organization details and members can be changed by going to [accounts.flanksource.com/organization](https://accounts.flanksource.com/organization)
or Clicking on **Manage Organization** when logged in +

+ :::note + Organization details and members can be changed by going to [accounts.flanksource.com/organization](https://accounts.flanksource.com/organization)
or Clicking on **Manage Organization** when logged in + ::: + +4. Install an Agent ## Agent Installation diff --git a/mission-control/docs/installation/self-hosted/eks.mdx b/mission-control/docs/installation/self-hosted/eks.mdx index 780a29ac..91453258 100644 --- a/mission-control/docs/installation/self-hosted/eks.mdx +++ b/mission-control/docs/installation/self-hosted/eks.mdx @@ -9,11 +9,12 @@ import AdminPassword from '@site/docs/partials/_admin-password.mdx' # Self Hosted Installation on AWS EKS :::info Prerequisites -To install and run Mission Control you need to have the following prerequisites: +To install and run a self-hosted Mission Control on AWS EKS you need to have the following prerequisites: - EKS 1.28+ with an Ingress Controller - 500m - 2000m of CPU and 6 - 8GB of Memory (2 - 4GB if using an external DB) - Persistent Volumes with 20GB+ of storage or an external postgres database like RDS +- Access to create - (Optional) SMTP Server (For sending notifications and invites) ::: diff --git a/mission-control/docs/installation/self-hosted/getting-started.mdx b/mission-control/docs/installation/self-hosted/getting-started.mdx index 1da143a2..99b6544e 100644 --- a/mission-control/docs/installation/self-hosted/getting-started.mdx +++ b/mission-control/docs/installation/self-hosted/getting-started.mdx @@ -63,17 +63,19 @@ To install and run Mission Control you need the following: - Persistent Volumes with 20GB+ of storage or an external postgres database - (Optional) [prometheus operator](https://prometheus-operator.dev/) - (Optional) SMTP Server (For sending notifications and invites) - ::: +::: + +--- 1. Choose a routable `DOMAIN` for Mission Control > See [Ingress](/reference/helm/mission-control#ingress) for more options on configuring the ingress including generating certs with cert-manager >

See [Local Testing](../local-testing) for testing on a kind or minikube cluster without a routable domain

1. Install Mission Control - - :::tip - If you are installing on EKS and plan to scrape AWS resources use the method described in [EKS](./eks) instead - ::: +

+ :::tip + If you are installing on EKS and plan to scrape AWS resources use the method described in [EKS](./eks) instead + ::: ConfigMap +* Secret +* values.yaml +* ServiceAccount diff --git a/mission-control/docs/playbooks/Actions/azure_devops_pipeline.mdx b/mission-control/docs/playbooks/Actions/azure_devops_pipeline.mdx index 54e2e6a4..a1f7458b 100644 --- a/mission-control/docs/playbooks/Actions/azure_devops_pipeline.mdx +++ b/mission-control/docs/playbooks/Actions/azure_devops_pipeline.mdx @@ -1,15 +1,16 @@ --- title: Azure DevOps Pipeline --- -import Templating from "../../reference/playbooks/context.mdx" + +import Templating from '../../reference/playbooks/context.mdx' # Azure DevOps Pipeline Action This action allows you to invoke pipelines in your Azure DevOps project. ```yaml title="invoke-azure-devops-pipeline.yaml" file=../../../modules/mission-control/fixtures/playbooks/azure-devops.yaml -``` +``` + diff --git a/mission-control/docs/playbooks/Actions/gitops.mdx b/mission-control/docs/playbooks/Actions/gitops.mdx index 94e31bd1..5f79aa13 100644 --- a/mission-control/docs/playbooks/Actions/gitops.mdx +++ b/mission-control/docs/playbooks/Actions/gitops.mdx @@ -249,6 +249,7 @@ spec: .metadata.name=="$(.config.config | jq `.metadata.name`)" ) |= $(.params.yamlInput | yaml | toJSON) ``` + @@ -266,7 +267,7 @@ spec: label: Commit Message name: commit_message - default: $(.config.config | toJSON | neat | json | toYAML) - label: "" + label: '' name: yamlInput properties: size: large diff --git a/mission-control/docs/playbooks/index.mdx b/mission-control/docs/playbooks/index.mdx index e9635503..3f5ff27d 100644 --- a/mission-control/docs/playbooks/index.mdx +++ b/mission-control/docs/playbooks/index.mdx @@ -8,9 +8,11 @@ pagination_prev: config-db/index pagination_next: topology/index --- +## Playbooks + Playbooks automate common workflows and processes by defining reusable templates of actions that can be triggered on-demand by users, when specific events happen or through webhooks. - + Some key benefits include: @@ -66,9 +68,6 @@ spec: -``` - - ## Parameters Playbooks have 2 types of parameters: diff --git a/mission-control/docs/playbooks/quick-start.mdx b/mission-control/docs/playbooks/quick-start.mdx index 2703d4f6..9d21203d 100644 --- a/mission-control/docs/playbooks/quick-start.mdx +++ b/mission-control/docs/playbooks/quick-start.mdx @@ -6,7 +6,6 @@ sidebar_position: 1 import Plus from '/img/icons/blue-plus.svg' In this walkthrough, we'll create and run a playbook that scales a Kubernetes deployment. -The only prerequisites are :::info Prerequisites - Mission Control is installed and configured @@ -40,19 +39,22 @@ See [Playbook](/reference/playbooks) for the full specification. ```bash kubectl apply -f restart-deployment.yaml ``` +

+ -:::warning -This is for development purposes, for production use a GitOps tool like Flux or Argo to apply the spec. -:::

-a. Navigate to the **Playbooks** page
-b. Click on the icon to add a new playbook
-c. Add the `spec` from the YAML file above
+:::warning +This is for development purposes, for production use a GitOps tool like Flux or Argo to apply the spec. +::: + +1. Navigate to the **Playbooks** page +2. Click on the icon to add a new playbook +3. Add the `spec` from the YAML file above - +
@@ -62,22 +64,16 @@ c. Add the `spec` from the YAML file above
-a. Navigate to a Deployment in the **Catalog**
-b. Select **Restart Deployment** from the Playbooks menu
-c. Click **Run**
- - - -> See [Event Triggers](/playbooks/events) to run the playbook on event (e.g. pod crashlooping)
-> See [Webhooks](/playbooks/webhooks) to run the playbook from a webhook (e.g. on Git push) - +1. Navigate to a Deployment in the **Catalog** +2. Select **Restart Deployment** from the Playbooks menu +3. Click **Run**
+ + > See [Event Triggers](/playbooks/events) to run the playbook on event (e.g. pod crashlooping)
+ > See [Webhooks](/playbooks/webhooks) to run the playbook from a webhook (e.g. on Git push) +1. View the playbook progress -
- - - - + diff --git a/mission-control/docs/reference/connections/index.md b/mission-control/docs/reference/connections/index.mdx similarity index 99% rename from mission-control/docs/reference/connections/index.md rename to mission-control/docs/reference/connections/index.mdx index 103f9e49..693f8dc6 100644 --- a/mission-control/docs/reference/connections/index.md +++ b/mission-control/docs/reference/connections/index.mdx @@ -5,6 +5,8 @@ sidebar_position: 2 Connections are an easy way to authenticate against sources. It can be created via a CRD or by adding it in the settings page + + A sample connection CRD looks like: ```yaml diff --git a/mission-control/docs/reference/env-var.md b/mission-control/docs/reference/env-var.mdx similarity index 100% rename from mission-control/docs/reference/env-var.md rename to mission-control/docs/reference/env-var.mdx diff --git a/mission-control/sidebars.js b/mission-control/sidebars.js index 8c3c8862..cac1d04f 100644 --- a/mission-control/sidebars.js +++ b/mission-control/sidebars.js @@ -181,22 +181,27 @@ const sidebars = { id: 'index', label: 'Overview' }, + { + type: 'doc', + id: 'how-it-works', + }, { type: 'category', label: 'Installation', - link: { - type: 'doc', - id: 'installation/deployment-models' - }, items: [ // { // type: 'doc', // label: 'SaaS', // id: 'installation/saas' // }, + { + type: 'doc', + label: 'Deployment Models', + id: 'installation/deployment-models' + }, { type: 'category', @@ -212,20 +217,14 @@ const sidebars = { { type: 'doc', - id: 'installation/saas/agent', - label: 'Agent' + id: 'installation/saas/eks', + label: 'AWS EKS' }, { - type: 'category', - label: 'Cloud Providers', - items: [ - { - type: 'doc', - id: 'installation/saas/eks', - label: 'AWS EKS' - } - ] + type: 'doc', + id: 'installation/saas/agent', + label: 'Agent' }, ] diff --git a/mission-control/static/img/architecture-flow.svg b/mission-control/static/img/architecture-flow.svg new file mode 100644 index 00000000..da27bb92 --- /dev/null +++ b/mission-control/static/img/architecture-flow.svg @@ -0,0 +1,3 @@ + + +
[architecture-flow.svg labels: config-db, mission-control, canary-checker, Catalog, Changes, Config, Insights, CRDs (Canary, Notification, ScrapeConfig, Playbook, Topology), Events, Component, Self Service, Webhooks, Health]
\ No newline at end of file diff --git a/mission-control/tailwind.config.js b/mission-control/tailwind.config.js index b6a24b33..58347fbd 100644 --- a/mission-control/tailwind.config.js +++ b/mission-control/tailwind.config.js @@ -4,4 +4,24 @@ module.exports = { extend: {}, }, plugins: [], + safelist: [ + { + pattern: /bg-zinc/, + }, + { + pattern: /bg-gray/, + }, + { + pattern: /bg-blue/, + }, + { + pattern: /bg-red/, + }, + { + pattern: /text-gray/, + }, + { + pattern: /text-zinc/, + }, + ], };