diff --git a/.github/workflows/deploy-genai-appstack.yml b/.github/workflows/deploy-genai-appstack.yml new file mode 100644 index 000000000..a846fd5e1 --- /dev/null +++ b/.github/workflows/deploy-genai-appstack.yml @@ -0,0 +1,86 @@ +name: "Deploy Gen AI on F5 XC Appstack and Managed k8s" + +on: + push: + branches: + - deploy-genai-appstack + pull_request: + +jobs: + terraform_xc: + name: "Deploy F5XC Appstack and Managed k8s" + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./workflow-guides/smcn/genai-inference-at-the-edge/terraform + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} + + - name: Setup Terraform Backend + id: backend + run: | + cat > backend.tf << EOF + terraform { + cloud { + organization = "${{ secrets.TF_CLOUD_ORGANIZATION }}" + workspaces { + name = "${{ secrets.TF_CLOUD_WORKSPACE_APPSTACK }}" + } + } + } + EOF + echo "${{secrets.P12}}" | base64 -d > api.p12 + #cat api.p12 + + - name: Terraform Init + id: init + run: terraform init + + - name: Terraform Validate + id: validate + run: terraform validate -no-color + + - name: Terraform Plan + id: plan + if: github.event_name == 'pull_request' + run: terraform plan -no-color -input=false + continue-on-error: true + + - uses: actions/github-script@v6 + if: github.event_name == 'pull_request' + env: + PLAN: "terraform\n${{ steps.plan.outputs.stdout }}" + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const output = `#### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\` + #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\` + #### Terraform Plan 📖\`${{ steps.plan.outcome }}\` +
Show Plan + \`\`\`\n + ${process.env.PLAN} + \`\`\` +
+ *Pushed by: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`; + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: output + }) + - name: Terraform Plan Status + if: steps.plan.outcome == 'failure' + run: exit 1 + + - name: Terraform Apply + if: github.ref == 'refs/heads/deploy-genai-appstack' && github.event_name == 'push' + run: terraform apply -auto-approve -input=false + env: + TF_VAR_aws_access_key: ${{ secrets.aws_access_key }} + TF_VAR_aws_secret_key: ${{ secrets.aws_secret_key }} diff --git a/.github/workflows/destroy-genai-appstack.yaml b/.github/workflows/destroy-genai-appstack.yaml new file mode 100644 index 000000000..e05304ffc --- /dev/null +++ b/.github/workflows/destroy-genai-appstack.yaml @@ -0,0 +1,85 @@ +name: "Destroy Gen AI on F5 XC Appstack and Managed k8s" + +on: + push: + branches: + - destroy-genai-appstack + pull_request: + +jobs: + terraform_xc: + name: "Destroy F5XC Appstack and Managed k8s" + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./workflow-guides/smcn/genai-inference-at-the-edge/terraform + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} + + - name: Setup Terraform Backend + id: backend + run: | + cat > backend.tf << EOF + terraform { + cloud { + organization = "${{ secrets.TF_CLOUD_ORGANIZATION }}" + workspaces { + name = "${{ secrets.TF_CLOUD_WORKSPACE_APPSTACK }}" + } + } + } + EOF + echo "${{secrets.P12}}" | base64 -d > api.p12 + + - name: Terraform Init + id: init + run: terraform init + + - name: Terraform Validate + id: validate + run: terraform validate -no-color + + - name: Terraform Plan + id: plan + if: github.event_name == 'pull_request' + run: terraform plan -no-color -input=false + continue-on-error: true + + - uses: actions/github-script@v6 + if: github.event_name 
== 'pull_request' + env: + PLAN: "terraform\n${{ steps.plan.outputs.stdout }}" + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const output = `#### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\` + #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\` + #### Terraform Plan 📖\`${{ steps.plan.outcome }}\` +
Show Plan + \`\`\`\n + ${process.env.PLAN} + \`\`\` +
+ *Pushed by: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`; + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: output + }) + - name: Terraform Plan Status + if: steps.plan.outcome == 'failure' + run: exit 1 + + - name: Terraform Destroy + if: github.ref == 'refs/heads/destroy-genai-appstack' && github.event_name == 'push' + run: terraform destroy -auto-approve -input=false + env: + TF_VAR_aws_access_key: ${{ secrets.aws_access_key }} + TF_VAR_aws_secret_key: ${{ secrets.aws_secret_key }} \ No newline at end of file diff --git a/aws/eks-cluster/ce-deployment/variables.tf b/aws/eks-cluster/ce-deployment/variables.tf index 390053d34..98b72a71e 100644 --- a/aws/eks-cluster/ce-deployment/variables.tf +++ b/aws/eks-cluster/ce-deployment/variables.tf @@ -5,7 +5,7 @@ variable "tf_cloud_organization" { } variable "aws_waf_ce" { - description = "Infra" + description = "Infra workspace name in terraform cloud." type = string default = "aws-infra" } diff --git a/aws/eks-cluster/variables.tf b/aws/eks-cluster/variables.tf index 156913622..08fd5e6c7 100644 --- a/aws/eks-cluster/variables.tf +++ b/aws/eks-cluster/variables.tf @@ -90,7 +90,7 @@ variable "aws_secret_key" { } variable "aws_waf_ce" { - description = "Infra" + description = "Infra workspace name in terraform cloud." 
type = string default = "" } diff --git a/azure/azure-vm/azure_vm.tf index 35473060a..2dd5c49ed 100644 --- a/azure/azure-vm/azure_vm.tf +++ b/azure/azure-vm/azure_vm.tf @@ -73,6 +73,7 @@ resource "azurerm_public_ip" "puip" { name = "waf-public-ip" location = local.azure_region resource_group_name = local.resource_group_name + sku = "Basic" allocation_method = "Dynamic" } diff --git a/shared/booksinfo/data.tf index e7ccff36a..b009f2367 100644 --- a/shared/booksinfo/data.tf +++ b/shared/booksinfo/data.tf @@ -1,6 +1,6 @@ data "tfe_outputs" "infra" { organization = var.tf_cloud_organization - workspace = "aws-infra" + workspace = "${coalesce(var.aws_waf_ce, "infra")}" } data "tfe_outputs" "eks" { organization = var.tf_cloud_organization @@ -9,4 +9,4 @@ data "aws_eks_cluster_auth" "auth" { name = data.tfe_outputs.eks.values.cluster_name -} \ No newline at end of file +} diff --git a/shared/booksinfo/variables.tf index 8afcfb230..912e934f1 100644 --- a/shared/booksinfo/variables.tf +++ b/shared/booksinfo/variables.tf @@ -7,4 +7,10 @@ variable "tf_cloud_organization" { variable "ssh_key" { type = string description = "Only present for warning handling with TF cloud variable set" -} \ No newline at end of file +} + +variable "aws_waf_ce" { + description = "Infra workspace name in terraform cloud." 
+ type = string + default = "" +} diff --git a/workflow-guides/smcn/genai-appconnect-waf/README.rst b/workflow-guides/smcn/genai-appconnect-waf/README.rst index 2b3232956..d52874ff7 100644 --- a/workflow-guides/smcn/genai-appconnect-waf/README.rst +++ b/workflow-guides/smcn/genai-appconnect-waf/README.rst @@ -5,8 +5,7 @@ Connecting and securing distributed Generative AI applications with F5 XC AppCon Overview ######### -This demo guide provides step-by-step walkthrough for connecting a distributed GenAI application (LLM hosted in AWS EKS and front-end GenAI application hosted in GCP's GKE) with F5's XC AppConnect and securing it with XC WAF, using XC console along with terraform scripts to automate the deployment. For more information on different WAAP deployment modes, refer to the devcentral article: ` -Deploy WAF on any Edge with F5 Distributed Cloud `__. +This demo guide provides step-by-step walkthrough for connecting a distributed GenAI application (LLM hosted in AWS EKS and front-end GenAI application hosted in GCP's GKE) with F5's XC AppConnect and securing it with XC WAF, using XC console along with terraform scripts to automate the deployment. For more information on different WAAP deployment modes, refer to the devcentral article: `Deploy WAF on any Edge with F5 Distributed Cloud `__. 
Setup Diagram ############# @@ -16,9 +15,13 @@ Setup Diagram Workflow Instructions ###################### -`F5 Distributed Cloud Console Workflow <./xc-console-demo-guide.rst>`__ +`F5 Distributed Cloud Console Workflow without NGINX Ingress Controller <./xc-console-demo-guide.rst>`__ -`F5 Distributed Cloud Console Automation Workflow <./automation-workflow.rst>`__ +`F5 Distributed Cloud Console Workflow (hybrid use case with NGINX Ingress Controller) `__ + +`F5 Distributed Cloud Console Automation Workflow without NGINX Ingress Controller <./automation-workflow.rst>`__ + +`F5 Distributed Cloud Console Automation Workflow (hybrid use case with NGINX Ingress Controller) `__ Additional Related Resources diff --git a/workflow-guides/smcn/genai-appconnect-waf/automation-workflow.rst b/workflow-guides/smcn/genai-appconnect-waf/automation-workflow.rst index 640a83095..47bc5da0d 100644 --- a/workflow-guides/smcn/genai-appconnect-waf/automation-workflow.rst +++ b/workflow-guides/smcn/genai-appconnect-waf/automation-workflow.rst @@ -1,6 +1,8 @@ Getting Started With Terraform Automation of connecting and securing distributed Generative AI applications with F5 XC AppConnect and XC WAF ############################################################################################################################################ +NOTE: THIS AUTOMATION IS NOT A HYBRID USE CASE AS IT'S NOT DEPLOYING NGINX INGRESS CONTROLLER. 
IF YOU ARE LOOKING FOR HYBRID USE CASE CHECK THIS `LINK `__ + Prerequisites -------------- diff --git a/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-guide.rst b/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-guide.rst index e4212e588..e5af86932 100644 --- a/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-guide.rst +++ b/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-guide.rst @@ -1,6 +1,9 @@ Manual step by step process to connect and secure distributed Generative AI applications with F5 XC AppConnect and XC WAF ============================================================================================================================ +NOTE: THIS IS NOT A HYBRID USE CASE AS IT'S NOT DEPLOYING NGINX INGRESS CONTROLLER. IF YOU ARE LOOKING FOR HYBRID USE CASE F5 CONSOLE DEMO GUIDE, CHECK THIS `LINK `__ + + Prerequisites ************** - F5 Distributed Cloud Console SaaS account @@ -24,7 +27,7 @@ To deploy an AppStack mk8s cluster on an AWS CE Site, steps are categorized as m 5. Using Kubectl, deploy the GenAI front-end application on the GKE cluster 6. Deploy the Distributed Cloud GCP site Customer Edge workload on the GKE cluster 7. Publish the LLM service from EKS as a local service in GKE -8. Advertise externally the GenAI application +8. Advertise externally the GenAI application without ``NGINX ingress controller`` 9. Test the GenAI application for sensitive information disclosure 10. Enable DataGuard on the HTTP LoadBalancer 11. 
Retest the GenAI application for sensitive information disclosure diff --git a/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-hybrid.rst b/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-hybrid.rst index af794757c..736f5e3f5 100644 --- a/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-hybrid.rst +++ b/workflow-guides/smcn/genai-appconnect-waf/xc-console-demo-hybrid.rst @@ -1,6 +1,8 @@ Manual step by step process to connect and secure distributed Generative AI applications with F5 XC AppConnect and XC WAF ============================================================================================================================ +NOTE: JUST FOR REFERENCE WE HAVE KEPT THIS FILE. THIS IS A HYBRID USE CASE WHICH USES NGINX INGRESS CONTROLLER AND F5 XC. IF YOU ARE LOOKING FOR HYBRID USE CASE AUTOMATION, CHECK THIS `LINK `__ + Prerequisites ************** - F5 Distributed Cloud Console SaaS account @@ -24,7 +26,7 @@ To deploy an AppStack mk8s cluster on an AWS CE Site, steps are categorized as m 5. Using Kubectl, deploy the GenAI front-end application on the GKE cluster 6. Deploy the Distributed Cloud GCP site Customer Edge workload on the GKE cluster 7. Publish the LLM service from EKS as a local service in GKE -8. Advertise externally the GenAI application +8. Advertise externally the GenAI application using ``NGINX Ingress Controller`` 9. Test the GenAI application for sensitive information disclosure 10. Enable DataGuard on the HTTP LoadBalancer 11. 
Retest the GenAI application for sensitive information disclosure diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/README.rst b/workflow-guides/smcn/genai-inference-at-the-edge/README.rst index 1f5d7aa26..c83fd1e21 100644 --- a/workflow-guides/smcn/genai-inference-at-the-edge/README.rst +++ b/workflow-guides/smcn/genai-inference-at-the-edge/README.rst @@ -5,8 +5,7 @@ Deploying and securing Generative AI applications at the Edge with F5 XC AppStac Overview ######### -This demo guide provides step-by-step walkthrough for deploying GenAI applications at the Edge (customer on-prem, public or private cloud) and securing them with XC WAF, using XC console along with terraform scripts to automate the deployment. For more information on different WAAP deployment modes, refer to the devcentral article: ` -Deploy WAF on any Edge with F5 Distributed Cloud `__. +This demo guide provides step-by-step walkthrough for deploying GenAI applications at the Edge (customer on-prem, public or private cloud) and securing them with XC WAF, using XC console along with terraform scripts to automate the deployment. For more information on different WAAP deployment modes, refer to the devcentral article: `Deploy WAF on any Edge with F5 Distributed Cloud `__. **Note:** Even though the scenario here focuses on XC WAF, customers can enable any security services in the same setup, such as API Security, Bot Defense, DoS/DDOS and Fraud, as per their needs. 
@@ -20,7 +19,7 @@ Workflow Instructions `F5 Distributed Cloud Console Workflow <./xc-console-demo-guide.rst>`__ -***Coming soon*** `F5 Distributed Cloud Automation Workflow` +`F5 Distributed Cloud Automation Workflow <./automation-user-guide.rst>`__ Additional Related Resources diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/app-deploy.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/app-deploy.JPG new file mode 100644 index 000000000..4c8189ae4 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/app-deploy.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/aws-site-access.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/aws-site-access.JPG new file mode 100644 index 000000000..514b83bd2 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/aws-site-access.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/curl.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/curl.JPG new file mode 100644 index 000000000..f8a8af5b3 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/curl.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/deploy_pipeline.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/deploy_pipeline.JPG new file mode 100644 index 000000000..e0b54e6f1 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/deploy_pipeline.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/destroy_pipeline.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/destroy_pipeline.JPG new file mode 100644 index 000000000..023192d9f Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/destroy_pipeline.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/kubeconfig.jpg 
b/workflow-guides/smcn/genai-inference-at-the-edge/assets/kubeconfig.jpg new file mode 100644 index 000000000..c750deab8 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/kubeconfig.jpg differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/op-edit.jpg b/workflow-guides/smcn/genai-inference-at-the-edge/assets/op-edit.jpg new file mode 100644 index 000000000..5271f4346 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/op-edit.jpg differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/pods-online.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/pods-online.JPG new file mode 100644 index 000000000..85da021fe Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/pods-online.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/assets/postman.JPG b/workflow-guides/smcn/genai-inference-at-the-edge/assets/postman.JPG new file mode 100644 index 000000000..50d0ad326 Binary files /dev/null and b/workflow-guides/smcn/genai-inference-at-the-edge/assets/postman.JPG differ diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/automation-user-guide.rst b/workflow-guides/smcn/genai-inference-at-the-edge/automation-user-guide.rst new file mode 100644 index 000000000..668d528f6 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/automation-user-guide.rst @@ -0,0 +1,142 @@ +Steps to deploy/protect/destroy Generative AI applications at the Edge with F5 XC AppStack mk8s and XC WAFs setup using automation: +=================================================================================================================================== + +Prerequisites +############# + +- `F5 Distributed Cloud (F5 XC) Account with access to system namespace `__ +- `AWS Account `__ +- `Terraform Cloud Account `__ +- `GitHub Account `__ + +List of Created Assets +####################### + +- **xc:** F5 
Distributed Cloud WAF +- **infra:** AWS Infrastructure +- **LangServe GenAI App:** LangServe GenAI Application + +Tools +##### + +- **Cloud Provider:** AWS +- **IAC:** Terraform +- **IAC State:** Terraform Cloud +- **CI/CD:** GitHub Actions + +Terraform Cloud +############### + +- **Workspaces:** Create below CLI or API workspace in the terraform cloud. + + +---------------------------+-------------------------------------------+ + | **Workflow** | **Assets/Workspaces** | + +===========================+===========================================+ + | deploy-genai-appstack | appstack | + +---------------------------+-------------------------------------------+ + +- **Variable Set:** Create a Variable Set with the following values: + + +------------------------------------------+--------------+------------------------------------------------------+ + | **Name** | **Type** | **Description** | + +==========================================+==============+======================================================+ + | VES_P12_PASSWORD | Environment | Password set while creating F5XC API certificate | + +------------------------------------------+--------------+------------------------------------------------------+ + | VOLT_API_P12_FILE | Environment | Your F5XC API certificate. Set this to **api.p12** | + +------------------------------------------+--------------+------------------------------------------------------+ + | ssh_key | TERRAFORM | Your ssh key for accessing the created resources | + +------------------------------------------+--------------+------------------------------------------------------+ + | tf_cloud_organization | TERRAFORM | Your Terraform Cloud Organization name | + +------------------------------------------+--------------+------------------------------------------------------+ + + +GitHub +###### + +- Fork and Clone Repo. Navigate to ``Actions`` tab and enable it. 
+ +- **Actions Secrets:** Create the following GitHub Actions secrets in + your forked repo + + - P12: The linux base64 encoded F5XC P12 certificate + - TF_API_TOKEN: Your Terraform Cloud API token + - TF_CLOUD_ORGANIZATION: Your Terraform Cloud Organization name + - TF_CLOUD_WORKSPACE_APPSTACK should be created with the value ``appstack`` + - aws_access_key: Your AWS access key + - aws_secret_key: Your AWS secret key + + +Workflow Runs +############# + +**STEP 1:** Check out a branch with the branch name as suggested below for the workflow you wish to run using +the following naming convention. + +**DEPLOY** + +=============================================== ======================= +Workflow Branch Name +=============================================== ======================= +Deploy Gen AI on F5 XC Appstack and Managed k8s deploy-genai-appstack +=============================================== ======================= + +Workflow File: `deploy-genai-appstack.yml `__ + +**DESTROY** + +================================================ ======================= +Workflow Branch Name +================================================ ======================= +Destroy Gen AI on F5 XC Appstack and Managed k8s destroy-genai-appstack +================================================ ======================= + +Workflow File: `destroy-genai-appstack.yml `__ + +**STEP 2:** Rename ``/workflow-guides/smcn/genai-inference-at-the-edge/terraform/terraform.tfvars.examples`` to ``terraform.tfvars`` and add the following data: + +- project_prefix = “Your project identifier name in **lower case** letters only - this will be applied as a prefix to all assets” + +- api_url = “Your F5XC tenant” + +- xc_tenant = “Your tenant id available in F5 XC ``Administration`` section ``Tenant Overview`` menu” + +- xc_namespace = “The existing XC namespace where you want to deploy resources” + +- app_domain = “the FQDN of your app (cert will be autogenerated)” + +- servicename = "set to k8s backend service 
of your application. For demo you can set to ``langchain-doc-qa-api.llm``." + +- serviceport = "set to your app port number. For existing demo use 8501" + +Check the rest of the values in variables.tf and update if any changes are needed. + +**STEP 3:** Commit and push your build branch to your forked repo + +- Build will run and can be monitored in the GitHub Actions tab and TF Cloud console + +.. image:: assets/deploy_pipeline.JPG + +**STEP 4:** Once the pipeline completes, verify your CE, Origin Pool and LB were deployed or destroyed based on your workflow. (**Note:** Appstack CE site will take 15-20 mins to come online) + +**STEP 5:** To validate the test infra, follow the steps below + a. Navigate to ``Select the Distributed Apps`` Service, next select ``system`` workspace and in overview section download global kubeconfig file + b. You can use this config file to connect to managed k8s and deploy your application using your app related yaml files. ``(NOTE: for demo we have kept 2 manifest files in this folder which you can download and run kubectl apply -f just like shown below)`` + + .. image:: assets/app-deploy.JPG + + c. Once deployed make sure all pods/services are running and online (Please note GenAI app pods deployment will take around 20 mins) + + .. image:: assets/pods-online.JPG + + d. Open the F5 XC load balancer domain in a browser along with a valid URL and validate your AI application works as expected. Make sure response is returned and status code is 200 OK (If you have used demo app manifest files from this folder, you can check validation steps in this `manual guide <./xc-console-demo-guide.rst#deployment-verification>`__ for testing app functionality). + + .. image:: assets/postman.JPG + + e. ``If app is not accessible, as shown in below image navigate to Multi Cloud App Connect menu and then to Manage section, next from loadbalancers drop-down select origin pools and open your GenAI origin pool configurations in edit mode. 
Under k8s service, change network type to outside and apply the configuration. Once again open postman and rerun above step to check the app accessibility (Ideally app should be deployed first and then origin pool but here in this automation since we created origin pool before app deployment, there is a bug with k8s service discovery timing which is being tracked and under prioritisation).`` + + .. image:: assets/op-edit.jpg + + + f. Once deployment and validations are complete, if you want to destroy the entire setup, checkout a branch with name ``destroy-genai-appstack`` and push the repo code to it which will trigger destroy workflow and will remove all created resources. + +.. image:: assets/destroy_pipeline.JPG diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/langchain-doc-qa-api.yaml b/workflow-guides/smcn/genai-inference-at-the-edge/langchain-doc-qa-api.yaml new file mode 100644 index 000000000..240fa7810 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/langchain-doc-qa-api.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Service +metadata: + name: langchain-doc-qa-api + labels: + app: langchain-doc-qa-api + namespace: llm +spec: + type: ClusterIP + ports: + - port: 8501 + selector: + app: langchain-doc-qa-api + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: langchain-doc-qa-api + namespace: llm +spec: + selector: + matchLabels: + app: langchain-doc-qa-api + replicas: 1 + template: + metadata: + labels: + app: langchain-doc-qa-api + spec: + containers: + - name: langchain-doc-qa-api + image: registry.gitlab.com/f5-public/langchain-docs-summary:latest + imagePullPolicy: Always + ports: + - containerPort: 8501 + env: + - name: DOC_PATH + value: "https://www.f5.com/company/blog/what-is-generative-ai-and-is-it-the-way-to-aiops" diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/llm.yaml b/workflow-guides/smcn/genai-inference-at-the-edge/llm.yaml new file mode 100644 index 000000000..09ff31867 --- /dev/null 
+++ b/workflow-guides/smcn/genai-inference-at-the-edge/llm.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: llm +--- +apiVersion: v1 +kind: Service +metadata: + name: llama + labels: + app: llama + namespace: llm +spec: + type: ClusterIP + ports: + - port: 8000 + selector: + app: llama + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: llama + namespace: llm +spec: + selector: + matchLabels: + app: llama + replicas: 1 + template: + metadata: + labels: + app: llama + spec: + containers: + - name: llama + image: registry.gitlab.com/f5-public/llama-cpp-python:latest + imagePullPolicy: Always + ports: + - containerPort: 8000 diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/temp-automation-user-guide.rst b/workflow-guides/smcn/genai-inference-at-the-edge/temp-automation-user-guide.rst deleted file mode 100644 index 9efae62ff..000000000 --- a/workflow-guides/smcn/genai-inference-at-the-edge/temp-automation-user-guide.rst +++ /dev/null @@ -1,169 +0,0 @@ -Steps to deploy/protect/destroy Generative AI applications at the Edge with F5 XC AppStack mk8s and XC WAFs setup using automation: -=================================================================================================================================== - -Prerequisites -############# - -- `F5 Distributed Cloud (F5 XC) Account `__ -- `AWS Account `__ -- `Terraform Cloud Account `__ -- `GitHub Account `__ - -List of Existing Assets -####################### - -- **xc:** F5 Distributed Cloud WAF -- **infra:** AWS Infrastructure -- **LangServe GenAI App:** LangServe GenAI Application - -Tools -##### - -- **Cloud Provider:** AWS -- **IAC:** Terraform -- **IAC State:** Terraform Cloud -- **CI/CD:** GitHub Actions - -Terraform Cloud -############### - -- **Workspaces:** Create CLI or API workspaces for each asset in the workflow. 
- - +---------------------------+-------------------------------------------+ - | **Workflow** | **Assets/Workspaces** | - +===========================+===========================================+ - |f5-xc-genai-inf-at-the-edge| infra, mk8s-cluster, xc | - +---------------------------+-------------------------------------------+ - -.. image:: assets/workspaces.JPG - - -- **Workspace Sharing:** Under the settings for each Workspace, set the **Remote state sharing** to share with each Workspace created. - -- **Variable Set:** Create a Variable Set with the following values: - - +------------------------------------------+--------------+------------------------------------------------------+ - | **Name** | **Type** | **Description** | - +==========================================+==============+======================================================+ - | AWS_ACCESS_KEY_ID | Environment | AWS Access Key ID | - +------------------------------------------+--------------+------------------------------------------------------+ - | AWS_SECRET_ACCESS_KEY | Environment | AWS Secret Access Key ID | - +------------------------------------------+--------------+------------------------------------------------------+ - | AWS_SESSION_TOKEN | Environment | AWS Session Token | - +------------------------------------------+--------------+------------------------------------------------------+ - | TF_VAR_aws_access_key | Environment | AWS Programmatic Access Key ID | - +------------------------------------------+--------------+------------------------------------------------------+ - | TF_VAR_aws_secret_key | Environment | AWS Programmatic Secret Access Key | - +------------------------------------------+--------------+------------------------------------------------------+ - | VES_P12_PASSWORD | Environment | Password set while creating F5XC API certificate | - +------------------------------------------+--------------+------------------------------------------------------+ - | 
VOLT_API_P12_FILE | Environment | Your F5XC API certificate. Set this to **api.p12** | - +------------------------------------------+--------------+------------------------------------------------------+ - | ssh_key | TERRAFORM | Your ssh key for accessing the created resources | - +------------------------------------------+--------------+------------------------------------------------------+ - | tf_cloud_organization | TERRAFORM | Your Terraform Cloud Organization name | - +------------------------------------------+--------------+------------------------------------------------------+ - -- Variable set created in terraform cloud: -.. image:: assets/variable-set.JPG - - -GitHub -###### - -- Fork and Clone Repo. Navigate to ``Actions`` tab and enable it. - -- **Actions Secrets:** Create the following GitHub Actions secrets in - your forked repo - - - P12: The linux base64 encoded F5XC P12 certificate - - TF_API_TOKEN: Your Terraform Cloud API token - - TF_CLOUD_ORGANIZATION: Your Terraform Cloud Organization name - - TF_CLOUD_WORKSPACE\_\ **: Create for each - workspace in your workflow per each job - - - EX: TF_CLOUD_WORKSPACE_MK8S_CLUSTER would be created with the - value ``mk8s-cluster`` - -- Created GitHub Action Secrets: -.. image:: assets/action-secret.JPG - -Workflow Runs -############# - -**STEP 1:** Check out a branch with the branch name as suggested below for the workflow you wish to run using -the following naming convention. 
- -**DEPLOY** - -========================== ======================= -Workflow Branch Name -========================== ======================= -f5-xc-genai-inf-at-the-edge deploy-genai-inf-at-the-edge -========================== ======================= - -Workflow File: `genai-inf-at-the-edge-apply.yml `__ - -**DESTROY** - -=========================== ======================== -Workflow Branch Name -=========================== ======================== -f5-xc-genai-inf-at-the-edge destroy-genai-inf-at-the-edge -=========================== ======================== - -Workflow File: `genai-inf-at-the-edge-destroy.yml `__ - -**STEP 2:** Rename ``aws/infra/terraform.tfvars.examples`` to ``aws/infra/terraform.tfvars`` and add the following data: - -- project_prefix = “Your project identifier name in **lower case** letters only - this will be applied as a prefix to all assets” - -- aws_region = “AWS Region” ex. "eu-west-1" - -- azs = Availability Zones of that region. Ex. ["eu-west-1a", "eu-west-1b"] - -- Also update assets boolean value as per your workflow. - -**Step 3:** Rename ``xc/terraform.tfvars.examples`` to ``xc/terraform.tfvars`` and add the following data: - -- api_url = “Your F5XC tenant” - -- xc_tenant = “Your tenant id available in F5 XC ``Administration`` section ``Tenant Overview`` menu” - -- xc_namespace = “The existing XC namespace where you want to deploy resources” - -- app_domain = “the FQDN of your app (cert will be autogenerated)” - -- xc_waf_blocking = “Set to true to configure waf in blocking mode” - -- aws_ce_site = "set to true to deploy AWS CE site" - -- site_name = "Provide a name for AWS VPC site" - -- ip_address_on_site_pool = "Set to true to configured the Private IP address of the EKS Cluster Nodes" - -- advertise_sites = "set to true to advertise on public" - -- http_only = "set to true to deploy a http loadbalancer." - - -Keep the rest of the values as they are. 
- -**STEP 4:** Commit and push your build branch to your forked repo - -- Build will run and can be monitored in the GitHub Actions tab and TF Cloud console - -.. image:: Assets/deploy_pipeline.jpg - -**STEP 5:** Once the pipeline completes, verify your CE, Origin Pool and LB were deployed or destroyed based on your workflow. (**Note:** CE sites will take 15-20 mins to come online) - -**STEP 6:** To validate the test infra, copy the public IP of CE site (**Note:** In terraform cloud click on `xc-deploy` workspace and select `Outputs` tab to get the public of azure CE site) and send a request with XC LB domain as a `Host` header, You should be able to access the demo application as shown in the image below: - -.. image:: Assets/testing_logs.jpg - -**Note:** If you want to destroy the entire setup, checkout a branch with name ``destroy-genai-inf-at-the-edge`` and push the repo code to it which will trigger destroy workflow and will remove all created resources. - -.. image:: Assets/destroy_pipeline.jpg - -**Note:** Due to timing issue there might be chance of not deleting the AWS VPC site. Please remove the VPS site while deploying Infra again. 
- diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/main.tf b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/main.tf new file mode 100644 index 000000000..89fc410b6 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/main.tf @@ -0,0 +1,3 @@ +provider "volterra" { + url = var.api_url +} diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/managed_k8s.tf b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/managed_k8s.tf new file mode 100644 index 000000000..84b5f9c34 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/managed_k8s.tf @@ -0,0 +1,76 @@ +resource "volterra_k8s_cluster" "mk8s" { + name = format("%s-mk8s", var.project_prefix) + namespace = "system" + use_default_cluster_role_bindings = true + use_default_cluster_roles = true + use_default_pod_security_admission = true + global_access_enable = true + local_access_config { + local_domain = "kubernetes.default.svc" + } +} + +resource "volterra_cloud_credentials" "aws" { + name = format("%s-cred", var.project_prefix) + description = format("AWS credentials which will be used to create site for %s", var.project_prefix) + namespace = "system" + aws_secret_key { + access_key = var.aws_access_key + secret_key { + clear_secret_info { + url = "string:///${base64encode(var.aws_secret_key)}" + } + } + } +} + +resource "volterra_aws_vpc_site" "this" { + name = format("%s-appstack", var.project_prefix) + namespace = "system" + aws_region = var.aws_region + ssh_key = var.ssh_key + aws_cred { + name = volterra_cloud_credentials.aws.name + namespace = "system" + } + vpc { + new_vpc { + name_tag = format("%s-vpc", var.project_prefix) + primary_ipv4 = var.primary_ipv4 + } + } + disk_size = "80" + instance_type = "t3.xlarge" + + voltstack_cluster { + aws_certified_hw= "aws-byol-voltstack-combo" + az_nodes { + aws_az_name = format("%sa", var.aws_region) + local_subnet { + subnet_param { + ipv4 = var.subnet_ipv4 + 
} + } + } + k8s_cluster { + name = volterra_k8s_cluster.mk8s.name + namespace = "system" + tenant = var.xc_tenant + } + } +} + +resource "null_resource" "wait_for_aws_mns" { + triggers = { + depends = volterra_aws_vpc_site.this.id + } +} + +resource "volterra_tf_params_action" "apply_aws_vpc" { + depends_on = [null_resource.wait_for_aws_mns] + site_name = volterra_aws_vpc_site.this.name + site_kind = "aws_vpc_site" + action = "apply" + wait_for_action = true + ignore_on_update = true +} diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/outputs.tf b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/outputs.tf new file mode 100644 index 000000000..71ccc7c7d --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/outputs.tf @@ -0,0 +1,7 @@ +output "xc_lb_name" { + value = nonsensitive(volterra_http_loadbalancer.lb_https.name) +} + +output "endpoint" { + value = var.app_domain +} diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/terraform.tfvars.example b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/terraform.tfvars.example new file mode 100644 index 000000000..5457a3860 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/terraform.tfvars.example @@ -0,0 +1,11 @@ +project_prefix = "genai" +api_url = "https://.console.ves.volterra.io/api" +xc_tenant = "tenant-id" +xc_namespace = "namespace-name" +app_domain = "lb.example.com" +serviceName = "langchain-doc-qa-api.llm" +serviceport = "8501" +aws_region = "ap-south-1" +xc_waf_blocking = "true" +primary_ipv4 = "10.0.0.0/16" +subnet_ipv4 = "10.0.0.0/24" diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/variables.tf b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/variables.tf new file mode 100644 index 000000000..b143bbeb0 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/variables.tf @@ -0,0 +1,90 @@ +#TF Cloud +variable "tf_cloud_organization" { 
+ type = string + description = "TF cloud org (Value set in TF cloud)" +} + +variable "ssh_key" { + type = string + description = "SSH pub key, only present for warning handling with TF cloud variable set" +} + +#XC +variable "project_prefix" { + type = string + default = "xcdemo" + description = "This value is inserted at the beginning of each XC object and only used if not set by Infra TF run" +} + +variable "xc_tenant" { + type = string + description = "Your F5 XC tenant name" +} + +variable "api_url" { + type = string + description = "Your F5 XC tenant api url" +} + +variable "xc_namespace" { + type = string + description = "Volterra app namespace where the object will be created. This cannot be system or shared ns." +} + +variable "app_domain" { + type = string + description = "FQDN for the app. If you have delegated domain `prod.example.com`, then your app_domain can be `.prod.example.com`" +} + +#XC WAF +variable "xc_waf_blocking" { + type = string + description = "Set XC WAF to Blocking(true) or Monitoring(false)" + default = "false" +} + +# k8s service name +variable "serviceName" { + type = string + description = "k8s backend service details to access the demo application" + default = "" +} + +variable "serviceport" { + type = string + description = "k8s backend application service port details" + default = "" +} + +variable "aws_region" { + type = string + description = "AWS Region." + default = "ap-south-1" +} + +variable "aws_access_key" { + description = "AWS Access Key ID" + type = string + sensitive = true + default = null +} + +variable "aws_secret_key" { + description = "AWS Secret Key ID" + type = string + sensitive = true + default = null +} + +variable "primary_ipv4" { + type = string + description = "IPv4 VPC range." + default = "10.0.0.0/16" +} + +variable "subnet_ipv4" { + type = string + description = "IPv4 range of subnet." 
+ default = "10.0.0.0/24" +} + diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/versions.tf b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/versions.tf new file mode 100644 index 000000000..a8c20c05d --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_version = ">= 0.14.0" + required_providers { + volterra = { + source = "volterraedge/volterra" + version = ">= 0.11.34" + } + } +} diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/terraform/xc_loadbalancer.tf b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/xc_loadbalancer.tf new file mode 100644 index 000000000..1abc74165 --- /dev/null +++ b/workflow-guides/smcn/genai-inference-at-the-edge/terraform/xc_loadbalancer.tf @@ -0,0 +1,77 @@ +# Create XC LB config +resource "volterra_origin_pool" "op" { + depends_on = [volterra_tf_params_action.apply_aws_vpc] + name = format("%s-origin-pool", var.project_prefix) + namespace = var.xc_namespace + description = format("Origin pool pointing to origin server for %s", var.project_prefix) + + origin_servers { + k8s_service { + service_name = var.serviceName + inside_network = true + site_locator { + site { + name = volterra_aws_vpc_site.this.name + namespace = "system" + tenant = var.xc_tenant + } + } + } + } + no_tls = true + port = var.serviceport + endpoint_selection = "LOCAL_PREFERRED" + loadbalancer_algorithm = "LB_OVERRIDE" +} + +resource "volterra_app_firewall" "waap-tf" { + name = format("%s-firewall", var.project_prefix) + description = format("WAF in block mode for %s", var.project_prefix) + namespace = var.xc_namespace + allow_all_response_codes = true + default_anonymization = true + use_default_blocking_page = true + default_bot_setting = true + default_detection_settings= true + blocking = var.xc_waf_blocking +} + +resource "volterra_http_loadbalancer" "lb_https" { + depends_on = [volterra_origin_pool.op] + name = 
format("%s-load-balancer", var.project_prefix) + namespace = var.xc_namespace + description = format("HTTP load balancer object for %s origin server", var.project_prefix) + domains = [var.app_domain] + advertise_on_public_default_vip = true + + https_auto_cert { + add_hsts = false + http_redirect = true + no_mtls = true + enable_path_normalize = true + tls_config { + default_security = true + } + } + + default_route_pools { + pool { + name = volterra_origin_pool.op.name + namespace = var.xc_namespace + } + weight = 1 + } + + app_firewall { + name = volterra_app_firewall.waap-tf.name + namespace = var.xc_namespace + } + round_robin = true + service_policies_from_namespace = true + user_id_client_ip = true + source_ip_stickiness = true + + more_option { + idle_timeout = 600000 + } +} diff --git a/workflow-guides/smcn/genai-inference-at-the-edge/xc-console-demo-guide.rst b/workflow-guides/smcn/genai-inference-at-the-edge/xc-console-demo-guide.rst index 1155c5123..e1b88c31e 100644 --- a/workflow-guides/smcn/genai-inference-at-the-edge/xc-console-demo-guide.rst +++ b/workflow-guides/smcn/genai-inference-at-the-edge/xc-console-demo-guide.rst @@ -3,10 +3,10 @@ Manual step by step process to deploy and secure Generative AI applications at t Prerequisites ************** -- F5 Distributed Cloud Console SaaS account +- F5 Distributed Cloud Console SaaS account and user should also have access to ``system`` namespace - Access to Amazon Web Service (AWS) Management console & Command Line -- Install Kubectl command line tool to connect and push the app manifest file to mk8s cluster -- Install Postman for verifying the deployment +- Kubectl command line tool to connect and push the app manifest file to mk8s cluster +- Postman for verifying the deployment Create AWS credentials in XC by following the steps mentioned in this `Devcentral article `_ @@ -15,19 +15,19 @@ Deployment Steps To deploy an AppStack mk8s cluster on an AWS CE Site, steps are categorized as mentioned below. 1. 
Create mk8s cluster -2. Create AWS VPC Site and attach the mk8s cluster +2. Create AWS Appstack VPC Site and attach the mk8s cluster 3. Deploy the App to mk8s cluster 4. Configure Origin Pool and HTTPS LB Below we shall take a look into detailed steps as mentioned above. 1. Creating mk8s cluster from F5 XC Console: - **Step 1.1**: Login to F5 XC Console - a. From the F5 XC Home page, ``Select the Distributed Apps`` Service - b. Select Manage > Manage K8s > K8s clusters in the configuration menu. Click on Add K8s cluster. + a. From the F5 XC Home page, ``Select the Distributed Apps`` Service and select ``System`` namespace + b. Select Manage > Manage K8s > K8s clusters in the configuration menu. Click on Add K8s cluster. + c. As shown below select enable site API access, enable Volt Console API Access and add provide local domain as ``kubernetes.default.svc``. Optionally you can add pod security policies and cluster roles -.. figure:: assets/mk8s-cluster.png -Fig : mk8s cluster + .. figure:: assets/mk8s-cluster.png + Fig : mk8s cluster 2. Creating AWS VPC Site object from F5 XC Console: **Step 1.1**: Login to F5 XC Console @@ -37,12 +37,13 @@ Fig : mk8s cluster **Step 1.2**: Configure site type selection a. Select a region in the AWS Region drop-down field. b. Create New VPC by selecting New VPC Parameters from the VPC drop-down. Enter the CIDR in the ``Primay IPv4 CIDR blocks`` field. - c. Select Ingress Gateway (One Interface) for the ``Select Ingress Gateway or Ingress/Egress Gateway`` field. + c. Select Appstack Cluster (One Interface) for the ``Select Ingress Gateway or Ingress/Egress Gateway`` field. **Step 1.3**: Configure ingress/egress gateway nodes a. Click on configure to open the One-interface node configuration wizard. b. Click on Add Item button in the Ingress Gateway (One Interface) Nodes in AZ. a. Select an option for the AWS AZ Name from the given suggestions that matches the configured AWS regsion. b. 
Select New subnet from the Subnet for the local interface drop-down and enter the subnet address in the IPv4 Subnet text field. + c. Scroll down, enable ``Site Local K8s API access`` and select your managed k8s cluster **Step 1.4**: Complete AWS VPC site object creation a. Select the AWS credentials object from the Cloud Credentials drop-down. b. Enter public key for remote SSH to the VPC site. @@ -51,12 +52,19 @@ Fig : mk8s cluster a. Click on the Apply button for the created AWS VPC site object. b. After a few minutes, the Site Admin State shows online and Status shows as Applied. + .. figure:: assets/aws-vpc-site.png + .. figure:: assets/aws-site-access.JPG + Figs : AWS VPC Site and attaching to managed k8s cluster + -.. figure:: assets/aws-vpc-site.png -Fig : AWS VPC Site +3. Deploy the App to mk8s cluster + a. You can navigate to ``Select the Distributed Apps`` Service and then to ``system`` namespace. Click on overview section and download global kubeconfig file + b. You can use this config file to connect to managed k8s and deploy your application using your app related yaml files. We have kept couple of app files in this folder for reference + c. Once deployed make sure all pods/service are running and online (GenAI app related pods usually takes around 15-20 mins to come online) + .. figure:: assets/kubeconfig.jpg + Fig : Downloading kubeconfig -3. Deploy the App to mk8s cluster 4. Configuring Origin Pool and HTTPS LB in F5 XC Console **Step 4.1**: Creating Origin Pool In this process, we configure Origin pool with server as AWS VPC site and Advertise in HTTP Load Balancer. @@ -64,28 +72,27 @@ Fig : AWS VPC Site a. Log into F5 XC Console and Click on Multi-Cloud App Connect. b. Click Manage > Load Balancers > Origin Pools and Click ``Add Origin Pool``. c. In the name field, enter a name. Click on Add Item button in Origin Servers section. - d. 
From the ``Select type of Origin Server`` menu, select ``IP address of Origin Server on given Sites`` to specify the node with its private IP address. - e. Select ``Site`` from the ``Site or Virtual Site`` drop-down and select the AWS VPC site created in step 1. + d. From the ``Select type of Origin Server`` menu, select ``K8s Service Name of Origin Server on given Sites`` and specify your app k8s service name along with namespace (If you are using our demo app, set to ``langchain-doc-qa-api.llm`` which is the service available in same folder yml files). + e. Select ``Site`` from the ``Site or Virtual Site`` drop-down and select the AWS VPC site created in step 2. f. Select ``Outside Network`` for ``Select Network on the Site`` drop-down. Click on Apply. - g. In ``Origin server Port`` enter the port number of the frontend service from step 3.1 + g. In ``Origin server Port`` enter the port number of the frontend service of your application. If you are using our demo app, set to 8501. h. Click on Save and Exit. .. figure:: assets/origin-pool.png Fig : Origin Pool - **Step 4.2**: Creating HTTPS Load Balancer with VIP advertisement + **Step 4.2**: Creating HTTPS Load Balancer with Internet VIP advertisement a. Log into F5 XC Console and Click on Multi-Cloud App Connect. b. Click Manage > Load Balancers > HTTP Load Balancers and Click ``Add HTTP Load Balancer``. c. In the name field, enter the name of the LB, In the Domains field, enter a domain name. d. From the Load Balancer Type drop-down menu, Select HTTPS to create HTTPS load balancer. e. From the Origins sections, Click on Add Item to add the origin pool created in step 4.1 under ``Select Origin Pool Method`` drop-down menu. Click on Apply. - f. Navigate to Other Setting section, From the VIP Advertisement drop-down menu, Select Custom. Click Configure in the Advertise Custom field to perform the configurations and click on Add Item. - g. From ``Select Where to Advertise`` menu, select Site. 
From the ``Site Network`` menu, select Outside Network from the drop-down. - h. From the Site Referrence menu, Select the AWS VPC site created in step 1. Click on Apply. - i. Click on Apply and ``Save and Exit``. + f. Increase idle timeout to 120000/600000 as per the app to make sure requests are not timed out + g. Click on Apply and ``Save and Exit``. + + .. figure:: assets/https-lb.png + Fig : HTTPS LB -.. figure:: assets/https-lb.png -Fig : HTTPS LB Deployment Verification ************************ @@ -94,11 +101,15 @@ To verify the deployment we shall follow the below steps to make sure users can .. figure:: assets/langserve-api.png Fig: LangServe API -1. Open the Postman -2. Enter the domain name of the HTTPS Load Balancer in the URL field. -3. Update the Host header as the domain name of the Load Balancer from the F5 XC Console. -4. Generate a POST request. +Validation Steps: + 1. Open the Postman/curl + 2. Enter the domain name of the HTTPS Load Balancer in the URL field. Next add your endpoint to the domain name. For ex ``/ask-a-doc/stream`` + 3. Generate a POST request to your application by providing valid body as below and validate response is returned + +.. figure:: assets/curl.JPG +.. 
figure:: assets/postman.JPG +Fig : App deployment response validation Conclusion ########### diff --git a/workflow-guides/waf/f5-xc-waf-on-k8s/assets/xc-tfvars.JPG b/workflow-guides/waf/f5-xc-waf-on-k8s/assets/xc-tfvars.JPG new file mode 100644 index 000000000..df63a1d8e Binary files /dev/null and b/workflow-guides/waf/f5-xc-waf-on-k8s/assets/xc-tfvars.JPG differ diff --git a/workflow-guides/waf/f5-xc-waf-on-k8s/automation-workflow.rst b/workflow-guides/waf/f5-xc-waf-on-k8s/automation-workflow.rst index 79f1086b7..8c6a60461 100644 --- a/workflow-guides/waf/f5-xc-waf-on-k8s/automation-workflow.rst +++ b/workflow-guides/waf/f5-xc-waf-on-k8s/automation-workflow.rst @@ -23,7 +23,7 @@ Prerequisites Workflow Steps ----------------- -- For deploying WAF on k8s, please copy both yml files in workflow folder to root folder .github/workflows folder. For ex: `waf-k8s-apply.yml <.github/workflows/waf-k8s-apply.yml>`__ +- For deploying WAF on k8s, please copy both yml files in workflow folder to root folder .github/workflows folder. For ex: `waf-k8s-apply.yml `__ - Login to Distributed Cloud, click on `Multi-Cloud-Connect`, navigate to `Site Management` and then to `Site Tokens` as shown below @@ -57,7 +57,7 @@ Terraform Cloud .. image:: /workflow-guides/waf/f5-xc-waf-on-k8s/assets/cloud-workspaces.JPG - Login to terraform cloud and create below workspaces for storing the terraform state file of each job. - infra, xc, eks, bookinfo, registration, k8sce + aws-infra, xc, eks, bookinfo, registration, k8sce - **Workspace Sharing:** Under the settings for each Workspace, set the **Remote state sharing** to share with each Workspace created. @@ -95,7 +95,7 @@ GitHub - **Actions Secrets:** Create the following GitHub Actions secrets in your forked repo - - P12: The linux base64 encoded F5XC P12 certificate + - P12: The linux base64 encoded F5XC P12 certificate (For windows run ``base64 ``, copy output content into a file and remove spaces.) 
- TF_API_TOKEN: Your Terraform Cloud API token - TF_CLOUD_ORGANIZATION: Your Terraform Cloud Organization name - TF_CE_LATITUDE: Your CE location latitude @@ -105,8 +105,9 @@ GitHub - TF_CLOUD_WORKSPACE\_\ **: Create for each workspace in your workflow per each job - - EX: TF_CLOUD_WORKSPACE_EKS would be created with the - value ``EKS`` + - EX: Create TF_CLOUD_WORKSPACE_EKS with the value ``EKS`` + + - EX: Create TF_CLOUD_WORKSPACE_INFRA with the value ``aws-infra``, etc - Check below image for more info on action secrets @@ -136,7 +137,7 @@ f5-xc-waf-on-k8s destroy-waf-k8s **Note:** Make sure to comment line no. 16 (# *.tfvars) in ".gitignore" file -**STEP 2:** Rename ``infra/terraform.tfvars.examples`` to ``infra/terraform.tfvars`` and add the following data: +**STEP 2:** Rename ``aws/infra/terraform.tfvars.examples`` to ``aws/infra/terraform.tfvars`` and add the following data: - project_prefix = “Your project identifier name in **lower case** letters only - this will be applied as a prefix to all assets” @@ -162,15 +163,19 @@ f5-xc-waf-on-k8s destroy-waf-k8s - k8s_pool = "true if backend is residing in k8s" -- serviceName = "k8s service name of backend" +- serviceName = "k8s service name of backend. Set this to productpage.default." -- serviceport = "k8s service port of backend" +- serviceport = "k8s service port of backend. For bookinfo demo application you can keep this value as 9080." - advertise_sites = "set to false if want to advertise on public" - http_only = "set to true if want to advertise on http protocol" -**STEP 4:** Commit and push your build branch to your forked repo, Build will run and can be monitored in the GitHub Actions tab and TF Cloud console +Check below image for sample data + +.. 
image:: /workflow-guides/waf/f5-xc-waf-on-k8s/assets/xc-tfvars.JPG + +**STEP 4:** Also update default value of ``aws_waf_ce`` variable in ``variables.tf`` file of ``/aws/eks-cluster``, ``/aws/eks-cluster/ce-deployment`` and ``/shared/booksinfo`` folders if it's not ``infra``. Commit and push your build branch to your forked repo, Build will run and can be monitored in the GitHub Actions tab and TF Cloud console **STEP 5:** Once the pipeline completes, verify your CE, Origin Pool and LB were deployed or destroyed based on your workflow.