diff --git a/.github/workflows/_deployment.yaml b/.github/workflows/_deployment.yaml index 38271d8..b6e3689 100644 --- a/.github/workflows/_deployment.yaml +++ b/.github/workflows/_deployment.yaml @@ -147,16 +147,13 @@ jobs: export HCA_PROJECT_ID="${{ vars.hca_project_id }}" export TCO_ID="${{ vars.tco_id }}" export ENVIRONMENT="${{ inputs.environment }}" - envsubst < ./sample-deploy-code/service-yaml/container-${{ inputs.environment }}.yaml > container-${{ inputs.environment }}.yaml + envsubst < ./service-yaml/container-${{ inputs.environment }}.yaml > container-${{ inputs.environment }}.yaml - name: Deploy to ${{ inputs.environment }} Cloud Run id: deploy-dev-qa uses: google-github-actions/deploy-cloudrun@v2 - continue-on-error: true with: project_id: ${{ vars.gcp_project_id }} service: ${{ vars.service_name }} region: ${{ vars.region }} - metadata: container-${{ inputs.environment }}.yaml - - run: echo "OK" - if: job.steps.bad.status == failure() \ No newline at end of file + metadata: container-${{ inputs.environment }}.yaml \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 655302a..7acaeef 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Continuous Integration (CI) Build +name: Continuous Integration (CI) Build for QA and Dev Environments on: pull_request: diff --git a/.github/workflows/liquibase-ci.yml b/.github/workflows/liquibase-ci.yml index 2ae9444..9acef02 100644 --- a/.github/workflows/liquibase-ci.yml +++ b/.github/workflows/liquibase-ci.yml @@ -1,4 +1,4 @@ -name: Reusable Continuous Integration (CI) Build Workflow +name: Reusable Continuous Integration (CI) Build Workflow for Dev, QA and prod-plan on: workflow_call: @@ -31,6 +31,9 @@ jobs: pull-requests: write actions: read security-events: write + defaults: + run: + working-directory: ./db/ steps: - id: 'auth' @@ -70,15 +73,15 @@ jobs: - name: Liquibase Status run: | - ./liquibase/liquibase 
--defaultsFile=./liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} status --verbose + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} status --verbose - name: Liquibase Validate run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} validate + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} validate - name: Liquibase Print SQL run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} --output-file=./artifacts/update-sql.sql update-sql + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} --output-file=./artifacts/update-sql.sql update-sql - name: Set Timestamp run: echo "TIMESTAMP=$(date +"%Y%m%d%H%M%S")" >> $GITHUB_ENV @@ -92,4 +95,4 @@ jobs: - name: Liquibase Update if: github.event_name == 'push' run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} update + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} update diff --git 
a/.github/workflows/liquibase-release.yml b/.github/workflows/liquibase-release.yml index eaa1a8d..1d62c69 100644 --- a/.github/workflows/liquibase-release.yml +++ b/.github/workflows/liquibase-release.yml @@ -1,4 +1,4 @@ -name: Reusable Liquibase Publishing Workflow +name: Reusable Liquibase & Cloud Run Publishing Workflow for Production Environment on: workflow_call: @@ -30,6 +30,9 @@ jobs: pull-requests: write actions: read security-events: write + defaults: + run: + working-directory: ./db/ steps: - id: 'auth' @@ -69,11 +72,11 @@ jobs: - name: Liquibase Status run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} status --verbose + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} status --verbose - name: Liquibase Print SQL run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} --output-file=./artifacts/update-sql.sql update-sql + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} --output-file=./artifacts/update-sql.sql update-sql - name: Set Timestamp run: echo "TIMESTAMP=$(date +"%Y%m%d%H%M%S")" >> $GITHUB_ENV @@ -86,4 +89,4 @@ jobs: - name: Liquibase Update run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{vars.LOG_LEVEL}} --sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} update + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{vars.LOG_LEVEL}} 
--sql-log-level ${{vars.SQL_LOG_LEVEL}} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} update diff --git a/.github/workflows/liquibase-rollback.yml b/.github/workflows/liquibase-rollback.yml index c7c889c..6ec2593 100644 --- a/.github/workflows/liquibase-rollback.yml +++ b/.github/workflows/liquibase-rollback.yml @@ -1,4 +1,4 @@ -name: Liquibase Rollback Reusable Workflow +name: Reusable Liquibase Rollback Workflow on: workflow_call: @@ -31,6 +31,9 @@ jobs: pull-requests: write actions: read security-events: write + defaults: + run: + working-directory: ./db/ steps: - id: 'auth' @@ -70,7 +73,7 @@ - name: Liquibase Print Rollback SQL run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} --output-file=./artifacts/rollback-sql.sql rollback-sql ${{ vars.LAST_STABLE_VERSION_TAG }} + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} --output-file=./artifacts/rollback-sql.sql rollback-sql ${{ vars.LAST_STABLE_VERSION_TAG }} - name: Set Timestamp run: echo "TIMESTAMP=$(date +"%Y%m%d%H%M%S")" >> $GITHUB_ENV @@ -83,4 +86,4 @@ - name: Liquibase Rollback run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} rollback ${{ vars.LAST_STABLE_VERSION_TAG }} \ No newline at end of file + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} rollback ${{ vars.LAST_STABLE_VERSION_TAG }} \ No newline at end of file diff --git a/.github/workflows/liquibase-tag.yml
b/.github/workflows/liquibase-tag.yml index 8fe6798..97a6ae7 100644 --- a/.github/workflows/liquibase-tag.yml +++ b/.github/workflows/liquibase-tag.yml @@ -1,4 +1,4 @@ -name: Reusable Continuous Integration (CI) Build Workflow +name: Reusable Workflow for Liquibase Tagging on: workflow_call: @@ -37,6 +37,9 @@ jobs: pull-requests: write actions: read security-events: write + defaults: + run: + working-directory: ./db/ steps: - id: 'auth' @@ -76,7 +79,7 @@ jobs: - name: Liquibase Tag run: | - ./liquibase/liquibase --defaultsFile=./liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} tag --tag=${{ inputs.liquibase-tag }} + ./liquibase/liquibase --defaultsFile=./db/liquibase/liquibase.properties --log-level ${{ vars.LOG_LEVEL }} --sql-log-level ${{ vars.SQL_LOG_LEVEL }} --url ${{ vars.URL }} --username ${{ secrets.USERNAME }} tag --tag=${{ inputs.liquibase-tag }} - name: Update Repository Env Variable run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4b54226..b307b69 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,5 @@ -name: Release + +name: Release Workflow for PROD Environments on: release: types: [published] @@ -40,7 +41,7 @@ jobs: environment: 'prod' secrets: inherit - prod-liquibase-rollback: + prod-liquibase-rollback: name: PROD liquibase rollback uses: ./.github/workflows/liquibase-rollback.yml if: failure() || cancelled() @@ -52,7 +53,7 @@ jobs: with: environment: 'prod' runner: 'ubuntu-latest' - prod-liquibase-update-tag: + prod-liquibase-update-tag: name: PROD Liquibase Update tag and repository variable needs: [ prod-liquibase-deploy, prod-docker ] uses: ./.github/workflows/liquibase-tag.yml diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 60bdda2..0000000 --- a/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -*.class -.idea -# Mobile Tools for Java (J2ME) -.mtj.tmp/ 
- -# Package Files # -*.jar -*.war -*.ear - -# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml -hs_err_pid* diff --git a/sample-deploy-code/code/hello-python/Dockerfile b/Dockerfile similarity index 100% rename from sample-deploy-code/code/hello-python/Dockerfile rename to Dockerfile diff --git a/README.md b/README.md index 422dec2..a531b97 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,51 @@ -##DB Change Management With Liquibase - -This serves as an example liquibase set up following the liquibase's best practices as outlined on the [liquibase website](http://www.liquibase.org/bestpractices.html). - -###SqlFire Example -You will find the required jar files in the lib folder of the packaged version of liquibase. - -* sqlfireclient.jar -* liquibase-sqlfire-3.0.0.jar (sqlfire extensions for liquibase) - -The example creates the example schema that was shipped with verion 1.0 of SqlFire and was used in thier documentation. (Airline, Cities, Flights, etc). - -The schema generation uses the functionality added in liquibase 3.0 to use annotated sql scripts. This allows for the use of sqlfire's extended keyword set required for sqlfire schemas (collocate, replicate, etc). - -It also loads reference data that was provided with the example sqlfire download. - -It is important to note that due to the extended keyword set and changes to the information schema from Apache Derby to support sqlfire's distribution, liquibase funcitionality such as database diff/generateChangelog is not supported. - - -###SqlServer Example - - -###PostgreSql Example +# Continuous Integration & Continuous Deployment for Cloud Run +This repository is designed to lint code, scan code and deploy packaged code to Cloud Run. It manages the promotion process from development to production through pull requests and releases while also allowing for canary deployments through workflow dispatch. 
+ +## Prerequisites + * Develop, QA and Production Google Cloud projects are created. + * Workload identity [pools and providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers) have been created and added to [GitHub Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions). + * The roles/iam.workloadIdentityUser role has been granted to the service account that will authenticate with workload identity federation. See [documentation](https://cloud.google.com/blog/products/identity-security/secure-your-use-of-third-party-tools-with-identity-federation) for guidance on provisioning workload identity with GitHub Actions. + * The service account has been added to GitHub Secrets and has the following roles. + * roles/artifactregistry.writer + * roles/run.admin + * roles/secretmanager.secretAccessor + * roles/cloudsql.client + * All required APIs for Google Cloud services have been enabled. + * [Artifact Registry](https://cloud.google.com/artifact-registry/docs/docker/store-docker-container-images) repository must be created. + * Branches are created for develop and main. + * Environments for dev, qa and prod are created within GitHub Environments. + * Following environment variables are created in each environment. + * ARTIFACT_REGISTRY_PROJECT + * ARTIFACT_REGISTRY_REPO + * CLOUD_RUN_SA + * GCP_PROJECT_ID + * SERVICE_NAME + * Following repository variables are created in each environment. + * CODE_DIRECTORY + * LANGUAGE + * REGION + + +## Deploying to DEV +1. Create a new feature branch from main, make necessary changes to your code. +2. Raise a pull request from your new feature branch to develop. +3. When the pull request is raised, the workflow will lint the code to ensure quality and CodeQL will scan the code to ensure there are no vulnerabilities in the code. +4.
If there are no linting issues or CodeQL vulnerabilities, the pull request can be merged after the workflow completes and approvals are received. +5. Once merged, the image would be built and pushed to Artifact Registry in the Google Cloud project used for development. +6. In develop, once the image is built, it will immediately be deployed to Cloud Run as a new revision in the development project. + +## Deploying to QA +1. Raise a pull request from develop to main. This will not trigger a workflow. +2. Once develop is merged to main, the image is built and pushed to the **production** Artifact Registry repository. The reason this is done is to test the image in QA, then re-tag the image for use in production if QA testing is successful. +3. Once the image is pushed to the production Artifact Registry, Cloud Run will pull the image and deploy it to the QA Google Cloud project. + +## Canary Deployments to Production +1. Go to the Google Cloud console to retrieve the existing revision name. +2. Go to the workflow named *Canary Deployment to Cloud Run* to trigger the workflow from workflow dispatch. +3. Insert the existing revision name into the field named *Old Revision Name* and set the traffic split so it adds up to 100%. Feel free to do this a few times to gradually rollout the new revision, increasing the traffic to the new revision each time. +4. In the console, you can see the new revision will have the URL tag *blue* and the old revision will have the URL tag *green*. This can be used to see which users hit each revision or to have users test the new revision by using the revision URL. + +## Deploying to Production +1. Create a [GitHub Release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository) to trigger a new production deployment. Once the release is published, the workflow will be triggered. +2.
This environment should have approvals on the workflow, so approvers will need to approve before the image build and before the Cloud Run deployment. +3. This workflow will re-tag the image with the release tag and will deploy 100% of traffic to the new revision. \ No newline at end of file diff --git a/commands.txt b/commands.txt deleted file mode 100644 index d9b5847..0000000 --- a/commands.txt +++ /dev/null @@ -1,2 +0,0 @@ -./liquibase update -./liquibase --changeLogFile=baseline.xml generateChangeLog \ No newline at end of file diff --git a/changesets/postgresql/initial-schema-postgresql.sql b/db/changesets/postgresql/initial-schema-postgresql.sql similarity index 100% rename from changesets/postgresql/initial-schema-postgresql.sql rename to db/changesets/postgresql/initial-schema-postgresql.sql diff --git a/changesets/postgresql/update-tables.sql b/db/changesets/postgresql/update-tables.sql similarity index 100% rename from changesets/postgresql/update-tables.sql rename to db/changesets/postgresql/update-tables.sql diff --git a/liquibase-postgres-master.xml b/db/liquibase-postgres-master.xml similarity index 88% rename from liquibase-postgres-master.xml rename to db/liquibase-postgres-master.xml index fac6ed7..4dc6feb 100644 --- a/liquibase-postgres-master.xml +++ b/db/liquibase-postgres-master.xml @@ -7,7 +7,7 @@ http://www.liquibase.org/xml/ns/dbchangelog-ext http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-ext.xsd"> - + \ No newline at end of file diff --git a/liquibase.bat b/db/liquibase/liquibase.bat similarity index 100% rename from liquibase.bat rename to db/liquibase/liquibase.bat diff --git a/liquibase.properties b/db/liquibase/liquibase.properties similarity index 100% rename from liquibase.properties rename to db/liquibase/liquibase.properties diff --git a/liquibase.spec b/db/liquibase/liquibase.spec similarity index 100% rename from liquibase.spec rename to db/liquibase/liquibase.spec diff --git a/docs/Deployment.md b/docs/Deployment.md 
index d4c97fb..d97b03e 100644 --- a/docs/Deployment.md +++ b/docs/Deployment.md @@ -9,8 +9,12 @@ There are two Github workflows under the .github/workflows/ folder used to facil * Establish cloudSQL proxy connection to the SQL database * Setup liquibase and run liquibase status. Print SQL to be executed as an artifact to the workflow * Additionally, if triggered due to a push request, the liquibase update is run to execute the changes on the respective environment based on which the push is triggered (dev for develop branch and QA for main branch) + * Build the docker application and deploy it to cloud run + * If the Docker deployment is successful, liquibase changelog is tagged with the commit ID and the Repository environment Variable for the latest stable commit will be updated to this commit ID. + * If the Docker deployment fails, liquibase rollback will be triggered to revert the database to the last stable version tag based on the repository environment variable `LAST_STABLE_VERSION_TAG` - release.yaml * Executes database changes in the production environment on creation of a release from the main branch. + * Similar to the `ci.yaml` workflow, app is deployed to cloud run. Based on the success or failure of app deployment, the liquibase changelog will be tagged or the changes will be reverted to the last stable commit. ## Deployment into dev, QA, and prod ### Dev environment @@ -18,17 +22,23 @@ There are two Github workflows under the .github/workflows/ folder used to facil - Create a PR merging `your-branch` into `develop`. - This will run checks on your changes, attaches the SQL script to be executed as an artifact to the workflow run. This should be reviewed and approved before merging. - Once merged, liquibase update command will be run and changes will be deployed to the `dev` environment. 
+- Docker image is built and deployed to Cloud Run in the `dev` environment +- If the Cloud Run deployment is successful, then Liquibase version is tagged and updated in the `LAST_STABLE_VERSION_TAG` Github Environment variable for `dev` +- If the Cloud Run deployment fails, then Liquibase Rollback is run to rollback the database changes to the current `LAST_STABLE_VERSION_TAG` in the Github Environment variables for `dev` - Test on `dev` ### QA environment - When testing is completed successfully in dev environment create a PR merging `develop` into `main`. - This will again run checks on your changes, adds a SQL script to be reviewed before merging to the workflow. -- Then merge `develop` into `main` and the database changes will be deployed to the `qa` environment. +- Then merge `develop` into `main` and the changes will be deployed to the `qa` environment. +- Docker image is built and deployed to Cloud Run in the `qa` environment +- If the Cloud Run deployment is successful, then Liquibase version is tagged and updated in the `LAST_STABLE_VERSION_TAG` Github Environment variable for `qa` +- If the Cloud Run deployment fails, then Liquibase Rollback is run to rollback the database changes to the current `LAST_STABLE_VERSION_TAG` in the Github Environment variables for `qa` - Test again on `qa ` for quality assurance. ### Prod environment - When QA is completed successfully, you can now promote to `prod`. - To do this, you will have to create release from the `main` branch with the appropriate tag and release notes. -- Once this is done, a plan will be run on the github workflow and an artifact of the SQL script to be run will be attached to the workflow. -- If approved and published, this will execute the same database changes on `prod`. +- Once this is done, a plan will be run on the github workflow using an identical `prod-plan` github environment and an artifact of the SQL script to be run will be attached to the workflow.
+- Once the artifact is reviewed and the `prod` job workflow is approved, this will execute the same database changes and app deployment on `prod`. ## References diff --git a/sample-deploy-code/code/hello-python/main.py b/main.py similarity index 100% rename from sample-deploy-code/code/hello-python/main.py rename to main.py diff --git a/sample-deploy-code/code/hello-python/requirements.txt b/requirements.txt similarity index 100% rename from sample-deploy-code/code/hello-python/requirements.txt rename to requirements.txt diff --git a/sample-deploy-code/.DS_Store b/sample-deploy-code/.DS_Store deleted file mode 100644 index 0162591..0000000 Binary files a/sample-deploy-code/.DS_Store and /dev/null differ diff --git a/sample-deploy-code/README.md b/sample-deploy-code/README.md deleted file mode 100644 index a531b97..0000000 --- a/sample-deploy-code/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# Continuous Integration & Continuous Deployment for Cloud Run -This repository is designed to lint code, scan code and deploy packaged code to Cloud Run. It manages the promotion process from development to production through pull requests and releases while also allowing for canary deployments through workflow dispatch. - -## Prerequisites - * Develop, QA and Production Google Cloud projects are created. - * Workload identity [pools and providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers) have been created and added to [GitHub Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions). - * The roles/iam.workloadIdentityUser role has been granted to the service account that will authenticate with workload identity federation. See [documentation](https://cloud.google.com/blog/products/identity-security/secure-your-use-of-third-party-tools-with-identity-federation) for guidance on provisioning workload identity with GitHub Actions. 
- * The service account has been added to GitHub Secrets and has the following roles. - * roles/artifactregistry.writer - * roles/run.admin - * roles/secretmanager.secretAccessor - * roles/cloudsql.client - * All required APIs for Google Cloud services have been enabled. - * [Artifact Registry](https://cloud.google.com/artifact-registry/docs/docker/store-docker-container-images) repository must be created. - * Branches are created for develop and main. - * Environments for dev, qa and prod are created within GitHub Environments. - * Following environment variables are created in each environment. - * ARTIFACT_REGISTRY_PROJECT - * ARTIFACT_REGISTRY_REPO - * CLOUD_RUN_SA - * GCP_PROJECT_ID - * SERVICE_NAME - * Following repsository variables are created in each environment. - * CODE_DIRECTORY - * LANGUAGE - * REGION - - -## Deploying to DEV -1. Create a new feature branch from main, make necessary changes to your code. -2. Raise a pull request from your new feature branch to develop. -3. When the pull request is raised, the workflow will lint the code to ensure quality and CodeQL will scan the code to ensure there are no vulnerabilities in the code. -4. If there are no linting issues or CodeQL vulnerabilities, the pull request can be merged after the workflow completes and approvals are received. -5. Once merged, the image would be built and pushed to Artifact Registry in the Google Cloud project used for development. -6. In develop, once the image is built, it will immediately be deployed to Cloud Run as a new revision in the development project. - -## Deploying to QA -1. Raise a pull request from develop to main. This will not trigger a workflow. -2. Once develop is merged to main, the image is built and pushed to the **production** Artifact Registry repository. The reason this is done is to test the image in QA, then re-tag the image for use in production if QA testing is sucessful. -3. 
Once the image is pushed to the production Artifact Registry, Cloud Run will pull the image and deploy it to the QA Google Cloud project. - -## Canary Deployments to Production -1. Go to the Google Cloud console to retrieve the existing revision name. -2. Go to the workflow named *Canary Deployment to Cloud Run* to trigger the workflow from workflow dispatch. -3. Insert the existing revision name into the field named *Old Revision Name* and set the traffic split so it adds up to 100%. Feel free to do this a few times to gradually rollout the new revision, increasing the traffic to the new revision each time. -4. In the console, you can see the new revision will have the URL tag *blue* and the old revision will have the URL tag *green*. This can be used to see which users hit each revision or to have users test the new revision by using the revision URL. - -## Deploying to Production -1. Create a [GitHub Release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository) to trigger a new production deployment. Once the release is published, the workflow will be triggered. -2. This environment should have approvals on the workflow, so approvers will need to approve before the image build and before the Cloud Run deployment. -3. This workflow will re-tag the image with the release tag and will deploy 100% of traffic to the new revision. \ No newline at end of file diff --git a/sample-deploy-code/code/cloudsql/Dockerfile b/sample-deploy-code/code/cloudsql/Dockerfile deleted file mode 100644 index a95a16d..0000000 --- a/sample-deploy-code/code/cloudsql/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM maven:3.8.5-jdk-11 as builder -# WORKDIR /src/main/java/com/example/cloudsql -COPY pom.xml . 
-RUN mvn -B dependency:resolve -COPY src ./src -RUN mvn -B package - -FROM openjdk:11-jdk-slim -# WORKDIR /src/main/java/com/example/cloudsql -COPY --from=builder target/cloudsql-1.0.jar /cloudsql/cloudsql-1.0.jar -EXPOSE 8080 -ENTRYPOINT ["java", "-cp", "/cloudsql/cloudsql-1.0.jar", "com.example.cloudsql.functions.Main"] \ No newline at end of file diff --git a/sample-deploy-code/code/cloudsql/pom.xml b/sample-deploy-code/code/cloudsql/pom.xml deleted file mode 100644 index 275a813..0000000 --- a/sample-deploy-code/code/cloudsql/pom.xml +++ /dev/null @@ -1,171 +0,0 @@ - - - - 4.0.0 - - com.example - cloudsql - 1.0 - - cloudsql - - http://www.example.com - - - UTF-8 - 1.8 - 1.8 - - - - - javax.servlet - javax.servlet-api - 3.1.0 - jar - provided - - - javax.servlet - jstl - 1.2 - - - org.postgresql - postgresql - 42.6.0 - - - com.google.cloud.sql - postgres-socket-factory - 1.13.0 - - - com.google.code.findbugs - annotations - 3.0.1 - - - com.zaxxer - HikariCP - 5.0.1 - - - org.mockito - mockito-core - 5.4.0 - test - - - junit - junit - 4.13.2 - test - - - com.google.truth - truth - 1.1.5 - test - - - - com.google.cloud.functions.invoker - java-function-invoker - 1.3.0 - - - com.google.cloud.functions - functions-framework-api - 1.1.0 - provided - - - junit - junit - 4.11 - test - - - - - - - - - maven-clean-plugin - 3.1.0 - - - - maven-resources-plugin - 3.0.2 - - - org.apache.maven.plugins - maven-war-plugin - 3.4.0 - - - org.eclipse.jetty - jetty-maven-plugin - 9.4.51.v20230217 - - 1 - - - - org.apache.maven.plugins - maven-jar-plugin - - - - com.example.cloudsql.functions.Main - - - - - - - com.google.cloud.tools - appengine-maven-plugin - 2.4.4 - - GCLOUD_CONFIG - GCLOUD_CONFIG - - - - maven-compiler-plugin - 3.8.0 - - - maven-surefire-plugin - 2.22.1 - - - maven-jar-plugin - 3.0.2 - - - maven-install-plugin - 2.5.2 - - - maven-deploy-plugin - 2.8.2 - - - - maven-site-plugin - 3.7.1 - - - maven-project-info-reports-plugin - 3.0.0 - - - - - diff --git 
a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolContextListener.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolContextListener.java deleted file mode 100644 index 5b35fe5..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolContextListener.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.cloudsql; - -import com.zaxxer.hikari.HikariDataSource; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.sql.SQLException; -import javax.servlet.ServletContext; -import javax.servlet.ServletContextEvent; -import javax.servlet.ServletContextListener; -import javax.servlet.annotation.WebListener; -import javax.sql.DataSource; - -@SuppressFBWarnings( - value = {"HARD_CODE_PASSWORD", "WEM_WEAK_EXCEPTION_MESSAGING"}, - justification = "Extracted from environment, Exception message adds context.") -@WebListener("Creates a connection pool that is stored in the Servlet's context for later use.") -public class ConnectionPoolContextListener implements ServletContextListener { - - @Override - public void contextDestroyed(ServletContextEvent event) { - // This function is called when the Servlet is destroyed. 
- HikariDataSource pool = (HikariDataSource) event.getServletContext().getAttribute("my-pool"); - if (pool != null) { - pool.close(); - } - } - - @Override - public void contextInitialized(ServletContextEvent event) { - // This function is called when the application starts and will safely create a connection pool - // that can be used to connect to. - ServletContext servletContext = event.getServletContext(); - DataSource pool = (DataSource) servletContext.getAttribute("my-pool"); - if (pool == null) { - if (System.getenv("INSTANCE_HOST") != null) { - pool = TcpConnectionPoolFactory.createConnectionPool(); - } else if (System.getenv("DB_IAM_USER") != null) { - pool = ConnectorIamAuthnConnectionPoolFactory.createConnectionPool(); - } else { - pool = ConnectorConnectionPoolFactory.createConnectionPool(); - } - servletContext.setAttribute("my-pool", pool); - } - try { - Utils.createTable(pool); - } catch (SQLException ex) { - throw new RuntimeException( - "Unable to verify table schema. Please double check the steps" - + "in the README and try again.", - ex); - } - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolFactory.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolFactory.java deleted file mode 100644 index 62c4536..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolFactory.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.cloudsql; - -import com.zaxxer.hikari.HikariConfig; - -public class ConnectionPoolFactory { - - public static HikariConfig configureConnectionPool(HikariConfig config) { - // [START cloud_sql_postgres_servlet_limit] - // maximumPoolSize limits the total number of concurrent connections this pool will keep. Ideal - // values for this setting are highly variable on app design, infrastructure, and database. - config.setMaximumPoolSize(5); - // minimumIdle is the minimum number of idle connections Hikari maintains in the pool. - // Additional connections will be established to meet this value unless the pool is full. - config.setMinimumIdle(5); - // [END cloud_sql_postgres_servlet_limit] - - // [START cloud_sql_postgres_servlet_timeout] - // setConnectionTimeout is the maximum number of milliseconds to wait for a connection checkout. - // Any attempt to retrieve a connection from this pool that exceeds the set limit will throw an - // SQLException. - config.setConnectionTimeout(10000); // 10 seconds - // idleTimeout is the maximum amount of time a connection can sit in the pool. Connections that - // sit idle for this many milliseconds are retried if minimumIdle is exceeded. - config.setIdleTimeout(600000); // 10 minutes - // [END cloud_sql_postgres_servlet_timeout] - - // [START cloud_sql_postgres_servlet_backoff] - // Hikari automatically delays between failed connection attempts, eventually reaching a - // maximum delay of `connectionTimeout / 2` between attempts. - // [END cloud_sql_postgres_servlet_backoff] - - // [START cloud_sql_postgres_servlet_lifetime] - // maxLifetime is the maximum possible lifetime of a connection in the pool. Connections that - // live longer than this many milliseconds will be closed and reestablished between uses. 
This - // value should be several minutes shorter than the database's timeout value to avoid unexpected - // terminations. - config.setMaxLifetime(1800000); // 30 minutes - // [END cloud_sql_postgres_servlet_lifetime] - return config; - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorConnectionPoolFactory.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorConnectionPoolFactory.java deleted file mode 100644 index 7d89954..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorConnectionPoolFactory.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.cloudsql; - -// [START cloud_sql_postgres_servlet_connect_connector] -// [START cloud_sql_postgres_servlet_connect_unix] -import com.zaxxer.hikari.HikariConfig; -import com.zaxxer.hikari.HikariDataSource; -import javax.sql.DataSource; - -public class ConnectorConnectionPoolFactory extends ConnectionPoolFactory { - - // Note: Saving credentials in environment variables is convenient, but not - // secure - consider a more secure solution such as - // Cloud Secret Manager (https://cloud.google.com/secret-manager) to help - // keep secrets safe. 
- private static final String INSTANCE_CONNECTION_NAME = - System.getenv("INSTANCE_CONNECTION_NAME"); - private static final String INSTANCE_UNIX_SOCKET = System.getenv("INSTANCE_UNIX_SOCKET"); - private static final String DB_USER = System.getenv("DB_USER"); - private static final String DB_PASS = System.getenv("DB_PASS"); - private static final String DB_NAME = System.getenv("DB_NAME"); - - public static DataSource createConnectionPool() { - // The configuration object specifies behaviors for the connection pool. - HikariConfig config = new HikariConfig(); - - // The following URL is equivalent to setting the config options below: - // jdbc:postgresql:///?cloudSqlInstance=& - // socketFactory=com.google.cloud.sql.postgres.SocketFactory&user=&password= - // See the link below for more info on building a JDBC URL for the Cloud SQL JDBC Socket Factory - // https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory#creating-the-jdbc-url - - // Configure which instance and what database user to connect with. - config.setJdbcUrl(String.format("jdbc:postgresql:///%s", DB_NAME)); - config.setUsername(DB_USER); // e.g. "root", _postgres" - config.setPassword(DB_PASS); // e.g. "my-password" - - config.addDataSourceProperty("socketFactory", "com.google.cloud.sql.postgres.SocketFactory"); - config.addDataSourceProperty("cloudSqlInstance", INSTANCE_CONNECTION_NAME); - - // [END cloud_sql_postgres_servlet_connect_connector] - // Unix sockets are not natively supported in Java, so it is necessary to use the Cloud SQL - // Java Connector to connect. When setting INSTANCE_UNIX_SOCKET, the connector will - // call an external package that will enable Unix socket connections. - // Note: For Java users, the Cloud SQL Java Connector can provide authenticated connections - // which is usually preferable to using the Cloud SQL Proxy with Unix sockets. - // See https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory for details. 
- if (INSTANCE_UNIX_SOCKET != null) { - config.addDataSourceProperty("unixSocketPath", INSTANCE_UNIX_SOCKET); - } - // [START cloud_sql_postgres_servlet_connect_connector] - - // [END cloud_sql_postgres_servlet_connect_unix] - // The ipTypes argument can be used to specify a comma delimited list of preferred IP types - // for connecting to a Cloud SQL instance. The argument ipTypes=PRIVATE will force the - // SocketFactory to connect with an instance's associated private IP. - config.addDataSourceProperty("ipTypes", "PUBLIC,PRIVATE"); - // [START cloud_sql_postgres_servlet_connect_unix] - - - // ... Specify additional connection properties here. - // [START_EXCLUDE] - configureConnectionPool(config); - // [END_EXCLUDE] - - // Initialize the connection pool using the configuration object. - return new HikariDataSource(config); - } -} -// [END cloud_sql_postgres_servlet_connect_connector] -// [END cloud_sql_postgres_servlet_connect_unix] diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorIamAuthnConnectionPoolFactory.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorIamAuthnConnectionPoolFactory.java deleted file mode 100644 index 5342e1e..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorIamAuthnConnectionPoolFactory.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.cloudsql; - -// [START cloud_sql_postgres_servlet_auto_iam_authn] -import com.zaxxer.hikari.HikariConfig; -import com.zaxxer.hikari.HikariDataSource; -import javax.sql.DataSource; - -public class ConnectorIamAuthnConnectionPoolFactory extends ConnectionPoolFactory { - - // Note: Saving credentials in environment variables is convenient, but not - // secure - consider a more secure solution such as - // Cloud Secret Manager (https://cloud.google.com/secret-manager) to help - // keep secrets safe. - private static final String INSTANCE_CONNECTION_NAME = - System.getenv("INSTANCE_CONNECTION_NAME"); - private static final String DB_IAM_USER = System.getenv("DB_IAM_USER"); - private static final String DB_NAME = System.getenv("DB_NAME"); - - public static DataSource createConnectionPool() { - // The configuration object specifies behaviors for the connection pool. - HikariConfig config = new HikariConfig(); - - // The following URL is equivalent to setting the config options below: - // jdbc:postgresql:///?cloudSqlInstance=& - // socketFactory=com.google.cloud.sql.postgres.SocketFactory&user=& - // password=password - // See the link below for more info on building a JDBC URL for the Cloud SQL JDBC Socket Factory - // https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory#creating-the-jdbc-url - - // Configure which instance and what database to connect with. - config.setJdbcUrl(String.format("jdbc:postgresql:///%s", DB_NAME)); - - config.addDataSourceProperty("socketFactory", "com.google.cloud.sql.postgres.SocketFactory"); - config.addDataSourceProperty("cloudSqlInstance", INSTANCE_CONNECTION_NAME); - - // If connecting using automatic database authentication, follow the instructions for - // connecting using the connector, but set the DB_IAM_USER value to an IAM user or - // service account that has been given access to the database. - // See https://cloud.google.com/sql/docs/postgres/iam-logins for more details. 
- config.addDataSourceProperty("enableIamAuth", "true"); - config.addDataSourceProperty("user", DB_IAM_USER); - // Password must be set to a nonempty value to bypass driver validation errors. - config.addDataSourceProperty("password", "password"); - // Explicitly set sslmode to disable to prevent driver from hanging. - // The Java Connector will handle SSL so it is unneccesary to enable it at the driver level. - config.addDataSourceProperty("sslmode", "disable"); - - - // ... Specify additional connection properties here. - // [START_EXCLUDE] - configureConnectionPool(config); - // [END_EXCLUDE] - - // Initialize the connection pool using the configuration object. - return new HikariDataSource(config); - } -} -// [END cloud_sql_postgres_servlet_auto_iam_authn] diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/IndexServlet.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/IndexServlet.java deleted file mode 100644 index 10e73fd..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/IndexServlet.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.cloudsql; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import java.io.IOException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Locale; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; -import javax.servlet.ServletException; -import javax.servlet.annotation.WebServlet; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.sql.DataSource; - -@SuppressFBWarnings( - value = {"SE_NO_SERIALVERSIONID", "WEM_WEAK_EXCEPTION_MESSAGING"}, - justification = "Not needed for IndexServlet, Exception adds context") -@WebServlet(name = "Index", value = "") -public class IndexServlet extends HttpServlet { - - private static final Logger LOGGER = Logger.getLogger(IndexServlet.class.getName()); - - public TemplateData getTemplateData(DataSource pool) throws ServletException { - try { - return TemplateData.getTemplateData(pool); - } catch (SQLException ex) { - throw new ServletException(ex); - } - } - - @Override - public void doGet(HttpServletRequest req, HttpServletResponse resp) - throws IOException, ServletException { - // Extract the pool from the Servlet Context, reusing the one that was created - // in the ContextListener when the application was started - DataSource pool = (DataSource) req.getServletContext().getAttribute("my-pool"); - - TemplateData templateData = getTemplateData(pool); - - // Add variables and render the page - req.setAttribute("tabCount", templateData.tabCount); - req.setAttribute("spaceCount", templateData.spaceCount); - req.setAttribute("recentVotes", templateData.recentVotes); - req.getRequestDispatcher("/index.jsp").forward(req, resp); - } - - // Used to validate 
user input. All user provided data should be validated and sanitized before - // being used something like a SQL query. Returns null if invalid. - @Nullable - private String validateTeam(String input) { - if (input != null) { - input = input.toUpperCase(Locale.ENGLISH); - // Must be either "TABS" or "SPACES" - if (!"TABS".equals(input) && !"SPACES".equals(input)) { - return null; - } - } - return input; - } - - @SuppressFBWarnings( - value = {"SERVLET_PARAMETER", "XSS_SERVLET"}, - justification = "Input is validated and sanitized.") - @Override - public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException { - // Get the team from the request and record the time of the vote. - String team = validateTeam(req.getParameter("team")); - Timestamp now = new Timestamp(new Date().getTime()); - if (team == null) { - resp.setStatus(400); - resp.getWriter().append("Invalid team specified."); - return; - } - - // Reuse the pool that was created in the ContextListener when the Servlet started. - DataSource pool = (DataSource) req.getServletContext().getAttribute("my-pool"); - // [START cloud_sql_postgres_servlet_connection] - // Using a try-with-resources statement ensures that the connection is always released back - // into the pool at the end of the statement (even if an error occurs) - try (Connection conn = pool.getConnection()) { - - // PreparedStatements can be more efficient and project against injections. - String stmt = "INSERT INTO votes (time_cast, candidate) VALUES (?, ?);"; - try (PreparedStatement voteStmt = conn.prepareStatement(stmt);) { - voteStmt.setTimestamp(1, now); - voteStmt.setString(2, team); - - // Finally, execute the statement. If it fails, an error will be thrown. - voteStmt.execute(); - } - } catch (SQLException ex) { - // If something goes wrong, handle the error in this section. This might involve retrying or - // adjusting parameters depending on the situation. 
- // [START_EXCLUDE] - LOGGER.log(Level.WARNING, "Error while attempting to submit vote.", ex); - resp.setStatus(500); - resp.getWriter() - .write( - "Unable to successfully cast vote! Please check the application " - + "logs for more details."); - // [END_EXCLUDE] - } - // [END cloud_sql_postgres_servlet_connection] - - resp.setStatus(200); - resp.getWriter().printf("Vote successfully cast for '%s' at time %s!%n", team, now); - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/TcpConnectionPoolFactory.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/TcpConnectionPoolFactory.java deleted file mode 100644 index 7908f20..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/TcpConnectionPoolFactory.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.cloudsql; - -// [START cloud_sql_postgres_servlet_connect_tcp] -// [START cloud_sql_postgres_servlet_connect_tcp_sslcerts] - -import com.zaxxer.hikari.HikariConfig; -import com.zaxxer.hikari.HikariDataSource; -import javax.sql.DataSource; - -public class TcpConnectionPoolFactory extends ConnectionPoolFactory { - - // Note: Saving credentials in environment variables is convenient, but not - // secure - consider a more secure solution such as - // Cloud Secret Manager (https://cloud.google.com/secret-manager) to help - // keep secrets safe. - private static final String DB_USER = System.getenv("DB_USER"); - private static final String DB_PASS = System.getenv("DB_PASS"); - private static final String DB_NAME = System.getenv("DB_NAME"); - - private static final String INSTANCE_HOST = System.getenv("INSTANCE_HOST"); - private static final String DB_PORT = System.getenv("DB_PORT"); - - // [END cloud_sql_postgres_servlet_connect_tcp] - private static final String SSL_CLIENT_KEY_PATH = System.getenv("SSL_CLIENT_KEY_PATH"); - private static final String SSL_CLIENT_KEY_PASSWD = System.getenv("SSL_CLIENT_KEY_PASSWD"); - private static final String SSL_SERVER_CA_PATH = System.getenv("SSL_SERVER_CA_PATH"); - // [START cloud_sql_postgres_servlet_connect_tcp] - - public static DataSource createConnectionPool() { - // The configuration object specifies behaviors for the connection pool. - HikariConfig config = new HikariConfig(); - - // The following URL is equivalent to setting the config options below: - // jdbc:postgresql://:/?user=&password= - // See the link below for more info on building a JDBC URL for the Cloud SQL JDBC Socket Factory - // https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory#creating-the-jdbc-url - - // Configure which instance and what database user to connect with. - config.setJdbcUrl(String.format("jdbc:postgresql://%s:%s/%s", INSTANCE_HOST, DB_PORT, DB_NAME)); - config.setUsername(DB_USER); // e.g. 
"root", "postgres" - config.setPassword(DB_PASS); // e.g. "my-password" - - // [END cloud_sql_postgres_servlet_connect_tcp] - // (OPTIONAL) Configure SSL certificates - // For deployments that connect directly to a Cloud SQL instance without - // using the Cloud SQL Proxy, configuring SSL certificates will ensure the - // connection is encrypted. - // See the link below for more information on how to configure SSL Certificates for use with - // the Postgres JDBC driver - // https://jdbc.postgresql.org/documentation/head/ssl-client.html - if (SSL_CLIENT_KEY_PATH != null && SSL_SERVER_CA_PATH != null) { - config.addDataSourceProperty("ssl", "true"); - config.addDataSourceProperty("sslmode", "verify-full"); - - config.addDataSourceProperty("sslkey", SSL_CLIENT_KEY_PATH); - config.addDataSourceProperty("sslpassword", SSL_CLIENT_KEY_PASSWD); - config.addDataSourceProperty("sslrootcert", SSL_SERVER_CA_PATH); - } - // [START cloud_sql_postgres_servlet_connect_tcp] - - // ... Specify additional connection properties here. - // [START_EXCLUDE] - configureConnectionPool(config); - // [END_EXCLUDE] - - // Initialize the connection pool using the configuration object. - return new HikariDataSource(config); - } -} -// [END cloud_sql_postgres_servlet_connect_tcp] -// [END cloud_sql_postgres_servlet_connect_tcp_sslcerts] diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/TemplateData.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/TemplateData.java deleted file mode 100644 index 10a7f00..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/TemplateData.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.cloudsql; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.List; -import javax.sql.DataSource; - -public class TemplateData { - - public int tabCount; - public int spaceCount; - public List recentVotes; - - public TemplateData(int tabCount, int spaceCount, List recentVotes) { - this.tabCount = tabCount; - this.spaceCount = spaceCount; - this.recentVotes = recentVotes; - } - - public static TemplateData getTemplateData(DataSource pool) throws SQLException { - int tabCount = 0; - int spaceCount = 0; - List recentVotes = new ArrayList<>(); - try (Connection conn = pool.getConnection()) { - // PreparedStatements are compiled by the database immediately and executed at a later date. - // Most databases cache previously compiled queries, which improves efficiency. - String stmt1 = "SELECT candidate, time_cast FROM votes ORDER BY time_cast DESC LIMIT 5"; - try (PreparedStatement voteStmt = conn.prepareStatement(stmt1);) { - // Execute the statement - ResultSet voteResults = voteStmt.executeQuery(); - // Convert a ResultSet into Vote objects - while (voteResults.next()) { - String candidate = voteResults.getString(1); - Timestamp timeCast = voteResults.getTimestamp(2); - recentVotes.add(new Vote(candidate.trim(), timeCast)); - } - } - - // PreparedStatements can also be executed multiple times with different arguments. 
This can - // improve efficiency, and project a query from being vulnerable to an SQL injection. - String stmt2 = "SELECT COUNT(vote_id) FROM votes WHERE candidate=?"; - try (PreparedStatement voteCountStmt = conn.prepareStatement(stmt2);) { - voteCountStmt.setString(1, "TABS"); - ResultSet tabResult = voteCountStmt.executeQuery(); - if (tabResult.next()) { // Move to the first result - tabCount = tabResult.getInt(1); - } - - voteCountStmt.setString(1, "SPACES"); - ResultSet spaceResult = voteCountStmt.executeQuery(); - if (spaceResult.next()) { // Move to the first result - spaceCount = spaceResult.getInt(1); - } - } - } catch (SQLException ex) { - // If something goes wrong, the application needs to react appropriately. This might mean - // getting a new connection and executing the query again, or it might mean redirecting the - // user to a different page to let them know something went wrong. - throw new SQLException( - "Unable to successfully connect to the database. Please check the " - + "steps in the README and try again.", - ex); - } - TemplateData templateData = new TemplateData(tabCount, spaceCount, recentVotes); - - return templateData; - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/Utils.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/Utils.java deleted file mode 100644 index 0c1dcac..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/Utils.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.example.cloudsql; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.Locale; -import javax.annotation.Nullable; -import javax.sql.DataSource; - -public class Utils { - - // Used to validate user input. All user provided data should be validated and sanitized before - // being used something like a SQL query. Returns null if invalid. - @Nullable - public static String validateTeam(String input) { - if (input != null) { - input = input.toUpperCase(Locale.ENGLISH); - // Must be either "TABS" or "SPACES" - if (!"TABS".equals(input) && !"SPACES".equals(input)) { - return null; - } - } - return input; - } - - public static void createTable(DataSource pool) throws SQLException { - // Safely attempt to create the table schema. 
- try (Connection conn = pool.getConnection()) { - String stmt = - "CREATE TABLE IF NOT EXISTS votes ( " - + "vote_id SERIAL NOT NULL, time_cast timestamp NOT NULL, candidate CHAR(6) NOT NULL," - + " PRIMARY KEY (vote_id) );"; - try (PreparedStatement createTableStatement = conn.prepareStatement(stmt);) { - createTableStatement.execute(); - } - } - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/Vote.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/Vote.java deleted file mode 100644 index abfe4c3..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/Vote.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2018 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.cloudsql; - -import java.sql.Timestamp; -import java.util.Locale; - -public class Vote { - - private String candidate; - private Timestamp timeCast; - - public Vote(String candidate, Timestamp timeCast) { - this.candidate = candidate.toUpperCase(Locale.ENGLISH); - this.timeCast = new Timestamp(timeCast.getTime()); - } - - public String getCandidate() { - return candidate; - } - - public void setCandidate(String candidate) { - this.candidate = candidate.toUpperCase(Locale.ENGLISH); - } - - public Timestamp getTimeCast() { - return new Timestamp(timeCast.getTime()); - } - - public void setTimeCast(Timestamp timeCast) { - this.timeCast = new Timestamp(timeCast.getTime()); - } - - public String toString() { - return String.format("Vote(candidate=%s,timeCast=%s)", this.candidate, this.timeCast); - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/functions/Main.java b/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/functions/Main.java deleted file mode 100644 index e42f4ce..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/java/com/example/cloudsql/functions/Main.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.example.cloudsql.functions; - -import com.example.cloudsql.ConnectorConnectionPoolFactory; -import com.example.cloudsql.TcpConnectionPoolFactory; -import com.example.cloudsql.TemplateData; -import com.example.cloudsql.Utils; -import com.google.cloud.functions.HttpFunction; -import com.google.cloud.functions.HttpRequest; -import com.google.cloud.functions.HttpResponse; -import com.google.gson.Gson; -import com.google.gson.JsonObject; -import java.io.IOException; -import java.net.HttpURLConnection; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.Date; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.sql.DataSource; - -public class Main implements HttpFunction { - - private Logger logger = Logger.getLogger(Main.class.getName()); - private static final Gson gson = new Gson(); - - // Declared at cold-start, but only initialized if/when the function executes - // Uses the "initialization-on-demand holder" idiom - // More information: https://en.wikipedia.org/wiki/Initialization-on-demand_holder_idiom - private static class PoolHolder { - - // Making the default constructor private prohibits instantiation of this class - private PoolHolder() { - } - - // This value is initialized only if (and when) the getInstance() function below is called - private static final DataSource INSTANCE = setupPool(); - - private static DataSource setupPool() { - DataSource pool; - if (System.getenv("INSTANCE_HOST") != null) { - pool = TcpConnectionPoolFactory.createConnectionPool(); - } else { - pool = ConnectorConnectionPoolFactory.createConnectionPool(); - } - try { - Utils.createTable(pool); - } catch (SQLException ex) { - throw new RuntimeException( - "Unable to verify table schema. 
Please double check the steps" - + "in the README and try again.", - ex); - } - return pool; - } - - private static DataSource getInstance() { - return PoolHolder.INSTANCE; - } - } - - private void returnVoteCounts(HttpRequest req, HttpResponse resp) - throws SQLException, IOException { - DataSource pool = PoolHolder.getInstance(); - TemplateData templateData = TemplateData.getTemplateData(pool); - JsonObject respContent = new JsonObject(); - - // Return JSON Data - respContent.addProperty("tabCount", templateData.tabCount); - respContent.addProperty("spaceCount", templateData.spaceCount); - respContent.addProperty("recentVotes", gson.toJson(templateData.recentVotes)); - resp.getWriter().write(respContent.toString()); - resp.setStatusCode(HttpURLConnection.HTTP_OK); - } - - private void submitVote(HttpRequest req, HttpResponse resp) throws IOException { - DataSource pool = PoolHolder.getInstance(); - Timestamp now = new Timestamp(new Date().getTime()); - JsonObject body = gson.fromJson(req.getReader(), JsonObject.class); - String team = Utils.validateTeam(body.get("team").getAsString()); - if (team == null) { - resp.setStatusCode(400); - resp.getWriter().append("Invalid team specified."); - return; - } - try (Connection conn = pool.getConnection()) { - // PreparedStatements can be more efficient and project against injections. - String stmt = "INSERT INTO votes (time_cast, candidate) VALUES (?, ?);"; - try (PreparedStatement voteStmt = conn.prepareStatement(stmt);) { - voteStmt.setTimestamp(1, now); - voteStmt.setString(2, team); - - // Finally, execute the statement. If it fails, an error will be thrown. - voteStmt.execute(); - } - } catch (SQLException ex) { - // If something goes wrong, handle the error in this section. This might involve retrying or - // adjusting parameters depending on the situation. 
- logger.log(Level.WARNING, "Error while attempting to submit vote.", ex); - resp.setStatusCode(500); - resp.getWriter() - .write( - "Unable to successfully cast vote! Please check the application " - + "logs for more details."); - } - } - - @Override - public void service(HttpRequest req, HttpResponse resp) throws IOException, SQLException { - - String method = req.getMethod(); - switch (method) { - case "GET": - returnVoteCounts(req, resp); - break; - case "POST": - submitVote(req, resp); - break; - default: - resp.setStatusCode(HttpURLConnection.HTTP_BAD_METHOD); - resp.getWriter().write(String.format("HTTP Method %s is not supported", method)); - break; - } - } -} diff --git a/sample-deploy-code/code/cloudsql/src/main/webapp/WEB-INF/appengine-web.xml b/sample-deploy-code/code/cloudsql/src/main/webapp/WEB-INF/appengine-web.xml deleted file mode 100644 index 0a2cd6a..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/webapp/WEB-INF/appengine-web.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - true - java17 - - - - - - - diff --git a/sample-deploy-code/code/cloudsql/src/main/webapp/index.jsp b/sample-deploy-code/code/cloudsql/src/main/webapp/index.jsp deleted file mode 100644 index d0f61b9..0000000 --- a/sample-deploy-code/code/cloudsql/src/main/webapp/index.jsp +++ /dev/null @@ -1,115 +0,0 @@ - -<%@ page contentType="text/html;charset=UTF-8" language="java" %> -<%@ taglib prefix="c" uri="http://java.sun.com/jsp/jstl/core" %> - - - Tabs VS Spaces - - - - - - -
-
-

- - - TABS and SPACES are evenly matched! - - - TABS are winning by - ! - - - SPACES are winning by - !! - - -

-
-
-
- - - -
- keyboard_tab -

votes

- -
-
-
- - - -
- space_bar -

votes

- -
-
-
-

Recent Votes

-
    - -
  • - - - keyboard_tab - - - space_bar - - - - A vote for - -

    was cast at .

    -
  • -
    -
-
- - - diff --git a/sample-deploy-code/code/cloudsql/src/test/java/com/example/cloudsql/TestIndexServletPostgres.java b/sample-deploy-code/code/cloudsql/src/test/java/com/example/cloudsql/TestIndexServletPostgres.java deleted file mode 100644 index cfc7358..0000000 --- a/sample-deploy-code/code/cloudsql/src/test/java/com/example/cloudsql/TestIndexServletPostgres.java +++ /dev/null @@ -1,132 +0,0 @@ -// /* -// * Copyright 2020 Google LLC -// * -// * Licensed under the Apache License, Version 2.0 (the "License"); -// * you may not use this file except in compliance with the License. -// * You may obtain a copy of the License at -// * -// * http://www.apache.org/licenses/LICENSE-2.0 -// * -// * Unless required by applicable law or agreed to in writing, software -// * distributed under the License is distributed on an "AS IS" BASIS, -// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// * See the License for the specific language governing permissions and -// * limitations under the License. 
-// */ - -// package com.example.cloudsql; -//TODO: fix this test -// import static com.google.common.truth.Truth.assertWithMessage; -// import static org.junit.Assert.assertNotNull; -// import static org.junit.Assert.assertTrue; -// import static org.mockito.Mockito.mock; -// import static org.mockito.Mockito.when; - -// import com.zaxxer.hikari.HikariConfig; -// import com.zaxxer.hikari.HikariDataSource; -// import java.io.PrintWriter; -// import java.io.StringWriter; -// import java.sql.Connection; -// import java.sql.PreparedStatement; -// import java.sql.SQLException; -// import java.util.Arrays; -// import java.util.List; -// import java.util.UUID; -// import javax.servlet.ServletContext; -// import javax.servlet.http.HttpServletRequest; -// import javax.servlet.http.HttpServletResponse; -// import javax.sql.DataSource; -// import org.junit.AfterClass; -// import org.junit.BeforeClass; -// import org.junit.Test; - - -// public class TestIndexServletPostgres { - -// private static List requiredEnvVars = -// Arrays.asList("PG_USER", "PG_PASS", "PG_DB", "PG_CONNECTION_NAME"); - -// private static DataSource pool; -// private static String tableName; - -// public static void checkEnvVars() { -// // Check that required env vars are set -// requiredEnvVars.forEach((varName) -> { -// assertWithMessage( -// String.format("Environment variable '%s' must be set to perform these tests.", varName)) -// .that(System.getenv(varName)).isNotEmpty(); -// }); -// } - -// private static void createTable(DataSource pool) throws SQLException { -// // Safely attempt to create the table schema. 
-// tableName = String.format("votes_%s", UUID.randomUUID().toString().replace("-", "")); -// try (Connection conn = pool.getConnection()) { -// String stmt = -// "CREATE TABLE IF NOT EXISTS " -// + tableName -// + " ( vote_id SERIAL NOT NULL, time_cast timestamp NOT NULL," -// + " candidate CHAR(6) NOT NULL," -// + " PRIMARY KEY (vote_id) );"; -// try (PreparedStatement createTableStatement = conn.prepareStatement(stmt);) { -// createTableStatement.execute(); -// } -// } -// } - - -// @BeforeClass -// public static void createPool() throws SQLException { -// checkEnvVars(); -// HikariConfig config = new HikariConfig(); - -// config.setJdbcUrl(String.format("jdbc:postgresql:///%s", System.getenv("PG_DB"))); -// config.setUsername(System.getenv("PG_USER")); // e.g. "root", "mysql" -// config.setPassword(System.getenv("PG_PASS")); // e.g. "my-password" -// config.addDataSourceProperty("socketFactory", "com.google.cloud.sql.postgres.SocketFactory"); -// config.addDataSourceProperty("cloudSqlInstance", System.getenv("PG_CONNECTION_NAME")); - -// pool = new HikariDataSource(config); -// createTable(pool); - -// } - -// @AfterClass -// public static void dropTable() throws SQLException { -// try (Connection conn = pool.getConnection()) { -// String stmt = String.format("DROP TABLE %s;", tableName); -// try (PreparedStatement createTableStatement = conn.prepareStatement(stmt);) { -// createTableStatement.execute(); -// } -// } -// } - -// @Test -// public void testGetTemplateData() throws Exception { -// TemplateData templateData = new IndexServlet().getTemplateData(pool); - -// assertNotNull(templateData.tabCount); -// assertNotNull(templateData.spaceCount); -// assertNotNull(templateData.recentVotes); -// } - -// @Test -// public void testServletPost() throws Exception { -// HttpServletRequest request = mock(HttpServletRequest.class); -// HttpServletResponse response = mock(HttpServletResponse.class); -// ServletContext context = mock(ServletContext.class); - -// 
when(request.getServletContext()).thenReturn(context); -// when(context.getAttribute("my-pool")).thenReturn(pool); -// when(request.getParameter("team")).thenReturn("TABS"); - -// StringWriter stringWriter = new StringWriter(); -// PrintWriter writer = new PrintWriter(stringWriter); -// when(response.getWriter()).thenReturn(writer); - -// new IndexServlet().doPost(request, response); - -// writer.flush(); -// assertTrue(stringWriter.toString().contains("Vote successfully cast for")); -// } -// } \ No newline at end of file diff --git a/sample-deploy-code/code/cloudsql/target/maven-archiver/pom.properties b/sample-deploy-code/code/cloudsql/target/maven-archiver/pom.properties deleted file mode 100644 index 46fe2a9..0000000 --- a/sample-deploy-code/code/cloudsql/target/maven-archiver/pom.properties +++ /dev/null @@ -1,4 +0,0 @@ -#Created by Apache Maven 3.8.6 -groupId=com.example -artifactId=cloudsql -version=1.0 diff --git a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/compile/default-compile/createdFiles.lst b/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/compile/default-compile/createdFiles.lst deleted file mode 100644 index ca9fd37..0000000 --- a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/compile/default-compile/createdFiles.lst +++ /dev/null @@ -1,11 +0,0 @@ -com/example/cloudsql/ConnectorIamAuthnConnectionPoolFactory.class -com/example/cloudsql/ConnectionPoolContextListener.class -com/example/cloudsql/TemplateData.class -com/example/cloudsql/ConnectionPoolFactory.class -com/example/cloudsql/ConnectorConnectionPoolFactory.class -com/example/cloudsql/IndexServlet.class -com/example/cloudsql/functions/Main.class -com/example/cloudsql/Vote.class -com/example/cloudsql/functions/Main$PoolHolder.class -com/example/cloudsql/Utils.class -com/example/cloudsql/TcpConnectionPoolFactory.class diff --git 
a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/compile/default-compile/inputFiles.lst b/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/compile/default-compile/inputFiles.lst deleted file mode 100644 index f3f471e..0000000 --- a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/compile/default-compile/inputFiles.lst +++ /dev/null @@ -1,10 +0,0 @@ -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorConnectionPoolFactory.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolContextListener.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/Utils.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/Vote.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/functions/Main.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/ConnectorIamAuthnConnectionPoolFactory.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/ConnectionPoolFactory.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/TcpConnectionPoolFactory.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/TemplateData.java -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/main/java/com/example/cloudsql/IndexServlet.java diff --git 
a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst b/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst deleted file mode 100644 index e69de29..0000000 diff --git a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/inputFiles.lst b/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/inputFiles.lst deleted file mode 100644 index cb99285..0000000 --- a/sample-deploy-code/code/cloudsql/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/inputFiles.lst +++ /dev/null @@ -1 +0,0 @@ -/Users/andrewchasin/Documents/GitHub/cloud-run-reference-architecture/code/cloudsql/src/test/java/com/example/cloudsql/TestIndexServletPostgres.java diff --git a/sample-deploy-code/service-yaml/container-canary.yaml b/service-yaml/container-canary.yaml similarity index 100% rename from sample-deploy-code/service-yaml/container-canary.yaml rename to service-yaml/container-canary.yaml diff --git a/sample-deploy-code/service-yaml/container-dev.yaml b/service-yaml/container-dev.yaml similarity index 100% rename from sample-deploy-code/service-yaml/container-dev.yaml rename to service-yaml/container-dev.yaml diff --git a/sample-deploy-code/service-yaml/container-prod.yaml b/service-yaml/container-prod.yaml similarity index 100% rename from sample-deploy-code/service-yaml/container-prod.yaml rename to service-yaml/container-prod.yaml diff --git a/sample-deploy-code/service-yaml/container-qa.yaml b/service-yaml/container-qa.yaml similarity index 100% rename from sample-deploy-code/service-yaml/container-qa.yaml rename to service-yaml/container-qa.yaml