diff --git a/.github/workflows/build_on_pr.yaml b/.github/workflows/build_on_pr.yaml
new file mode 100644
index 0000000..de30294
--- /dev/null
+++ b/.github/workflows/build_on_pr.yaml
@@ -0,0 +1,25 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: Main branch PR Build
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Gradle Build
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v3
+      - name: Set up Zulu JDK 11
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'zulu'
+          java-version: '11'
+      - name: Build
+        run: ./gradlew clean build
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000..e705a9c
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,18 @@
+name: CI
+
+on: [ push, pull_request ]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Gradle Build
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v3
+      - name: Set up Zulu JDK 11
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'zulu'
+          java-version: '11'
+      - name: Build
+        run: ./gradlew clean build
\ No newline at end of file
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000..4461526
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,91 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: Publish release to Maven Central
+on:
+  release:
+    types:
+      - released
+      - prereleased
+  push:
+    branches: [ documentation ]
+
+jobs:
+  publish:
+    runs-on: ubuntu-latest
+    environment: prod
+    name: Gradle Build and Publish
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v3
+      - name: Set up Zulu JDK 11
+        uses: actions/setup-java@v3
+        with:
+          distribution: 'zulu'
+          java-version: '11'
+      - name: Publish
+        run: |
+          export VERSION=${{github.ref_name}}
+          export REL_VER=`echo ${VERSION:1}`
+          echo "Release version is $REL_VER"
+          echo "RELEASE_VERSION=$REL_VER" >> $GITHUB_ENV
+          ./gradlew -x test publish -Pversion=$REL_VER -PmavenCentral -Pusername=${{ secrets.SONATYPE_USERNAME }} -Ppassword=${{ secrets.SONATYPE_PASSWORD }}
+          echo "Building UI"
+          ls -ltr server/build/libs
+          docker/build-ui.sh
+          echo "Done building UI"
+
+      - name: Login to Docker Hub Container Registry
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Build and push Server
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          file: docker/DockerfileServer
+          push: true
+          platforms: linux/arm64,linux/amd64
+          tags: |
+            orkesio/orkes-conductor-community:latest
+            orkesio/orkes-conductor-community:${{ env.RELEASE_VERSION }}
+
+      - name: Build and push Standalone
+        uses: docker/build-push-action@v3
+        with:
+          context: .
+          file: docker/DockerfileStandalone
+          push: true
+          platforms: linux/arm64,linux/amd64
+          tags: |
+            orkesio/orkes-conductor-community-standalone:latest
+            orkesio/orkes-conductor-community-standalone:${{ env.RELEASE_VERSION }}
+        env:
+          ORG_GRADLE_PROJECT_signingKeyId: ${{ secrets.SIGNING_KEY_ID }}
+          ORG_GRADLE_PROJECT_signingKey: ${{ secrets.SIGNING_KEY }}
+          ORG_GRADLE_PROJECT_signingPassword: ${{ secrets.SIGNING_PASSWORD }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..03cd24b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,30 @@
+# Compiled class file
+*.class
+
+# Log file
+*.log
+
+# BlueJ files
+*.ctxt
+
+# Mobile Tools for Java (J2ME)
+.mtj.tmp/
+
+# Package Files #
+*.war
+*.nar
+*.ear
+*.zip
+*.tar.gz
+*.rar
+
+# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
+hs_err_pid*
+.idea/
+.gradle/
+build/
+
+.DS_Store
+tmp/
+out/
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..4204bd5
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,207 @@
+                         Orkes Community License Agreement
+
+PLEASE READ THIS ORKES COMMUNITY LICENSE AGREEMENT (“Agreement”). BY
+DOWNLOADING, INSTALLING, USING OR DISTRIBUTING THE SOFTWARE
+(DEFINED BELOW) (“Software”), YOU AND ANY ENTITY YOU REPRESENT (“Licensee”
+or “you”) AGREE TO BE BOUND BY THIS AGREEMENT WITH ORKES, INC., A
+DELAWARE CORPORATION (“Orkes”). IF AT ANY TIME YOU DO NOT AGREE TO
+ALL OF THE TERMS OF THIS AGREEMENT, THEN DO NOT DOWNLOAD, INSTALL,
+USE OR DISTRIBUTE THE SOFTWARE AND IMMEDIATELY CEASE ALL USE OF THE
+SOFTWARE.
+
+By agreeing to this Agreement, you represent that you have full power, capacity and authority to
+accept the terms of this Agreement. If you are accepting the terms of this Agreement on behalf
+of an employer or another entity, you and such employer or other entity represent that you have
+full legal authority to bind such employer or other entity to this Agreement.
+Orkes and Licensee agree to the following terms and conditions:
+
+                                         Article 1
+Definitions
+1.1 “Conductor Community Software” means the Conductor community software,
+sometimes referred to by the community as Netflix Conductor, contributed by the community
+under an Apache 2.0 license, which has been made available at
+https://github.com/Netflix/conductor.
+1.2 “Intellectual Property Rights” mean all legally protectable proprietary or intellectual
+property rights in the United States, and anywhere in the world, past, present and future,
+including without limitation any patent, copyright, or trade secret. For the purposes of this
+Agreement, Intellectual Property Rights do not include trademarks.
+1.3 “Source Code” means software in source code or human readable form.
+1.4 “Source Materials” means those portions of the Software furnished to Licensee by Orkes
+in Source Code.
+1.5 “Software” means the Orkes software licensed under this Agreement as may be identified
+at the following url: https://github.com/orkes-io/licenses/blob/main/community/SOFTWARE.txt
+
+                                         Article 2
+License Grant
+2.1 Software License.
As of the Effective Date, subject to the terms and conditions of this +Agreement, including, without limitation, Article 3 (License Restrictions and Intellectual +Property Rights), Orkes grants Licensee a limited, non-exclusive, non-transferable, and non- +sublicensable license, to: + (a) evaluate the Software in Licensee’s development and testing environments; + (b) use the Software internally for Licensee’s internal business purposes; + (c) modify and make derivative works of the Software; + (d) reproduce the Software; and + (e) distribute Software in binary or Source Code form. +2.2 Conditions to License. The licenses under Article 2 (License Grants) are conditioned +upon Licensee’s compliance with the terms of this Agreement including without limitation +Article 3 (License Restrictions and Intellectual Property Rights). +2.3 Third Party Intellectual Property. The Software may use, include or rely on third-party +software or other Intellectual Property Rights (“Third-Party Intellectual Property”). Other than +the Conductor Community Software included in the Software that is licensed under the Apache +2.0 license, any Third Party Intellectual Property that Orkes provides to Licensee is for +convenience only, and is not part of the Software and is not licensed hereunder. Licensee is +solely responsible for procuring and complying with, and shall procure and comply with, any +necessary license rights if Licensee uses any Third-Party Intellectual Property. + + Article 3 +License Restrictions and Intellectual Property Rights +3.1 Orkes Software Notice. Licensee shall not remove or modify any Orkes or third-party +copyright or other proprietary notices on the Software. Additionally, Licensee shall provide the +following notice on each copy of Software: +THIS SOFTWARE IS MADE AVAILABLE BY ORKES, INC., A DELAWARE +CORPORATION (“Orkes”) UNDER THE ORKES COMMUNITY LICENSE +AGREEMENT (“Agreement”). BY DOWNLOADING, INSTALLING, USING +OR DISTRIBUTING THE SOFTWARE, YOU AND ANY ENTITY YOU +REPRESENT (“Licensee” or “you”) AGREE TO BE BOUND BY THE ORKES +COMMUNITY LICENSE AGREEMENT AT ALL TIMES. IF YOU DO NOT +AGREE TO ALL OF THE TERMS OF THE ORKES COMMUNITY LICENSE +AGREEMENT AT ANY TIME, THEN DO NOT DOWNLOAD, INSTALL, +USE OR DISTRIBUTE THE SOFTWARE AND YOU SHALL +IMMEDIATELY CEASE ANY USE OF THE SOFTWARE. +3.2 Restrictions. Licensee agrees that Licensee shall not: (i) use the Software outside of the +scope of the license granted hereunder or in violation of any restrictions hereunder; or (ii) export +or re-export the Software directly or indirectly in violation of the laws of the United States or any +other jurisdiction. +3.3 Competitive Products. Licensee shall not (i) use or provide the Software as a service for +any third party (including as software-as-a-service, time-sharing or service bureau), (ii) +otherwise provide the Software to a third party in competition with the Software or in +competition with Orkes, (iii) use the Software (including any Source Materials) to develop any +product, technology or service that competes with the Software or any Orkes product or service +or (iv) allow its personnel who have access to the Software to develop any such competitive +product, technology or service. +3.4 Open Source Software. Licensee shall not use or combine the Source Materials (or the +Software) with open-source software or other items in any manner which would subject the +Software to any additional open source software terms and conditions. 
For clarity, the foregoing +does not prohibit Licensee from combining and using the Software with Conductor code that is +subject only to the Apache 2.0 license or subject to an Orkes community license. +3.5 Ownership by Orkes. Orkes retains all Orkes’ Intellectual Property Rights covering or +embodied in the Software, subject to the limited licenses granted to Licensee under this +Agreement and any third-party rights in the Software. +3.6 No Trademark License. Licensee acquires no right or license to any trademarks of Orkes +hereunder. +3.7 No Other Rights. All Intellectual Property Rights of Orkes not expressly granted to +Licensee in this Agreement are expressly reserved by Orkes. Without limitation, Licensee +receives no right or license, by implication, estoppel or otherwise, to any software, product, +technology or Intellectual Property Rights not embodied in the Software, even if such other +software, technology or Intellectual Property Rights are useful or necessary in connection with +the Software. Licensee agrees not to claim, assert or assist in the claim or assertion of any such +license or right disclaimed as provided above. + + Article 4 +No Warranty +THE SOFTWARE IS PROVIDED “AS-IS” WITHOUT ANY WARRANTY OF ANY KIND. +TO THE MAXIMUM EXTENT PERMITTED BY LAW, ORKES DISCLAIMS ALL +WARRANTIES, CONDITIONS AND REPRESENTATIONS (EXPRESS OR IMPLIED, +ORAL OR WRITTEN) WITH RESPECT TO THE SOFTWARE, INCLUDING, WITHOUT +LIMITATION, WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A +PARTICULAR PURPOSE. + + Article 5 +Licensee Responsibility +Licensee, and not Orkes, is solely responsible for any warranties and covenants Licensee makes in +connection with the Licensee’s products and services as well as the Software and any results thereof, and +any resulting claims from any customers or other third party. Without limiting the foregoing, Licensee is +responsible for complying with applicable law in connection with use of the Software and verifying and +validating the suitability and reliability of the Software for all of Licensee’s use thereof. Further, +Licensee must take prudent steps to protect against failures when the Software or results thereof is +incorporated in a system or application, including providing back-up and shut-down mechanisms. + + Article 6 +Limitation of Liability +6.1 Limitation of Liability. ORKES SHALL NOT BE LIABLE TO LICENSEE FOR ANY +SPECIAL, INDIRECT, INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES, +INCLUDING, WITHOUT LIMITATION, LOST PROFITS, BUSINESS INTERRUPTION OR +LOSS OF INFORMATION, IN ANY WAY RELATED TO THIS AGREEMENT, +REGARDLESS OF WHETHER SUCH PARTY WAS ADVISED OF THE POSSIBILITY OF +ANY OF THE FOREGOING. IN NO EVENT SHALL THE TOTAL COLLECTIVE +LIABILITY OF ORKES FOR ALL CLAIMS HEREUNDER OR IN ANY WAY RELATED +TO THIS AGREEMENT EXCEED THE GREATER OF (I) THE TWENTY PERCENT (20%) +OF AGGREGATE AMOUNTS PAID OR OWED BY LICENSEE UNDER THIS +AGREEMENT IN THE PRECEDING TWELVE (12) MONTHS OR (II) ONE HUNDRED +DOLLARS ($100). +6.2 Allocation of Risk. The warranty disclaimer and limitations of liability set forth in this +Agreement shall apply irrespective of any failure of the essential purpose of any limited remedy. +Licensee acknowledges and agrees that, but for these provisions, Orkes would not have made the +Software available to Licensee under the terms contemplated under this Agreement. +6.3 Applicable Law. 
The warranty disclaimer and limitations of liability set forth in this
+Agreement shall not apply to the extent prohibited by law, in which case the disclaimer or
+limitation shall be modified to disclaim and/or limit in accordance with applicable law. Without
+limiting the foregoing, to the extent required by law, the foregoing limitations shall not apply to
+claims due to fraud, bodily injury or death.
+
+                                         Article 7
+Termination
+7.1 Term. This Agreement shall remain effective until terminated.
+7.2 Termination for Cause. Orkes may terminate this Agreement upon written notice to
+Licensee upon Licensee’s material breach of this Agreement, where such breach is incurable or,
+if curable, remains uncured for thirty (30) days after notice to Licensee.
+7.3 Termination for Bankruptcy. If Licensee (a) becomes insolvent or bankrupt, (b) dissolves
+or ceases to conduct business in the ordinary course, (c) makes an assignment for the benefit of
+its creditors, (d) commences any insolvency, receivership, bankruptcy or other similar
+proceeding for the settlement of its debts or (e) has commenced against it any insolvency,
+receivership, bankruptcy or other similar proceeding for the settlement of its debts that is not
+dismissed within thirty (30) days after notice of such proceeding, then Orkes may terminate this
+Agreement immediately upon written notice to Licensee.
+7.4 Effect of Termination. If this Agreement is terminated or expires for any reason, all
+rights granted hereunder to Licensee shall terminate, and Licensee shall immediately cease all
+use of the Software. The provisions of Article 1 (Definitions), Article 3 (License Restrictions
+and Intellectual Property Rights), Article 4 (No Warranty), Article 5 (Licensee Responsibility),
+Article 6 (Limitation of Liability), Article 7 (Termination) and Article 8 (Miscellaneous) shall
+survive termination of this Agreement.
+
+                                         Article 8
+Miscellaneous
+8.1 Relationship of Parties. The Parties to this Agreement are independent contractors, and
+this Agreement shall not establish any relationship of partnership, joint venture, employment,
+franchise or agency between the Parties. Neither Party shall have the power to bind the other or
+incur obligations on the other’s behalf without the other Party’s prior written consent.
+8.2 Assignment. Licensee shall not have the right to assign this Agreement, in whole or in
+part, without Orkes’s prior written consent; assignment by operation of law or change of control
+of Licensee is prohibited. Orkes may assign this Agreement without consent. Any attempt to
+assign this Agreement, other than as permitted above, shall be null and void.
+8.3 Federal Acquisition. This provision applies to all acquisitions of the Software by or for
+the Federal Government, whether by any prime contractor or subcontractor and whether under
+any procurement contract, grant, cooperative agreement or other activity by or with the Federal
+Government. By accepting delivery of the Software, the Government agrees the Software
+qualifies as “commercial” computer software within the meaning of the acquisition regulations
+applicable to this procurement. The terms and conditions of this Agreement shall pertain to the
+Government’s use and disclosure of the software and shall supersede any conflicting contractual
+terms or conditions. If this Agreement fails to meet the Government’s needs or is inconsistent in
+any respect with Federal law, the Government agrees to return the Software, unused, to Orkes.
+8.4 Governing Law.
This Agreement shall be governed by, and construed in accordance
+with, the laws of the State of California, U.S.A., applicable to contracts made in full and
+performed in the State of California, U.S.A., without reference to any conflict of law or choice of
+law principles that would cause the application of laws of any other jurisdiction. The United
+Nations Convention on Contracts for the International Sale of Goods shall not apply to this
+Agreement.
+8.5 Jurisdiction and Venue. Jurisdiction and venue for any dispute arising from or related to
+this Agreement shall be in the state and federal courts of Santa Clara County, California, USA,
+and each party hereby consents to the jurisdiction and venue of such courts.
+8.6 Language of Agreement. This Agreement is made in the English language only, and the
+English language version shall control in all respects. In the event that this Agreement is
+translated into another language, such translation shall not be binding upon the Parties.
+8.7 Severability. If any provision of this Agreement, or the application thereof, shall for any
+reason and to any extent be determined by a court of competent jurisdiction to be invalid or
+unenforceable under applicable law, a valid provision that most closely matches the intent of the
+original shall be substituted, and the remaining provisions of this Agreement shall be interpreted
+so as best to reasonably effect its original intent.
+8.8 Waiver. The failure by either Party to enforce any provision of this Agreement shall not
+constitute a waiver of future enforcement of that or any other provision.
+8.9 Entire Agreement. This Agreement contains the complete understanding and agreement
+of the parties and supersedes all prior or contemporaneous agreements or understandings, oral or
+written, relating to the subject matter herein. Any waiver, modification or amendment of any
+provision of this Agreement shall be effective only if in writing and signed by duly authorized
+representatives of the Parties. No inconsistent or additional terms or conditions in any document
+provided by Licensee, including any purchase orders, purchase agreements, requests for
+proposals, bills of lading or the like shall apply to this Agreement or the activities hereunder, and
+any such terms or conditions are hereby rejected.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..10031a0
--- /dev/null
+++ b/README.md
@@ -0,0 +1,83 @@
+# Orkes Conductor
+Orkes Conductor is a fully compatible distribution of Netflix Conductor, built on an Orkes-certified stack.
+
+[![CI](https://github.com/orkes-io/orkes-conductor-community/actions/workflows/ci.yaml/badge.svg)](https://github.com/orkes-io/orkes-conductor-community/actions/workflows/ci.yaml)
+[![License](https://img.shields.io/badge/license-orkes%20community%20license-green)](https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt)
+
+<pre>
+  ______   .______       __  ___  _______     _______.
+ /  __  \  |   _  \     |  |/  / |   ____|   /       |
+|  |  |  | |  |_)  |    |  '  /  |  |__     |   (----`
+|  |  |  | |      /     |    <   |   __|     \   \    
+|  `--'  | |  |\  \----.|  .  \  |  |____.----)   |   
+ \______/  | _| `._____||__|\__\ |_______|_______/    
+                                                      
+  ______   ______   .__   __.  _______   __    __    ______ .___________.  ______   .______      
+ /      | /  __  \  |  \ |  | |       \ |  |  |  |  /      ||           | /  __  \  |   _  \     
+|  ,----'|  |  |  | |   \|  | |  .--.  ||  |  |  | |  ,----'`---|  |----`|  |  |  | |  |_)  |    
+|  |     |  |  |  | |  . `  | |  |  |  ||  |  |  | |  |         |  |     |  |  |  | |      /     
+|  `----.|  `--'  | |  |\   | |  '--'  ||  `--'  | |  `----.    |  |     |  `--'  | |  |\  \----.
+ \______| \______/  |__| \__| |_______/  \______/   \______|    |__|      \______/  | _| `._____| 
+
+</pre>
+
+## Stack
+1. **Redis** as the primary store for running workflows
+2. **Postgres** for storing completed workflows and for indexing, enabling full-text search
+3. **Orkes-Queues**: Redis-based queues that improve upon dyno-queues, provide higher performance, and are built from the ground up to support Redis standalone and cluster modes
+
+## Getting Started
+### Docker
+The easiest way to run Conductor. Each release is published as `orkesio/orkes-conductor-community` Docker images.
+
+#### Fully self-contained standalone server with all the dependencies
+Container image useful for local development and testing.
+>**Note:** the self-contained Docker image shouldn't be used in a production environment.
+
+#### Simple self-contained script to launch the docker image
+```shell
+curl https://github.com/orkes-io/orkes-conductor-community/blob/scripts/run.sh | sh
+```
+#### Using `docker run` manually (provides more control)
+```shell
+
+# Create volumes for the persistent stores
+# These preserve the Redis and Postgres data across container restarts
+docker volume create postgres
+docker volume create redis
+
+docker run --init -p 8080:8080 -p 1234:5000 --mount source=redis,target=/redis \
+--mount source=postgres,target=/pgdata orkesio/orkes-conductor-community:latest
+```
+Once the container starts, navigate to http://localhost:1234 to open the UI. A minimal Java worker sketch is shown at the end of this README.
+
+#### Server + UI Docker
+```shell
+docker pull orkesio/orkes-conductor-server:latest
+```
+>**Note** To use a specific version of Conductor, replace `latest` with the release version,
+> e.g.
+>
+> ```docker pull orkesio/orkes-conductor-server:3.11.0```
+
+#### Published Artifacts
+The server JAR is published on Maven Central at the following location:
+[TBD](http://orkes.io)
+
+### Contributions
+We welcome community contributions and PRs to this repository.
+
+### Get Support
+Use GitHub issues for filing bugs and the Discussion Forum for any other questions, ideas, or support requests.
+The Orkes (http://orkes.io) development team creates and maintains the Orkes Conductor releases.
+
+### License
+Copyright 2022 Orkes, Inc.
+Licensed under the Orkes Community License. You may obtain a copy of the License at:
+```
+https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt
+```
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+
+**TL;DR**
+The Orkes Community License is a source-available license that allows anyone to download, use, modify, and distribute Orkes Conductor free of charge, subject to the restrictions described in the license text.
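+
+### Example: polling for work
+Once a server is running (see the Docker instructions above), tasks can be polled and executed with the standard Netflix Conductor Java client. This is a minimal sketch, not part of this repository; the task definition name `simple_task` and the client wiring are assumptions:
+```java
+import java.util.List;
+
+import com.netflix.conductor.client.automator.TaskRunnerConfigurer;
+import com.netflix.conductor.client.http.TaskClient;
+import com.netflix.conductor.client.worker.Worker;
+import com.netflix.conductor.common.metadata.tasks.Task;
+import com.netflix.conductor.common.metadata.tasks.TaskResult;
+
+public class ExampleWorker implements Worker {
+
+    @Override
+    public String getTaskDefName() {
+        return "simple_task"; // hypothetical task definition name
+    }
+
+    @Override
+    public TaskResult execute(Task task) {
+        // Produce some output and mark the task as completed
+        TaskResult result = new TaskResult(task);
+        result.getOutputData().put("greeting", "hello");
+        result.setStatus(TaskResult.Status.COMPLETED);
+        return result;
+    }
+
+    public static void main(String[] args) {
+        TaskClient taskClient = new TaskClient();
+        taskClient.setRootURI("http://localhost:8080/api/"); // server started above
+        TaskRunnerConfigurer configurer =
+                new TaskRunnerConfigurer.Builder(taskClient, List.of(new ExampleWorker()))
+                        .withThreadCount(1)
+                        .build();
+        configurer.init(); // starts polling for work
+    }
+}
+```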
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..06c9d56
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,4 @@
+# Security Policy
+
+## Reporting a Vulnerability
+Please open an issue to report any vulnerability related to the source code in this repository.
diff --git a/archive/README.md b/archive/README.md
new file mode 100644
index 0000000..01e4ecc
--- /dev/null
+++ b/archive/README.md
@@ -0,0 +1,34 @@
+# Workflow archival module
+This module uses Postgres as the archival and indexing system.
+
+See the [schema folder](src/main/resources/db/migration_archive_postgres/) for details on the schema.
+
+## Behavior
+When enabled, all completed workflows (COMPLETED, TERMINATED, FAILED) are moved to the Postgres archive and removed from the primary datasource. A sketch of how this wiring fits together appears at the end of this README.
+
+*Please do not use Elasticsearch for indexing when using this module - it would be redundant.*
+
+## Configuration properties
+### Enable archival
+```properties
+conductor.archive.db.enabled=true
+conductor.archive.db.type=postgres
+conductor.archive.db.indexer.threadCount=1
+conductor.archive.db.indexer.pollingInterval=1
+```
+
+### Database configuration
+Below is an example for connecting to a local Postgres instance:
+```properties
+spring.datasource.url=jdbc:postgresql://localhost/
+spring.datasource.username=postgres
+spring.datasource.password=postgres
+spring.datasource.hikari.maximum-pool-size=8
+spring.datasource.hikari.auto-commit=false
+```
+
+### Disable elasticsearch indexing
+```properties
+conductor.app.asyncIndexingEnabled=false
+conductor.indexing.enabled=false
+```
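+
+## How the module fits together
+When `conductor.archive.db.enabled=true`, the primary `ExecutionDAO` is wrapped by `ArchivedExecutionDAO`: hot-path operations stay on the primary (Redis) store, reads fall back to the Postgres archive, and completed workflows are queued for asynchronous indexing. Below is a minimal sketch of what such a wiring could look like; the configuration class and bean names are illustrative assumptions, not the module's actual auto-configuration:
+```java
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Primary;
+
+import com.netflix.conductor.dao.ExecutionDAO;
+import com.netflix.conductor.dao.QueueDAO;
+
+import io.orkes.conductor.dao.archive.ArchiveDAO;
+import io.orkes.conductor.dao.archive.ArchivedExecutionDAO;
+import io.orkes.conductor.metrics.MetricsCollector;
+
+// Hypothetical configuration class for illustration only
+@Configuration
+public class ArchiveWiringSketch {
+
+    // Wraps the primary ExecutionDAO so that reads fall back to the archive and
+    // workflow writes are queued for asynchronous indexing.
+    @Bean
+    @Primary
+    @ConditionalOnProperty(name = "conductor.archive.db.enabled", havingValue = "true")
+    public ExecutionDAO archivedExecutionDAO(
+            ExecutionDAO primaryExecutionDAO,
+            ArchiveDAO archiveDAO,
+            QueueDAO queueDAO,
+            MetricsCollector metricsCollector) {
+        return new ArchivedExecutionDAO(primaryExecutionDAO, archiveDAO, queueDAO, metricsCollector);
+    }
+}
+```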
diff --git a/archive/build.gradle b/archive/build.gradle
new file mode 100644
index 0000000..2332c73
--- /dev/null
+++ b/archive/build.gradle
@@ -0,0 +1,51 @@
+dependencies {
+
+    implementation "com.netflix.conductor:conductor-common:${versions.conductorfork}"
+    implementation "com.netflix.conductor:conductor-core:${versions.conductorfork}"
+    implementation ("com.netflix.conductor:conductor-postgres-persistence:${versions.conductorfork}")
+
+    implementation 'org.springframework.boot:spring-boot-starter'
+    implementation 'org.springframework.boot:spring-boot-starter-data-jdbc'
+    implementation 'org.springframework.boot:spring-boot-starter-data-jpa'
+    implementation 'org.springframework.boot:spring-boot-starter-security'
+
+    implementation "com.fasterxml.jackson.core:jackson-databind"
+    implementation "com.fasterxml.jackson.core:jackson-core"
+
+    //Metrics
+    implementation "io.micrometer:micrometer-registry-prometheus:1.7.5"
+    implementation "io.micrometer:micrometer-core:1.7.5"
+    implementation "com.netflix.spectator:spectator-reg-micrometer:${versions.revSpectator}"
+    implementation "com.netflix.spectator:spectator-reg-metrics3:${versions.revSpectator}"
+    implementation "com.netflix.spectator:spectator-api:${versions.revSpectator}"
+
+    implementation 'org.springframework.retry:spring-retry'
+
+    //Flyway for postgres configuration
+    implementation "org.flywaydb:flyway-core"
+
+    //Cache
+    implementation "com.google.guava:guava:${versions.revGuava}"
+
+    //Lucene
+    implementation "org.apache.lucene:lucene-core:${versions.revLucene}"
+    implementation "org.apache.lucene:lucene-analyzers-common:${versions.revLucene}"
+
+    //spring
+    testImplementation 'org.springframework.boot:spring-boot-starter-test'
+    testImplementation 'org.springframework.security:spring-security-test'
+
+    testImplementation 'org.hamcrest:hamcrest'
+    testImplementation "org.awaitility:awaitility:3.1.6"
+
+    //postgres test container
+    testImplementation "org.testcontainers:postgresql:${versions.revTestContainer}"
+
+    //Fake data generator
+    testImplementation "com.github.javafaker:javafaker:1.0.2"
+}
+
+test {
+    useJUnitPlatform()
+}
\ No newline at end of file
diff --git a/archive/src/main/java/io/orkes/conductor/dao/archive/ArchiveDAO.java b/archive/src/main/java/io/orkes/conductor/dao/archive/ArchiveDAO.java
new file mode 100644
index 0000000..58bc968
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/archive/ArchiveDAO.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.archive;
+
+import java.util.List;
+
+import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
+import com.netflix.conductor.model.WorkflowModel;
+
+public interface ArchiveDAO {
+
+    // Workflow Methods
+
+    void createOrUpdateWorkflow(WorkflowModel workflow);
+
+    boolean removeWorkflow(String workflowId);
+
+    WorkflowModel getWorkflow(String workflowId, boolean includeTasks);
+
+    List<String> getWorkflowIdsByType(String workflowName, Long startTime, Long endTime);
+
+    List<String> getWorkflowIdsByCorrelationId(
+            String workflowName, String correlationId, boolean includeClosed, boolean includeTasks);
+
+    ScrollableSearchResult<String> searchWorkflows(
+            String query, String freeText, int start, int count);
+
+    List<TaskExecLog> getTaskExecutionLogs(String taskId);
+
+    void addTaskExecutionLogs(List<TaskExecLog> logs);
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/archive/ArchivedExecutionDAO.java b/archive/src/main/java/io/orkes/conductor/dao/archive/ArchivedExecutionDAO.java
new file mode 100644
index 0000000..d73409e
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/archive/ArchivedExecutionDAO.java
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.archive;
+
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.stream.Collectors;
+
+import com.netflix.conductor.common.metadata.events.EventExecution;
+import com.netflix.conductor.common.run.Workflow;
+import com.netflix.conductor.dao.ExecutionDAO;
+import com.netflix.conductor.dao.QueueDAO;
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+
+import io.orkes.conductor.dao.indexer.IndexWorker;
+import io.orkes.conductor.id.TimeBasedUUIDGenerator;
+import io.orkes.conductor.metrics.MetricsCollector;
+
+import lombok.extern.slf4j.Slf4j;
+
+import static io.orkes.conductor.dao.indexer.IndexWorker.INDEXER_QUEUE;
+
+@Slf4j
+public class ArchivedExecutionDAO implements ExecutionDAO {
+
+    private static final int OFFSET_TIME_SEC = 0;
+
+    private final ExecutionDAO primaryDAO;
+
+    private final ArchiveDAO archiveDAO;
+
+    private final QueueDAO queueDAO;
+
+    private final MetricsCollector metricsCollector;
+
+    private final Clock clock;
+
+    public ArchivedExecutionDAO(
+            ExecutionDAO primaryDAO,
+            ArchiveDAO archiveDAO,
+            QueueDAO queueDAO,
+            MetricsCollector metricsCollector) {
+        this.primaryDAO = primaryDAO;
+        this.archiveDAO = archiveDAO;
+        this.queueDAO = queueDAO;
+        this.metricsCollector = metricsCollector;
+        this.clock = Clock.systemDefaultZone();
+        log.info(
+                "Initialized {} as Execution DAO with {} as primary DAO",
+                ArchivedExecutionDAO.class.getSimpleName(),
+                primaryDAO.getClass().getSimpleName());
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    //                      Delegate to Primary DAO                       //
+    ////////////////////////////////////////////////////////////////////////
+    @Override
+    public List<TaskModel> getPendingTasksByWorkflow(String taskName, String workflowId) {
+        return primaryDAO.getPendingTasksByWorkflow(taskName, workflowId);
+    }
+
+    @Override
+    public List<TaskModel> createTasks(List<TaskModel> tasks) {
+        return metricsCollector
+                .getTimer("create_tasks_dao")
+                .record(() -> primaryDAO.createTasks(tasks));
+    }
+
+    @Override
+    public void updateTask(TaskModel task) {
+        metricsCollector
+                .getTimer("update_task_dao", "taskType", task.getTaskDefName())
+                .record(
+                        () -> {
+                            primaryDAO.updateTask(task);
+                            if (task.getStatus().isTerminal()) {
+                                metricsCollector.recordTaskComplete(task);
+                            }
+                        });
+    }
+
+    @Override
+    public boolean exceedsInProgressLimit(TaskModel task) {
+        return primaryDAO.exceedsInProgressLimit(task);
+    }
+
+    @Override
+    public boolean removeTask(String taskId) {
+        return primaryDAO.removeTask(taskId);
+    }
+
+    @Override
+    public List<TaskModel> getPendingTasksForTaskType(String taskType) {
+        return primaryDAO.getPendingTasksForTaskType(taskType);
+    }
+
+    @Override
+    public void removeFromPendingWorkflow(String workflowType, String workflowId) {
+        primaryDAO.removeFromPendingWorkflow(workflowType, workflowId);
+    }
+
+    @Override
+    public List<String> getRunningWorkflowIds(String workflowName, int version) {
+        return primaryDAO.getRunningWorkflowIds(workflowName, version);
+    }
+
+    @Override
+    public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
+        return primaryDAO.getPendingWorkflowsByType(workflowName, version);
+    }
+
+    @Override
+    public long getPendingWorkflowCount(String workflowName) {
+        return primaryDAO.getPendingWorkflowCount(workflowName);
+    }
+
+    @Override
+    public List<TaskModel> getTasks(String taskType, String startKey, int count) {
+        // This method is only intended to show pending tasks
+        return primaryDAO.getTasks(taskType, startKey, count);
+    }
+
+    @Override
+    public long getInProgressTaskCount(String taskDefName) {
+        return primaryDAO.getInProgressTaskCount(taskDefName);
+    }
+
+    @Override
+    public boolean canSearchAcrossWorkflows() {
+        return true;
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    //                            Hybrid Mode                             //
+    ////////////////////////////////////////////////////////////////////////
+
+    @Override
+    public String updateWorkflow(WorkflowModel workflow) {
+        return metricsCollector
+                .getTimer("update_workflow_dao", "workflowName", workflow.getWorkflowName())
+                .record(
+                        () -> {
+                            workflow.setUpdatedTime(System.currentTimeMillis());
+                            String id = primaryDAO.updateWorkflow(workflow);
+                            queueForIndexing(workflow, false);
+                            if (workflow.getStatus().isTerminal()) {
+                                metricsCollector.recordWorkflowComplete(workflow);
+                            }
+                            return id;
+                        });
+    }
+
+    @Override
+    public TaskModel getTask(String taskId) {
+        return metricsCollector
+                .getTimer("get_task_dao")
+                .record(() -> primaryDAO.getTask(taskId));
+    }
+
+    @Override
+    public List<TaskModel> getTasks(List<String> taskIds) {
+        return metricsCollector
+                .getTimer("get_tasks_dao")
+                .record(() -> primaryDAO.getTasks(taskIds));
+    }
+
+    @Override
+    public List<TaskModel> getTasksForWorkflow(String workflowId) {
+        return metricsCollector
+                .getTimer("get_tasks_for_workflow_dao")
+                .record(
+                        () -> {
+                            List<TaskModel> tasks = primaryDAO.getTasksForWorkflow(workflowId);
+                            if (tasks == null || tasks.isEmpty()) {
+                                tasks = archiveDAO.getWorkflow(workflowId, true).getTasks();
+                            }
+                            return tasks;
+                        });
+    }
+
+    @Override
+    public String createWorkflow(WorkflowModel workflow) {
+        // The UUIDs used are time-based, and we keep the creation time encoded in the UUID
+        // consistent with the create time of the workflow.
+        // The create time is used for partitioning, and this makes it possible to derive the
+        // create time directly from the workflow id.
+        long time = TimeBasedUUIDGenerator.getDate(workflow.getWorkflowId());
+        workflow.setCreateTime(time);
+
+        return metricsCollector
+                .getTimer("create_workflow_dao", "workflowName", workflow.getWorkflowName())
+                .record(
+                        () -> {
+                            workflow.setUpdatedTime(System.currentTimeMillis());
+                            String workflowId = primaryDAO.createWorkflow(workflow);
+                            queueForIndexing(workflow, true);
+                            return workflowId;
+                        });
+    }
+
+    @Override
+    public boolean removeWorkflow(String workflowId) {
+        boolean removed = primaryDAO.removeWorkflow(workflowId);
+        if (!removed) {
+            removed = archiveDAO.removeWorkflow(workflowId);
+        }
+        return removed;
+    }
+
+    @Override
+    public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
+        return primaryDAO.removeWorkflowWithExpiry(workflowId, ttlSeconds);
+    }
+
+    @Override
+    public WorkflowModel getWorkflow(String workflowId) {
+        return getWorkflow(workflowId, false);
+    }
+
+    @Override
+    public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
+        WorkflowModel workflow = primaryDAO.getWorkflow(workflowId, includeTasks);
+        if (workflow == null) {
+            log.debug("Not found in primary dao, going to archive {}", workflowId);
+            workflow =
+                    metricsCollector
+                            .getTimer("get_workflow_archive_dao", "includeTasks", "" + includeTasks)
+                            .record(() ->
archiveDAO.getWorkflow(workflowId, includeTasks));
+        }
+        return workflow;
+    }
+
+    @Override
+    public List<WorkflowModel> getWorkflowsByType(
+            String workflowName, Long startTime, Long endTime) {
+        List<WorkflowModel> workflows = new ArrayList<>();
+        List<String> workflowIds =
+                archiveDAO.getWorkflowIdsByType(workflowName, startTime, endTime);
+        for (String workflowId : workflowIds) {
+            workflows.add(getWorkflow(workflowId));
+        }
+
+        return workflows;
+    }
+
+    @Override
+    public List<WorkflowModel> getWorkflowsByCorrelationId(
+            String workflowName, String correlationId, boolean includeTasks) {
+        List<String> ids =
+                archiveDAO.getWorkflowIdsByCorrelationId(
+                        workflowName, correlationId, false, includeTasks);
+        return ids.stream()
+                .map(id -> getWorkflow(id, includeTasks))
+                .filter(wf -> wf != null)
+                .collect(Collectors.toList());
+    }
+
+    @Override
+    public boolean addEventExecution(EventExecution eventExecution) {
+        boolean added = primaryDAO.addEventExecution(eventExecution);
+        return added;
+    }
+
+    @Override
+    public void updateEventExecution(EventExecution eventExecution) {
+        primaryDAO.updateEventExecution(eventExecution);
+    }
+
+    @Override
+    public void removeEventExecution(EventExecution eventExecution) {
+        primaryDAO.removeEventExecution(eventExecution);
+    }
+
+    private void queueForIndexing(WorkflowModel workflow, boolean created) {
+
+        if (!created && !workflow.getStatus().isTerminal()) {
+            // Do nothing! We only index the workflow once it's created and once it's completed
+            return;
+        }
+        String messageId =
+                IndexWorker.WORKFLOW_ID_PREFIX
+                        + workflow.getWorkflowId()
+                        + ":"
+                        + workflow.getStatus();
+        long offsetTime = OFFSET_TIME_SEC;
+
+        if (workflow.getStatus().isTerminal()) {
+            // Move ahead of the queue
+
+            // Below is how the score is calculated for pushing the message to the sorted set
+            // double score = Long.valueOf(clock.millis() + message.getTimeout()).doubleValue() +
+            // priority;
+
+            // Making the offset negative pushes the message to the beginning of the queue.
+            // The current time is reduced by 1000 seconds to ensure any mismatches do not cause
+            // the score to be negative.
+            // A negative score is allowed, but when querying the messages, the min score is set to 0
+            offsetTime = -1 * ((clock.millis() / 1000) - 1000);
+        }
+
+        if (!created && !workflow.getStatus().isTerminal()) {
+            // If this is not a newly created workflow and it is not yet completed,
+            // we add a random delay before indexing.
+            // Adding a delay ensures two things:
+            // 1. If the workflow completes in the next 1-2 seconds, the completed status will
+            //    remove the pending workflow indexing --> see the block below
+            // 2. The probability that multiple parallel threads/workers pick up the same
+            //    workflow id is reduced, avoiding database row lock contention
+            int delayInSeconds = Math.max(1, new Random().nextInt(10));
+            offsetTime = delayInSeconds;
+        }
+
+        queueDAO.push(INDEXER_QUEUE, messageId, offsetTime);
+
+        if (workflow.getStatus().isTerminal()) {
+            // Remove any previous message, so we can avoid indexing it twice
+            messageId =
+                    IndexWorker.WORKFLOW_ID_PREFIX
+                            + workflow.getWorkflowId()
+                            + ":"
+                            + Workflow.WorkflowStatus.RUNNING.toString();
+            queueDAO.ack(INDEXER_QUEUE, messageId);
+        }
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/archive/ArchivedIndexDAO.java b/archive/src/main/java/io/orkes/conductor/dao/archive/ArchivedIndexDAO.java
new file mode 100644
index 0000000..6a2ac24
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/archive/ArchivedIndexDAO.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.archive;
+
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Primary;
+import org.springframework.stereotype.Component;
+
+import com.netflix.conductor.common.metadata.events.EventExecution;
+import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
+import com.netflix.conductor.common.run.SearchResult;
+import com.netflix.conductor.common.run.TaskSummary;
+import com.netflix.conductor.common.run.WorkflowSummary;
+import com.netflix.conductor.core.events.queue.Message;
+import com.netflix.conductor.dao.IndexDAO;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Component
+@ConditionalOnProperty(name = "conductor.archive.db.enabled", havingValue = "true")
+@Primary
+@Slf4j
+public class ArchivedIndexDAO implements IndexDAO {
+
+    private final ArchiveDAO archiveDAO;
+
+    public ArchivedIndexDAO(ArchiveDAO archiveDAO) {
+        this.archiveDAO = archiveDAO;
+    }
+
+    @Override
+    public void setup() throws Exception {}
+
+    @Override
+    public void indexWorkflow(WorkflowSummary workflow) {}
+
+    @Override
+    public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public void indexTask(TaskSummary task) {
+        return;
+    }
+
+    @Override
+    public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public SearchResult<String> searchWorkflows(
+            String query, String freeText, int start, int count, List<String> sort) {
+        return archiveDAO.searchWorkflows(query, freeText, start, count);
+    }
+
+    @Override
+    public SearchResult<String> searchTasks(
+            String query, String freeText, int start, int count, List<String> sort) {
+        throw new UnsupportedOperationException("Task search is not supported in this environment");
+    }
+
+    @Override
+    public void removeWorkflow(String workflowId) {
+        archiveDAO.removeWorkflow(workflowId);
+    }
+
+    @Override
+    public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
+        archiveDAO.removeWorkflow(workflowId);
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {}
+
+    @Override
+    public CompletableFuture<Void> asyncUpdateWorkflow(
+            String workflowInstanceId, String[] keys, Object[] values) {
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public String get(String workflowInstanceId, String key) {
+        return null;
+    }
+
+    @Override
+    public void addTaskExecutionLogs(List<TaskExecLog> logs) {
+        archiveDAO.addTaskExecutionLogs(logs);
+    }
+
+    @Override
+    public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
+        archiveDAO.addTaskExecutionLogs(logs);
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
+        return archiveDAO.getTaskExecutionLogs(taskId);
+    }
+
+    @Override
+    public void addEventExecution(EventExecution eventExecution) {}
+
+    @Override
+    public List<EventExecution> getEventExecutions(String event) {
+        return null;
+    }
+
+    @Override
+    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public void addMessage(String queue, Message msg) {}
+
+    @Override
+    public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
+        return CompletableFuture.completedFuture(null);
+    }
+
+    @Override
+    public List<Message> getMessages(String queue) {
+        return null;
+    }
+
+    @Override
+    public List<String> searchArchivableWorkflows(String indexName, long archiveTtlDays) {
+        throw new UnsupportedOperationException("You do not need to use this! :)");
+    }
+
+    public long getWorkflowCount(String query, String freeText) {
+        return 0;
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/archive/DocumentStoreDAO.java b/archive/src/main/java/io/orkes/conductor/dao/archive/DocumentStoreDAO.java
new file mode 100644
index 0000000..4c45f17
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/archive/DocumentStoreDAO.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.archive; + +import com.netflix.conductor.model.WorkflowModel; + +/** + * Document store is used to store completed workflow JSON data. + * + * @see io.orkes.conductor.dao.postgres.archive.PostgresArchiveDAO + */ +public interface DocumentStoreDAO { + + void createOrUpdateWorkflow(WorkflowModel workflow); + + boolean removeWorkflow(String workflowId); + + WorkflowModel getWorkflow(String workflowId, boolean includeTasks); +} diff --git a/archive/src/main/java/io/orkes/conductor/dao/archive/ScrollableSearchResult.java b/archive/src/main/java/io/orkes/conductor/dao/archive/ScrollableSearchResult.java new file mode 100644 index 0000000..c0dfe6b --- /dev/null +++ b/archive/src/main/java/io/orkes/conductor/dao/archive/ScrollableSearchResult.java @@ -0,0 +1,44 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.archive;
+
+import java.util.List;
+
+import com.netflix.conductor.common.run.SearchResult;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+import lombok.Setter;
+
+@Getter
+@Setter
+@NoArgsConstructor
+public class ScrollableSearchResult<T> extends SearchResult<T> {
+
+    private String queryId;
+
+    public ScrollableSearchResult(List<T> results, String queryId) {
+        super(0, results);
+        this.queryId = queryId;
+    }
+
+    // With ScrollableSearchResult the total hit count is always zero, which is confusing from an
+    // API client's perspective. That's why it is ignored.
+    @Override
+    @JsonIgnore
+    public long getTotalHits() {
+        return super.getTotalHits();
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexValueExtractor.java b/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexValueExtractor.java
new file mode 100644
index 0000000..e2518bc
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexValueExtractor.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.indexer;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.en.EnglishAnalyzer;
+
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class IndexValueExtractor {
+
+    private static final String splitWords = "\"|,|;|\\s|,";
+    private static final String replaceWords = "\"|,|;|\\s|,|:";
+    private static final CharArraySet stopWords = EnglishAnalyzer.getDefaultStopSet();
+
+    public static Collection<String> getIndexWords(
+            WorkflowModel workflow, int maxWords, int maxWordLength) {
+
+        try {
+
+            List<String> words = getIndexWords(workflow);
+            return words.stream()
+                    .flatMap(value -> Arrays.asList(value.split(splitWords)).stream())
+                    .filter(word -> word.length() < maxWordLength)
+                    .filter(word -> word.length() > 2)
+                    .filter(word -> !word.trim().isBlank())
+                    .map(word -> word.toLowerCase().trim().replaceAll(replaceWords + "+$", ""))
+                    .filter(word -> !stopWords.contains(word))
+                    .limit(maxWords)
+                    .collect(Collectors.toSet());
+
+        } catch (Exception e) {
+            log.warn("Error serializing input/output map to text: " + e.getMessage(), e);
+            return new ArrayList<>();
+        }
+    }
+
+    private static List<String> getIndexWords(WorkflowModel workflow) {
+        List<String> words = new ArrayList<>();
+        append(words, workflow.getCorrelationId());
+        append(words, workflow.getInput());
+        append(words, workflow.getOutput());
+        append(words, workflow.getReasonForIncompletion());
+        append(words, workflow.getVariables());
+
+        for (TaskModel task : workflow.getTasks()) {
+            append(words, task.getOutputData());
+        }
+        return words;
+    }
+
+    private static void append(List<String> words, Object value) {
+        if (value instanceof String || value instanceof Number) {
+            words.add(value.toString());
+        } else if (value instanceof List) {
+            List<?> values = (List<?>) value;
+            for (Object valueObj : values) {
+                if (valueObj != null) words.add(valueObj.toString());
+            }
+        } else if (value instanceof Map) {
+            append(words, (Map<?, ?>) value);
+        }
+    }
+
+    private static void append(List<String> words, Map<?, ?> map) {
+
+        map.values()
+                .forEach(
+                        value -> {
+                            if (value instanceof String || value instanceof Number) {
+                                words.add(value.toString());
+                            } else if (value instanceof Map) {
+                                append(words, (Map<?, ?>) value);
+                            } else if (value instanceof List) {
+                                List<?> values = (List<?>) value;
+                                for (Object valueObj : values) {
+                                    if (valueObj != null) words.add(valueObj.toString());
+                                }
+                            }
+                        });
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexWorker.java b/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexWorker.java
new file mode 100644
index 0000000..9e0c45e
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexWorker.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.indexer;
+
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.stereotype.Component;
+
+import com.netflix.conductor.dao.ExecutionDAO;
+import com.netflix.conductor.dao.QueueDAO;
+import com.netflix.conductor.metrics.Monitors;
+import com.netflix.conductor.model.WorkflowModel;
+
+import io.orkes.conductor.dao.archive.ArchiveDAO;
+import io.orkes.conductor.metrics.MetricsCollector;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@Component
+@ConditionalOnProperty(name = "conductor.archive.db.enabled", havingValue = "true")
+public class IndexWorker {
+
+    public static final String INDEXER_QUEUE = "_index_queue";
+
+    public static final String WORKFLOW_ID_PREFIX = "wf:";
+
+    public static final String EVENT_ID_PREFIX = "e:";
+
+    private final QueueDAO queueDAO;
+
+    private final ArchiveDAO archiveDAO;
+
+    private final ExecutionDAO primaryExecDAO;
+
+    private final MetricsCollector metricsCollector;
+
+    private ScheduledExecutorService executorService;
+
+    private int pollBatchSize;
+
+    private int ttlSeconds = 30;
+
+    public IndexWorker(
+            QueueDAO queueDAO,
+            ArchiveDAO execDAO,
+            ExecutionDAO primaryExecDAO,
+            IndexWorkerProperties properties,
+            MetricsCollector metricsCollector) {
+        this.queueDAO = queueDAO;
+        this.archiveDAO = execDAO;
+        this.primaryExecDAO = primaryExecDAO;
+        this.pollBatchSize = properties.getPollBatchSize();
+        this.metricsCollector = metricsCollector;
+
+        int threadCount = properties.getThreadCount();
+        int pollingInterval = properties.getPollingInterval();
+        if (threadCount > 0) {
+            this.executorService =
+                    Executors.newScheduledThreadPool(
+                            threadCount,
+                            new ThreadFactoryBuilder().setNameFormat("indexer-thread-%d").build());
+
+            for (int i = 0; i < threadCount; i++) {
+                this.executorService.scheduleWithFixedDelay(
+                        () -> {
+                            try {
+                                pollAndExecute();
+                            } catch (Throwable t) {
+                                log.error(t.getMessage(), t);
+                            }
+                        },
+                        10,
+                        pollingInterval,
+                        TimeUnit.MILLISECONDS);
+            }
+        }
+
+        log.info(
+                "IndexWorker::INIT with primaryExecDAO = {}, threadCount = {}, pollingInterval = {} and pollBatchSize = {}",
+                primaryExecDAO,
+                threadCount,
+                pollingInterval,
+                pollBatchSize);
+    }
+
+    private void pollAndExecute() {
+
+        try {
+
+            List<String> ids = queueDAO.pop(INDEXER_QUEUE, pollBatchSize, 1000); // 1 second timeout
+
+            for (String id : ids) {
+                if (id.startsWith(WORKFLOW_ID_PREFIX)) {
+                    String workflowId = id.substring(WORKFLOW_ID_PREFIX.length());
+                    workflowId = workflowId.substring(0, workflowId.lastIndexOf(':'));
+                    indexWorkflow(workflowId);
+                    queueDAO.ack(INDEXER_QUEUE, id);
+                } else if (id.startsWith(EVENT_ID_PREFIX)) {
+                    String eventId = id.substring(EVENT_ID_PREFIX.length());
+                    eventId = eventId.substring(0, eventId.lastIndexOf(':'));
+                    indexEvent(eventId);
+                    queueDAO.ack(INDEXER_QUEUE, id);
+                }
+            }
+
+        } catch (Throwable e) {
+            Monitors.error(IndexWorker.class.getSimpleName(), "pollAndExecute");
+            log.error(e.getMessage(), e);
+        } finally {
Monitors.recordQueueDepth(INDEXER_QUEUE, queueDAO.getSize(INDEXER_QUEUE), ""); + } + } + + private void indexWorkflow(String workflowId) { + WorkflowModel workflow = primaryExecDAO.getWorkflow(workflowId, true); + if (workflow == null) { + log.warn("Cannot find workflow in the primary DAO: {}", workflowId); + return; + } + + metricsCollector + .getTimer("archive_workflow", "workflowName", "" + workflow.getWorkflowName()) + .record(() -> archiveDAO.createOrUpdateWorkflow(workflow)); + + if (workflow.getStatus().isTerminal()) { + primaryExecDAO.removeWorkflowWithExpiry(workflowId, ttlSeconds); + } + } + + private void indexEvent(String eventId) { + log.trace("Indexing event {}", eventId); + // Do nothing for now + } +} diff --git a/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexWorkerProperties.java b/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexWorkerProperties.java new file mode 100644 index 0000000..7811821 --- /dev/null +++ b/archive/src/main/java/io/orkes/conductor/dao/indexer/IndexWorkerProperties.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.indexer;
+
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@ConfigurationProperties("conductor.archive.db.indexer")
+public class IndexWorkerProperties {
+
+    private int threadCount = 3;
+
+    private int pollingInterval = 10; // in milliseconds
+
+    private int pollBatchSize = 10;
+
+    public int getThreadCount() {
+        return threadCount;
+    }
+
+    public void setThreadCount(int threadCount) {
+        this.threadCount = threadCount;
+    }
+
+    public int getPollingInterval() {
+        return pollingInterval;
+    }
+
+    public void setPollingInterval(int pollingInterval) {
+        this.pollingInterval = pollingInterval;
+    }
+
+    public int getPollBatchSize() {
+        return pollBatchSize;
+    }
+
+    public void setPollBatchSize(int pollBatchSize) {
+        this.pollBatchSize = pollBatchSize;
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/indexer/TaskIndex.java b/archive/src/main/java/io/orkes/conductor/dao/indexer/TaskIndex.java
new file mode 100644
index 0000000..4395a14
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/indexer/TaskIndex.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.indexer;
+
+import java.util.List;
+import java.util.Map;
+
+import com.netflix.conductor.model.TaskModel;
+
+public class TaskIndex {
+
+    private TaskModel task;
+
+    private int maxValueLen;
+
+    public TaskIndex(TaskModel task, int maxValueLen) {
+        this.task = task;
+        this.maxValueLen = maxValueLen;
+    }
+
+    public String toIndexString() {
+        StringBuilder sb = new StringBuilder();
+        append(sb, task.getTaskType());
+        append(sb, task.getTaskId());
+        append(sb, task.getDomain());
+        append(sb, task.getReferenceTaskName());
+        append(sb, task.getStatus().toString());
+        append(sb, task.getExternalInputPayloadStoragePath());
+        append(sb, task.getExternalOutputPayloadStoragePath());
+        append(sb, task.getInputData());
+        append(sb, task.getOutputData());
+        append(sb, task.getReasonForIncompletion());
+        append(sb, task.getWorkerId());
+
+        return sb.toString();
+    }
+
+    @Override
+    public String toString() {
+        return toIndexString();
+    }
+
+    private String toString(Map<String, Object> map) {
+        StringBuilder sb = new StringBuilder();
+        append(sb, map);
+        return sb.toString();
+    }
+
+    private void append(StringBuilder sb, Object value) {
+        if (value instanceof String || value instanceof Number) {
+            sb.append(" ");
+            sb.append(value.toString());
+        } else if (value instanceof Map) {
+            sb.append(" ");
+            append(sb, (Map<String, Object>) value);
+        } else if (value instanceof List) {
+            List<?> values = (List<?>) value;
+            for (Object valueObj : values) {
+                sb.append(" ");
+                append(sb, valueObj);
+            }
+        }
+    }
+
+    private void append(StringBuilder sb, Map<String, Object> map) {
+
+        map.entrySet()
+                .forEach(
+                        entry -> {
+                            String key = entry.getKey();
+                            Object value = entry.getValue();
+                            sb.append(" ");
+                            sb.append(key);
+                            if (value instanceof String) {
+                                sb.append(" ");
+                                String valueString = value.toString();
+                                sb.append(
+                                        valueString.substring(
+                                                0, Math.min(valueString.length(), maxValueLen)));
+                            } else if (value instanceof Number) {
+                                sb.append(" ");
+                                sb.append(value.toString());
+                            } else if (value instanceof Map) {
+                                sb.append(" ");
+                                append(sb, (Map<String, Object>) value);
+                            } else if (value instanceof List) {
+                                List<?> values = (List<?>) value;
+                                for (Object valueObj : values) {
+                                    sb.append(" ");
+                                    append(sb, valueObj);
+                                }
+                            }
+                        });
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/indexer/WorkflowIndex.java b/archive/src/main/java/io/orkes/conductor/dao/indexer/WorkflowIndex.java
new file mode 100644
index 0000000..32efcfb
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/indexer/WorkflowIndex.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.indexer;
+
+import java.util.Collection;
+
+import com.netflix.conductor.model.WorkflowModel;
+
+public class WorkflowIndex {
+
+    private WorkflowModel workflow;
+    private int maxWords;
+    private int maxWordLength;
+
+    public WorkflowIndex(WorkflowModel workflow, int maxWords, int maxWordLength) {
+        this.workflow = workflow;
+        this.maxWords = maxWords;
+        this.maxWordLength = maxWordLength;
+    }
+
+    public Collection<String> toIndexWords() {
+        return IndexValueExtractor.getIndexWords(workflow, maxWords, maxWordLength);
+    }
+
+    @Override
+    public String toString() {
+        return toIndexWords().toString();
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/PostgresArchiveDAO.java b/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/PostgresArchiveDAO.java
new file mode 100644
index 0000000..1c4624e
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/PostgresArchiveDAO.java
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.postgres.archive; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import javax.sql.DataSource; + +import org.apache.logging.log4j.util.Strings; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.retry.support.RetryTemplate; + +import com.netflix.conductor.common.metadata.tasks.TaskExecLog; +import com.netflix.conductor.common.run.SearchResult; +import com.netflix.conductor.model.WorkflowModel; +import com.netflix.conductor.postgres.dao.PostgresBaseDAO; + +import io.orkes.conductor.dao.archive.ArchiveDAO; +import io.orkes.conductor.dao.archive.DocumentStoreDAO; +import io.orkes.conductor.dao.archive.ScrollableSearchResult; +import io.orkes.conductor.dao.indexer.WorkflowIndex; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class PostgresArchiveDAO extends PostgresBaseDAO implements ArchiveDAO, DocumentStoreDAO { + + private static final String GET_WORKFLOW = + "SELECT json_data FROM workflow_archive WHERE workflow_id = ? FOR SHARE SKIP LOCKED"; + + private static final String REMOVE_WORKFLOW = + "DELETE FROM workflow_archive WHERE workflow_id = ?"; + + private final DataSource searchDatasource; + + private static final String TABLE_NAME = "workflow_archive"; + + public PostgresArchiveDAO( + ObjectMapper objectMapper, + DataSource dataSource, + @Qualifier("searchDatasource") DataSource searchDatasource) { + super(RetryTemplate.defaultInstance(), objectMapper, dataSource); + this.searchDatasource = searchDatasource; + + log.info("Using {} as search datasource", searchDatasource); + + try (Connection conn = searchDatasource.getConnection()) { + log.info("Using {} as search datasource", conn.getMetaData().getURL()); + } catch (Exception e) { + log.error(e.getMessage(), e); + } + } + + public void createOrUpdateWorkflow(WorkflowModel workflow) { + + String INSERT_OR_UPDATE_LATEST = + "INSERT INTO " + + TABLE_NAME + + " as wf" + + "(workflow_id, created_on, modified_on, correlation_id, workflow_name, status, index_data, created_by, json_data) " + + "VALUES " + + "(?, ?, ?, ?, ?, ?, ?, ?, ?) " + + "ON CONFLICT (workflow_id) DO " + + "UPDATE SET modified_on = ?, status = ?, index_data = ?, json_data = ? " + + "WHERE wf.modified_on < ? ;"; + + try (Connection connection = super.dataSource.getConnection()) { + connection.setAutoCommit(true); + PreparedStatement statement = connection.prepareStatement(INSERT_OR_UPDATE_LATEST); + + WorkflowIndex index = new WorkflowIndex(workflow, 200, 50); + Collection indexData = index.toIndexWords(); + + int indx = 1; + long updatedTime = workflow.getUpdatedTime() == null ? 
0 : workflow.getUpdatedTime();
+
+            // Insert values
+
+            statement.setString(indx++, workflow.getWorkflowId());
+            statement.setLong(indx++, workflow.getCreateTime());
+            statement.setLong(indx++, updatedTime);
+            statement.setString(indx++, workflow.getCorrelationId());
+            statement.setString(indx++, workflow.getWorkflowName());
+            statement.setString(indx++, workflow.getStatus().toString());
+            statement.setArray(
+                    indx++, connection.createArrayOf("text", indexData.toArray(new String[0])));
+            statement.setString(indx++, workflow.getCreatedBy());
+
+            String workflowJson = null;
+            if (workflow.getStatus().isTerminal()) {
+                workflowJson = objectMapper.writeValueAsString(workflow);
+            }
+            statement.setString(indx++, workflowJson);
+            // Update values
+            statement.setLong(indx++, updatedTime);
+            statement.setString(indx++, workflow.getStatus().toString());
+            statement.setArray(
+                    indx++, connection.createArrayOf("text", indexData.toArray(new String[0])));
+            statement.setString(indx++, workflowJson);
+            statement.setLong(indx, updatedTime);
+
+            statement.executeUpdate();
+
+        } catch (Exception e) {
+            log.error(
+                    "Error updating workflow {} - {}", workflow.getWorkflowId(), e.getMessage(), e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    public boolean removeWorkflow(String workflowId) {
+        boolean removed = false;
+        WorkflowModel workflow = this.getWorkflow(workflowId, true);
+        if (workflow != null) {
+            withTransaction(
+                    connection ->
+                            execute(
+                                    connection,
+                                    REMOVE_WORKFLOW,
+                                    q -> q.addParameter(workflowId).executeDelete()));
+            removed = true;
+        }
+        return removed;
+    }
+
+    @Override
+    public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
+        try (Connection connection = super.dataSource.getConnection()) {
+            PreparedStatement statement = connection.prepareStatement(GET_WORKFLOW);
+            statement.setString(1, workflowId);
+            ResultSet rs = statement.executeQuery();
+            if (rs.next()) {
+                byte[] json = rs.getBytes("json_data");
+                if (json == null || json.length == 0) {
+                    return null;
+                }
+                return objectMapper.readValue(json, WorkflowModel.class);
+            }
+        } catch (Exception e) {
+            log.error("Error reading workflow - " + e.getMessage(), e);
+            throw new RuntimeException(e);
+        }
+        return null;
+    }
+
+    @Override
+    public List<String> getWorkflowIdsByType(String workflowName, Long startTime, Long endTime) {
+        String query =
+                "workflowType IN ("
+                        + workflowName
+                        + ") AND startTime>"
+                        + startTime
+                        + " AND startTime< "
+                        + endTime;
+        ScrollableSearchResult<String> result = searchWorkflows(query, "*", 0, 100_000);
+        return result.getResults();
+    }
+
+    @Override
+    public List<String> getWorkflowIdsByCorrelationId(
+            String workflowName,
+            String correlationId,
+            boolean includeClosed,
+            boolean includeTasks) {
+        String query =
+                "workflowType = '"
+                        + workflowName.trim()
+                        + "' AND correlationId = '"
+                        + correlationId.trim()
+                        + "'";
+        if (!includeClosed) {
+            query += " AND status IN (" + WorkflowModel.Status.RUNNING + ")";
+        }
+        ScrollableSearchResult<String> result = searchWorkflows(query, null, 0, 100_000);
+        return result.getResults();
+    }
+
+    // Search
+    public ScrollableSearchResult<String> searchWorkflows(
+            String query, String freeText, int start, int count) {
+
+        if (query == null) query = "";
+        if (freeText == null) freeText = "";
+
+        log.debug(
+                "search. query = {}, fulltext={}, limit={}, start= {}",
+                query,
+                freeText,
+                count,
+                start);
+
+        SearchQuery parsedQuery = SearchQuery.parse(query);
+        SearchResult<String> results =
+                search(TABLE_NAME, parsedQuery, freeText.trim(), count, start);
+        ScrollableSearchResult<String> scrollableSearchResult = new ScrollableSearchResult<>();
+        scrollableSearchResult.setResults(results.getResults());
+        return scrollableSearchResult;
+    }
+
+    private SearchResult<String> search(
+            String tableName, SearchQuery query, String freeText, int limit, int start) {
+
+        List<String> workflowNames = query.getWorkflowNames();
+        List<String> workflowIds = query.getWorkflowIds();
+        List<String> correlationIds = query.getCorrelationIds();
+        List<String> statuses = query.getStatuses();
+        long startTime = query.getFromStartTime();
+        long endTime = query.getToStartTime();
+        if (endTime == 0) {
+            endTime = System.currentTimeMillis();
+        }
+
+        boolean search = false;
+
+        // Task specific
+        List<String> taskIds = query.getTaskIds();
+        List<String> taskTypes = query.getTaskTypes();
+
+        String WHERE_CLAUSE = "from " + tableName + " archive ";
+
+        WHERE_CLAUSE += " WHERE 1=1 ";
+
+        String SELECT_QUERY = "select workflow_id, created_on ";
+
+        String JOINER = " AND ";
+        if (workflowNames != null && !workflowNames.isEmpty()) {
+            WHERE_CLAUSE += JOINER + "archive.workflow_name = ANY (?) ";
+        }
+
+        if (taskTypes != null && !taskTypes.isEmpty()) {
+            WHERE_CLAUSE += JOINER + "task_type = ANY (?) ";
+        }
+
+        if (workflowIds != null && !workflowIds.isEmpty()) {
+            WHERE_CLAUSE += JOINER + "workflow_id = ANY (?) ";
+            JOINER = " AND ";
+        }
+
+        if (taskIds != null && !taskIds.isEmpty()) {
+            WHERE_CLAUSE += JOINER + "task_id = ANY (?) ";
+            JOINER = " AND ";
+        }
+
+        if (statuses != null && !statuses.isEmpty()) {
+            WHERE_CLAUSE += JOINER + "status = ANY (?) ";
+            JOINER = " AND ";
+        }
+
+        if (correlationIds != null && !correlationIds.isEmpty()) {
+            WHERE_CLAUSE += JOINER + "correlation_id = ANY (?) ";
+            JOINER = " AND ";
+        }
+
+        if (startTime > 0) {
+            WHERE_CLAUSE += JOINER + "created_on BETWEEN ? AND ? ";
+            JOINER = " AND ";
+        }
+
+        if (Strings.isNotBlank(freeText) && !"*".equals(freeText)) {
+            search = true;
+            WHERE_CLAUSE += JOINER + " index_data @> ? ";
+        }
+
+        String SEARCH_QUERY =
+                SELECT_QUERY
+                        + " "
+                        + WHERE_CLAUSE
+                        + " order by created_on desc limit "
+                        + limit
+                        + " offset "
+                        + start;
+        if (search) {
+            SEARCH_QUERY =
+                    "select a.workflow_id, a.created_on from ("
+                            + SELECT_QUERY
+                            + " "
+                            + WHERE_CLAUSE
+                            + " limit 2000000 offset 0) a order by a.created_on desc limit "
+                            + limit
+                            + " offset "
+                            + start;
+        }
+
+        log.debug(SEARCH_QUERY);
+
+        SearchResult<String> result = new SearchResult<>();
+        result.setResults(new ArrayList<>());
+
+        try (Connection conn = searchDatasource.getConnection()) {
+            PreparedStatement pstmt = conn.prepareStatement(SEARCH_QUERY);
+            int indx = 1;
+            if (workflowNames != null && !workflowNames.isEmpty()) {
+                pstmt.setArray(
+                        indx++,
+                        conn.createArrayOf("VARCHAR", workflowNames.toArray(new String[0])));
+            }
+            if (taskTypes != null && !taskTypes.isEmpty()) {
+                pstmt.setArray(
+                        indx++, conn.createArrayOf("VARCHAR", taskTypes.toArray(new String[0])));
+            }
+
+            if (workflowIds != null && !workflowIds.isEmpty()) {
+                pstmt.setArray(
+                        indx++, conn.createArrayOf("VARCHAR", workflowIds.toArray(new String[0])));
+            }
+
+            if (taskIds != null && !taskIds.isEmpty()) {
+                pstmt.setArray(
+                        indx++, conn.createArrayOf("VARCHAR", taskIds.toArray(new String[0])));
+            }
+
+            if (statuses != null && !statuses.isEmpty()) {
+                pstmt.setArray(
+                        indx++, conn.createArrayOf("VARCHAR", statuses.toArray(new String[0])));
+            }
+
+            if (correlationIds != null && !correlationIds.isEmpty()) {
+                pstmt.setArray(
+                        indx++,
+                        conn.createArrayOf("VARCHAR", correlationIds.toArray(new String[0])));
+            }
+
+            if (startTime > 0) {
+                pstmt.setLong(indx++, startTime);
+                pstmt.setLong(indx++, endTime);
+            }
+
+            if (Strings.isNotBlank(freeText) && !"*".equals(freeText)) {
+                String[] textArray = freeText.toLowerCase().split(" ");
+                pstmt.setArray(indx++, conn.createArrayOf("text", textArray));
+            }
+
+            result.setTotalHits(0);
+            long countStart = System.currentTimeMillis();
+            ResultSet rs = pstmt.executeQuery();
+            log.debug(
+                    "search query took {} ms to execute",
+                    (System.currentTimeMillis() - countStart));
+            while (rs.next()) {
+                String workflowId = rs.getString("workflow_id");
+                result.getResults().add(workflowId);
+            }
+
+        } catch (SQLException sqlException) {
+            log.error(sqlException.getMessage(), sqlException);
+            throw new RuntimeException(sqlException);
+        }
+
+        return result;
+    }
+
+    // Private Methods
+    @Override
+    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
+        String GET_TASK = "SELECT seq, log, created_on FROM task_logs WHERE task_id = ?";
+        return queryWithTransaction(
+                GET_TASK,
+                q -> {
+                    List<TaskExecLog> taskExecLogs =
+                            q.addParameter(taskId)
+                                    .executeAndFetch(
+                                            resultSet -> {
+                                                List<TaskExecLog> logs = new ArrayList<>();
+                                                while (resultSet.next()) {
+                                                    TaskExecLog log = new TaskExecLog();
+                                                    log.setTaskId(taskId);
+                                                    log.setLog(resultSet.getString(2));
+                                                    log.setCreatedTime(resultSet.getLong(3));
+                                                    logs.add(log);
+                                                }
+                                                return logs;
+                                            });
+                    return taskExecLogs;
+                });
+    }
+
+    @Override
+    public void addTaskExecutionLogs(List<TaskExecLog> logs) {
+        String INSERT_STMT = "INSERT INTO task_logs (task_id, log, created_on) values(?,?,?)";
+        for (TaskExecLog taskExecLog : logs) {
+            withTransaction(
+                    tx -> {
+                        execute(
+                                tx,
+                                INSERT_STMT,
+                                q ->
+                                        q
+                                                // Insert values
+                                                .addParameter(taskExecLog.getTaskId())
+                                                .addParameter(taskExecLog.getLog())
+                                                .addParameter(taskExecLog.getCreatedTime())
+
+                                                // execute
+                                                .executeUpdate());
+                    });
+        }
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/PostgresArchiveDAOConfiguration.java
b/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/PostgresArchiveDAOConfiguration.java new file mode 100644 index 0000000..409ec2c --- /dev/null +++ b/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/PostgresArchiveDAOConfiguration.java @@ -0,0 +1,146 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.postgres.archive; + +import javax.annotation.PostConstruct; +import javax.sql.DataSource; + +import org.apache.logging.log4j.util.Strings; +import org.flywaydb.core.Flyway; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.autoconfigure.flyway.FlywayConfigurationCustomizer; +import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.*; +import org.springframework.core.env.Environment; + +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.dao.QueueDAO; +import com.netflix.conductor.postgres.config.PostgresProperties; + +import io.orkes.conductor.dao.archive.ArchiveDAO; +import io.orkes.conductor.dao.archive.ArchivedExecutionDAO; +import io.orkes.conductor.metrics.MetricsCollector; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Configuration(proxyBeanMethods = false) +@EnableConfigurationProperties({PostgresProperties.class}) +@Import(DataSourceAutoConfiguration.class) +@ConditionalOnProperty(name = "conductor.archive.db.type", havingValue = "postgres") +public class PostgresArchiveDAOConfiguration { + + private final DataSource dataSource; + + private final MetricsCollector metricsCollector; + + private final ExecutionDAO primaryExecutionDAO; + + private final QueueDAO dynoQueueDAO; + + private final Environment environment; + + private final ObjectMapper objectMapper; + + public PostgresArchiveDAOConfiguration( + ObjectMapper objectMapper, + Environment environment, + DataSource dataSource, + ExecutionDAO primaryExecutionDAO, + QueueDAO dynoQueueDAO, + MetricsCollector metricsCollector) { + + this.objectMapper = objectMapper; + this.environment = environment; + this.dataSource = dataSource; + this.primaryExecutionDAO = primaryExecutionDAO; + this.dynoQueueDAO = dynoQueueDAO; + this.metricsCollector = metricsCollector; + } + + @Bean + @Primary + @ConditionalOnProperty(name = "conductor.archive.db.enabled", havingValue = "true") + public ExecutionDAO getExecutionDAO(ArchiveDAO archiveDAO) { + return new ArchivedExecutionDAO( + primaryExecutionDAO, archiveDAO, dynoQueueDAO, metricsCollector); + } + + @Bean(initMethod = "migrate", name = "flyway") + @PostConstruct + public Flyway flywayForPrimaryDb() { + return Flyway.configure() + .locations( + "classpath:db/migration_postgres", + "classpath:db/migration_archive_postgres") + .schemas("public") + .dataSource(dataSource) + .baselineOnMigrate(true) + .mixed(true) + .load(); + } + + @Bean(name = "flywayInitializer") + public FlywayConfigurationCustomizer flywayConfigurationCustomizer() { + // override the default location. 
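+        // Both the primary Conductor migrations and the archive migrations are applied to the same database.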
+ return configuration -> + configuration.locations( + "classpath:db/migration_postgres", + "classpath:db/migration_archive_postgres"); + } + + @Bean + @Qualifier("searchDatasource") + public DataSource searchDatasource(DataSource defaultDatasource) { + String url = environment.getProperty("spring.search-datasource.url"); + String user = environment.getProperty("spring.search-datasource.username"); + String password = environment.getProperty("spring.search-datasource.password"); + String maxPoolSizeString = + environment.getProperty("spring.search-datasource.hikari.maximum-pool-size"); + + if (Strings.isEmpty(url)) { + return defaultDatasource; + } + log.info("Configuring searchDatasource with {}", url); + + int maxPoolSize = 10; + if (Strings.isNotEmpty(maxPoolSizeString)) { + try { + maxPoolSize = Integer.parseInt(maxPoolSizeString); + } catch (Exception e) { + } + } + HikariConfig config = new HikariConfig(); + config.setJdbcUrl(url); + config.setAutoCommit(true); + config.setUsername(user); + config.setPassword(password); + config.setMaximumPoolSize(maxPoolSize); + + HikariDataSource hikariDataSource = new HikariDataSource(config); + return hikariDataSource; + } + + @Bean + @DependsOn({"flyway", "flywayInitializer"}) + @ConditionalOnProperty(value = "conductor.archive.db.type", havingValue = "postgres") + public PostgresArchiveDAO getPostgresArchiveDAO( + @Qualifier("searchDatasource") DataSource searchDatasource) { + return new PostgresArchiveDAO(objectMapper, dataSource, searchDatasource); + } +} diff --git a/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/SearchQuery.java b/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/SearchQuery.java new file mode 100644 index 0000000..aafef30 --- /dev/null +++ b/archive/src/main/java/io/orkes/conductor/dao/postgres/archive/SearchQuery.java @@ -0,0 +1,160 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.postgres.archive;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+public class SearchQuery {
+
+    private List<String> workflowNames;
+
+    private List<String> taskTypes;
+
+    private List<String> workflowIds;
+
+    private List<String> correlationIds;
+
+    private List<String> taskIds;
+
+    private List<String> statuses;
+
+    private long fromStartTime;
+
+    private long toStartTime;
+
+    private SearchQuery() {}
+
+    public static SearchQuery parse(String query) {
+        SearchQuery searchQuery = new SearchQuery();
+
+        String[] values = query.split(" AND ");
+        for (String value : values) {
+            value = value.trim();
+            if (value.startsWith("workflowId")) {
+                searchQuery.workflowIds = getValues(value);
+            }
+            if (value.startsWith("correlationId")) {
+                searchQuery.correlationIds = getValues(value);
+            } else if (value.startsWith("taskId")) {
+                searchQuery.taskIds = getValues(value);
+            } else if (value.startsWith("workflowType")) {
+                searchQuery.workflowNames = getValues(value);
+            } else if (value.startsWith("taskType")) {
+                searchQuery.taskTypes = getValues(value);
+            } else if (value.startsWith("status")) {
+                searchQuery.statuses = getValues(value);
+            } else if (value.startsWith("startTime")) {
+
+                if (value.contains(">")) {
+
+                    String[] kv = value.split(">");
+                    if (kv.length > 1) {
+                        try {
+                            searchQuery.fromStartTime = Long.parseLong(kv[1].trim());
+                        } catch (Exception e) {
+                            // ignore an unparsable timestamp and keep the default (0)
+                        }
+                    }
+
+                } else if (value.contains("<")) {
+
+                    String[] kv = value.split("<");
+                    if (kv.length > 1) {
+                        try {
+                            searchQuery.toStartTime = Long.parseLong(kv[1].trim());
+                        } catch (Exception e) {
+                            // ignore an unparsable timestamp and keep the default (0)
+                        }
+                    }
+                }
+            }
+        }
+        return searchQuery;
+    }
+
+    private static List<String> getValues(String keyValue) {
+        List<String> values = new ArrayList<>();
+        if (keyValue.contains("=")) {
+            String[] kv = keyValue.split("=");
+            if (kv.length > 1) {
+                String value = kv[1].trim();
+                // remove quotes from the start and end
+                value = value.substring(1, value.length() - 1);
+                return Arrays.asList(value.trim());
+            }
+        } else if (keyValue.contains(" IN ")) {
+
+            String[] kv = keyValue.split(" IN ");
+            if (kv.length > 1) {
+                // strip the surrounding parentheses from the trimmed value before splitting on commas
+                String inList = kv[1].trim();
+                String[] inValues = inList.substring(1, inList.length() - 1).split(",");
+                for (String inValue : inValues) {
+                    values.add(inValue.trim());
+                }
+            }
+        }
+        return values;
+    }
+
+    public List<String> getWorkflowNames() {
+        return workflowNames;
+    }
+
+    public List<String> getWorkflowIds() {
+        return workflowIds;
+    }
+
+    public List<String> getCorrelationIds() {
+        return correlationIds;
+    }
+
+    public List<String> getStatuses() {
+        return statuses;
+    }
+
+    public long getFromStartTime() {
+        return fromStartTime;
+    }
+
+    public long getToStartTime() {
+        if (toStartTime == 0) {
+            toStartTime = System.currentTimeMillis();
+        }
+        return toStartTime;
+    }
+
+    public List<String> getTaskTypes() {
+        return taskTypes;
+    }
+
+    public List<String> getTaskIds() {
+        return taskIds;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        SearchQuery that = (SearchQuery) o;
+        return fromStartTime == that.fromStartTime
+                && toStartTime == that.toStartTime
+                && Objects.equals(workflowNames, that.workflowNames)
+                && Objects.equals(workflowIds, that.workflowIds)
+                &&
Objects.equals(statuses, that.statuses); + } + + @Override + public int hashCode() { + return Objects.hash(workflowNames, workflowIds, statuses, fromStartTime, toStartTime); + } +} diff --git a/archive/src/main/java/io/orkes/conductor/id/TimeBasedUUIDGenerator.java b/archive/src/main/java/io/orkes/conductor/id/TimeBasedUUIDGenerator.java new file mode 100644 index 0000000..eb27b38 --- /dev/null +++ b/archive/src/main/java/io/orkes/conductor/id/TimeBasedUUIDGenerator.java @@ -0,0 +1,64 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.id;
+
+import java.time.LocalDate;
+import java.util.Calendar;
+import java.util.TimeZone;
+import java.util.UUID;
+
+import org.apache.logging.log4j.core.util.UuidUtil;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.stereotype.Component;
+
+import com.netflix.conductor.core.utils.IDGenerator;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Component
+@ConditionalOnProperty(name = "conductor.id.generator", havingValue = "time_based")
+@Slf4j
+public class TimeBasedUUIDGenerator extends IDGenerator {
+
+    private static final LocalDate JAN_1_2020 = LocalDate.of(2020, 1, 1);
+
+    private static final int uuidLength = UUID.randomUUID().toString().length();
+
+    private static Calendar uuidEpoch = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+
+    private static final long epochMillis;
+
+    static {
+        uuidEpoch.clear();
+        uuidEpoch.set(1582, 9, 15, 0, 0, 0); // the UUID v1 epoch, October 15, 1582 (Calendar months are 0-based)
+        epochMillis = uuidEpoch.getTime().getTime();
+    }
+
+    public TimeBasedUUIDGenerator() {
+        log.info("Using TimeBasedUUIDGenerator to generate Ids");
+    }
+
+    public String generate() {
+        UUID uuid = UuidUtil.getTimeBasedUuid();
+        return uuid.toString();
+    }
+
+    public static long getDate(String id) {
+        UUID uuid = UUID.fromString(id);
+        if (uuid.version() != 1) {
+            return 0;
+        }
+        long time = (uuid.timestamp() / 10000L) + epochMillis;
+        return time;
+    }
+}
diff --git a/archive/src/main/java/io/orkes/conductor/metrics/MetricsCollector.java b/archive/src/main/java/io/orkes/conductor/metrics/MetricsCollector.java
new file mode 100644
index 0000000..cd00980
--- /dev/null
+++ b/archive/src/main/java/io/orkes/conductor/metrics/MetricsCollector.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.metrics;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import org.springframework.core.env.Environment;
+import org.springframework.stereotype.Component;
+
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+import com.netflix.spectator.api.CompositeRegistry;
+import com.netflix.spectator.api.Spectator;
+import com.netflix.spectator.micrometer.MicrometerRegistry;
+
+import io.micrometer.core.instrument.*;
+import io.micrometer.core.instrument.config.MeterFilter;
+import io.micrometer.prometheus.PrometheusRenameFilter;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@Component
+public class MetricsCollector {
+
+    private static final CompositeRegistry spectatorRegistry = Spectator.globalRegistry();
+
+    private final MeterRegistry meterRegistry;
+
+    private static final Map<String, Map<List<Tag>, Counter>> counters = new ConcurrentHashMap<>();
+
+    private static final Map<String, Map<List<Tag>, Timer>> timers = new ConcurrentHashMap<>();
+
+    private static final Map<String, Map<List<Tag>, Gauge>> gauges = new ConcurrentHashMap<>();
+
+    private static double[] percentiles = new double[] {0.5, 0.75, 0.90, 0.95, 0.99};
+
+    private final boolean skipLabels;
+
+    public MetricsCollector(MeterRegistry meterRegistry, Environment env) {
+
+        Boolean skipLabels = env.getProperty("conductor.metrics.skipLabels", Boolean.class);
+        if (skipLabels == null) {
+            this.skipLabels = false;
+        } else {
+            this.skipLabels = skipLabels;
+        }
+
+        this.meterRegistry = meterRegistry;
+        final MicrometerRegistry metricsRegistry = new MicrometerRegistry(this.meterRegistry);
+
+        this.meterRegistry
+                .config()
+                .meterFilter(new PrometheusRenameFilter())
+                .meterFilter(MeterFilter.denyNameStartsWith("task_queue_wait"))
+                .meterFilter(MeterFilter.denyNameStartsWith("dao_payload_size"))
+                .meterFilter(MeterFilter.denyNameStartsWith("dao_requests"))
+                .meterFilter(MeterFilter.denyNameStartsWith("workflow_execution"))
+                .meterFilter(MeterFilter.denyNameStartsWith("tasks_in_workflow"))
+                .meterFilter(MeterFilter.denyNameStartsWith("task_pending_time"))
+                .meterFilter(MeterFilter.denyNameStartsWith("workflow_start_success"))
+                .meterFilter(MeterFilter.denyNameStartsWith("task_execution"));
+
+        if (this.skipLabels) {
+            this.meterRegistry
+                    .config()
+                    .meterFilter(MeterFilter.denyNameStartsWith("workflow_failure"))
+                    .meterFilter(MeterFilter.denyNameStartsWith("workflow_running"));
+        }
+
+        spectatorRegistry.add(metricsRegistry);
+
+        log.info("skipLabels: {}", this.skipLabels);
+    }
+
+    public Timer getTimer(String name, String... additionalTags) {
+        List<Tag> tags = tags(additionalTags);
+        return timers.computeIfAbsent(name, s -> new ConcurrentHashMap<>())
+                .computeIfAbsent(
+                        tags,
+                        t -> {
+                            Timer.Builder timerBuilder =
+                                    Timer.builder(name)
+                                            .description(name)
+                                            .publishPercentiles(percentiles);
+                            if (!this.skipLabels) {
+                                timerBuilder = timerBuilder.tags(tags);
+                            }
+                            return timerBuilder.register(meterRegistry);
+                        });
+    }
+
+    public Counter getCounter(String name, String... additionalTags) {
+        List<Tag> tags = tags(additionalTags);
+        return counters.computeIfAbsent(name, s -> new ConcurrentHashMap<>())
+                .computeIfAbsent(
+                        tags,
+                        t -> {
+                            Counter.Builder builder = Counter.builder(name).description(name);
+                            if (!this.skipLabels) {
+                                builder = builder.tags(tags);
+                            }
+                            return builder.register(meterRegistry);
+                        });
+    }
+
+    public void recordGauge(String name, Number value, String... additionalTags) {
+        List<Tag> tags = tags(additionalTags);
+        gauges.computeIfAbsent(name, s -> new ConcurrentHashMap<>())
+                .computeIfAbsent(
+                        tags,
+                        t -> {
+                            Gauge.Builder<Supplier<Number>> builder =
+                                    Gauge.builder(name, () -> value);
+
+                            if (!this.skipLabels) {
+                                builder = builder.tags(tags);
+                            }
+                            return builder.register(meterRegistry);
+                        });
+    }
+
+    private void gauge(String name, Number value, String... additionalTags) {
+        Gauge.builder(name, () -> value).register(meterRegistry);
+    }
+
+    public void recordWorkflowComplete(WorkflowModel workflow) {
+        String workflowName = workflow.getWorkflowName();
+        if (skipLabels) {
+            workflowName = "None";
+        }
+        long duration = workflow.getEndTime() - workflow.getCreateTime();
+        getTimer(
+                        "workflow_completed",
+                        "workflowName",
+                        workflowName,
+                        "status",
+                        workflow.getStatus().toString())
+                .record(duration, TimeUnit.MILLISECONDS);
+    }
+
+    public void recordTaskComplete(TaskModel task) {
+        String taskType = task.getTaskDefName();
+        if (skipLabels) {
+            taskType = "None";
+        }
+        getTimer("task_completed", "taskType", taskType, "status", task.getStatus().toString())
+                .record((task.getEndTime() - task.getStartTime()), TimeUnit.MILLISECONDS);
+    }
+
+    private static List<Tag> tags(String... additionalTags) {
+        List<Tag> tags = new ArrayList<>();
+
+        // additionalTags arrive as alternating key/value pairs; pairs with empty values are skipped
+        for (int j = 0; j < additionalTags.length - 1; j += 2) {
+            String tk = additionalTags[j];
+            String tv = "" + additionalTags[j + 1];
+            if (!tv.isEmpty()) {
+                tags.add(Tag.of(tk, tv));
+            }
+        }
+        return tags;
+    }
+}
diff --git a/archive/src/main/resources/db/migration_archive_postgres/V99__initial_schema.sql b/archive/src/main/resources/db/migration_archive_postgres/V99__initial_schema.sql
new file mode 100644
index 0000000..64a5bb5
--- /dev/null
+++ b/archive/src/main/resources/db/migration_archive_postgres/V99__initial_schema.sql
@@ -0,0 +1,26 @@
+-- Workflow
+CREATE TABLE workflow_archive (
+    workflow_id varchar(255) NOT NULL,
+    created_on bigint,
+    modified_on bigint,
+    created_by varchar(255),
+    correlation_id varchar(255),
+    workflow_name varchar(255),
+    status varchar(255),
+    json_data TEXT,
+    index_data text[],
+    PRIMARY KEY (workflow_id)
+) with (autovacuum_vacuum_scale_factor = 0.0, autovacuum_vacuum_threshold = 10000);
+
+CREATE INDEX workflow_archive_workflow_name_index ON workflow_archive (workflow_name, status, correlation_id, created_on);
+CREATE INDEX workflow_archive_search_index ON workflow_archive USING gin(index_data);
+CREATE INDEX workflow_archive_created_on_index ON workflow_archive (created_on desc);
+
+-- Task Logs
+CREATE TABLE task_logs (
+    task_id varchar(255) NOT NULL,
+    seq bigserial,
+    log TEXT,
+    created_on bigint,
+    PRIMARY KEY (task_id, seq)
+);
\ No newline at end of file
diff --git a/archive/src/test/java/io/orkes/conductor/dao/archive/TestScheduler.java b/archive/src/test/java/io/orkes/conductor/dao/archive/TestScheduler.java
new file mode 100644
index 0000000..eabb005
--- /dev/null
+++ b/archive/src/test/java/io/orkes/conductor/dao/archive/TestScheduler.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.archive;
+
+import java.text.DecimalFormat;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.springframework.boot.web.client.RestTemplateBuilder;
+import org.springframework.http.*;
+import org.springframework.web.client.RestClientException;
+import org.springframework.web.client.RestTemplate;
+
+import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
+
+public class TestScheduler {
+
+    public static void main(String[] args) throws InterruptedException {
+        RestTemplate rt =
+                new RestTemplateBuilder()
+                        .setReadTimeout(Duration.ofMillis(7000))
+                        .setConnectTimeout(Duration.ofMillis(7000))
+                        .build();
+
+        StartWorkflowRequest request = new StartWorkflowRequest();
+        request.setName("first_flow_all_simple");
+        DecimalFormat df = new DecimalFormat("000");
+        ExecutorService es = Executors.newFixedThreadPool(100);
+        int start = 1;
+        int maxSet = 150;
+        String host = "https://perf6.conductorworkflow.net/";
+        CountDownLatch latch = new CountDownLatch(maxSet);
+        HttpHeaders headers = new HttpHeaders();
+        headers.put(
+                "X-Authorization",
+                Collections.singletonList(
+                        "eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIiwiaXNzIjoiaHR0cHM6Ly9vcmtlcy10ZXN0LnVzLmF1dGgwLmNvbS8ifQ..Hhotod4LBfYqEZBc.AFl2Xa_AYk4NaWJzakRjuMx_wpmj-RSLaf7RzzFZxdo35jW3fXzA7RQ8pOevieLWZKgMRYPc6FWdyoFbIZXFUYZcQPUk83UzsigXQSB4g1PUUslmxv6betHQtBNt0PYaNLzwI5PF7QkzNWEdeIPa2b-IsTXniagk_fFeTRJmMdPmfqDDqGmU6kd1v3M12JOqVp5hNXGJirErz-9tB8uOySPXFVbiTCbz8mk_JA-B4LTUWzPzdyE6J7QqSHmsqjQZfkPNpCTYnEF958xLn1x3vZ2K9d84YYSYQTPU_ce3lZJeI3RbfoOp2fQL6KWzPIcPujRh.NdgKCYUDzhIZzfHinmMQdg"));
+        headers.setContentType(MediaType.APPLICATION_JSON);
+
+        for (int i = 0; i < 60; i++) {
+            int finalI = i;
+            es.submit(
+                    () -> {
+                        try {
+                            for (int j = start; j <= maxSet; j++) {
+                                Map<String, Object> sp = new HashMap<>();
+                                sp.put(
+                                        "name",
+                                        String.format(
+                                                "test_second_%s_%s",
+                                                df.format(j), df.format(finalI)));
+                                System.out.println(
+                                        String.format(
+                                                "test_second_%s_%s",
+                                                df.format(j), df.format(finalI)));
+
+                                // rt.exchange("https://perf6.conductorworkflow.net/api/scheduler/schedules/" + sp.get("name"), HttpMethod.DELETE, new HttpEntity<>(headers), Void.class);
+
+                                sp.put("cronExpression", String.format("%s * * ? * *", finalI));
+                                sp.put("startWorkflowRequest", request);
+                                HttpEntity<Map<String, Object>> entity =
+                                        new HttpEntity<>(sp, headers);
+
+                                ResponseEntity<Void> voidResponseEntity =
+                                        rt.exchange(
+                                                host + "api/scheduler/schedules",
+                                                HttpMethod.POST,
+                                                entity,
+                                                Void.class);
+                                System.out.println(voidResponseEntity.getStatusCode());
+                            }
+                        } catch (RestClientException e) {
+                            e.printStackTrace();
+                        } finally {
+                            latch.countDown();
+                            System.out.println("latch: " + latch.getCount());
+                        }
+                    });
+        }
+        latch.await();
+        System.out.println("Latch: " + latch.getCount());
+        System.out.println("Done");
+    }
+
+    private static void createWorkflow(RestTemplate rt, HttpHeaders headers) {
+        Map<String, Object> sp = new HashMap<>();
+        sp.put("name", "first_flow");
+        HttpEntity<Map<String, Object>> entity = new HttpEntity<>(sp, headers);
+        ResponseEntity<String> exchange =
+                rt.exchange(
+                        "https://perf5.orkesconductor.com/api/workflow",
+                        HttpMethod.DELETE,
+                        entity,
+                        String.class);
+        System.out.println(exchange.getStatusCode());
+    }
+}
diff --git a/archive/src/test/java/io/orkes/conductor/dao/indexer/TestIndexValueExtractor.java b/archive/src/test/java/io/orkes/conductor/dao/indexer/TestIndexValueExtractor.java
new file mode 100644
index 0000000..ced1698
--- /dev/null
+++ b/archive/src/test/java/io/orkes/conductor/dao/indexer/TestIndexValueExtractor.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.dao.indexer;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.UUID;
+
+import org.junit.jupiter.api.Test;
+
+import com.netflix.conductor.common.config.ObjectMapperProvider;
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+
+import static io.orkes.conductor.dao.indexer.IndexValueExtractor.getIndexWords;
+import static org.junit.jupiter.api.Assertions.*;
+
+public class TestIndexValueExtractor {
+
+    @Test
+    public void testIndexer() {
+        WorkflowModel model = new WorkflowModel();
+        model.getTasks().add(new TaskModel());
+
+        String uuid = UUID.randomUUID().toString();
+        model.getInput().put("correlation_id", uuid);
+        System.out.println("correlationId: " + uuid);
+        model.getInput().put("keep", "abcd");
+
+        String json =
+                "{\n"
+                        + " \"response\": \"java.lang.Exception: I/O error on GET request for \\\"https://orkes-services.web.app2/data.json\\\": orkes-services.web.app2: nodename nor servname provided, or not known; nested exception is java.net.UnknownHostException: orkes-services.web.app2: nodename nor servname provided, or not known\"\n"
+                        + "}";
+        try {
+            Map<String, Object> output =
+                    new ObjectMapperProvider().getObjectMapper().readValue(json, Map.class);
+            model.getOutput().putAll(output);
+        } catch (Exception e) {
+            // the hand-written payload above is valid JSON, so parsing is not expected to fail
+        }
+
+        for (int i = 0; i < 100; i++) {
+            model.getTasks().get(0).getOutputData().put("id" + i, UUID.randomUUID().toString());
+        }
+        Collection<String> words = getIndexWords(model, 2, 50);
+
+        // Since all the UUIDs are longer than max word length, they should all get filtered out
+        assertNotNull(words);
+        assertEquals(2, words.size());
+        assertTrue(words.contains("abcd"));
+        assertTrue(words.contains(uuid), uuid + " not in the list of words : " + words);
+
+        words = getIndexWords(model, 200, 50);
+        System.out.println(words);
+        System.out.println(words.size());
+        words.stream().forEach(System.out::println);
+
+        // All UUIDs should be present
+        assertNotNull(words);
+        assertTrue(words.contains("https://orkes-services.web.app2/data.json"));
+        assertTrue(words.contains(uuid));
+    }
+}
diff --git a/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresArchiveDAOTest.java b/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresArchiveDAOTest.java
new file mode 100644
index 0000000..93f4b00
--- /dev/null
+++ b/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresArchiveDAOTest.java
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2021 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.postgres; + +import java.util.*; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.PostgreSQLContainer; +import org.testcontainers.utility.DockerImageName; + +import com.netflix.conductor.common.config.ObjectMapperProvider; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.model.WorkflowModel; + +import io.orkes.conductor.dao.postgres.archive.PostgresArchiveDAO; +import io.orkes.conductor.id.TimeBasedUUIDGenerator; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import static org.junit.jupiter.api.Assertions.*; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class PostgresArchiveDAOTest { + + private final ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper(); + private PostgresDAOTestUtil testPostgres; + private PostgresArchiveDAO archiveDAO; + public PostgreSQLContainer postgreSQLContainer; + public GenericContainer redis; + private TimeBasedUUIDGenerator timeBasedUUIDGenerator = new TimeBasedUUIDGenerator(); + + @BeforeAll + public void setup() { + redis = + new GenericContainer(DockerImageName.parse("redis:6.2.6-alpine")) + .withExposedPorts(6379); + redis.start(); + + ConductorProperties conductorProperties = new ConductorProperties(); + + postgreSQLContainer = + new PostgreSQLContainer<>(DockerImageName.parse("postgres:11.15-alpine")) + .withDatabaseName("conductor"); + postgreSQLContainer.start(); + testPostgres = new PostgresDAOTestUtil(postgreSQLContainer, objectMapper); + + archiveDAO = + new PostgresArchiveDAO( + testPostgres.getObjectMapper(), + testPostgres.getDataSource(), + testPostgres.getDataSource()); + } + + @AfterAll + public void teardown() { + testPostgres.getDataSource().close(); + postgreSQLContainer.stop(); + } + + @Test + public void testIndexLargeDoc() { + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_correlation_jtest"); + + WorkflowModel workflow = createTestWorkflow(); + Map output = workflow.getTasks().get(0).getOutputData(); + for (int i = 0; i < 100; i++) { + output.put("key_" + i, generateRandomString()); + } + + archiveDAO.createOrUpdateWorkflow(workflow); + } + + private static String generateRandomString() { + Random random = new Random(); + int wordCount = 5; + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < wordCount; i++) { + sb.append(generateRandomWord()); + sb.append(" "); + } + return sb.toString(); + } + + private static String generateRandomWord() { + int leftLimit = 48; // numeral '0' + int rightLimit = 122; // letter 'z' + Random random = new Random(); + int targetStringLength = 15; + + String generatedString = + random.ints(leftLimit, rightLimit + 1) + .filter(i -> (i <= 57 || i >= 65) && (i <= 90 || i >= 97)) + .limit(targetStringLength) + .collect( + StringBuilder::new, + StringBuilder::appendCodePoint, + StringBuilder::append) + .toString(); + + return 
generatedString; + } + + @Test + public void testOlderDataIndex() { + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_correlation_jtest"); + + WorkflowModel workflow = createTestWorkflow(); + workflow.setWorkflowDefinition(def); + workflow.setWorkflowId(UUID.randomUUID().toString()); + + archiveDAO.createOrUpdateWorkflow(workflow); + WorkflowModel found = archiveDAO.getWorkflow(workflow.getWorkflowId(), false); + assertNotNull(found); + assertEquals(workflow.getWorkflowId(), found.getWorkflowId()); + } + + @Test + public void testIndexWorkflow() { + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_correlation_jtest"); + + WorkflowModel workflow = createTestWorkflow(); + workflow.setWorkflowDefinition(def); + + generateWorkflows(workflow, 10); + List bycorrelationId = + archiveDAO.getWorkflowIdsByCorrelationId( + "pending_count_correlation_jtest", "corr001", true, true); + assertNotNull(bycorrelationId); + assertEquals(10, bycorrelationId.size()); + System.out.println("Workflow Ids: " + bycorrelationId); + + List bycorrelationId2 = + archiveDAO.getWorkflowIdsByCorrelationId( + "pending_count_correlation_jtest", "corr001", true, true); + System.out.println("Workflow Ids: " + bycorrelationId2); + System.out.println("Workflow Ids: " + (bycorrelationId.size() == bycorrelationId2.size())); + + workflow.setStatus(WorkflowModel.Status.COMPLETED); + workflow.setUpdatedTime(System.currentTimeMillis()); + workflow.getTasks().forEach(t -> t.setStatus(TaskModel.Status.COMPLETED)); + workflow.setUpdatedTime(System.currentTimeMillis()); + archiveDAO.createOrUpdateWorkflow(workflow); + WorkflowModel found = archiveDAO.getWorkflow(workflow.getWorkflowId(), false); + assertNotNull(found); + assertNotNull(workflow.getTasks()); + assertFalse(workflow.getTasks().isEmpty()); + + // Updating it back to running status shouldn't do anything! 
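+        // (The archive upsert only applies when the stored modified_on is older, so this stale write is ignored.)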
+ workflow.setStatus(WorkflowModel.Status.RUNNING); + archiveDAO.createOrUpdateWorkflow(workflow); + found = archiveDAO.getWorkflow(workflow.getWorkflowId(), false); + assertNotNull(found); + assertEquals(WorkflowModel.Status.COMPLETED, found.getStatus()); + } + + @Test + public void testTablePartitioning() { + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_correlation_jtest"); + + WorkflowModel workflow = createTestWorkflow(); + workflow.setWorkflowDefinition(def); + workflow.setWorkflowId("4c66564d-c1e8-11ec-8f29-d2403f37b380"); + workflow.setStatus(WorkflowModel.Status.COMPLETED); + archiveDAO.createOrUpdateWorkflow(workflow); + WorkflowModel found = archiveDAO.getWorkflow(workflow.getWorkflowId(), false); + assertNotNull(found); + assertEquals(WorkflowModel.Status.COMPLETED, found.getStatus()); + } + + @Test + public void testFindByCorrelationId() { + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_correlation_jtest"); + String correlationId = "correlation_id#001"; + + WorkflowModel workflow1 = createTestWorkflow(); + workflow1.setWorkflowDefinition(def); + workflow1.setWorkflowId(timeBasedUUIDGenerator.generate()); + workflow1.setStatus(WorkflowModel.Status.RUNNING); + workflow1.setCorrelationId(correlationId); + archiveDAO.createOrUpdateWorkflow(workflow1); + + WorkflowModel workflow2 = createTestWorkflow(); + workflow2.setWorkflowDefinition(def); + workflow2.setWorkflowId(timeBasedUUIDGenerator.generate()); + workflow2.setStatus(WorkflowModel.Status.COMPLETED); + workflow2.setCorrelationId(correlationId); + archiveDAO.createOrUpdateWorkflow(workflow2); + + List found = + archiveDAO.getWorkflowIdsByCorrelationId(def.getName(), correlationId, true, true); + assertNotNull(found); + assertEquals(2, found.size()); + assertTrue(found.contains(workflow1.getWorkflowId())); + assertTrue(found.contains(workflow2.getWorkflowId())); + + found = archiveDAO.getWorkflowIdsByCorrelationId(def.getName(), correlationId, false, true); + assertNotNull(found); + assertEquals(1, found.size()); + assertTrue(found.contains(workflow1.getWorkflowId())); + } + + protected List generateWorkflows(WorkflowModel workflow, int count) { + List workflowIds = new ArrayList<>(); + for (int i = 0; i < count; i++) { + String workflowId = new TimeBasedUUIDGenerator().generate(); + workflow.setWorkflowId(workflowId); + workflow.setCorrelationId("corr001"); + workflow.setStatus(WorkflowModel.Status.RUNNING); + workflow.setCreateTime(System.currentTimeMillis()); + archiveDAO.createOrUpdateWorkflow(workflow); + workflowIds.add(workflowId); + } + return workflowIds; + } + + protected WorkflowModel createTestWorkflow() { + WorkflowDef def = new WorkflowDef(); + def.setName("Junit Workflow"); + def.setVersion(3); + def.setSchemaVersion(2); + + WorkflowModel workflow = new WorkflowModel(); + workflow.setWorkflowDefinition(def); + workflow.setCorrelationId("correlationX"); + workflow.setCreatedBy("junit_tester"); + workflow.setEndTime(200L); + + Map input = new HashMap<>(); + input.put("param1", "param1 value"); + input.put("param2", 100); + workflow.setInput(input); + + Map output = new HashMap<>(); + output.put("ouput1", "output 1 value"); + output.put("op2", 300); + workflow.setOutput(output); + + workflow.setOwnerApp("workflow"); + workflow.setParentWorkflowId("parentWorkflowId"); + workflow.setParentWorkflowTaskId("parentWFTaskId"); + workflow.setReasonForIncompletion("missing recipe"); + workflow.setReRunFromWorkflowId("re-run from id1"); + workflow.setCreateTime(90L); + 
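+        // FAILED is a terminal status, so the archive DAO persists the full workflow JSON for this fixture.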
workflow.setStatus(WorkflowModel.Status.FAILED); + workflow.setWorkflowId(timeBasedUUIDGenerator.generate()); + + List tasks = new LinkedList<>(); + + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(timeBasedUUIDGenerator.generate()); + task.setReferenceTaskName("t1"); + task.setWorkflowInstanceId(workflow.getWorkflowId()); + task.setTaskDefName("task1"); + task.setStatus(TaskModel.Status.COMPLETED); + + TaskModel task2 = new TaskModel(); + task2.setScheduledTime(2L); + task2.setSeq(2); + task2.setTaskId(timeBasedUUIDGenerator.generate()); + task2.setReferenceTaskName("t2"); + task2.setWorkflowInstanceId(workflow.getWorkflowId()); + task2.setTaskDefName("task2"); + task2.setStatus(TaskModel.Status.COMPLETED); + + TaskModel task3 = new TaskModel(); + task3.setScheduledTime(2L); + task3.setSeq(3); + task3.setTaskId(timeBasedUUIDGenerator.generate()); + task3.setReferenceTaskName("t3"); + task3.setWorkflowInstanceId(workflow.getWorkflowId()); + task3.setTaskDefName("task3"); + task3.setStatus(TaskModel.Status.IN_PROGRESS); + + tasks.add(task); + tasks.add(task2); + tasks.add(task3); + + workflow.setTasks(tasks); + + workflow.setUpdatedBy("junit_tester"); + workflow.setUpdatedTime(800L); + + return workflow; + } +} diff --git a/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresArchivePerformanceTest.java b/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresArchivePerformanceTest.java new file mode 100644 index 0000000..8f89abc --- /dev/null +++ b/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresArchivePerformanceTest.java @@ -0,0 +1,215 @@ +/* + * Copyright 2021 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.postgres; + +import java.util.*; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import com.netflix.conductor.common.config.ObjectMapperProvider; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.tasks.TaskType; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.model.WorkflowModel; + +import io.orkes.conductor.dao.postgres.archive.PostgresArchiveDAO; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.javafaker.*; +import com.zaxxer.hikari.HikariDataSource; + +public class PostgresArchivePerformanceTest { + + private PostgresArchiveDAO archiveDAO; + private ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper(); + private HikariDataSource dataSource; + private static final Faker[] fakers = + new Faker[] { + new Faker(new Locale("en_US")), + new Faker(new Locale("nb-NO")), + new Faker(new Locale("zh-CN")), + new Faker(new Locale("fi-FI")) + }; + + private static final int TASK_COUNT = 5; + + private Faker faker; + + public PostgresArchivePerformanceTest() { + dataSource = new HikariDataSource(); + dataSource.setJdbcUrl("jdbc:postgresql://localhost/neel"); + dataSource.setUsername("postgres"); + dataSource.setPassword("postgres"); + dataSource.setAutoCommit(false); + dataSource.setMaximumPoolSize(10); + archiveDAO = new PostgresArchiveDAO(objectMapper, dataSource, dataSource); + faker = new Faker(); + } + + public void generateWorkflowsAndIndex(int count) { + WorkflowDef workflowDef = generateWorkflowDef(); + for (int i = 0; i < count; i++) { + WorkflowModel workflow = createWorkflowRun(workflowDef); + archiveDAO.createOrUpdateWorkflow(workflow); + System.out.println(workflow.getStatus() + " : " + workflow.getWorkflowId()); + } + } + + private WorkflowDef generateWorkflowDef() { + WorkflowDef def = new WorkflowDef(); + def.setName("load_test_workflow"); + def.setVersion(1); + def.setOwnerEmail("loadtest@orkes.io"); + def.setDescription("load testing workflow"); + + for (int i = 0; i < TASK_COUNT; i++) { + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setType(TaskType.SIMPLE.name()); + workflowTask.setTaskReferenceName("task_" + i); + workflowTask.setName("task-" + i); + TaskDef taskDef = new TaskDef(); + taskDef.setName("task-" + i); + taskDef.setOwnerEmail("dev@orkes.io"); + workflowTask.setTaskDefinition(taskDef); + def.getTasks().add(workflowTask); + } + return def; + } + + protected WorkflowModel createWorkflowRun(WorkflowDef workflowDef) { + + WorkflowModel workflow = new WorkflowModel(); + String workflowId = UUID.randomUUID().toString(); + workflow.setWorkflowId(workflowId); + workflow.setWorkflowDefinition(workflowDef); + workflow.setCorrelationId(faker.gameOfThrones().character()); + workflow.setCreatedBy("junit_tester"); + workflow.setEndTime(200L); + + workflow.setInput(getAddress(faker)); + workflow.setOutput(getCommerceData(faker)); + + workflow.setOwnerApp("workflow"); + 
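+        // The tasks built below carry Faker-generated payloads to approximate production-sized index data.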
workflow.setReRunFromWorkflowId(UUID.randomUUID().toString()); + workflow.setCreateTime(System.currentTimeMillis() - 10000L); + workflow.setWorkflowId(UUID.randomUUID().toString()); + + List tasks = new LinkedList<>(); + boolean failed = false; + String reason = null; + for (int i = 0; i < TASK_COUNT; i++) { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(i + 1); + + task.setTaskId(UUID.randomUUID().toString()); + task.setReferenceTaskName("task_" + i); + task.setWorkflowInstanceId(workflow.getWorkflowId()); + task.setTaskDefName("task-" + i); + task.setWorkerId(faker.idNumber().valid()); + task.setTaskType(TaskType.SIMPLE.name()); + task.setWorkflowTask(workflowDef.getTasks().get(i)); + task.setWorkflowType(workflow.getWorkflowName()); + + int status = new Random().nextInt(10000); + if (status >= 20) { + task.setStatus(TaskModel.Status.COMPLETED); + } else if (status < 10) { + task.setStatus(TaskModel.Status.FAILED); + failed = true; + reason = faker.chuckNorris().fact(); + task.setReasonForIncompletion(reason); + } else if (status >= 10 && status < 20) { + task.setStatus(TaskModel.Status.TIMED_OUT); + failed = true; + reason = faker.chuckNorris().fact(); + task.setReasonForIncompletion(reason); + } + + Faker faker2 = fakers[new Random().nextInt(fakers.length)]; + task.getOutputData().putAll(getCommerceData(faker2)); + task.getOutputData().putAll(getAddress(faker2)); + task.getOutputData().putAll(getDemographics(faker2)); + task.getInputData().putAll(getAddress(faker2)); + + tasks.add(task); + } + workflow.setTasks(tasks); + + if (failed) { + workflow.setStatus(WorkflowModel.Status.FAILED); + workflow.setReasonForIncompletion(reason); + } else { + workflow.setStatus(WorkflowModel.Status.COMPLETED); + } + + workflow.setUpdatedBy("junit_tester"); + workflow.setUpdatedTime(System.currentTimeMillis()); + + return workflow; + } + + private Map getCommerceData(Faker faker) { + Commerce commerce = faker.commerce(); + Map data = new HashMap<>(); + data.put("department", commerce.department()); + data.put("price", commerce.price()); + data.put("material", commerce.material()); + data.put("promo_code", commerce.promotionCode()); + data.put("product_name", commerce.productName()); + return data; + } + + private Map getAddress(Faker faker) { + Address address = faker.address(); + Map data = new HashMap<>(); + + data.put("apt", address.buildingNumber()); + data.put("full_address", address.fullAddress()); + data.put("city", address.city()); + data.put("street", address.streetAddress()); + data.put("country", address.country()); + data.put("zip", address.zipCode()); + + return data; + } + + private Map getDemographics(Faker faker) { + Demographic demographic = faker.demographic(); + Map data = new HashMap<>(); + + data.put("demonym", demographic.demonym()); + data.put("education", demographic.educationalAttainment()); + data.put("marital_status", demographic.maritalStatus()); + data.put("race", demographic.race()); + data.put("sex", demographic.sex()); + return data; + } + + public static void main(String[] args) { + System.out.println("Start"); + PostgresArchivePerformanceTest tester = new PostgresArchivePerformanceTest(); + ExecutorService es = Executors.newFixedThreadPool(100); + for (int i = 0; i < 1000; i++) { + es.submit( + () -> { + tester.generateWorkflowsAndIndex(1000); + }); + } + es.shutdown(); + + System.out.println("Done"); + } +} diff --git a/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresDAOTestUtil.java 
b/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresDAOTestUtil.java new file mode 100644 index 0000000..6fa204d --- /dev/null +++ b/archive/src/test/java/io/orkes/conductor/dao/postgres/PostgresDAOTestUtil.java @@ -0,0 +1,80 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.postgres; + +import java.nio.file.Paths; +import java.time.Duration; + +import javax.sql.DataSource; + +import org.flywaydb.core.Flyway; +import org.flywaydb.core.api.configuration.FluentConfiguration; +import org.testcontainers.containers.PostgreSQLContainer; + +import com.netflix.conductor.postgres.config.PostgresProperties; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.zaxxer.hikari.HikariDataSource; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PostgresDAOTestUtil { + + private final HikariDataSource dataSource; + private final PostgresProperties properties = mock(PostgresProperties.class); + private final ObjectMapper objectMapper; + + public PostgresDAOTestUtil( + PostgreSQLContainer postgreSQLContainer, ObjectMapper objectMapper) { + + this.objectMapper = objectMapper; + + this.dataSource = new HikariDataSource(); + dataSource.setJdbcUrl(postgreSQLContainer.getJdbcUrl()); + dataSource.setUsername(postgreSQLContainer.getUsername()); + dataSource.setPassword(postgreSQLContainer.getPassword()); + dataSource.setAutoCommit(false); + // Prevent DB from getting exhausted during rapid testing + dataSource.setMaximumPoolSize(8); + + when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60)); + + flywayMigrate(dataSource); + } + + private void flywayMigrate(DataSource dataSource) { + FluentConfiguration fluentConfiguration = + Flyway.configure() + .table("schema_version") + .locations(Paths.get("db", "migration_archive_postgres").toString()) + .dataSource(dataSource) + .mixed(true) + .placeholderReplacement(false); + + Flyway flyway = fluentConfiguration.load(); + flyway.migrate(); + } + + public HikariDataSource getDataSource() { + return dataSource; + } + + public PostgresProperties getTestProperties() { + return properties; + } + + public ObjectMapper getObjectMapper() { + return objectMapper; + } +} diff --git a/archive/src/test/java/io/orkes/conductor/dao/postgres/UuidUtil.java b/archive/src/test/java/io/orkes/conductor/dao/postgres/UuidUtil.java new file mode 100644 index 0000000..ed18fd9 --- /dev/null +++ b/archive/src/test/java/io/orkes/conductor/dao/postgres/UuidUtil.java @@ -0,0 +1,153 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.dao.postgres; + +import java.nio.ByteBuffer; +import java.security.SecureRandom; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.util.NetUtils; +import org.apache.logging.log4j.core.util.Patterns; +import org.apache.logging.log4j.status.StatusLogger; +import org.apache.logging.log4j.util.PropertiesUtil; + +/** + * Generates a unique ID. The generated UUID will be unique for approximately 8,925 years so long as + * less than 10,000 IDs are generated per millisecond on the same device (as identified by its MAC + * address). + */ +public final class UuidUtil { + + private static final long[] EMPTY_LONG_ARRAY = {}; + + /** System property that may be used to seed the UUID generation with an integer value. */ + public static final String UUID_SEQUENCE = "org.apache.logging.log4j.uuidSequence"; + + private static final Logger LOGGER = StatusLogger.getLogger(); + + private static final String ASSIGNED_SEQUENCES = "org.apache.logging.log4j.assignedSequences"; + + private static final AtomicInteger COUNT = new AtomicInteger(0); + private static final long TYPE1 = 0x1000L; + private static final byte VARIANT = (byte) 0x80; + private static final int SEQUENCE_MASK = 0x3FFF; + private static final long NUM_100NS_INTERVALS_SINCE_UUID_EPOCH = 0x01b21dd213814000L; + private static final long INITIAL_UUID_SEQNO = + PropertiesUtil.getProperties().getLongProperty(UUID_SEQUENCE, 0); + + private static final long LOW_MASK = 0xffffffffL; + private static final long MID_MASK = 0xffff00000000L; + private static final long HIGH_MASK = 0xfff000000000000L; + private static final int NODE_SIZE = 8; + private static final int SHIFT_2 = 16; + private static final int SHIFT_4 = 32; + private static final int SHIFT_6 = 48; + private static final int HUNDRED_NANOS_PER_MILLI = 10000; + + private static final long LEAST = initialize(NetUtils.getMacAddress()); + + /* This class cannot be instantiated */ + private UuidUtil() {} + + /** + * Initializes this class + * + * @param mac MAC address + * @return Least + */ + static long initialize(byte[] mac) { + final Random randomGenerator = new SecureRandom(); + if (mac == null || mac.length == 0) { + mac = new byte[6]; + randomGenerator.nextBytes(mac); + } + final int length = mac.length >= 6 ? 6 : mac.length; + final int index = mac.length >= 6 ? 
mac.length - 6 : 0; + final byte[] node = new byte[NODE_SIZE]; + node[0] = VARIANT; + node[1] = 0; + for (int i = 2; i < NODE_SIZE; ++i) { + node[i] = 0; + } + System.arraycopy(mac, index, node, 2, length); + final ByteBuffer buf = ByteBuffer.wrap(node); + long rand = INITIAL_UUID_SEQNO; + String assigned = PropertiesUtil.getProperties().getStringProperty(ASSIGNED_SEQUENCES); + long[] sequences; + if (assigned == null) { + sequences = EMPTY_LONG_ARRAY; + } else { + final String[] array = assigned.split(Patterns.COMMA_SEPARATOR); + sequences = new long[array.length]; + int i = 0; + for (final String value : array) { + sequences[i] = Long.parseLong(value); + ++i; + } + } + if (rand == 0) { + rand = randomGenerator.nextLong(); + } + rand &= SEQUENCE_MASK; + boolean duplicate; + do { + duplicate = false; + for (final long sequence : sequences) { + if (sequence == rand) { + duplicate = true; + break; + } + } + if (duplicate) { + rand = (rand + 1) & SEQUENCE_MASK; + } + } while (duplicate); + assigned = assigned == null ? Long.toString(rand) : assigned + ',' + Long.toString(rand); + System.setProperty(ASSIGNED_SEQUENCES, assigned); + + return buf.getLong() | rand << SHIFT_6; + } + + /** + * Generates Type 1 UUID. The time contains the number of 100NS intervals that have occurred + * since 00:00:00.00 UTC, 10 October 1582. Each UUID on a particular machine is unique to the + * 100NS interval until they rollover around 3400 A.D. + * + *

+ * <ol>
+ * <li>Digits 1-12 are the lower 48 bits of the number of 100 ns increments since the start of
+ * the UUID epoch.</li>
+ * <li>Digit 13 is the version (with a value of 1).</li>
+ * <li>Digits 14-16 are a sequence number that is incremented each time a UUID is generated.</li>
+ * <li>Digit 17 is the variant (with a value of binary 10) and 10 bits of the sequence number.</li>
+ * <li>Digit 18 is final 16 bits of the sequence number.</li>
+ * <li>Digits 19-32 represent the system the application is running on.</li>
+ * </ol>
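+ * <p>Editor's sketch (not part of the original changeset) of minimal usage:
+ * <pre>{@code
+ * UUID id = UuidUtil.getTimeBasedUuid(0L); // offset in ms added to the current time
+ * assert id.version() == 1;                // always a type 1 (time-based) UUID
+ * }</pre>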
+ * + * @return universally unique identifiers (UUID) + */ + public static UUID getTimeBasedUuid(long offset) { + + final long time = + (((System.currentTimeMillis() + offset) * HUNDRED_NANOS_PER_MILLI) + + NUM_100NS_INTERVALS_SINCE_UUID_EPOCH) + + (COUNT.incrementAndGet() % HUNDRED_NANOS_PER_MILLI); + final long timeLow = (time & LOW_MASK) << SHIFT_4; + final long timeMid = (time & MID_MASK) >> SHIFT_2; + final long timeHi = (time & HIGH_MASK) >> SHIFT_6; + final long most = timeLow | timeMid | TYPE1 | timeHi; + return new UUID(most, LEAST); + } +} diff --git a/archive/src/test/resources/application-testazurearchive.properties b/archive/src/test/resources/application-testazurearchive.properties new file mode 100644 index 0000000..07785cb --- /dev/null +++ b/archive/src/test/resources/application-testazurearchive.properties @@ -0,0 +1,15 @@ +conductor.db.type=redis_standalone +conductor.redis.hosts=localhost:6379:us-east-1c + +conductor.archive.db.enabled=true +conductor.archive.db.type=postgres +conductor.archive.db.indexer.threadCount=1 +conductor.archive.db.indexer.pollingInterval=10 +spring.datasource.url=jdbc:tc:postgresql:11.15-alpine:///conductordb + +conductor.archive.db.document.store.type=azureblob +conductor.archive.db.document.store.type.azureblob.connectionString=DefaultEndpointsProtocol=https;AccountName=aiademo;AccountKey=nMNQta5j9ILcRoALQhlFELQLJ0ILbXVu5NeEfkXWQT15WcupsNuQ1wrMZP1O5K5gWyyd3tJpQXpVln/zctt19A==;EndpointSuffix=core.windows.net + +conductor.grpc-server.port=8091 +es.set.netty.runtime.available.processors=false + diff --git a/archive/src/test/resources/application-tests3archive.properties b/archive/src/test/resources/application-tests3archive.properties new file mode 100644 index 0000000..d75e8fc --- /dev/null +++ b/archive/src/test/resources/application-tests3archive.properties @@ -0,0 +1,14 @@ +conductor.db.type=memory +conductor.redis.hosts=host1:port:rack;host2:port:rack:host3:port:rack + +conductor.archive.db.enabled=true +conductor.archive.db.type=postgres +conductor.archive.db.indexer.threadCount=1 +conductor.archive.db.indexer.pollingInterval=10 +spring.datasource.url=jdbc:tc:postgresql:11.15-alpine:///conductordb + +conductor.archive.db.document.store.type=s3 + +conductor.grpc-server.port=8091 +es.set.netty.runtime.available.processors=false + diff --git a/archive/src/test/resources/drop_all.sql b/archive/src/test/resources/drop_all.sql new file mode 100644 index 0000000..a4f96d0 --- /dev/null +++ b/archive/src/test/resources/drop_all.sql @@ -0,0 +1 @@ +DROP SCHEMA public CASCADE; \ No newline at end of file diff --git a/archive/src/test/resources/logback.xml b/archive/src/test/resources/logback.xml new file mode 100644 index 0000000..d1353c6 --- /dev/null +++ b/archive/src/test/resources/logback.xml @@ -0,0 +1,31 @@ + + + + + + + %black(%d{ISO8601}) %highlight(%-5level) [%blue(%t)] %yellow(%C{1.}): %msg%n%throwable + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build.gradle b/build.gradle new file mode 100644 index 0000000..7cfa6cb --- /dev/null +++ b/build.gradle @@ -0,0 +1,179 @@ +buildscript { + dependencies { + classpath "org.springframework.boot:spring-boot-gradle-plugin:2.5.7" + classpath 'com.diffplug.spotless:spotless-plugin-gradle:6.+' + } +} + +plugins { + id 'io.spring.dependency-management' version '1.0.11.RELEASE' +} + +ext { + group = 'io.orkes.queue' + appVersion = '0.0.1-SNAPSHOT' + springBootVersion = '2.5.6' + + versions = [ + conductorfork : '3.10.7', + revTestContainer : '1.17.2', + revGuava : 
'30.0-jre', + log4j : '2.17.1', + revJedis : '3.3.0', + revMockServerClient : '5.12.0', + revCommonsLang : '3.12.0', + revLombok : '1.18.24', + revLucene : '7.7.3', + revSpectator : '0.122.0', + revOpenapi : '1.6.11', + revAwsSdk : '1.12.153', + revProtoBuf : '3.13.0', + revRarefiedRedis : '0.0.17' + ] +} + +def relVersion = System.getenv('REL_VER') +if (relVersion) { + println "Inferred version from env variable 'REL_VER': $relVersion" + appVersion = relVersion +} + +subprojects { + + group = 'io.orkes.queue' + version = "${appVersion}" + + apply plugin: 'java' + apply plugin: 'io.spring.dependency-management' + apply plugin: 'maven-publish' + apply plugin: 'signing' + apply plugin: 'com.diffplug.spotless' + + repositories { + mavenCentral() + } + + java { + withSourcesJar() + withJavadocJar() + } + + configurations { + compileOnly { + extendsFrom annotationProcessor + } + testCompileOnly { + extendsFrom annotationProcessor + } + all { + exclude group: 'org.apache.logging.log4j', module: 'log4j-slf4j-impl' + exclude group: 'org.slf4j', module: 'slf4j-log4j12' + } + } + + dependencies { + implementation "org.apache.logging.log4j:log4j-core:${versions.log4j}!!" + implementation "org.apache.logging.log4j:log4j-api:${versions.log4j}!!" + implementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}!!" + implementation "org.apache.logging.log4j:log4j-jul:${versions.log4j}!!" + implementation "org.apache.logging.log4j:log4j-web:${versions.log4j}!!" + implementation "org.apache.logging.log4j:log4j-to-slf4j:${versions.log4j}!!" + + compileOnly "org.projectlombok:lombok:${versions.revLombok}" + annotationProcessor "org.projectlombok:lombok:${versions.revLombok}" + testAnnotationProcessor "org.projectlombok:lombok:${versions.revLombok}" + implementation "org.apache.commons:commons-lang3:${versions.revCommonsLang}" + } + + dependencyManagement { + imports { + mavenBom(org.springframework.boot.gradle.plugin.SpringBootPlugin.BOM_COORDINATES) + } + } + + test { + useJUnitPlatform() + testLogging { + events = ["SKIPPED", "FAILED"] + exceptionFormat = "full" + showStandardStreams = false + } + } + + + compileJava { + sourceCompatibility = 11 + targetCompatibility = 11 + } + + spotless { + java { + googleJavaFormat().aosp() + removeUnusedImports() + importOrder('java', 'javax', 'org', 'com.netflix', 'io.orkes','', '\\#com.netflix', '\\#') + licenseHeaderFile("$rootDir/licenseheader.txt") + } + } + build.dependsOn(spotlessApply) + + + publishing { + publications { + mavenJava(MavenPublication) { + from components.java + pom { + name = 'Orkes Conductor Community' + description = 'Orkes supported version of open source Netflix Conductor' + url = 'https://github.com/orkes-io/orkes-conductor-community' + scm { + connection = 'scm:git://github.com/orkes-io/orkes-conductor-community.git' + developerConnection = 'scm:git://github.com/orkes-io/orkes-conductor-community.git' + url = 'https://github.com/orkes-io/orkes-conductor-community' + } + licenses { + license { + name = 'Orkes Community License' + url = 'https://github.com/orkes-io/licenses/community/LICENSE.txt' + } + } + developers { + developer { + organization = 'Orkes' + organizationUrl = 'https://orkes.io' + name = 'Orkes Development Team' + email = 'developers@orkes.io' + } + } + } + } + } + + repositories { + maven { + if (project.hasProperty("mavenCentral")) { + println "Publishing to Sonatype Repository" + url = "https://s01.oss.sonatype.org/${project.version.endsWith('-SNAPSHOT') ? 
"content/repositories/snapshots/" : "service/local/staging/deploy/maven2/"}" + credentials { + username project.properties.username + password project.properties.password + } + } + } + } + + def signingKeyId = findProperty('signingKeyId') + if(signingKeyId) { + println 'Signing the artifact with keys' + signing { + def signingKey = findProperty('signingKey') + def signingPassword = findProperty('signingPassword') + if (signingKeyId && signingKey && signingPassword) { + useInMemoryPgpKeys(signingKeyId, signingKey, signingPassword) + } + + sign publishing.publications + } + } + + } +} diff --git a/docker/DockerfileServer b/docker/DockerfileServer new file mode 100644 index 0000000..d1548b9 --- /dev/null +++ b/docker/DockerfileServer @@ -0,0 +1,39 @@ +FROM alpine:3.16.2 + +MAINTAINER Orkes Inc + +RUN apk add nginx + +RUN apk add openjdk11 + +RUN apk add coreutils +RUN apk add curl + +# Make app folders +RUN mkdir -p /app/config /app/logs /app/libs /app/info + +# Add UI +WORKDIR /usr/share/nginx/html +RUN rm -rf ./* +COPY tmp/ui/conductor/ui/build . +COPY docker/config/nginx.conf /etc/nginx/http.d/default.conf + +# Startup script(s) +COPY docker/config/startup.sh /app/startup.sh +COPY docker/config/config.properties /app/config/config.properties +COPY server/src/main/resources/banner.txt /app/config/banner.txt + +# JAR files +COPY server/build/libs/orkes-conductor-server-boot.jar /app/libs/server.jar + +# Server version +#COPY assembled/libs/server-version.txt* /app/info + +RUN chmod +x /app/startup.sh + +HEALTHCHECK --interval=60s --timeout=30s --retries=10 CMD curl -I -XGET http://localhost:8080/health || exit 1 + +EXPOSE 5000 8080 + +CMD ["/app/startup.sh"] +ENTRYPOINT ["/bin/sh"] diff --git a/docker/DockerfileStandalone b/docker/DockerfileStandalone new file mode 100644 index 0000000..8cc2aa2 --- /dev/null +++ b/docker/DockerfileStandalone @@ -0,0 +1,44 @@ +FROM alpine:3.16.2 +MAINTAINER Orkes Inc + +# Install software required to run conductor stack +RUN apk add nginx +RUN apk add coreutils +RUN apk add openjdk11 +RUN apk add redis +RUN apk add coreutils +RUN apk add postgresql14 + +# Make app folders +RUN mkdir -p /app/config /app/logs /app/libs /app/info + +# Add UI +WORKDIR /usr/share/nginx/html +RUN rm -rf ./* +COPY tmp/ui/conductor/ui/build . 
+COPY docker/config/nginx.conf /etc/nginx/http.d/default.conf + + +# Startup script(s) +COPY docker/config/startup.sh /app/startup.sh +COPY docker/config/config.properties /app/config/config.properties +COPY docker/config/redis.conf /app/config/redis.conf +COPY docker/config/start_all.sh /app/start_all.sh +COPY server/src/main/resources/banner.txt /app/config/banner.txt + +# JAR files +COPY server/build/libs/orkes-conductor-server-boot.jar /app/libs/server.jar + +RUN chmod +x /app/startup.sh +RUN touch /app/logs/server.log + +# setup postgres +RUN mkdir /run/postgresql +RUN chown postgres:postgres /run/postgresql/ + +HEALTHCHECK --interval=60s --timeout=30s --retries=10 CMD curl -I -XGET http://localhost:8080/health || exit 1 +EXPOSE 5000 8080 + +USER root +CMD ["/app/start_all.sh"] +ENTRYPOINT ["/bin/sh"] diff --git a/docker/build-ui.sh b/docker/build-ui.sh new file mode 100755 index 0000000..920ae43 --- /dev/null +++ b/docker/build-ui.sh @@ -0,0 +1,8 @@ +# clone and build conductor UI +mkdir -p tmp/ui +cd tmp/ui +pwd +git clone https://github.com/Netflix/conductor +cd conductor/ui +yarn install +yarn build \ No newline at end of file diff --git a/docker/config/config.properties b/docker/config/config.properties new file mode 100644 index 0000000..e69de29 diff --git a/docker/config/nginx.conf b/docker/config/nginx.conf new file mode 100644 index 0000000..652721d --- /dev/null +++ b/docker/config/nginx.conf @@ -0,0 +1,44 @@ +server { + listen 5000; + server_name conductor; + server_tokens off; + + gzip on; + gzip_vary on; + gzip_comp_level 6; + gzip_types text/plain text/css application/json application/x-javascript application/javascript text/xml application/xml application/rss+xml text/javascript image/svg+xml application/vnd.ms-fontobject application/x-font-ttf font/opentype; + + location / { + add_header Referrer-Policy "strict-origin"; + add_header X-Frame-Options "SAMEORIGIN"; + add_header X-Content-Type-Options "nosniff"; + add_header Content-Security-Policy "script-src 'self' 'unsafe-inline' 'unsafe-eval' assets.orkes.io *.googletagmanager.com *.pendo.io https://cdn.jsdelivr.net; worker-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob:;"; + add_header Permissions-Policy "accelerometer=(), autoplay=(), camera=(), cross-origin-isolated=(), display-capture=(), document-domain=(), encrypted-media=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), xr-spatial-tracking=(), clipboard-read=(self), clipboard-write=(self), gamepad=(), hid=(), idle-detection=(), serial=(), window-placement=(self)"; + # This would be the directory where your React app's static files are stored at + root /usr/share/nginx/html; + try_files $uri /index.html; + } + + location /api { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-NginX-Proxy true; + proxy_pass http://localhost:8080/api; + proxy_ssl_session_reuse off; + proxy_set_header Host $http_host; + proxy_cache_bypass $http_upgrade; + proxy_redirect off; + } + + location /swagger-ui { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-NginX-Proxy true; + proxy_pass http://localhost:8080/swagger-ui; + proxy_ssl_session_reuse off; + proxy_set_header Host $http_host; + proxy_cache_bypass $http_upgrade; + proxy_redirect off; + } + +} \ No newline at 
end of file diff --git a/docker/config/redis.conf b/docker/config/redis.conf new file mode 100644 index 0000000..f43add6 --- /dev/null +++ b/docker/config/redis.conf @@ -0,0 +1 @@ +appendonly yes \ No newline at end of file diff --git a/docker/config/start_all.sh b/docker/config/start_all.sh new file mode 100644 index 0000000..d6365b5 --- /dev/null +++ b/docker/config/start_all.sh @@ -0,0 +1,15 @@ +# configure and start redis +mkdir -p /redis +cd /redis +nohup redis-server /app/config/redis.conf & + +# configure and start postgres +mkdir -p /pgdata +chown -R postgres:postgres /pgdata +chmod 0700 /pgdata +su postgres -c 'initdb -D /pgdata' +su postgres -c 'pg_ctl start -D /pgdata' + +cat /app/config/banner.txt +nohup /app/startup.sh & +tail -f /app/logs/server.log \ No newline at end of file diff --git a/docker/config/startup.sh b/docker/config/startup.sh new file mode 100644 index 0000000..5bd33b6 --- /dev/null +++ b/docker/config/startup.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +echo "Starting Conductor Server and UI" +echo "Running Nginx in background" +# Start nginx as daemon +nginx + +# Start the server +cd /app/libs +echo "Using config properties"; +export config_file=/app/config/config.properties + +if [[ -z "${JVM_MEMORY_SETTINGS}" ]]; then + JVM_MEMORY="-Xms512M -Xmx750M" +else + JVM_MEMORY="${JVM_MEMORY_SETTINGS}" +fi + +echo "Starting Conductor with $JVM_MEMORY memory settings" +export LOG_FILE=/app/logs/server.log + +java $JVM_MEMORY -jar -DCONDUCTOR_CONFIG_FILE=$config_file server.jar \ No newline at end of file diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 0000000..af82e00 --- /dev/null +++ b/gradle.properties @@ -0,0 +1 @@ +org.gradle.parallel=true \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..7454180 Binary files /dev/null and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..69a9715 --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.1-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 0000000..1b6c787 --- /dev/null +++ b/gradlew @@ -0,0 +1,234 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit + +APP_NAME="Gradle" +APP_BASE_NAME=${0##*/} + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 
+ +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 0000000..107acd3 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. 
+@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/licenseheader.txt b/licenseheader.txt new file mode 100644 index 0000000..60e5820 --- /dev/null +++ b/licenseheader.txt @@ -0,0 +1,12 @@ +/* + * Copyright $YEAR Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ \ No newline at end of file diff --git a/persistence/build.gradle b/persistence/build.gradle new file mode 100644 index 0000000..5ec704b --- /dev/null +++ b/persistence/build.gradle @@ -0,0 +1,31 @@ +dependencies { + + + + implementation "com.netflix.conductor:conductor-common:${versions.conductorfork}" + implementation "com.netflix.conductor:conductor-core:${versions.conductorfork}" + compileOnly 'org.springframework.boot:spring-boot-starter' + + implementation "redis.clients:jedis:${versions.revJedis}" + + implementation "com.google.guava:guava:${versions.revGuava}" + + //Object Mapper + implementation "com.fasterxml.jackson.core:jackson-databind" + implementation "com.fasterxml.jackson.core:jackson-core" + + //In Memory redis + implementation "org.rarefiedredis.redis:redis-java:${versions.revRarefiedRedis}" + + //In memory + implementation "com.jayway.jsonpath:json-path:2.4.0" + + + //Micrometer + implementation "io.micrometer:micrometer-core:1.7.5" + + //spring + testImplementation 'org.springframework.boot:spring-boot-starter-test' + testImplementation 'org.junit.vintage:junit-vintage-engine' + +} \ No newline at end of file diff --git a/persistence/src/main/java/com/netflix/conductor/redis/config/AnyRedisCondition.java b/persistence/src/main/java/com/netflix/conductor/redis/config/AnyRedisCondition.java new file mode 100644 index 0000000..0789d72 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/config/AnyRedisCondition.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.config; + +import org.springframework.boot.autoconfigure.condition.AnyNestedCondition; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; + +public class AnyRedisCondition extends AnyNestedCondition { + + public AnyRedisCondition() { + super(ConfigurationPhase.PARSE_CONFIGURATION); + } + + @ConditionalOnProperty(name = "conductor.db.type", havingValue = "dynomite") + static class DynomiteClusterCondition {} + + @ConditionalOnProperty(name = "conductor.db.type", havingValue = "memory") + static class InMemoryRedisCondition {} + + @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_cluster") + static class RedisClusterConfiguration {} + + @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_sentinel") + static class RedisSentinelConfiguration {} + + @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone") + static class RedisStandaloneConfiguration {} +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/config/InMemoryRedisConfiguration.java b/persistence/src/main/java/com/netflix/conductor/redis/config/InMemoryRedisConfiguration.java new file mode 100644 index 0000000..8913bc2 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/config/InMemoryRedisConfiguration.java @@ -0,0 +1,67 @@ +/* + * Copyright 2020 Orkes, Inc. + *
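
Editor's note: AnyRedisCondition above extends Spring's AnyNestedCondition, so it matches when conductor.db.type is any of dynomite, memory, redis_cluster, redis_sentinel or redis_standalone. A minimal sketch of gating a bean on it (hypothetical class, not part of this changeset; assumes the conductor redis config package on the classpath):

    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Conditional;
    import org.springframework.context.annotation.Configuration;

    import com.netflix.conductor.redis.config.AnyRedisCondition;

    @Configuration
    @Conditional(AnyRedisCondition.class)
    public class RequiresAnyRedisConfig {

        // Created only when one of the five redis-backed db types is configured.
        @Bean
        public String redisDbMarker() {
            return "redis-backed persistence enabled";
        }
    }
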

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.config; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; + +import com.netflix.conductor.redis.dynoqueue.LocalhostHostSupplier; +import com.netflix.conductor.redis.jedis.JedisMock; +import com.netflix.conductor.redis.jedis.JedisStandalone; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; +import com.netflix.dyno.connectionpool.HostSupplier; + +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.commands.JedisCommands; + +@Configuration(proxyBeanMethods = false) +@ConditionalOnProperty(name = "conductor.db.type", havingValue = "memory") +public class InMemoryRedisConfiguration { + + public static final JedisMock jedisMock = new JedisMock(); + + @Bean + public HostSupplier hostSupplier(RedisProperties properties) { + return new LocalhostHostSupplier(properties); + } + + @Bean + public JedisMock jedisMock() { + return jedisMock; + } + + @Bean + public JedisCommands jedisCommands() { + return new JedisStandalone(jedisPool()); + } + + @Bean + public JedisPool jedisPool() { + return new JedisPool() { + @Override + public Jedis getResource() { + return jedisMock; + } + }; + } + + @Primary + @Bean + public OrkesJedisProxy OrkesJedisProxy() { + System.out.println("OrkesJedisProxy created"); + return new OrkesJedisProxy(jedisPool()); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/config/RedisClusterConfiguration.java b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisClusterConfiguration.java new file mode 100644 index 0000000..1f5105b --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisClusterConfiguration.java @@ -0,0 +1,72 @@ +/* + * Copyright 2020 Orkes, Inc. + *
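
Editor's note: in the in-memory configuration above, the anonymous JedisPool always hands back the single static JedisMock, so every bean shares one in-memory store. A quick illustration (hypothetical snippet, assuming JedisMock's Jedis-compatible set/get):

    InMemoryRedisConfiguration cfg = new InMemoryRedisConfiguration();
    cfg.jedisPool().getResource().set("greeting", "hello");
    // A second resource sees the same data: getResource() returns the shared mock.
    String value = cfg.jedisPool().getResource().get("greeting"); // "hello"
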

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.config; + +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier; +import com.netflix.conductor.redis.jedis.JedisCluster; +import com.netflix.dyno.connectionpool.Host; + +import redis.clients.jedis.HostAndPort; +import redis.clients.jedis.Protocol; + +@Configuration(proxyBeanMethods = false) +@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_cluster") +public class RedisClusterConfiguration { + + private static final Logger log = LoggerFactory.getLogger(RedisClusterConfiguration.class); + + // Same as redis.clients.jedis.BinaryJedisCluster + protected static final int DEFAULT_MAX_ATTEMPTS = 5; + + @Bean + public JedisCluster getJedisCluster(RedisProperties properties) { + GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig<>(); + genericObjectPoolConfig.setMaxTotal(properties.getMaxConnectionsPerHost()); + ConfigurationHostSupplier hostSupplier = new ConfigurationHostSupplier(properties); + Set hosts = + hostSupplier.getHosts().stream() + .map(h -> new HostAndPort(h.getHostName(), h.getPort())) + .collect(Collectors.toSet()); + String password = getPassword(hostSupplier.getHosts()); + + if (password != null) { + log.info("Connecting to Redis Cluster with AUTH"); + } + + return new JedisCluster( + new redis.clients.jedis.JedisCluster( + hosts, + Protocol.DEFAULT_TIMEOUT, + Protocol.DEFAULT_TIMEOUT, + DEFAULT_MAX_ATTEMPTS, + password, + null, + genericObjectPoolConfig, + properties.isSsl())); + } + + private String getPassword(List hosts) { + return hosts.isEmpty() ? null : hosts.get(0).getPassword(); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/config/RedisProperties.java b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisProperties.java new file mode 100644 index 0000000..2aa6776 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisProperties.java @@ -0,0 +1,298 @@ +/* + * Copyright 2021 Orkes, Inc. + *
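
Editor's note: per the RedisProperties javadoc later in this diff, conductor.redis.hosts holds host:port:rack triples separated by semicolons, and the cluster AUTH password, when present, is read from the first host only. Illustrative (hypothetical values):

    // conductor.redis.hosts=redis-a:6379:us-east-1c;redis-b:6379:us-east-1d
    // -> JedisCluster over {redis-a:6379, redis-b:6379}, password taken from redis-a
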

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.config; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.convert.DurationUnit; +import org.springframework.context.annotation.Configuration; + +import com.netflix.conductor.core.config.ConductorProperties; + +@ConfigurationProperties("conductor.redis") +@Configuration +public class RedisProperties { + + private final ConductorProperties conductorProperties; + + @Autowired + public RedisProperties(ConductorProperties conductorProperties) { + this.conductorProperties = conductorProperties; + } + + /** + * Data center region. If hosting on Amazon the value is something like us-east-1, us-west-2 + * etc. + */ + private String dataCenterRegion = "us-east-1"; + + /** + * Local rack / availability zone. For AWS deployments, the value is something like us-east-1a, + * etc. + */ + private String availabilityZone = "us-east-1c"; + + /** The name of the redis / dynomite cluster */ + private String clusterName = ""; + + /** Dynomite Cluster details. Format is host:port:rack separated by semicolon */ + private String hosts = null; + + /** The prefix used to prepend workflow data in redis */ + private String workflowNamespacePrefix = null; + + /** The prefix used to prepend keys for queues in redis */ + private String queueNamespacePrefix = null; + + /** + * The domain name to be used in the key prefix for logical separation of workflow data and + * queues in a shared redis setup + */ + private String keyspaceDomain = null; + + /** + * The maximum number of connections that can be managed by the connection pool on a given + * instance + */ + private int maxConnectionsPerHost = 10; + + /** Database number. Defaults to a 0. 
Can be anywhere from 0 to 15 */ + private int database = 0; + + /** + * The maximum amount of time to wait for a connection to become available from the connection + * pool + */ + private Duration maxTimeoutWhenExhausted = Duration.ofMillis(800); + + /** The maximum retry attempts to use with this connection pool */ + private int maxRetryAttempts = 0; + + /** The read connection port to be used for connecting to dyno-queues */ + private int queuesNonQuorumPort = 22122; + + /** The time in seconds after which the in-memory task definitions cache will be refreshed */ + @DurationUnit(ChronoUnit.SECONDS) + private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60); + + /** The time to live in seconds for which the event execution will be persisted */ + @DurationUnit(ChronoUnit.SECONDS) + private Duration eventExecutionPersistenceTTL = Duration.ofSeconds(60); + + /** The time in seconds after which the in-memory metadata cache will be refreshed */ + @DurationUnit(ChronoUnit.SECONDS) + private Duration metadataCacheRefreshInterval = Duration.ofSeconds(60); + + // Maximum number of idle connections to be maintained + private int maxIdleConnections = 8; + + // Minimum number of idle connections to be maintained + private int minIdleConnections = 5; + + private long minEvictableIdleTimeMillis = 1800000; + + private long timeBetweenEvictionRunsMillis = -1L; + + private boolean testWhileIdle = false; + + private int numTestsPerEvictionRun = 3; + + private boolean ssl; + + public int getNumTestsPerEvictionRun() { + return numTestsPerEvictionRun; + } + + public void setNumTestsPerEvictionRun(int numTestsPerEvictionRun) { + this.numTestsPerEvictionRun = numTestsPerEvictionRun; + } + + public boolean isTestWhileIdle() { + return testWhileIdle; + } + + public void setTestWhileIdle(boolean testWhileIdle) { + this.testWhileIdle = testWhileIdle; + } + + public long getMinEvictableIdleTimeMillis() { + return minEvictableIdleTimeMillis; + } + + public void setMinEvictableIdleTimeMillis(long minEvictableIdleTimeMillis) { + this.minEvictableIdleTimeMillis = minEvictableIdleTimeMillis; + } + + public long getTimeBetweenEvictionRunsMillis() { + return timeBetweenEvictionRunsMillis; + } + + public void setTimeBetweenEvictionRunsMillis(long timeBetweenEvictionRunsMillis) { + this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis; + } + + public int getMinIdleConnections() { + return minIdleConnections; + } + + public void setMinIdleConnections(int minIdleConnections) { + this.minIdleConnections = minIdleConnections; + } + + public int getMaxIdleConnections() { + return maxIdleConnections; + } + + public void setMaxIdleConnections(int maxIdleConnections) { + this.maxIdleConnections = maxIdleConnections; + } + + public String getDataCenterRegion() { + return dataCenterRegion; + } + + public void setDataCenterRegion(String dataCenterRegion) { + this.dataCenterRegion = dataCenterRegion; + } + + public String getAvailabilityZone() { + return availabilityZone; + } + + public void setAvailabilityZone(String availabilityZone) { + this.availabilityZone = availabilityZone; + } + + public String getClusterName() { + return clusterName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public String getHosts() { + return hosts; + } + + public void setHosts(String hosts) { + this.hosts = hosts; + } + + public String getWorkflowNamespacePrefix() { + return workflowNamespacePrefix; + } + + public void setWorkflowNamespacePrefix(String workflowNamespacePrefix) 
{ + this.workflowNamespacePrefix = workflowNamespacePrefix; + } + + public String getQueueNamespacePrefix() { + return queueNamespacePrefix; + } + + public void setQueueNamespacePrefix(String queueNamespacePrefix) { + this.queueNamespacePrefix = queueNamespacePrefix; + } + + public String getKeyspaceDomain() { + return keyspaceDomain; + } + + public void setKeyspaceDomain(String keyspaceDomain) { + this.keyspaceDomain = keyspaceDomain; + } + + public int getMaxConnectionsPerHost() { + return maxConnectionsPerHost; + } + + public void setMaxConnectionsPerHost(int maxConnectionsPerHost) { + this.maxConnectionsPerHost = maxConnectionsPerHost; + } + + public Duration getMaxTimeoutWhenExhausted() { + return maxTimeoutWhenExhausted; + } + + public void setMaxTimeoutWhenExhausted(Duration maxTimeoutWhenExhausted) { + this.maxTimeoutWhenExhausted = maxTimeoutWhenExhausted; + } + + public int getMaxRetryAttempts() { + return maxRetryAttempts; + } + + public void setMaxRetryAttempts(int maxRetryAttempts) { + this.maxRetryAttempts = maxRetryAttempts; + } + + public int getQueuesNonQuorumPort() { + return queuesNonQuorumPort; + } + + public void setQueuesNonQuorumPort(int queuesNonQuorumPort) { + this.queuesNonQuorumPort = queuesNonQuorumPort; + } + + public Duration getTaskDefCacheRefreshInterval() { + return taskDefCacheRefreshInterval; + } + + public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) { + this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval; + } + + public Duration getEventExecutionPersistenceTTL() { + return eventExecutionPersistenceTTL; + } + + public void setEventExecutionPersistenceTTL(Duration eventExecutionPersistenceTTL) { + this.eventExecutionPersistenceTTL = eventExecutionPersistenceTTL; + } + + public int getDatabase() { + return database; + } + + public void setDatabase(int database) { + this.database = database; + } + + public String getQueuePrefix() { + String prefix = getQueueNamespacePrefix() + "." + conductorProperties.getStack(); + if (getKeyspaceDomain() != null) { + prefix = prefix + "." + getKeyspaceDomain(); + } + return prefix; + } + + public Duration getMetadataCacheRefreshInterval() { + return metadataCacheRefreshInterval; + } + + public void setMetadataCacheRefreshInterval(Duration metadataCacheRefreshInterval) { + this.metadataCacheRefreshInterval = metadataCacheRefreshInterval; + } + + public boolean isSsl() { + return ssl; + } + + public void setSsl(boolean ssl) { + this.ssl = ssl; + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/config/RedisSentinelConfiguration.java b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisSentinelConfiguration.java new file mode 100644 index 0000000..029ede8 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisSentinelConfiguration.java @@ -0,0 +1,92 @@ +/* + * Copyright 2020 Orkes, Inc. + *
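
Editor's note: getQueuePrefix() above yields "<queueNamespacePrefix>.<stack>", with ".<keyspaceDomain>" appended when a domain is configured. Illustrative (hypothetical values):

    // queueNamespacePrefix=conductor_queues, stack=test, keyspaceDomain=dev
    // -> getQueuePrefix() returns "conductor_queues.test.dev"
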

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.config; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier; +import com.netflix.conductor.redis.jedis.JedisSentinel; +import com.netflix.dyno.connectionpool.Host; + +import redis.clients.jedis.JedisSentinelPool; +import redis.clients.jedis.Protocol; + +@Configuration(proxyBeanMethods = false) +@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_sentinel") +public class RedisSentinelConfiguration { + + private static final Logger log = LoggerFactory.getLogger(RedisSentinelConfiguration.class); + + @Bean + protected JedisSentinel getJedisSentinel(RedisProperties properties) { + GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig<>(); + genericObjectPoolConfig.setMinIdle(properties.getMinIdleConnections()); + genericObjectPoolConfig.setMaxIdle(properties.getMaxIdleConnections()); + genericObjectPoolConfig.setMaxTotal(properties.getMaxConnectionsPerHost()); + genericObjectPoolConfig.setTestWhileIdle(properties.isTestWhileIdle()); + genericObjectPoolConfig.setMinEvictableIdleTimeMillis( + properties.getMinEvictableIdleTimeMillis()); + genericObjectPoolConfig.setTimeBetweenEvictionRunsMillis( + properties.getTimeBetweenEvictionRunsMillis()); + genericObjectPoolConfig.setNumTestsPerEvictionRun(properties.getNumTestsPerEvictionRun()); + ConfigurationHostSupplier hostSupplier = new ConfigurationHostSupplier(properties); + + log.info( + "Starting conductor server using redis_sentinel and cluster " + + properties.getClusterName()); + Set sentinels = new HashSet<>(); + for (Host host : hostSupplier.getHosts()) { + sentinels.add(host.getHostName() + ":" + host.getPort()); + } + // We use the password of the first sentinel host as password and sentinelPassword + String password = getPassword(hostSupplier.getHosts()); + if (password != null) { + return new JedisSentinel( + new JedisSentinelPool( + properties.getClusterName(), + sentinels, + genericObjectPoolConfig, + Protocol.DEFAULT_TIMEOUT, + Protocol.DEFAULT_TIMEOUT, + password, + properties.getDatabase(), + null, + Protocol.DEFAULT_TIMEOUT, + Protocol.DEFAULT_TIMEOUT, + password, + null)); + } else { + return new JedisSentinel( + new JedisSentinelPool( + properties.getClusterName(), + sentinels, + genericObjectPoolConfig, + Protocol.DEFAULT_TIMEOUT, + null, + properties.getDatabase())); + } + } + + private String getPassword(List hosts) { + return hosts.isEmpty() ? 
null : hosts.get(0).getPassword(); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/config/RedisStandaloneConfiguration.java b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisStandaloneConfiguration.java new file mode 100644 index 0000000..5f61fdd --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/config/RedisStandaloneConfiguration.java @@ -0,0 +1,66 @@ +/* + * Copyright 2020 Orkes, Inc. + *
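The sentinel bean above builds a JedisSentinelPool from the configured host list, reusing the first host's password as both the Redis password and the sentinel password. As a minimal sketch of how such a pool behaves, assuming a plain Jedis client and hypothetical sentinel addresses (not part of this diff): the addresses name the sentinel processes, which resolve the current master, not the Redis master itself.

    import java.util.Set;

    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.JedisSentinelPool;

    public class SentinelPoolSketch {
        public static void main(String[] args) {
            // Hypothetical sentinel addresses; "mymaster" is the replication
            // group name the sentinels monitor (clusterName in RedisProperties).
            Set<String> sentinels = Set.of("sentinel-1:26379", "sentinel-2:26379");
            try (JedisSentinelPool pool = new JedisSentinelPool("mymaster", sentinels);
                    Jedis jedis = pool.getResource()) {
                System.out.println(jedis.ping()); // answered by the elected master
            }
        }
    }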
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.config; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier; +import com.netflix.dyno.connectionpool.Host; + +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.JedisPoolConfig; +import redis.clients.jedis.Protocol; + +@Configuration(proxyBeanMethods = false) +@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone") +public class RedisStandaloneConfiguration { + + private static final Logger log = LoggerFactory.getLogger(RedisStandaloneConfiguration.class); + + @Bean + public JedisPool getJedisPool(RedisProperties redisProperties) { + JedisPoolConfig config = new JedisPoolConfig(); + config.setMinIdle(2); + config.setMaxTotal(redisProperties.getMaxConnectionsPerHost()); + log.info( + "Starting conductor server using redis_standalone - use SSL? {}", + redisProperties.isSsl()); + ConfigurationHostSupplier hostSupplier = new ConfigurationHostSupplier(redisProperties); + Host host = hostSupplier.getHosts().get(0); + + if (host.getPassword() != null) { + log.info("Connecting to Redis Standalone with AUTH"); + return new JedisPool( + config, + host.getHostName(), + host.getPort(), + Protocol.DEFAULT_TIMEOUT, + host.getPassword(), + redisProperties.getDatabase(), + redisProperties.isSsl()); + } else { + return new JedisPool( + config, + host.getHostName(), + host.getPort(), + Protocol.DEFAULT_TIMEOUT, + null, + redisProperties.getDatabase(), + redisProperties.isSsl()); + } + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/BaseDynoDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/BaseDynoDAO.java new file mode 100644 index 0000000..8655520 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/BaseDynoDAO.java @@ -0,0 +1,104 @@ +/* + * Copyright 2022 Orkes, Inc. + *
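For the standalone case, the JedisPool built above is consumed by borrowing a connection per operation and returning it with try-with-resources. A minimal usage sketch, assuming plain Jedis and a hypothetical local Redis on the default port (not part of this diff):

    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.JedisPool;
    import redis.clients.jedis.JedisPoolConfig;

    public class StandalonePoolSketch {
        public static void main(String[] args) {
            JedisPoolConfig config = new JedisPoolConfig();
            config.setMinIdle(2);   // same floor the bean above sets
            config.setMaxTotal(10); // stands in for maxConnectionsPerHost
            try (JedisPool pool = new JedisPool(config, "localhost", 6379);
                    Jedis jedis = pool.getResource()) {
                jedis.set("greeting", "hello");
                System.out.println(jedis.get("greeting"));
            }
        }
    }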
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.io.IOException; + +import org.apache.commons.lang3.StringUtils; + +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.metrics.Monitors; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class BaseDynoDAO { + + private static final String NAMESPACE_SEP = "."; + private static final String DAO_NAME = "redis"; + private final String domain; + private final RedisProperties properties; + private final ConductorProperties conductorProperties; + protected OrkesJedisProxy orkesJedisProxy; + protected ObjectMapper objectMapper; + + protected BaseDynoDAO( + OrkesJedisProxy jedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties) { + this.orkesJedisProxy = jedisProxy; + this.objectMapper = objectMapper; + this.conductorProperties = conductorProperties; + this.properties = properties; + this.domain = properties.getKeyspaceDomain(); + } + + String nsKey(String... nsValues) { + String rootNamespace = properties.getWorkflowNamespacePrefix(); + StringBuilder namespacedKey = new StringBuilder(); + if (StringUtils.isNotBlank(rootNamespace)) { + namespacedKey.append(rootNamespace).append(NAMESPACE_SEP); + } + String stack = conductorProperties.getStack(); + if (StringUtils.isNotBlank(stack)) { + namespacedKey.append(stack).append(NAMESPACE_SEP); + } + if (StringUtils.isNotBlank(domain)) { + namespacedKey.append(domain).append(NAMESPACE_SEP); + } + for (String nsValue : nsValues) { + namespacedKey.append(nsValue).append(NAMESPACE_SEP); + } + return StringUtils.removeEnd(namespacedKey.toString(), NAMESPACE_SEP); + } + + String toJson(Object value) { + try { + return objectMapper.writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + T readValue(String json, Class clazz) { + try { + return objectMapper.readValue(json, clazz); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + void recordRedisDaoRequests(String action) { + recordRedisDaoRequests(action, "n/a", "n/a"); + } + + void recordRedisDaoRequests(String action, String taskType, String workflowType) { + Monitors.recordDaoRequests(DAO_NAME, action, taskType, workflowType); + } + + void recordRedisDaoEventRequests(String action, String event) { + Monitors.recordDaoEventRequests(DAO_NAME, action, event); + } + + void recordRedisDaoPayloadSize(String action, int size, String taskType, String workflowType) { + Monitors.recordDaoPayloadSize( + DAO_NAME, + action, + StringUtils.defaultIfBlank(taskType, ""), + StringUtils.defaultIfBlank(workflowType, ""), + size); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/OrkesMetadataDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/OrkesMetadataDAO.java new file mode 100644 index 0000000..13fc6aa --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/OrkesMetadataDAO.java @@ -0,0 +1,130 @@ +/* + * Copyright 2022 
Orkes, Inc. + *
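BaseDynoDAO.nsKey above prepends the optional workflow namespace prefix, stack, and keyspace domain to the key parts, joining everything with dots and trimming the trailing separator. A worked sketch of the key it yields, with hypothetical settings (workflowNamespacePrefix=conductor, stack=test, keyspaceDomain=usw2):

    public class NsKeySketch {
        public static void main(String[] args) {
            // Key parts as nsKey would see them: prefix, stack, domain, then values.
            String[] parts = {"conductor", "test", "usw2", "WORKFLOW", "wf-123"};
            StringBuilder key = new StringBuilder();
            for (String part : parts) {
                key.append(part).append('.');
            }
            // Mirrors StringUtils.removeEnd(namespacedKey.toString(), NAMESPACE_SEP)
            System.out.println(key.substring(0, key.length() - 1));
            // -> conductor.test.usw2.WORKFLOW.wf-123
        }
    }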
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.springframework.context.annotation.Conditional; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.common.metadata.events.EventHandler; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.core.exception.ConflictException; +import com.netflix.conductor.dao.EventHandlerDAO; +import com.netflix.conductor.redis.config.AnyRedisCondition; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Component +@Conditional(AnyRedisCondition.class) +public class OrkesMetadataDAO extends RedisMetadataDAO { + + private final ConcurrentMap missingTaskDefs = new ConcurrentHashMap<>(); + + private final long taskDefCacheTTL; + private final EventHandlerDAO eventHandlerDAO; + private static final String WORKFLOW_DEF = "WORKFLOW_DEF"; + private static final String WORKFLOW_DEF_NAMES = "WORKFLOW_DEF_NAMES"; + private static final String WAIT_FOR_EVENT = "WAIT_FOR_EVENT"; + + public OrkesMetadataDAO( + OrkesJedisProxy orkesJedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties, + EventHandlerDAO eventHandlerDAO) { + super(orkesJedisProxy, objectMapper, conductorProperties, properties); + taskDefCacheTTL = properties.getTaskDefCacheRefreshInterval().getSeconds() * 1000; + log.info("taskDefCacheTTL set to {}", taskDefCacheTTL); + this.eventHandlerDAO = eventHandlerDAO; + } + + @Override + public void createWorkflowDef(WorkflowDef def) { + if (orkesJedisProxy.hexists( + nsKey(WORKFLOW_DEF, def.getName()), String.valueOf(def.getVersion()))) { + throw new ConflictException("Workflow with " + def.key() + " already exists!"); + } + _createOrUpdate(def); + } + + @Override + public void updateWorkflowDef(WorkflowDef def) { + _createOrUpdate(def); + } + + private void _createOrUpdate(WorkflowDef workflowDef) { + // First set the workflow def + orkesJedisProxy.hset( + nsKey(WORKFLOW_DEF, workflowDef.getName()), + String.valueOf(workflowDef.getVersion()), + toJson(workflowDef)); + + orkesJedisProxy.sadd(nsKey(WORKFLOW_DEF_NAMES), workflowDef.getName()); + for (var task : workflowDef.getTasks()) { + if (task.getType().equals(WAIT_FOR_EVENT)) { + EventHandler eventHandler = new EventHandler(); + eventHandler.setEvent(task.getSink()); + eventHandler.setName(workflowDef.getName() + "_" + task.getTaskReferenceName()); + eventHandler.setActive(true); + EventHandler.Action action = new EventHandler.Action(); + EventHandler.TaskDetails taskDetails = new EventHandler.TaskDetails(); + taskDetails.setTaskRefName(task.getTaskReferenceName()); + taskDetails.setWorkflowId("${targetWorkflowId}"); + taskDetails.setOutput(Map.of("orkes_wait_for_event_task", true)); + action.setComplete_task(taskDetails); + 
action.setAction(EventHandler.Action.Type.complete_task); + eventHandler.setActions(List.of(action)); + // TODO: extend EventHandlerDAO with exists and updateOrInsert + if (eventHandlerDAO.getAllEventHandlers().stream() + .noneMatch(e -> e.getName().equals(eventHandler.getName()))) { + eventHandlerDAO.addEventHandler(eventHandler); + } else { + eventHandlerDAO.updateEventHandler(eventHandler); + } + } + } + recordRedisDaoRequests("storeWorkflowDef", "n/a", workflowDef.getName()); + } + + public TaskDef getTaskDef(String name, boolean ignoreCache) { + return ignoreCache ? super.getTaskDef(name) : getTaskDef(name); + } + + @Override + public TaskDef getTaskDef(String name) { + Long lastChecked = missingTaskDefs.get(name); + // If the last check is NOT null, ie the task was reported missing earlier + // If so, check when was it last checked and if more than the configured TTL then refresh + long now = System.currentTimeMillis(); + if (lastChecked != null && (now - lastChecked) < taskDefCacheTTL) { + return null; + } + TaskDef found = super.getTaskDef(name); + if (found == null) { + missingTaskDefs.put(name, now); + } else { + missingTaskDefs.remove(name); + } + return found; + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAO.java new file mode 100644 index 0000000..724d6d9 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAO.java @@ -0,0 +1,144 @@ +/* + * Copyright 2022 Orkes, Inc. + *
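OrkesMetadataDAO.getTaskDef above keeps a negative cache: a task definition confirmed missing is not looked up again until taskDefCacheRefreshInterval elapses, which shields Redis from repeated lookups of nonexistent names. The pattern, reduced to a standalone sketch (hypothetical class, not from this diff; the loader stands in for super.getTaskDef):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    class NegativeCacheSketch<V> {
        private final Map<String, Long> missing = new ConcurrentHashMap<>();
        private final long ttlMillis;
        private final Function<String, V> loader;

        NegativeCacheSketch(long ttlMillis, Function<String, V> loader) {
            this.ttlMillis = ttlMillis;
            this.loader = loader;
        }

        V get(String name) {
            Long lastMiss = missing.get(name);
            long now = System.currentTimeMillis();
            if (lastMiss != null && (now - lastMiss) < ttlMillis) {
                return null; // recently confirmed missing; skip the lookup
            }
            V found = loader.apply(name);
            if (found == null) {
                missing.put(name, now); // remember the miss
            } else {
                missing.remove(name); // exists again; stop suppressing lookups
            }
            return found;
        }
    }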
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Conditional; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.common.metadata.events.EventHandler; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.core.exception.ConflictException; +import com.netflix.conductor.core.exception.NotFoundException; +import com.netflix.conductor.dao.EventHandlerDAO; +import com.netflix.conductor.redis.config.AnyRedisCondition; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; + +@Component +@Conditional(AnyRedisCondition.class) +public class RedisEventHandlerDAO extends BaseDynoDAO implements EventHandlerDAO { + + private static final Logger LOGGER = LoggerFactory.getLogger(RedisEventHandlerDAO.class); + + private static final String EVENT_HANDLERS = "EVENT_HANDLERS"; + private static final String EVENT_HANDLERS_BY_EVENT = "EVENT_HANDLERS_BY_EVENT"; + + public RedisEventHandlerDAO( + OrkesJedisProxy jedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties) { + super(jedisProxy, objectMapper, conductorProperties, properties); + } + + @Override + public void addEventHandler(EventHandler eventHandler) { + Preconditions.checkNotNull(eventHandler.getName(), "Missing Name"); + if (getEventHandler(eventHandler.getName()) != null) { + throw new ConflictException( + "EventHandler with name %s already exists!", eventHandler.getName()); + } + index(eventHandler); + orkesJedisProxy.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler)); + recordRedisDaoRequests("addEventHandler"); + } + + @Override + public void updateEventHandler(EventHandler eventHandler) { + Preconditions.checkNotNull(eventHandler.getName(), "Missing Name"); + EventHandler existing = getEventHandler(eventHandler.getName()); + if (existing == null) { + throw new NotFoundException( + "EventHandler with name %s not found!", eventHandler.getName()); + } + index(eventHandler); + orkesJedisProxy.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler)); + recordRedisDaoRequests("updateEventHandler"); + } + + @Override + public void removeEventHandler(String name) { + EventHandler existing = getEventHandler(name); + if (existing == null) { + throw new NotFoundException("EventHandler with name %s not found!", name); + } + orkesJedisProxy.hdel(nsKey(EVENT_HANDLERS), name); + recordRedisDaoRequests("removeEventHandler"); + removeIndex(existing); + } + + @Override + public List getAllEventHandlers() { + Map all = orkesJedisProxy.hgetAll(nsKey(EVENT_HANDLERS)); + List handlers = new LinkedList<>(); + all.forEach( + (key, json) -> { + EventHandler eventHandler = readValue(json, EventHandler.class); + handlers.add(eventHandler); + }); + recordRedisDaoRequests("getAllEventHandlers"); + return handlers; + } + + private 
void index(EventHandler eventHandler) { + String event = eventHandler.getEvent(); + String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); + orkesJedisProxy.sadd(key, eventHandler.getName()); + } + + private void removeIndex(EventHandler eventHandler) { + String event = eventHandler.getEvent(); + String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); + orkesJedisProxy.srem(key, eventHandler.getName()); + } + + @Override + public List getEventHandlersForEvent(String event, boolean activeOnly) { + String key = nsKey(EVENT_HANDLERS_BY_EVENT, event); + Set names = orkesJedisProxy.smembers(key); + List handlers = new LinkedList<>(); + for (String name : names) { + try { + EventHandler eventHandler = getEventHandler(name); + recordRedisDaoEventRequests("getEventHandler", event); + if (eventHandler.getEvent().equals(event) + && (!activeOnly || eventHandler.isActive())) { + handlers.add(eventHandler); + } + } catch (NotFoundException nfe) { + LOGGER.info("No matching event handler found for event: {}", event); + throw nfe; + } + } + return handlers; + } + + private EventHandler getEventHandler(String name) { + EventHandler eventHandler = null; + String json = orkesJedisProxy.hget(nsKey(EVENT_HANDLERS), name); + if (json != null) { + eventHandler = readValue(json, EventHandler.class); + } + return eventHandler; + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisExecutionDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisExecutionDAO.java new file mode 100644 index 0000000..8f2ab2e --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisExecutionDAO.java @@ -0,0 +1,730 @@ +/* + * Copyright 2022 Orkes, Inc. + *
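RedisEventHandlerDAO above maintains two structures: a hash mapping handler names to their JSON definitions, plus one set per event holding the names registered for it (the index kept by index and removeIndex). A minimal sketch of that layout with plain Jedis, assuming a local Redis and leaving out the nsKey namespace prefix for brevity:

    import redis.clients.jedis.Jedis;

    public class EventHandlerLayoutSketch {
        public static void main(String[] args) {
            try (Jedis jedis = new Jedis("localhost", 6379)) {
                // Definition store: handler name -> handler JSON
                jedis.hset("EVENT_HANDLERS", "order_handler",
                        "{\"name\":\"order_handler\",\"event\":\"sqs:orders\",\"active\":true}");
                // Index: event -> set of handler names
                jedis.sadd("EVENT_HANDLERS_BY_EVENT.sqs:orders", "order_handler");
                // Lookup path of getEventHandlersForEvent: SMEMBERS, then HGET each
                for (String name : jedis.smembers("EVENT_HANDLERS_BY_EVENT.sqs:orders")) {
                    System.out.println(jedis.hget("EVENT_HANDLERS", name));
                }
            }
        }
    }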
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.text.SimpleDateFormat; +import java.util.*; +import java.util.stream.Collectors; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Conditional; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.common.metadata.events.EventExecution; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO; +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.metrics.Monitors; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.model.WorkflowModel; +import com.netflix.conductor.redis.config.AnyRedisCondition; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +@Component +@Conditional(AnyRedisCondition.class) +public class RedisExecutionDAO extends BaseDynoDAO + implements ExecutionDAO, ConcurrentExecutionLimitDAO { + + public static final Logger LOGGER = LoggerFactory.getLogger(RedisExecutionDAO.class); + + // Keys Families + private static final String TASK_LIMIT_BUCKET = "TASK_LIMIT_BUCKET"; + private static final String IN_PROGRESS_TASKS = "IN_PROGRESS_TASKS"; + private static final String TASKS_IN_PROGRESS_STATUS = + "TASKS_IN_PROGRESS_STATUS"; // Tasks which are in IN_PROGRESS status. 
+ private static final String WORKFLOW_TO_TASKS = "WORKFLOW_TO_TASKS"; + private static final String SCHEDULED_TASKS = "SCHEDULED_TASKS"; + private static final String TASK = "TASK"; + private static final String WORKFLOW = "WORKFLOW"; + private static final String PENDING_WORKFLOWS = "PENDING_WORKFLOWS"; + private static final String WORKFLOW_DEF_TO_WORKFLOWS = "WORKFLOW_DEF_TO_WORKFLOWS"; + private static final String EVENT_EXECUTION = "EVENT_EXECUTION"; + private final int ttlEventExecutionSeconds; + + public RedisExecutionDAO( + OrkesJedisProxy orkesJedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties) { + super(orkesJedisProxy, objectMapper, conductorProperties, properties); + + ttlEventExecutionSeconds = (int) properties.getEventExecutionPersistenceTTL().getSeconds(); + } + + private static String dateStr(Long timeInMs) { + Date date = new Date(timeInMs); + return dateStr(date); + } + + private static String dateStr(Date date) { + SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd"); + return format.format(date); + } + + private static List dateStrBetweenDates(Long startdatems, Long enddatems) { + List dates = new ArrayList<>(); + Calendar calendar = new GregorianCalendar(); + Date startdate = new Date(startdatems); + Date enddate = new Date(enddatems); + calendar.setTime(startdate); + while (calendar.getTime().before(enddate) || calendar.getTime().equals(enddate)) { + Date result = calendar.getTime(); + dates.add(dateStr(result)); + calendar.add(Calendar.DATE, 1); + } + return dates; + } + + @Override + public List getPendingTasksByWorkflow(String taskName, String workflowId) { + List tasks = new LinkedList<>(); + + List pendingTasks = getPendingTasksForTaskType(taskName); + pendingTasks.forEach( + pendingTask -> { + if (pendingTask.getWorkflowInstanceId().equals(workflowId)) { + tasks.add(pendingTask); + } + }); + + return tasks; + } + + @Override + public List getTasks(String taskDefName, String startKey, int count) { + List tasks = new LinkedList<>(); + + List pendingTasks = getPendingTasksForTaskType(taskDefName); + boolean startKeyFound = startKey == null; + int foundcount = 0; + for (TaskModel pendingTask : pendingTasks) { + if (!startKeyFound) { + if (pendingTask.getTaskId().equals(startKey)) { + startKeyFound = true; + if (startKey != null) { + continue; + } + } + } + if (startKeyFound && foundcount < count) { + tasks.add(pendingTask); + foundcount++; + } + } + return tasks; + } + + @Override + public List createTasks(List tasks) { + + List tasksCreated = new LinkedList<>(); + + for (TaskModel task : tasks) { + validate(task); + String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount(); + Long added = + orkesJedisProxy.hset( + nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), + taskKey, + task.getTaskId()); + if (added < 1) { + LOGGER.debug( + "Task already scheduled, skipping the run " + + task.getTaskId() + + ", ref=" + + task.getReferenceTaskName() + + ", key=" + + taskKey); + continue; + } + + if (task.getStatus() != null + && !task.getStatus().isTerminal() + && task.getScheduledTime() == 0) { + task.setScheduledTime(System.currentTimeMillis()); + } + + correlateTaskToWorkflowInDS(task.getTaskId(), task.getWorkflowInstanceId()); + LOGGER.debug( + "Scheduled task added to WORKFLOW_TO_TASKS workflowId: {}, taskId: {}, taskType: {} during createTasks", + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType()); + + String inProgressTaskKey = nsKey(IN_PROGRESS_TASKS, 
task.getTaskDefName()); + orkesJedisProxy.sadd(inProgressTaskKey, task.getTaskId()); + LOGGER.debug( + "Scheduled task added to IN_PROGRESS_TASKS with inProgressTaskKey: {}, workflowId: {}, taskId: {}, taskType: {} during createTasks", + inProgressTaskKey, + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType()); + + updateTask(task); + tasksCreated.add(task); + } + + return tasksCreated; + } + + @Override + public void updateTask(TaskModel task) { + Optional taskDefinition = task.getTaskDefinition(); + + if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { + + if (task.getStatus() != null && task.getStatus().equals(TaskModel.Status.IN_PROGRESS)) { + orkesJedisProxy.sadd( + nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId()); + LOGGER.debug( + "Workflow Task added to TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask", + nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName(), task.getTaskId()), + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType(), + task.getStatus().name()); + } else { + orkesJedisProxy.srem( + nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId()); + LOGGER.debug( + "Workflow Task removed from TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask", + nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName(), task.getTaskId()), + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType(), + task.getStatus().name()); + String key = nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()); + orkesJedisProxy.zrem(key, task.getTaskId()); + LOGGER.debug( + "Workflow Task removed from TASK_LIMIT_BUCKET with taskLimitBucketKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask", + key, + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType(), + task.getStatus().name()); + } + } + + String payload = toJson(task); + recordRedisDaoPayloadSize( + "updateTask", + payload.length(), + taskDefinition.map(TaskDef::getName).orElse("n/a"), + task.getWorkflowType()); + + orkesJedisProxy.set(nsKey(TASK, task.getTaskId()), payload); + LOGGER.debug( + "Workflow task payload saved to TASK with taskKey: {}, workflowId: {}, taskId: {}, taskType: {} during updateTask", + nsKey(TASK, task.getTaskId()), + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType()); + if (task.getStatus() != null && task.getStatus().isTerminal()) { + orkesJedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId()); + LOGGER.debug( + "Workflow Task removed from TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask", + nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), + task.getWorkflowInstanceId(), + task.getTaskId(), + task.getTaskType(), + task.getStatus().name()); + } + + Set taskIds = + orkesJedisProxy.smembers(nsKey(WORKFLOW_TO_TASKS, task.getWorkflowInstanceId())); + if (!taskIds.contains(task.getTaskId())) { + correlateTaskToWorkflowInDS(task.getTaskId(), task.getWorkflowInstanceId()); + } + } + + @Override + public boolean exceedsLimit(TaskModel task) { + Optional taskDefinition = task.getTaskDefinition(); + if (taskDefinition.isEmpty()) { + return false; + } + int limit = taskDefinition.get().concurrencyLimit(); + if (limit <= 0) { + return false; + } + + long current = 
getInProgressTaskCount(task.getTaskDefName()); + if (current >= limit) { + LOGGER.info( + "Task execution count limited. task - {}:{}, limit: {}, current: {}", + task.getTaskId(), + task.getTaskDefName(), + limit, + current); + Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); + return true; + } + + String rateLimitKey = nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()); + double score = System.currentTimeMillis(); + String taskId = task.getTaskId(); + orkesJedisProxy.zaddnx(rateLimitKey, score, taskId); + + Set ids = orkesJedisProxy.zrangeByScore(rateLimitKey, 0, score + 1, limit); + boolean rateLimited = !ids.contains(taskId); + if (rateLimited) { + LOGGER.info( + "Task execution count limited. task - {}:{}, limit: {}, current: {}", + task.getTaskId(), + task.getTaskDefName(), + limit, + current); + String inProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()); + // Cleanup any items that are still present in the rate limit bucket but not in progress + // anymore! + ids.stream() + .filter(id -> !orkesJedisProxy.sismember(inProgressKey, id)) + .forEach(id2 -> orkesJedisProxy.zrem(rateLimitKey, id2)); + Monitors.recordTaskRateLimited(task.getTaskDefName(), limit); + } + return rateLimited; + } + + private void removeTaskMappings(TaskModel task) { + String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount(); + + orkesJedisProxy.hdel(nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), taskKey); + orkesJedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId()); + orkesJedisProxy.srem( + nsKey(WORKFLOW_TO_TASKS, task.getWorkflowInstanceId()), task.getTaskId()); + orkesJedisProxy.srem( + nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId()); + orkesJedisProxy.zrem(nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()), task.getTaskId()); + } + + private void removeTaskMappingsWithExpiry(TaskModel task) { + String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount(); + + orkesJedisProxy.hdel(nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), taskKey); + orkesJedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId()); + orkesJedisProxy.srem( + nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId()); + orkesJedisProxy.zrem(nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()), task.getTaskId()); + } + + @Override + public boolean removeTask(String taskId) { + TaskModel task = getTask(taskId); + if (task == null) { + LOGGER.warn("No such task found by id {}", taskId); + return false; + } + removeTaskMappings(task); + + orkesJedisProxy.del(nsKey(TASK, task.getTaskId())); + return true; + } + + private boolean removeTaskWithExpiry(String taskId, int ttlSeconds) { + TaskModel task = getTask(taskId); + if (task == null) { + LOGGER.warn("No such task found by id {}", taskId); + return false; + } + removeTaskMappingsWithExpiry(task); + + orkesJedisProxy.expire(nsKey(TASK, task.getTaskId()), ttlSeconds); + return true; + } + + @Override + public TaskModel getTask(String taskId) { + Preconditions.checkNotNull(taskId, "taskId cannot be null"); + return Optional.ofNullable(orkesJedisProxy.get(nsKey(TASK, taskId))) + .map( + json -> { + TaskModel task = readValue(json, TaskModel.class); + recordRedisDaoPayloadSize( + "getTask", + toJson(task).length(), + task.getTaskType(), + task.getWorkflowType()); + return task; + }) + .orElse(null); + } + + @Override + public List getTasks(List taskIds) { + return taskIds.stream() + .map(taskId -> nsKey(TASK, taskId)) + 
.map(orkesJedisProxy::get) + .filter(Objects::nonNull) + .map( + jsonString -> { + TaskModel task = readValue(jsonString, TaskModel.class); + return task; + }) + .collect(Collectors.toList()); + } + + @Override + public List getTasksForWorkflow(String workflowId) { + Preconditions.checkNotNull(workflowId, "workflowId cannot be null"); + Set taskIds = orkesJedisProxy.smembers(nsKey(WORKFLOW_TO_TASKS, workflowId)); + return getTasks(new ArrayList<>(taskIds)); + } + + @Override + public List getPendingTasksForTaskType(String taskName) { + Preconditions.checkNotNull(taskName, "task name cannot be null"); + Set taskIds = orkesJedisProxy.smembers(nsKey(IN_PROGRESS_TASKS, taskName)); + return getTasks(new ArrayList<>(taskIds)); + } + + @Override + public String createWorkflow(WorkflowModel workflow) { + return insertOrUpdateWorkflow(workflow, false); + } + + @Override + public String updateWorkflow(WorkflowModel workflow) { + return insertOrUpdateWorkflow(workflow, true); + } + + @Override + public boolean removeWorkflow(String workflowId) { + WorkflowModel workflow = getWorkflow(workflowId, true); + if (workflow != null) { + // Remove from lists + String key = + nsKey( + WORKFLOW_DEF_TO_WORKFLOWS, + workflow.getWorkflowName(), + dateStr(workflow.getCreateTime())); + orkesJedisProxy.srem(key, workflowId); + orkesJedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflowId); + + // Remove the object + orkesJedisProxy.del(nsKey(WORKFLOW, workflowId)); + for (TaskModel task : workflow.getTasks()) { + removeTask(task.getTaskId()); + } + return true; + } + return false; + } + + public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) { + WorkflowModel workflow = getWorkflow(workflowId, true); + if (workflow != null) { + // Remove from lists + String key = + nsKey( + WORKFLOW_DEF_TO_WORKFLOWS, + workflow.getWorkflowName(), + dateStr(workflow.getCreateTime())); + orkesJedisProxy.srem(key, workflowId); + orkesJedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflowId); + + // Remove the object + orkesJedisProxy.expire(nsKey(WORKFLOW, workflowId), ttlSeconds); + for (TaskModel task : workflow.getTasks()) { + removeTaskWithExpiry(task.getTaskId(), ttlSeconds); + } + orkesJedisProxy.expire(nsKey(WORKFLOW_TO_TASKS, workflowId), ttlSeconds); + + return true; + } + return false; + } + + @Override + public void removeFromPendingWorkflow(String workflowType, String workflowId) { + orkesJedisProxy.del(nsKey(SCHEDULED_TASKS, workflowId)); + orkesJedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflowType), workflowId); + } + + @Override + public WorkflowModel getWorkflow(String workflowId) { + return getWorkflow(workflowId, true); + } + + @Override + public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) { + String json = orkesJedisProxy.get(nsKey(WORKFLOW, workflowId)); + WorkflowModel workflow = null; + + if (json != null) { + workflow = readValue(json, WorkflowModel.class); + if (includeTasks) { + List tasks = getTasksForWorkflow(workflowId); + tasks.sort(Comparator.comparingInt(TaskModel::getSeq)); + workflow.setTasks(tasks); + } + } + return workflow; + } + + /** + * @param workflowName name of the workflow + * @param version the workflow version + * @return list of workflow ids that are in RUNNING state returns workflows of all versions + * for the given workflow name + */ + @Override + public List getRunningWorkflowIds(String workflowName, int version) { + Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); + List 
workflowIds; + Set pendingWorkflows = + orkesJedisProxy.smembers(nsKey(PENDING_WORKFLOWS, workflowName)); + workflowIds = new LinkedList<>(pendingWorkflows); + return workflowIds; + } + + /** + * @param workflowName name of the workflow + * @param version the workflow version + * @return list of workflows that are in RUNNING state + */ + @Override + public List getPendingWorkflowsByType(String workflowName, int version) { + Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); + List workflowIds = getRunningWorkflowIds(workflowName, version); + return workflowIds.stream() + .map(this::getWorkflow) + .filter(workflow -> workflow.getWorkflowVersion() == version) + .collect(Collectors.toList()); + } + + @Override + public List getWorkflowsByType( + String workflowName, Long startTime, Long endTime) { + Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); + Preconditions.checkNotNull(startTime, "startTime cannot be null"); + Preconditions.checkNotNull(endTime, "endTime cannot be null"); + + List workflows = new LinkedList<>(); + + // Get all date strings between start and end + List dateStrs = dateStrBetweenDates(startTime, endTime); + dateStrs.forEach( + dateStr -> { + String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflowName, dateStr); + orkesJedisProxy + .smembers(key) + .forEach( + workflowId -> { + try { + WorkflowModel workflow = getWorkflow(workflowId); + if (workflow.getCreateTime() >= startTime + && workflow.getCreateTime() <= endTime) { + workflows.add(workflow); + } + } catch (Exception e) { + LOGGER.error( + "Failed to get workflow: {}", workflowId, e); + } + }); + }); + + return workflows; + } + + @Override + public List getWorkflowsByCorrelationId( + String workflowName, String correlationId, boolean includeTasks) { + throw new UnsupportedOperationException( + "This method is not implemented in RedisExecutionDAO. Please use ExecutionDAOFacade instead."); + } + + @Override + public boolean canSearchAcrossWorkflows() { + return false; + } + + /** + * Inserts a new workflow/ updates an existing workflow in the datastore. Additionally, if a + * workflow is in terminal state, it is removed from the set of pending workflows. 
+ * + * @param workflow the workflow instance + * @param update flag to identify if update or create operation + * @return the workflowId + */ + private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) { + Preconditions.checkNotNull(workflow, "workflow object cannot be null"); + + List tasks = workflow.getTasks(); + workflow.setTasks(new LinkedList<>()); + + String payload = toJson(workflow); + // Store the workflow object + orkesJedisProxy.set(nsKey(WORKFLOW, workflow.getWorkflowId()), payload); + recordRedisDaoPayloadSize( + "storeWorkflow", payload.length(), "n/a", workflow.getWorkflowName()); + if (!update) { + // Add to list of workflows for a workflowdef + String key = + nsKey( + WORKFLOW_DEF_TO_WORKFLOWS, + workflow.getWorkflowName(), + dateStr(workflow.getCreateTime())); + orkesJedisProxy.sadd(key, workflow.getWorkflowId()); + } + // Add or remove from the pending workflows + if (workflow.getStatus().isTerminal()) { + orkesJedisProxy.srem( + nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId()); + } else { + orkesJedisProxy.sadd( + nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId()); + } + + workflow.setTasks(tasks); + return workflow.getWorkflowId(); + } + + /** + * Stores the correlation of a task to the workflow instance in the datastore + * + * @param taskId the taskId to be correlated + * @param workflowInstanceId the workflowId to which the tasks belongs to + */ + @VisibleForTesting + void correlateTaskToWorkflowInDS(String taskId, String workflowInstanceId) { + String workflowToTaskKey = nsKey(WORKFLOW_TO_TASKS, workflowInstanceId); + orkesJedisProxy.sadd(workflowToTaskKey, taskId); + LOGGER.debug( + "Task mapped in WORKFLOW_TO_TASKS with workflowToTaskKey: {}, workflowId: {}, taskId: {}", + workflowToTaskKey, + workflowInstanceId, + taskId); + } + + public long getPendingWorkflowCount(String workflowName) { + String key = nsKey(PENDING_WORKFLOWS, workflowName); + return orkesJedisProxy.scard(key); + } + + @Override + public long getInProgressTaskCount(String taskDefName) { + String inProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, taskDefName); + return orkesJedisProxy.scard(inProgressKey); + } + + @Override + public boolean addEventExecution(EventExecution eventExecution) { + try { + String key = + nsKey( + EVENT_EXECUTION, + eventExecution.getName(), + eventExecution.getEvent(), + eventExecution.getMessageId()); + String json = objectMapper.writeValueAsString(eventExecution); + recordRedisDaoEventRequests("addEventExecution", eventExecution.getEvent()); + recordRedisDaoPayloadSize( + "addEventExecution", json.length(), eventExecution.getEvent(), "n/a"); + boolean added = orkesJedisProxy.hsetnx(key, eventExecution.getId(), json) == 1L; + + if (ttlEventExecutionSeconds > 0) { + orkesJedisProxy.expire(key, ttlEventExecutionSeconds); + } + + return added; + } catch (Exception e) { + throw new RuntimeException( + "Unable to add event execution for " + eventExecution.getId(), e); + } + } + + @Override + public void updateEventExecution(EventExecution eventExecution) { + try { + + String key = + nsKey( + EVENT_EXECUTION, + eventExecution.getName(), + eventExecution.getEvent(), + eventExecution.getMessageId()); + String json = objectMapper.writeValueAsString(eventExecution); + LOGGER.info("updating event execution {}", key); + orkesJedisProxy.hset(key, eventExecution.getId(), json); + recordRedisDaoEventRequests("updateEventExecution", eventExecution.getEvent()); + recordRedisDaoPayloadSize( + 
"updateEventExecution", json.length(), eventExecution.getEvent(), "n/a"); + } catch (Exception e) { + throw new RuntimeException( + "Unable to update event execution for " + eventExecution.getId(), e); + } + } + + @Override + public void removeEventExecution(EventExecution eventExecution) { + try { + String key = + nsKey( + EVENT_EXECUTION, + eventExecution.getName(), + eventExecution.getEvent(), + eventExecution.getMessageId()); + LOGGER.info("removing event execution {}", key); + orkesJedisProxy.hdel(key, eventExecution.getId()); + recordRedisDaoEventRequests("removeEventExecution", eventExecution.getEvent()); + } catch (Exception e) { + throw new RuntimeException( + "Unable to remove event execution for " + eventExecution.getId(), e); + } + } + + public List getEventExecutions( + String eventHandlerName, String eventName, String messageId, int max) { + try { + String key = nsKey(EVENT_EXECUTION, eventHandlerName, eventName, messageId); + LOGGER.info("getting event execution {}", key); + List executions = new LinkedList<>(); + for (int i = 0; i < max; i++) { + String field = messageId + "_" + i; + String value = orkesJedisProxy.hget(key, field); + if (value == null) { + break; + } + recordRedisDaoEventRequests("getEventExecution", eventHandlerName); + recordRedisDaoPayloadSize( + "getEventExecution", value.length(), eventHandlerName, "n/a"); + EventExecution eventExecution = objectMapper.readValue(value, EventExecution.class); + executions.add(eventExecution); + } + return executions; + + } catch (Exception e) { + throw new RuntimeException("Unable to get event execution for " + e); + } + } + + private void validate(TaskModel task) { + try { + Preconditions.checkNotNull(task, "task object cannot be null"); + Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); + Preconditions.checkNotNull( + task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); + Preconditions.checkNotNull( + task.getReferenceTaskName(), "Task reference name cannot be null"); + } catch (NullPointerException npe) { + throw new IllegalArgumentException(npe); + } + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisMetadataDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisMetadataDAO.java new file mode 100644 index 0000000..c5f5572 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisMetadataDAO.java @@ -0,0 +1,306 @@ +/* + * Copyright 2022 Orkes, Inc. + *
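The exceedsLimit check above admits a task only when its id falls within the first "limit" entries of a time-scored sorted set, after a coarse count against the in-progress set. Its core admission test, reduced to plain Jedis (Jedis 3.x calls assumed; hypothetical unprefixed key, the real code goes through OrkesJedisProxy and nsKey):

    import java.util.Set;

    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.params.ZAddParams;

    public class ConcurrencyLimitSketch {
        static boolean rateLimited(Jedis jedis, String taskId, int limit) {
            String key = "TASK_LIMIT_BUCKET.my_task";
            double score = System.currentTimeMillis();
            // NX: keep the original score if this task id was already recorded
            jedis.zadd(key, score, taskId, ZAddParams.zAddParams().nx());
            // Only the "limit" oldest entries are admitted
            Set<String> allowed = jedis.zrangeByScore(key, 0, score + 1, 0, limit);
            return !allowed.contains(taskId);
        }

        public static void main(String[] args) {
            try (Jedis jedis = new Jedis("localhost", 6379)) {
                System.out.println(rateLimited(jedis, "task-1", 2)); // false while slots remain
            }
        }
    }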
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.util.*; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.netflix.conductor.common.metadata.Auditable; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.core.exception.ConflictException; +import com.netflix.conductor.core.exception.NotFoundException; +import com.netflix.conductor.dao.MetadataDAO; +import com.netflix.conductor.metrics.Monitors; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; + +import static java.util.Objects.isNull; + +public class RedisMetadataDAO extends BaseDynoDAO implements MetadataDAO { + + private static final Logger LOGGER = LoggerFactory.getLogger(RedisMetadataDAO.class); + + // Keys Families + private static final String ALL_TASK_DEFS = "TASK_DEFS"; + private static final String WORKFLOW_DEF_NAMES = "WORKFLOW_DEF_NAMES"; + private static final String WORKFLOW_DEF = "WORKFLOW_DEF"; + private static final String LATEST = "latest"; + private static final String className = RedisMetadataDAO.class.getSimpleName(); + private Map taskDefCache = new HashMap<>(); + + RedisMetadataDAO( + OrkesJedisProxy orkesJedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties) { + super(orkesJedisProxy, objectMapper, conductorProperties, properties); + refreshTaskDefs(); + long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds(); + if (cacheRefreshTime > 0) { + Executors.newSingleThreadScheduledExecutor() + .scheduleWithFixedDelay( + this::refreshTaskDefs, + cacheRefreshTime, + cacheRefreshTime, + TimeUnit.SECONDS); + } + } + + @Override + public TaskDef createTaskDef(TaskDef taskDef) { + insertOrUpdateTaskDef(taskDef); + return taskDef; + } + + @Override + public TaskDef updateTaskDef(TaskDef taskDef) { + insertOrUpdateTaskDef(taskDef); + return taskDef; + } + + private String insertOrUpdateTaskDef(TaskDef taskDef) { + // Store all task def in under one key + String payload = toJson(taskDef); + orkesJedisProxy.hset(nsKey(ALL_TASK_DEFS), taskDef.getName(), payload); + refreshTaskDefs(); + return taskDef.getName(); + } + + private void refreshTaskDefs() { + try { + Map map = new HashMap<>(); + getAllTaskDefs().forEach(taskDef -> map.put(taskDef.getName(), taskDef)); + this.taskDefCache = map; + LOGGER.debug("Refreshed task defs " + this.taskDefCache.size()); + } catch (Exception e) { + Monitors.error(className, "refreshTaskDefs"); + LOGGER.error("refresh TaskDefs failed ", e); + } + } + + @Override + public TaskDef getTaskDef(String name) { + return Optional.ofNullable(taskDefCache.get(name)).orElseGet(() -> getTaskDefFromDB(name)); + } + + private TaskDef getTaskDefFromDB(String name) { + Preconditions.checkNotNull(name, "TaskDef name cannot be 
null"); + + TaskDef taskDef = null; + String taskDefJsonStr = orkesJedisProxy.hget(nsKey(ALL_TASK_DEFS), name); + if (taskDefJsonStr != null) { + taskDef = readValue(taskDefJsonStr, TaskDef.class); + recordRedisDaoRequests("getTaskDef"); + recordRedisDaoPayloadSize( + "getTaskDef", taskDefJsonStr.length(), taskDef.getName(), "n/a"); + } + return taskDef; + } + + @Override + public List getAllTaskDefs() { + List allTaskDefs = new LinkedList<>(); + + recordRedisDaoRequests("getAllTaskDefs"); + Map taskDefs = orkesJedisProxy.hgetAll(nsKey(ALL_TASK_DEFS)); + int size = 0; + if (taskDefs.size() > 0) { + for (String taskDefJsonStr : taskDefs.values()) { + if (taskDefJsonStr != null) { + allTaskDefs.add(readValue(taskDefJsonStr, TaskDef.class)); + size += taskDefJsonStr.length(); + } + } + recordRedisDaoPayloadSize("getAllTaskDefs", size, "n/a", "n/a"); + } + + return allTaskDefs; + } + + @Override + public void removeTaskDef(String name) { + Preconditions.checkNotNull(name, "TaskDef name cannot be null"); + Long result = orkesJedisProxy.hdel(nsKey(ALL_TASK_DEFS), name); + if (!result.equals(1L)) { + throw new NotFoundException("Cannot remove the task - no such task definition"); + } + recordRedisDaoRequests("removeTaskDef"); + refreshTaskDefs(); + } + + @Override + public void createWorkflowDef(WorkflowDef def) { + if (orkesJedisProxy.hexists( + nsKey(WORKFLOW_DEF, def.getName()), String.valueOf(def.getVersion()))) { + throw new ConflictException("Workflow with " + def.key() + " already exists!"); + } + _createOrUpdate(def); + } + + @Override + public void updateWorkflowDef(WorkflowDef def) { + _createOrUpdate(def); + } + + @Override + /* + * @param name Name of the workflow definition + * @return Latest version of workflow definition + * @see WorkflowDef + */ + public Optional getLatestWorkflowDef(String name) { + Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); + WorkflowDef workflowDef = null; + + Optional optionalMaxVersion = getWorkflowMaxVersion(name); + + if (optionalMaxVersion.isPresent()) { + String latestdata = + orkesJedisProxy.hget( + nsKey(WORKFLOW_DEF, name), optionalMaxVersion.get().toString()); + if (latestdata != null) { + workflowDef = readValue(latestdata, WorkflowDef.class); + } + } + + return Optional.ofNullable(workflowDef); + } + + private Optional getWorkflowMaxVersion(String workflowName) { + return orkesJedisProxy.hkeys(nsKey(WORKFLOW_DEF, workflowName)).stream() + .filter(key -> !key.equals(LATEST)) + .map(Integer::valueOf) + .max(Comparator.naturalOrder()); + } + + public List getAllVersions(String name) { + Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); + List workflows = new LinkedList<>(); + + recordRedisDaoRequests("getAllWorkflowDefsByName"); + Map workflowDefs = orkesJedisProxy.hgetAll(nsKey(WORKFLOW_DEF, name)); + int size = 0; + for (String key : workflowDefs.keySet()) { + if (key.equals(LATEST)) { + continue; + } + String workflowDef = workflowDefs.get(key); + workflows.add(readValue(workflowDef, WorkflowDef.class)); + size += workflowDef.length(); + } + recordRedisDaoPayloadSize("getAllWorkflowDefsByName", size, "n/a", name); + + return workflows; + } + + @Override + public Optional getWorkflowDef(String name, int version) { + Preconditions.checkNotNull(name, "WorkflowDef name cannot be null"); + WorkflowDef def = null; + + recordRedisDaoRequests("getWorkflowDef"); + String workflowDefJsonString = + orkesJedisProxy.hget(nsKey(WORKFLOW_DEF, name), String.valueOf(version)); + if (workflowDefJsonString != null) { + def 
= readValue(workflowDefJsonString, WorkflowDef.class); + recordRedisDaoPayloadSize( + "getWorkflowDef", workflowDefJsonString.length(), "n/a", name); + } + return Optional.ofNullable(def); + } + + @Override + public void removeWorkflowDef(String name, Integer version) { + Preconditions.checkArgument( + StringUtils.isNotBlank(name), "WorkflowDef name cannot be null"); + Preconditions.checkNotNull(version, "Input version cannot be null"); + Long result = orkesJedisProxy.hdel(nsKey(WORKFLOW_DEF, name), String.valueOf(version)); + if (!result.equals(1L)) { + throw new NotFoundException( + String.format( + "Cannot remove the workflow - no such workflow" + + " definition: %s version: %d", + name, version)); + } + + // check if there are any more versions remaining if not delete the + // workflow name + Optional optionMaxVersion = getWorkflowMaxVersion(name); + + // delete workflow name + if (!optionMaxVersion.isPresent()) { + orkesJedisProxy.srem(nsKey(WORKFLOW_DEF_NAMES), name); + } + + recordRedisDaoRequests("removeWorkflowDef"); + } + + public List findAll() { + Set wfNames = orkesJedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES)); + return new ArrayList<>(wfNames); + } + + @Override + public List getAllWorkflowDefs() { + List workflows = new LinkedList<>(); + + // Get all from WORKFLOW_DEF_NAMES + recordRedisDaoRequests("getAllWorkflowDefs"); + Set wfNames = orkesJedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES)); + int size = 0; + for (String wfName : wfNames) { + Map workflowDefs = orkesJedisProxy.hgetAll(nsKey(WORKFLOW_DEF, wfName)); + for (String key : workflowDefs.keySet()) { + if (key.equals(LATEST)) { + continue; + } + String workflowDef = workflowDefs.get(key); + workflows.add(readValue(workflowDef, WorkflowDef.class)); + size += workflowDef.length(); + } + } + recordRedisDaoPayloadSize("getAllWorkflowDefs", size, "n/a", "n/a"); + return workflows; + } + + private void _createOrUpdate(WorkflowDef workflowDef) { + if (isNull(workflowDef.getUpdateTime())) { + workflowDef.setUpdateTime(System.currentTimeMillis()); + } + if (isNull(workflowDef.getCreateTime())) { + workflowDef.setCreateTime( + getWorkflowDef(workflowDef.getName(), workflowDef.getVersion()) + .map(Auditable::getCreateTime) + .orElse(System.currentTimeMillis())); + } + + // First set the workflow def + orkesJedisProxy.hset( + nsKey(WORKFLOW_DEF, workflowDef.getName()), + String.valueOf(workflowDef.getVersion()), + toJson(workflowDef)); + + orkesJedisProxy.sadd(nsKey(WORKFLOW_DEF_NAMES), workflowDef.getName()); + recordRedisDaoRequests("storeWorkflowDef", "n/a", workflowDef.getName()); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisPollDataDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisPollDataDAO.java new file mode 100644 index 0000000..aeb27ac --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisPollDataDAO.java @@ -0,0 +1,126 @@ +/* + * Copyright 2022 Orkes, Inc. + *
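RedisMetadataDAO above stores workflow definitions as one hash per definition name with one field per version, and resolves the latest version by taking the numeric maximum of the field names (getWorkflowMaxVersion). A sketch of that layout with plain Jedis (hypothetical key and payloads, namespace prefix omitted):

    import redis.clients.jedis.Jedis;

    public class WorkflowDefLayoutSketch {
        public static void main(String[] args) {
            try (Jedis jedis = new Jedis("localhost", 6379)) {
                jedis.hset("WORKFLOW_DEF.order_flow", "1", "{\"name\":\"order_flow\",\"version\":1}");
                jedis.hset("WORKFLOW_DEF.order_flow", "2", "{\"name\":\"order_flow\",\"version\":2}");
                // Latest = max numeric field, skipping the legacy "latest" field
                int latest = jedis.hkeys("WORKFLOW_DEF.order_flow").stream()
                        .filter(field -> !"latest".equals(field))
                        .mapToInt(Integer::parseInt)
                        .max()
                        .getAsInt();
                System.out.println(jedis.hget("WORKFLOW_DEF.order_flow", String.valueOf(latest)));
            }
        }
    }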
+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.commons.lang3.StringUtils; +import org.springframework.context.annotation.Conditional; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.common.metadata.tasks.PollData; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.PollDataDAO; +import com.netflix.conductor.redis.config.AnyRedisCondition; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; +import lombok.extern.slf4j.Slf4j; + +@Component +@Conditional(AnyRedisCondition.class) +@Slf4j +public class RedisPollDataDAO extends BaseDynoDAO implements PollDataDAO { + + private static final String POLL_DATA = "POLL_DATA"; + + private final Clock clock; + + private final Map lastUpdateTimes = new ConcurrentHashMap<>(); + + private static final int MAX_UPDATE_FREQUENCY = 10_000; // 10 second + + public RedisPollDataDAO( + OrkesJedisProxy orkesJedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties) { + super(orkesJedisProxy, objectMapper, conductorProperties, properties); + this.clock = Clock.systemDefaultZone(); + log.info("Using OrkesPollDataDAO"); + } + + @Override + public void updateLastPollData(String taskDefName, String domain, String workerId) { + + long now = clock.millis(); + lastUpdateTimes.compute( + taskDefName, + (s, lastUpdateTime) -> { + if (lastUpdateTime == null || lastUpdateTime < (now - MAX_UPDATE_FREQUENCY)) { + _updateLastPollData(taskDefName, domain, workerId); + return now; + } + return lastUpdateTime; + }); + } + + private void _updateLastPollData(String taskDefName, String domain, String workerId) { + Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); + PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis()); + + String key = nsKey(POLL_DATA, pollData.getQueueName()); + String field = (domain == null) ? "DEFAULT" : domain; + + String payload = toJson(pollData); + recordRedisDaoRequests("updatePollData"); + recordRedisDaoPayloadSize("updatePollData", payload.length(), "n/a", "n/a"); + orkesJedisProxy.hset(key, field, payload); + } + + @Override + public PollData getPollData(String taskDefName, String domain) { + Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); + + String key = nsKey(POLL_DATA, taskDefName); + String field = (domain == null) ? 
"DEFAULT" : domain; + + String pollDataJsonString = orkesJedisProxy.hget(key, field); + recordRedisDaoRequests("getPollData"); + recordRedisDaoPayloadSize( + "getPollData", StringUtils.length(pollDataJsonString), "n/a", "n/a"); + + PollData pollData = null; + if (StringUtils.isNotBlank(pollDataJsonString)) { + pollData = readValue(pollDataJsonString, PollData.class); + } + return pollData; + } + + @Override + public List getPollData(String taskDefName) { + Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); + + String key = nsKey(POLL_DATA, taskDefName); + + Map pMapdata = orkesJedisProxy.hgetAll(key); + List pollData = new ArrayList<>(); + if (pMapdata != null) { + pMapdata.values() + .forEach( + pollDataJsonString -> { + pollData.add(readValue(pollDataJsonString, PollData.class)); + recordRedisDaoRequests("getPollData"); + recordRedisDaoPayloadSize( + "getPollData", pollDataJsonString.length(), "n/a", "n/a"); + }); + } + return pollData; + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisRateLimitingDAO.java b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisRateLimitingDAO.java new file mode 100644 index 0000000..9a5cc49 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dao/RedisRateLimitingDAO.java @@ -0,0 +1,145 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.util.Optional; + +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Conditional; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.RateLimitingDAO; +import com.netflix.conductor.metrics.Monitors; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.redis.config.AnyRedisCondition; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; + +@Component +@Conditional(AnyRedisCondition.class) +public class RedisRateLimitingDAO extends BaseDynoDAO implements RateLimitingDAO { + + private static final Logger LOGGER = LoggerFactory.getLogger(RedisRateLimitingDAO.class); + + private static final String TASK_RATE_LIMIT_BUCKET = "TASK_RATE_LIMIT_BUCKET"; + + public RedisRateLimitingDAO( + OrkesJedisProxy jedisProxy, + ObjectMapper objectMapper, + ConductorProperties conductorProperties, + RedisProperties properties) { + super(jedisProxy, objectMapper, conductorProperties, properties); + } + + /** + * This method evaluates if the {@link TaskDef} is rate limited or not based on {@link + * TaskModel#getRateLimitPerFrequency()} and {@link TaskModel#getRateLimitFrequencyInSeconds()} + * if not checks the {@link TaskModel} is rate limited or not based on {@link + * TaskModel#getRateLimitPerFrequency()} and {@link TaskModel#getRateLimitFrequencyInSeconds()} + * + *

The rate limiting is implemented using the Redis constructs of sorted set and TTL of each + * element in the rate limited bucket. + * + *

    + *
  • All the entries that are not in the frequency bucket are cleaned up by
+ * leveraging {@link OrkesJedisProxy#zremrangeByScore(String, String, String)}; this is
+ * done to make the next step of evaluation efficient
+ *
  • A current count(tasks executed within the frequency) is calculated based on the current + * time and the beginning of the rate limit frequency time(which is current time - {@link + * TaskModel#getRateLimitFrequencyInSeconds()} in millis), this is achieved by using + * {@link OrkesJedisProxy#zcount(String, double, double)} + *
  • Once the count is calculated then an evaluation is made to determine if it is within the
+ * bounds of {@link TaskModel#getRateLimitPerFrequency()}; if so the count is increased
+ * and an expiry TTL is added to the entry (see the sketch below)
+ *
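+ *
+ * <p>For orientation, the check implemented below is equivalent to this sketch (a
+ * hypothetical helper, not part of this class; {@code jedis} stands in for the
+ * {@code OrkesJedisProxy} field):
+ *
+ * <pre>{@code
+ * boolean exceedsLimit(String key, int limit, int windowSeconds) {
+ *     long now = System.currentTimeMillis();
+ *     long windowStart = now - windowSeconds * 1000L;
+ *     jedis.zremrangeByScore(key, "-inf", String.valueOf(windowStart)); // evict old samples
+ *     long count = jedis.zcount(key, windowStart, now); // executions still in the window
+ *     if (count < limit) {
+ *         jedis.zadd(key, now, String.valueOf(now)); // record this execution
+ *         jedis.expire(key, windowSeconds);          // bucket disappears once idle
+ *         return false; // within bounds
+ *     }
+ *     return true; // rate limited
+ * }
+ * }</pre>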
+ * + * @param task: which needs to be evaluated whether it is rateLimited or not + * @return true: If the {@link TaskModel} is rateLimited false: If the {@link TaskModel} is not + * rateLimited + */ + @Override + public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) { + // Check if the TaskDefinition is not null then pick the definition values or else pick from + // the Task + ImmutablePair rateLimitPair = + Optional.ofNullable(taskDef) + .map( + definition -> + new ImmutablePair<>( + definition.getRateLimitPerFrequency(), + definition.getRateLimitFrequencyInSeconds())) + .orElse( + new ImmutablePair<>( + task.getRateLimitPerFrequency(), + task.getRateLimitFrequencyInSeconds())); + + int rateLimitPerFrequency = rateLimitPair.getLeft(); + int rateLimitFrequencyInSeconds = rateLimitPair.getRight(); + if (rateLimitPerFrequency <= 0 || rateLimitFrequencyInSeconds <= 0) { + LOGGER.debug( + "Rate limit not applied to the Task: {} either rateLimitPerFrequency: {} or rateLimitFrequencyInSeconds: {} is 0 or less", + task, + rateLimitPerFrequency, + rateLimitFrequencyInSeconds); + return false; + } else { + LOGGER.debug( + "Evaluating rate limiting for TaskId: {} with TaskDefinition of: {} with rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {}", + task.getTaskId(), + task.getTaskDefName(), + rateLimitPerFrequency, + rateLimitFrequencyInSeconds); + long currentTimeEpochMillis = System.currentTimeMillis(); + long currentTimeEpochMinusRateLimitBucket = + currentTimeEpochMillis - (rateLimitFrequencyInSeconds * 1000L); + String key = nsKey(TASK_RATE_LIMIT_BUCKET, task.getTaskDefName()); + orkesJedisProxy.zremrangeByScore( + key, "-inf", String.valueOf(currentTimeEpochMinusRateLimitBucket)); + int currentBucketCount = + Math.toIntExact( + orkesJedisProxy.zcount( + key, + currentTimeEpochMinusRateLimitBucket, + currentTimeEpochMillis)); + if (currentBucketCount < rateLimitPerFrequency) { + orkesJedisProxy.zadd( + key, currentTimeEpochMillis, String.valueOf(currentTimeEpochMillis)); + orkesJedisProxy.expire(key, rateLimitFrequencyInSeconds); + LOGGER.info( + "TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} within the rate limit with current count {}", + task.getTaskId(), + task.getTaskDefName(), + rateLimitPerFrequency, + rateLimitFrequencyInSeconds, + ++currentBucketCount); + Monitors.recordTaskRateLimited(task.getTaskDefName(), rateLimitPerFrequency); + return false; + } else { + LOGGER.info( + "TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} is out of bounds of rate limit with current count {}", + task.getTaskId(), + task.getTaskDefName(), + rateLimitPerFrequency, + rateLimitFrequencyInSeconds, + currentBucketCount); + return true; + } + } + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/ConfigurationHostSupplier.java b/persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/ConfigurationHostSupplier.java new file mode 100644 index 0000000..296212a --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/ConfigurationHostSupplier.java @@ -0,0 +1,84 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dynoqueue; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostBuilder; +import com.netflix.dyno.connectionpool.HostSupplier; + +public class ConfigurationHostSupplier implements HostSupplier { + + private static final Logger log = LoggerFactory.getLogger(ConfigurationHostSupplier.class); + + private final RedisProperties properties; + + public ConfigurationHostSupplier(RedisProperties properties) { + this.properties = properties; + } + + @Override + public List getHosts() { + return parseHostsFromConfig(); + } + + private List parseHostsFromConfig() { + String hosts = properties.getHosts(); + if (hosts == null) { + // FIXME This type of validation probably doesn't belong here. + String message = + "Missing dynomite/redis hosts. Ensure 'workflow.dynomite.cluster.hosts' has been set in the supplied configuration."; + log.error(message); + throw new RuntimeException(message); + } + return parseHostsFrom(hosts); + } + + private List parseHostsFrom(String hostConfig) { + List hostConfigs = Arrays.asList(hostConfig.split(";")); + + return hostConfigs.stream() + .map( + hc -> { + String[] hostConfigValues = hc.split(":"); + String host = hostConfigValues[0]; + int port = Integer.parseInt(hostConfigValues[1]); + String rack = hostConfigValues[2]; + + if (hostConfigValues.length >= 4) { + String password = hostConfigValues[3]; + return new HostBuilder() + .setHostname(host) + .setPort(port) + .setRack(rack) + .setStatus(Host.Status.Up) + .setPassword(password) + .createHost(); + } + return new HostBuilder() + .setHostname(host) + .setPort(port) + .setRack(rack) + .setStatus(Host.Status.Up) + .createHost(); + }) + .collect(Collectors.toList()); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/LocalhostHostSupplier.java b/persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/LocalhostHostSupplier.java new file mode 100644 index 0000000..7834271 --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/LocalhostHostSupplier.java @@ -0,0 +1,42 @@ +/* + * Copyright 2022 Orkes, Inc. + *
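
`parseHostsFrom` in `ConfigurationHostSupplier` above expects a semicolon-separated list of `host:port:rack` entries, each with an optional fourth `:password` token. A self-contained rendition of that parsing rule (sample values are illustrative only):

```java
public class HostConfigDemo {
    public static void main(String[] args) {
        // Two nodes; the second one carries a password as the fourth token.
        String config = "redis-a:6379:us-east-1a;redis-b:6379:us-east-1b:s3cret";
        for (String entry : config.split(";")) {
            String[] v = entry.split(":");
            String password = v.length >= 4 ? v[3] : "<none>";
            System.out.printf("host=%s port=%s rack=%s password=%s%n",
                    v[0], v[1], v[2], password);
        }
    }
}
```
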

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dynoqueue; + +import java.util.Arrays; +import java.util.List; + +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.dyno.connectionpool.Host; +import com.netflix.dyno.connectionpool.HostBuilder; +import com.netflix.dyno.connectionpool.HostSupplier; + +public class LocalhostHostSupplier implements HostSupplier { + + private final RedisProperties properties; + + public LocalhostHostSupplier(RedisProperties properties) { + this.properties = properties; + } + + @Override + public List getHosts() { + Host dynoHost = + new HostBuilder() + .setHostname("localhost") + .setIpAddress("0") + .setRack(properties.getAvailabilityZone()) + .setStatus(Host.Status.Up) + .createHost(); + return Arrays.asList(dynoHost); + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisCluster.java b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisCluster.java new file mode 100644 index 0000000..fc989bf --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisCluster.java @@ -0,0 +1,953 @@ +/* + * Copyright 2020 Orkes, Inc. + *
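
`LocalhostHostSupplier` above always reports a single healthy localhost node, using the configured availability zone as the rack. Built by hand with dyno's `HostBuilder`, the equivalent host list looks like this (the rack value is illustrative, standing in for `properties.getAvailabilityZone()`):

```java
import java.util.List;

import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;

class LocalhostDemo {
    static List<Host> localhostHosts() {
        Host local = new HostBuilder()
                .setHostname("localhost")
                .setIpAddress("0")
                .setRack("us-east-1c") // illustrative; the supplier uses the configured zone
                .setStatus(Host.Status.Up)
                .createHost();
        return List.of(local);
    }
}
```
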

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.AbstractMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.stream.Collectors; + +import redis.clients.jedis.BitPosParams; +import redis.clients.jedis.GeoCoordinate; +import redis.clients.jedis.GeoRadiusResponse; +import redis.clients.jedis.GeoUnit; +import redis.clients.jedis.ListPosition; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.SortingParams; +import redis.clients.jedis.StreamConsumersInfo; +import redis.clients.jedis.StreamEntry; +import redis.clients.jedis.StreamEntryID; +import redis.clients.jedis.StreamGroupInfo; +import redis.clients.jedis.StreamInfo; +import redis.clients.jedis.StreamPendingEntry; +import redis.clients.jedis.Tuple; +import redis.clients.jedis.commands.JedisCommands; +import redis.clients.jedis.params.GeoRadiusParam; +import redis.clients.jedis.params.SetParams; +import redis.clients.jedis.params.ZAddParams; +import redis.clients.jedis.params.ZIncrByParams; + +public class JedisCluster implements JedisCommands { + + private final redis.clients.jedis.JedisCluster jedisCluster; + + public JedisCluster(redis.clients.jedis.JedisCluster jedisCluster) { + this.jedisCluster = jedisCluster; + } + + @Override + public String set(String key, String value) { + return jedisCluster.set(key, value); + } + + @Override + public String set(String key, String value, SetParams params) { + return jedisCluster.set(key, value, params); + } + + @Override + public String get(String key) { + return jedisCluster.get(key); + } + + @Override + public Boolean exists(String key) { + return jedisCluster.exists(key); + } + + @Override + public Long persist(String key) { + return jedisCluster.persist(key); + } + + @Override + public String type(String key) { + return jedisCluster.type(key); + } + + @Override + public byte[] dump(String key) { + return jedisCluster.dump(key); + } + + @Override + public String restore(String key, int ttl, byte[] serializedValue) { + return jedisCluster.restore(key, ttl, serializedValue); + } + + @Override + public String restoreReplace(String key, int ttl, byte[] serializedValue) { + throw new UnsupportedOperationException(); + } + + @Override + public Long expire(String key, int seconds) { + return jedisCluster.expire(key, seconds); + } + + @Override + public Long pexpire(String key, long milliseconds) { + return jedisCluster.pexpire(key, milliseconds); + } + + @Override + public Long expireAt(String key, long unixTime) { + return jedisCluster.expireAt(key, unixTime); + } + + @Override + public Long pexpireAt(String key, long millisecondsTimestamp) { + return jedisCluster.pexpireAt(key, millisecondsTimestamp); + } + + @Override + public Long ttl(String key) { + return jedisCluster.ttl(key); + } + + @Override + public Long pttl(String key) { + return jedisCluster.pttl(key); + } + + @Override + public Long touch(String key) { + return jedisCluster.touch(key); + } + + @Override + public Boolean setbit(String key, long offset, boolean value) { + return jedisCluster.setbit(key, offset, value); + } + + @Override + public Boolean setbit(String key, long 
offset, String value) { + return jedisCluster.setbit(key, offset, value); + } + + @Override + public Boolean getbit(String key, long offset) { + return jedisCluster.getbit(key, offset); + } + + @Override + public Long setrange(String key, long offset, String value) { + return jedisCluster.setrange(key, offset, value); + } + + @Override + public String getrange(String key, long startOffset, long endOffset) { + return jedisCluster.getrange(key, startOffset, endOffset); + } + + @Override + public String getSet(String key, String value) { + return jedisCluster.getSet(key, value); + } + + @Override + public Long setnx(String key, String value) { + return jedisCluster.setnx(key, value); + } + + @Override + public String setex(String key, int seconds, String value) { + return jedisCluster.setex(key, seconds, value); + } + + @Override + public String psetex(String key, long milliseconds, String value) { + return jedisCluster.psetex(key, milliseconds, value); + } + + @Override + public Long decrBy(String key, long integer) { + return jedisCluster.decrBy(key, integer); + } + + @Override + public Long decr(String key) { + return jedisCluster.decr(key); + } + + @Override + public Long incrBy(String key, long integer) { + return jedisCluster.incrBy(key, integer); + } + + @Override + public Double incrByFloat(String key, double value) { + return jedisCluster.incrByFloat(key, value); + } + + @Override + public Long incr(String key) { + return jedisCluster.incr(key); + } + + @Override + public Long append(String key, String value) { + return jedisCluster.append(key, value); + } + + @Override + public String substr(String key, int start, int end) { + return jedisCluster.substr(key, start, end); + } + + @Override + public Long hset(String key, String field, String value) { + return jedisCluster.hset(key, field, value); + } + + @Override + public Long hset(String key, Map hash) { + return jedisCluster.hset(key, hash); + } + + @Override + public String hget(String key, String field) { + return jedisCluster.hget(key, field); + } + + @Override + public Long hsetnx(String key, String field, String value) { + return jedisCluster.hsetnx(key, field, value); + } + + @Override + public String hmset(String key, Map hash) { + return jedisCluster.hmset(key, hash); + } + + @Override + public List hmget(String key, String... fields) { + return jedisCluster.hmget(key, fields); + } + + @Override + public Long hincrBy(String key, String field, long value) { + return jedisCluster.hincrBy(key, field, value); + } + + @Override + public Double hincrByFloat(String key, String field, double value) { + return jedisCluster.hincrByFloat(key.getBytes(), field.getBytes(), value); + } + + @Override + public Boolean hexists(String key, String field) { + return jedisCluster.hexists(key, field); + } + + @Override + public Long hdel(String key, String... field) { + return jedisCluster.hdel(key, field); + } + + @Override + public Long hlen(String key) { + return jedisCluster.hlen(key); + } + + @Override + public Set hkeys(String key) { + return jedisCluster.hkeys(key); + } + + @Override + public List hvals(String key) { + return jedisCluster.hvals(key); + } + + @Override + public Map hgetAll(String key) { + return jedisCluster.hgetAll(key); + } + + @Override + public Long rpush(String key, String... string) { + return jedisCluster.rpush(key, string); + } + + @Override + public Long lpush(String key, String... 
string) { + return jedisCluster.lpush(key, string); + } + + @Override + public Long llen(String key) { + return jedisCluster.llen(key); + } + + @Override + public List lrange(String key, long start, long end) { + return jedisCluster.lrange(key, start, end); + } + + @Override + public String ltrim(String key, long start, long end) { + return jedisCluster.ltrim(key, start, end); + } + + @Override + public String lindex(String key, long index) { + return jedisCluster.lindex(key, index); + } + + @Override + public String lset(String key, long index, String value) { + return jedisCluster.lset(key, index, value); + } + + @Override + public Long lrem(String key, long count, String value) { + return jedisCluster.lrem(key, count, value); + } + + @Override + public String lpop(String key) { + return jedisCluster.lpop(key); + } + + @Override + public String rpop(String key) { + return jedisCluster.rpop(key); + } + + @Override + public Long sadd(String key, String... member) { + return jedisCluster.sadd(key, member); + } + + @Override + public Set smembers(String key) { + return jedisCluster.smembers(key); + } + + @Override + public Long srem(String key, String... member) { + return jedisCluster.srem(key, member); + } + + @Override + public String spop(String key) { + return jedisCluster.spop(key); + } + + @Override + public Set spop(String key, long count) { + return jedisCluster.spop(key, count); + } + + @Override + public Long scard(String key) { + return jedisCluster.scard(key); + } + + @Override + public Boolean sismember(String key, String member) { + return jedisCluster.sismember(key, member); + } + + @Override + public String srandmember(String key) { + return jedisCluster.srandmember(key); + } + + @Override + public List srandmember(String key, int count) { + return jedisCluster.srandmember(key, count); + } + + @Override + public Long strlen(String key) { + return jedisCluster.strlen(key); + } + + @Override + public Long zadd(String key, double score, String member) { + return jedisCluster.zadd(key, score, member); + } + + @Override + public Long zadd(String key, double score, String member, ZAddParams params) { + return jedisCluster.zadd(key, score, member, params); + } + + @Override + public Long zadd(String key, Map scoreMembers) { + return jedisCluster.zadd(key, scoreMembers); + } + + @Override + public Long zadd(String key, Map scoreMembers, ZAddParams params) { + return jedisCluster.zadd(key, scoreMembers, params); + } + + @Override + public Set zrange(String key, long start, long end) { + return jedisCluster.zrange(key, start, end); + } + + @Override + public Long zrem(String key, String... 
member) { + return jedisCluster.zrem(key, member); + } + + @Override + public Double zincrby(String key, double score, String member) { + return jedisCluster.zincrby(key, score, member); + } + + @Override + public Double zincrby(String key, double score, String member, ZIncrByParams params) { + return jedisCluster.zincrby(key, score, member, params); + } + + @Override + public Long zrank(String key, String member) { + return jedisCluster.zrank(key, member); + } + + @Override + public Long zrevrank(String key, String member) { + return jedisCluster.zrevrank(key, member); + } + + @Override + public Set zrevrange(String key, long start, long end) { + return jedisCluster.zrevrange(key, start, end); + } + + @Override + public Set zrangeWithScores(String key, long start, long end) { + return jedisCluster.zrangeWithScores(key, start, end); + } + + @Override + public Set zrevrangeWithScores(String key, long start, long end) { + return jedisCluster.zrevrangeWithScores(key, start, end); + } + + @Override + public Long zcard(String key) { + return jedisCluster.zcard(key); + } + + @Override + public Double zscore(String key, String member) { + return jedisCluster.zscore(key, member); + } + + @Override + public Tuple zpopmax(String key) { + return jedisCluster.zpopmax(key); + } + + @Override + public Set zpopmax(String key, int count) { + return jedisCluster.zpopmax(key, count); + } + + @Override + public Tuple zpopmin(String key) { + return jedisCluster.zpopmin(key); + } + + @Override + public Set zpopmin(String key, int count) { + return jedisCluster.zpopmin(key, count); + } + + @Override + public List sort(String key) { + return jedisCluster.sort(key); + } + + @Override + public List sort(String key, SortingParams sortingParameters) { + return jedisCluster.sort(key, sortingParameters); + } + + @Override + public Long zcount(String key, double min, double max) { + return jedisCluster.zcount(key, min, max); + } + + @Override + public Long zcount(String key, String min, String max) { + return jedisCluster.zcount(key, min, max); + } + + @Override + public Set zrangeByScore(String key, double min, double max) { + return jedisCluster.zrangeByScore(key, min, max); + } + + @Override + public Set zrangeByScore(String key, String min, String max) { + return jedisCluster.zrangeByScore(key, min, max); + } + + @Override + public Set zrevrangeByScore(String key, double max, double min) { + return jedisCluster.zrevrangeByScore(key, max, min); + } + + @Override + public Set zrangeByScore(String key, double min, double max, int offset, int count) { + return jedisCluster.zrangeByScore(key, min, max, offset, count); + } + + @Override + public Set zrevrangeByScore(String key, String max, String min) { + return jedisCluster.zrevrangeByScore(key, max, min); + } + + @Override + public Set zrangeByScore(String key, String min, String max, int offset, int count) { + return jedisCluster.zrangeByScore(key, min, max, offset, count); + } + + @Override + public Set zrevrangeByScore(String key, double max, double min, int offset, int count) { + return jedisCluster.zrevrangeByScore(key, max, min, offset, count); + } + + @Override + public Set zrangeByScoreWithScores(String key, double min, double max) { + return jedisCluster.zrangeByScoreWithScores(key, min, max); + } + + @Override + public Set zrevrangeByScoreWithScores(String key, double max, double min) { + return jedisCluster.zrevrangeByScoreWithScores(key, max, min); + } + + @Override + public Set zrangeByScoreWithScores( + String key, double min, double max, int offset, int 
count) { + return jedisCluster.zrangeByScoreWithScores(key, min, max, offset, count); + } + + @Override + public Set zrevrangeByScore(String key, String max, String min, int offset, int count) { + return jedisCluster.zrevrangeByScore(key, max, min, offset, count); + } + + @Override + public Set zrangeByScoreWithScores(String key, String min, String max) { + return jedisCluster.zrangeByScoreWithScores(key, min, max); + } + + @Override + public Set zrevrangeByScoreWithScores(String key, String max, String min) { + return jedisCluster.zrevrangeByScoreWithScores(key, max, min); + } + + @Override + public Set zrangeByScoreWithScores( + String key, String min, String max, int offset, int count) { + return jedisCluster.zrangeByScoreWithScores(key, min, max, offset, count); + } + + @Override + public Set zrevrangeByScoreWithScores( + String key, double max, double min, int offset, int count) { + return jedisCluster.zrevrangeByScoreWithScores(key, max, min, offset, count); + } + + @Override + public Set zrevrangeByScoreWithScores( + String key, String max, String min, int offset, int count) { + return jedisCluster.zrevrangeByScoreWithScores(key, max, min, offset, count); + } + + @Override + public Long zremrangeByRank(String key, long start, long end) { + return jedisCluster.zremrangeByRank(key, start, end); + } + + @Override + public Long zremrangeByScore(String key, double start, double end) { + return jedisCluster.zremrangeByScore(key, start, end); + } + + @Override + public Long zremrangeByScore(String key, String start, String end) { + return jedisCluster.zremrangeByScore(key, start, end); + } + + @Override + public Long zlexcount(String key, String min, String max) { + return jedisCluster.zlexcount(key, min, max); + } + + @Override + public Set zrangeByLex(String key, String min, String max) { + return jedisCluster.zrangeByLex(key, min, max); + } + + @Override + public Set zrangeByLex(String key, String min, String max, int offset, int count) { + return jedisCluster.zrangeByLex(key, min, max, offset, count); + } + + @Override + public Set zrevrangeByLex(String key, String max, String min) { + return jedisCluster.zrevrangeByLex(key, max, min); + } + + @Override + public Set zrevrangeByLex(String key, String max, String min, int offset, int count) { + return jedisCluster.zrevrangeByLex(key, max, min, offset, count); + } + + @Override + public Long zremrangeByLex(String key, String min, String max) { + return jedisCluster.zremrangeByLex(key, min, max); + } + + @Override + public Long linsert(String key, ListPosition where, String pivot, String value) { + return jedisCluster.linsert(key, where, pivot, value); + } + + @Override + public Long lpushx(String key, String... string) { + return jedisCluster.lpushx(key, string); + } + + @Override + public Long rpushx(String key, String... 
string) { + return jedisCluster.rpushx(key, string); + } + + @Override + public List blpop(int timeout, String key) { + return jedisCluster.blpop(timeout, key); + } + + @Override + public List brpop(int timeout, String key) { + return jedisCluster.brpop(timeout, key); + } + + @Override + public Long del(String key) { + return jedisCluster.del(key); + } + + @Override + public Long unlink(String key) { + return jedisCluster.unlink(key); + } + + @Override + public String echo(String string) { + return jedisCluster.echo(string); + } + + @Override + public Long move(String key, int dbIndex) { + throw new UnsupportedOperationException(); + } + + @Override + public Long bitcount(String key) { + return jedisCluster.bitcount(key); + } + + @Override + public Long bitcount(String key, long start, long end) { + return jedisCluster.bitcount(key, start, end); + } + + @Override + public Long bitpos(String key, boolean value) { + throw new UnsupportedOperationException(); + } + + @Override + public Long bitpos(String key, boolean value, BitPosParams params) { + throw new UnsupportedOperationException(); + } + + @Override + public ScanResult> hscan(String key, String cursor) { + return jedisCluster.hscan(key, cursor); + } + + @Override + public ScanResult> hscan( + String key, String cursor, ScanParams params) { + ScanResult> scanResult = + jedisCluster.hscan(key.getBytes(), cursor.getBytes(), params); + List> results = + scanResult.getResult().stream() + .map( + entry -> + new AbstractMap.SimpleEntry<>( + new String(entry.getKey()), + new String(entry.getValue()))) + .collect(Collectors.toList()); + return new ScanResult<>(scanResult.getCursorAsBytes(), results); + } + + @Override + public ScanResult sscan(String key, String cursor) { + return jedisCluster.sscan(key, cursor); + } + + @Override + public ScanResult sscan(String key, String cursor, ScanParams params) { + ScanResult scanResult = + jedisCluster.sscan(key.getBytes(), cursor.getBytes(), params); + List results = + scanResult.getResult().stream().map(String::new).collect(Collectors.toList()); + return new ScanResult<>(scanResult.getCursorAsBytes(), results); + } + + @Override + public ScanResult zscan(String key, String cursor) { + return jedisCluster.zscan(key, cursor); + } + + @Override + public ScanResult zscan(String key, String cursor, ScanParams params) { + return jedisCluster.zscan(key.getBytes(), cursor.getBytes(), params); + } + + @Override + public Long pfadd(String key, String... elements) { + return jedisCluster.pfadd(key, elements); + } + + @Override + public long pfcount(String key) { + return jedisCluster.pfcount(key); + } + + @Override + public Long geoadd(String key, double longitude, double latitude, String member) { + return jedisCluster.geoadd(key, longitude, latitude, member); + } + + @Override + public Long geoadd(String key, Map memberCoordinateMap) { + return jedisCluster.geoadd(key, memberCoordinateMap); + } + + @Override + public Double geodist(String key, String member1, String member2) { + return jedisCluster.geodist(key, member1, member2); + } + + @Override + public Double geodist(String key, String member1, String member2, GeoUnit unit) { + return jedisCluster.geodist(key, member1, member2, unit); + } + + @Override + public List geohash(String key, String... members) { + return jedisCluster.geohash(key, members); + } + + @Override + public List geopos(String key, String... 
members) { + return jedisCluster.geopos(key, members); + } + + @Override + public List georadius( + String key, double longitude, double latitude, double radius, GeoUnit unit) { + return jedisCluster.georadius(key, longitude, latitude, radius, unit); + } + + @Override + public List georadiusReadonly( + String key, double longitude, double latitude, double radius, GeoUnit unit) { + return jedisCluster.georadiusReadonly(key, longitude, latitude, radius, unit); + } + + @Override + public List georadius( + String key, + double longitude, + double latitude, + double radius, + GeoUnit unit, + GeoRadiusParam param) { + return jedisCluster.georadius(key, longitude, latitude, radius, unit, param); + } + + @Override + public List georadiusReadonly( + String key, + double longitude, + double latitude, + double radius, + GeoUnit unit, + GeoRadiusParam param) { + return jedisCluster.georadiusReadonly(key, longitude, latitude, radius, unit, param); + } + + @Override + public List georadiusByMember( + String key, String member, double radius, GeoUnit unit) { + return jedisCluster.georadiusByMember(key, member, radius, unit); + } + + @Override + public List georadiusByMemberReadonly( + String key, String member, double radius, GeoUnit unit) { + return jedisCluster.georadiusByMemberReadonly(key, member, radius, unit); + } + + @Override + public List georadiusByMember( + String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) { + return jedisCluster.georadiusByMember(key, member, radius, unit, param); + } + + @Override + public List georadiusByMemberReadonly( + String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) { + return jedisCluster.georadiusByMemberReadonly(key, member, radius, unit, param); + } + + @Override + public List bitfield(String key, String... arguments) { + return jedisCluster.bitfield(key, arguments); + } + + @Override + public List bitfieldReadonly(String key, String... arguments) { + return jedisCluster.bitfieldReadonly(key, arguments); + } + + @Override + public Long hstrlen(String key, String field) { + return jedisCluster.hstrlen(key, field); + } + + @Override + public StreamEntryID xadd(String key, StreamEntryID id, Map hash) { + return jedisCluster.xadd(key, id, hash); + } + + @Override + public StreamEntryID xadd( + String key, + StreamEntryID id, + Map hash, + long maxLen, + boolean approximateLength) { + return jedisCluster.xadd(key, id, hash, maxLen, approximateLength); + } + + @Override + public Long xlen(String key) { + return jedisCluster.xlen(key); + } + + @Override + public List xrange(String key, StreamEntryID start, StreamEntryID end, int count) { + return jedisCluster.xrange(key, start, end, count); + } + + @Override + public List xrevrange( + String key, StreamEntryID end, StreamEntryID start, int count) { + return jedisCluster.xrevrange(key, end, start, count); + } + + @Override + public long xack(String key, String group, StreamEntryID... 
ids) { + return jedisCluster.xack(key, group, ids); + } + + @Override + public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) { + return jedisCluster.xgroupCreate(key, groupname, id, makeStream); + } + + @Override + public String xgroupSetID(String key, String groupname, StreamEntryID id) { + return jedisCluster.xgroupSetID(key, groupname, id); + } + + @Override + public long xgroupDestroy(String key, String groupname) { + return jedisCluster.xgroupDestroy(key, groupname); + } + + @Override + public Long xgroupDelConsumer(String key, String groupname, String consumername) { + return jedisCluster.xgroupDelConsumer(key, groupname, consumername); + } + + @Override + public List xpending( + String key, + String groupname, + StreamEntryID start, + StreamEntryID end, + int count, + String consumername) { + return jedisCluster.xpending(key, groupname, start, end, count, consumername); + } + + @Override + public long xdel(String key, StreamEntryID... ids) { + return jedisCluster.xdel(key, ids); + } + + @Override + public long xtrim(String key, long maxLen, boolean approximate) { + return jedisCluster.xtrim(key, maxLen, approximate); + } + + @Override + public List xclaim( + String key, + String group, + String consumername, + long minIdleTime, + long newIdleTime, + int retries, + boolean force, + StreamEntryID... ids) { + return jedisCluster.xclaim( + key, group, consumername, minIdleTime, newIdleTime, retries, force, ids); + } + + @Override + public StreamInfo xinfoStream(String key) { + return null; + } + + @Override + public List xinfoGroup(String key) { + return null; + } + + @Override + public List xinfoConsumers(String key, String group) { + return null; + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisMock.java b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisMock.java new file mode 100644 index 0000000..73f534c --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisMock.java @@ -0,0 +1,1204 @@ +/* + * Copyright 2020 Orkes, Inc. + *
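
The `JedisCluster` class above is a plain adapter: it exposes Jedis's cluster client through the single-node `JedisCommands` interface so the rest of the persistence layer can stay agnostic of the deployment topology. A few operations a cluster cannot support (`move`, `bitpos`, `restoreReplace`) throw `UnsupportedOperationException`, and the `xinfo*` introspection calls return null. Wiring it up might look like this (the node address is an assumption):

```java
import java.util.Set;

import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.commands.JedisCommands;

// Assumes the wrapper class com.netflix.conductor.redis.jedis.JedisCluster is on the classpath.
class ClusterWiringDemo {
    public static void main(String[] args) {
        Set<HostAndPort> nodes = Set.of(new HostAndPort("127.0.0.1", 7000));
        JedisCommands commands =
                new JedisCluster(new redis.clients.jedis.JedisCluster(nodes));
        commands.set("greeting", "hello");
        System.out.println(commands.get("greeting"));
    }
}
```
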

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.*; +import java.util.Map.Entry; + +import org.rarefiedredis.redis.IRedisClient; +import org.rarefiedredis.redis.IRedisSortedSet.ZsetPair; +import org.rarefiedredis.redis.RedisMock; + +import redis.clients.jedis.Jedis; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.Tuple; +import redis.clients.jedis.exceptions.JedisException; +import redis.clients.jedis.params.ZAddParams; + +public class JedisMock extends Jedis { + + private final IRedisClient redis; + + public JedisMock() { + super(""); + this.redis = new RedisMock(); + } + + private Set toTupleSet(Set pairs) { + Set set = new HashSet<>(); + for (ZsetPair pair : pairs) { + set.add(new Tuple(pair.member, pair.score)); + } + return set; + } + + @Override + public String set(final String key, String value) { + try { + return redis.set(key, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + public byte[] scriptLoad(final byte[] script) { + return script; + } + + public String set(byte[] key, byte[] value) { + try { + String encodedKey = new String(Base64.getEncoder().encode(key)); + String encodedValue = new String(Base64.getEncoder().encode(value)); + return redis.set(encodedKey, encodedValue); + } catch (Exception e) { + throw new JedisException(e); + } + } + + public String scriptLoad(final String script) { + return script; + } + + public byte[] get(byte[] key) { + try { + String encodedKey = new String(Base64.getEncoder().encode(key)); + String encodedValue = redis.get(encodedKey); + if (encodedValue == null) { + return null; + } + return Base64.getDecoder().decode(encodedValue); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String get(final String key) { + try { + return redis.get(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Boolean exists(final String key) { + try { + return redis.exists(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long del(final String... keys) { + try { + return redis.del(keys); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long del(String key) { + try { + return redis.del(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String type(final String key) { + try { + return redis.type(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long expire(final String key, final int seconds) { + try { + return redis.expire(key, seconds) ? 1L : 0L; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long expireAt(final String key, final long unixTime) { + try { + return redis.expireat(key, unixTime) ? 
1L : 0L; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long ttl(final String key) { + try { + return redis.ttl(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long move(final String key, final int dbIndex) { + try { + return redis.move(key, dbIndex); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String getSet(final String key, final String value) { + try { + return redis.getset(key, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public List mget(final String... keys) { + try { + String[] mget = redis.mget(keys); + List lst = new ArrayList<>(mget.length); + for (String get : mget) { + lst.add(get); + } + return lst; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long setnx(final String key, final String value) { + try { + return redis.setnx(key, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String setex(final String key, final int seconds, final String value) { + try { + return redis.setex(key, seconds, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String mset(final String... keysvalues) { + try { + return redis.mset(keysvalues); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long msetnx(final String... keysvalues) { + try { + return redis.msetnx(keysvalues) ? 1L : 0L; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long decrBy(final String key, final long integer) { + try { + return redis.decrby(key, integer); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long decr(final String key) { + try { + return redis.decr(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long incrBy(final String key, final long integer) { + try { + return redis.incrby(key, integer); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Double incrByFloat(final String key, final double value) { + try { + return Double.parseDouble(redis.incrbyfloat(key, value)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long incr(final String key) { + try { + return redis.incr(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long append(final String key, final String value) { + try { + return redis.append(key, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String substr(final String key, final int start, final int end) { + try { + return redis.getrange(key, start, end); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long hset(final String key, final String field, final String value) { + try { + return redis.hset(key, field, value) ? 1L : 0L; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String hget(final String key, final String field) { + try { + return redis.hget(key, field); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long hsetnx(final String key, final String field, final String value) { + try { + return redis.hsetnx(key, field, value) ? 
1L : 0L; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String hmset(final String key, final Map hash) { + try { + String field = null, value = null; + String[] args = new String[(hash.size() - 1) * 2]; + int idx = 0; + for (String f : hash.keySet()) { + if (field == null) { + field = f; + value = hash.get(f); + continue; + } + args[idx] = f; + args[idx + 1] = hash.get(f); + idx += 2; + } + return redis.hmset(key, field, value, args); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public List hmget(final String key, final String... fields) { + try { + String field = fields[0]; + String[] f = new String[fields.length - 1]; + for (int idx = 1; idx < fields.length; ++idx) { + f[idx - 1] = fields[idx]; + } + return redis.hmget(key, field, f); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long hincrBy(final String key, final String field, final long value) { + try { + return redis.hincrby(key, field, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Double hincrByFloat(final String key, final String field, final double value) { + try { + return Double.parseDouble(redis.hincrbyfloat(key, field, value)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Boolean hexists(final String key, final String field) { + try { + return redis.hexists(key, field); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long hdel(final String key, final String... fields) { + try { + String field = fields[0]; + String[] f = new String[fields.length - 1]; + for (int idx = 1; idx < fields.length; ++idx) { + f[idx - 1] = fields[idx]; + } + return redis.hdel(key, field, f); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long hlen(final String key) { + try { + return redis.hlen(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set hkeys(final String key) { + try { + return redis.hkeys(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public List hvals(final String key) { + try { + return redis.hvals(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Map hgetAll(final String key) { + try { + return redis.hgetall(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long rpush(final String key, final String... strings) { + try { + String element = strings[0]; + String[] elements = new String[strings.length - 1]; + for (int idx = 1; idx < strings.length; ++idx) { + elements[idx - 1] = strings[idx]; + } + return redis.rpush(key, element, elements); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long lpush(final String key, final String... 
strings) { + try { + String element = strings[0]; + String[] elements = new String[strings.length - 1]; + for (int idx = 1; idx < strings.length; ++idx) { + elements[idx - 1] = strings[idx]; + } + return redis.lpush(key, element, elements); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long llen(final String key) { + try { + return redis.llen(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public List lrange(final String key, final long start, final long end) { + try { + return redis.lrange(key, start, end); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String ltrim(final String key, final long start, final long end) { + try { + return redis.ltrim(key, start, end); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String lindex(final String key, final long index) { + try { + return redis.lindex(key, index); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String lset(final String key, final long index, final String value) { + try { + return redis.lset(key, index, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long lrem(final String key, final long count, final String value) { + try { + return redis.lrem(key, count, value); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String lpop(final String key) { + try { + return redis.lpop(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String rpop(final String key) { + try { + return redis.rpop(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String rpoplpush(final String srckey, final String dstkey) { + try { + return redis.rpoplpush(srckey, dstkey); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long sadd(final String key, final String... members) { + try { + String member = members[0]; + String[] m = new String[members.length - 1]; + for (int idx = 1; idx < members.length; ++idx) { + m[idx - 1] = members[idx]; + } + return redis.sadd(key, member, m); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set smembers(final String key) { + try { + return redis.smembers(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long srem(final String key, final String... members) { + try { + String member = members[0]; + String[] m = new String[members.length - 1]; + for (int idx = 1; idx < members.length; ++idx) { + m[idx - 1] = members[idx]; + } + return redis.srem(key, member, m); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String spop(final String key) { + try { + return redis.spop(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long smove(final String srckey, final String dstkey, final String member) { + try { + return redis.smove(srckey, dstkey, member) ? 
1L : 0L;
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long scard(final String key) {
+        try {
+            return redis.scard(key);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Boolean sismember(final String key, final String member) {
+        try {
+            return redis.sismember(key, member);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Set<String> sinter(final String... keys) {
+        try {
+            String key = keys[0];
+            String[] k = new String[keys.length - 1];
+            // Start at 1: keys[0] is passed separately as the first argument below.
+            for (int idx = 1; idx < keys.length; ++idx) {
+                k[idx - 1] = keys[idx];
+            }
+            return redis.sinter(key, k);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long sinterstore(final String dstkey, final String... keys) {
+        try {
+            String key = keys[0];
+            String[] k = new String[keys.length - 1];
+            for (int idx = 1; idx < keys.length; ++idx) {
+                k[idx - 1] = keys[idx];
+            }
+            return redis.sinterstore(dstkey, key, k);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Set<String> sunion(final String... keys) {
+        try {
+            String key = keys[0];
+            String[] k = new String[keys.length - 1];
+            for (int idx = 1; idx < keys.length; ++idx) {
+                k[idx - 1] = keys[idx];
+            }
+            return redis.sunion(key, k);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long sunionstore(final String dstkey, final String... keys) {
+        try {
+            String key = keys[0];
+            String[] k = new String[keys.length - 1];
+            for (int idx = 1; idx < keys.length; ++idx) {
+                k[idx - 1] = keys[idx];
+            }
+            return redis.sunionstore(dstkey, key, k);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Set<String> sdiff(final String... keys) {
+        try {
+            String key = keys[0];
+            String[] k = new String[keys.length - 1];
+            for (int idx = 1; idx < keys.length; ++idx) {
+                k[idx - 1] = keys[idx];
+            }
+            return redis.sdiff(key, k);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long sdiffstore(final String dstkey, final String... keys) {
+        try {
+            String key = keys[0];
+            String[] k = new String[keys.length - 1];
+            for (int idx = 1; idx < keys.length; ++idx) {
+                k[idx - 1] = keys[idx];
+            }
+            return redis.sdiffstore(dstkey, key, k);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public String srandmember(final String key) {
+        try {
+            return redis.srandmember(key);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public List<String> srandmember(final String key, final int count) {
+        try {
+            return redis.srandmember(key, count);
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long zadd(final String key, final double score, final String member) {
+        try {
+            return redis.zadd(key, new ZsetPair(member, score));
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long zadd(String key, double score, String member, ZAddParams params) {
+        try {
+            if (params.getParam("xx") != null) {
+                // XX: only update an existing member; never add a new one.
+                Double existing = redis.zscore(key, member);
+                if (existing == null) {
+                    return 0L;
+                }
+                return redis.zadd(key, new ZsetPair(member, score));
+            } else {
+                return redis.zadd(key, new ZsetPair(member, score));
+            }
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long zadd(final String key, final Map<String, Double> scoreMembers) {
+        try {
+            Double score = null;
+            String member = null;
+            List<ZsetPair> scoresmembers = new ArrayList<>(scoreMembers.size() - 1);
+            for (String m : scoreMembers.keySet()) {
+                // The first entry is passed separately to redis.zadd below.
+                if (member == null) {
+                    member = m;
+                    score = scoreMembers.get(m);
+                    continue;
+                }
+                scoresmembers.add(new ZsetPair(m, scoreMembers.get(m)));
+            }
+            return redis.zadd(
+                    key, new ZsetPair(member, score), scoresmembers.toArray(new ZsetPair[0]));
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Set<String> zrange(final String key, final long start, final long end) {
+        try {
+            return ZsetPair.members(redis.zrange(key, start, end));
+        } catch (Exception e) {
+            throw new JedisException(e);
+        }
+    }
+
+    @Override
+    public Long zrem(final String key, final String...
members) { + try { + String member = members[0]; + String[] ms = new String[members.length - 1]; + for (int idx = 1; idx < members.length; ++idx) { + ms[idx - 1] = members[idx]; + } + return redis.zrem(key, member, ms); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Double zincrby(final String key, final double score, final String member) { + try { + return Double.parseDouble(redis.zincrby(key, score, member)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zrank(final String key, final String member) { + try { + return redis.zrank(key, member); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zrevrank(final String key, final String member) { + try { + return redis.zrevrank(key, member); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrange(final String key, final long start, final long end) { + try { + return ZsetPair.members(redis.zrevrange(key, start, end)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeWithScores(final String key, final long start, final long end) { + try { + return toTupleSet(redis.zrange(key, start, end, "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeWithScores(final String key, final long start, final long end) { + try { + return toTupleSet(redis.zrevrange(key, start, end, "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zcard(final String key) { + try { + return redis.zcard(key); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Double zscore(final String key, final String member) { + try { + return redis.zscore(key, member); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public String watch(final String... 
keys) { + try { + for (String key : keys) { + redis.watch(key); + } + return "OK"; + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zcount(final String key, final double min, final double max) { + try { + return redis.zcount(key, min, max); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zcount(final String key, final String min, final String max) { + try { + return redis.zcount(key, Double.parseDouble(min), Double.parseDouble(max)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScore(final String key, final double min, final double max) { + try { + return ZsetPair.members( + redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max))); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScore(final String key, final String min, final String max) { + try { + return ZsetPair.members(redis.zrangebyscore(key, min, max)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScore( + final String key, + final double min, + final double max, + final int offset, + final int count) { + try { + return ZsetPair.members( + redis.zrangebyscore( + key, + String.valueOf(min), + String.valueOf(max), + "limit", + String.valueOf(offset), + String.valueOf(count))); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScore( + final String key, + final String min, + final String max, + final int offset, + final int count) { + try { + return ZsetPair.members( + redis.zrangebyscore( + key, min, max, "limit", String.valueOf(offset), String.valueOf(count))); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScoreWithScores( + final String key, final double min, final double max) { + try { + return toTupleSet( + redis.zrangebyscore( + key, String.valueOf(min), String.valueOf(max), "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScoreWithScores( + final String key, final String min, final String max) { + try { + return toTupleSet(redis.zrangebyscore(key, min, max, "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScoreWithScores( + final String key, + final double min, + final double max, + final int offset, + final int count) { + try { + return toTupleSet( + redis.zrangebyscore( + key, + String.valueOf(min), + String.valueOf(max), + "limit", + String.valueOf(offset), + String.valueOf(count), + "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrangeByScoreWithScores( + final String key, + final String min, + final String max, + final int offset, + final int count) { + try { + return toTupleSet( + redis.zrangebyscore( + key, + min, + max, + "limit", + String.valueOf(offset), + String.valueOf(count), + "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScore(final String key, final double max, final double min) { + try { + return ZsetPair.members( + redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min))); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScore(final String key, final String max, final String min) { + try { + return 
ZsetPair.members(redis.zrevrangebyscore(key, max, min)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScore( + final String key, + final double max, + final double min, + final int offset, + final int count) { + try { + return ZsetPair.members( + redis.zrevrangebyscore( + key, + String.valueOf(max), + String.valueOf(min), + "limit", + String.valueOf(offset), + String.valueOf(count))); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScoreWithScores( + final String key, final double max, final double min) { + try { + return toTupleSet( + redis.zrevrangebyscore( + key, String.valueOf(max), String.valueOf(min), "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScoreWithScores( + final String key, + final double max, + final double min, + final int offset, + final int count) { + try { + return toTupleSet( + redis.zrevrangebyscore( + key, + String.valueOf(max), + String.valueOf(min), + "limit", + String.valueOf(offset), + String.valueOf(count), + "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScoreWithScores( + final String key, + final String max, + final String min, + final int offset, + final int count) { + try { + return toTupleSet( + redis.zrevrangebyscore( + key, + max, + min, + "limit", + String.valueOf(offset), + String.valueOf(count), + "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScore( + final String key, + final String max, + final String min, + final int offset, + final int count) { + try { + return ZsetPair.members( + redis.zrevrangebyscore( + key, max, min, "limit", String.valueOf(offset), String.valueOf(count))); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Set zrevrangeByScoreWithScores( + final String key, final String max, final String min) { + try { + return toTupleSet(redis.zrevrangebyscore(key, max, min, "withscores")); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zremrangeByRank(final String key, final long start, final long end) { + try { + return redis.zremrangebyrank(key, start, end); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zremrangeByScore(final String key, final double start, final double end) { + try { + return redis.zremrangebyscore(key, String.valueOf(start), String.valueOf(end)); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zremrangeByScore(final String key, final String start, final String end) { + try { + return redis.zremrangebyscore(key, start, end); + } catch (Exception e) { + throw new JedisException(e); + } + } + + @Override + public Long zunionstore(final String dstkey, final String... 
sets) {
+ try {
+ return redis.zunionstore(dstkey, sets.length, sets);
+ } catch (Exception e) {
+ throw new JedisException(e);
+ }
+ }
+
+ @Override
+ public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
+ try {
+ org.rarefiedredis.redis.ScanResult<Set<String>> sr =
+ redis.sscan(key, Long.parseLong(cursor), "count", "1000000");
+ List<String> list = new ArrayList<>(sr.results);
+ return new ScanResult<>("0", list);
+ } catch (Exception e) {
+ throw new JedisException(e);
+ }
+ }
+
+ public ScanResult<Map.Entry<String, String>> hscan(final String key, final String cursor) {
+ try {
+ org.rarefiedredis.redis.ScanResult<Map<String, String>> mockr =
+ redis.hscan(key, Long.parseLong(cursor), "count", "1000000");
+ Map<String, String> results = mockr.results;
+ List<Map.Entry<String, String>> list = new ArrayList<>(results.entrySet());
+ return new ScanResult<>("0", list);
+ } catch (Exception e) {
+ throw new JedisException(e);
+ }
+ }
+
+ public ScanResult<Tuple> zscan(final String key, final String cursor) {
+ try {
+ org.rarefiedredis.redis.ScanResult<Set<ZsetPair>> sr =
+ redis.zscan(key, Long.parseLong(cursor), "count", "1000000");
+ List<ZsetPair> list = new ArrayList<>(sr.results);
+ List<Tuple> tl = new LinkedList<>();
+ list.forEach(p -> tl.add(new Tuple(p.member, p.score)));
+ return new ScanResult<>("0", tl);
+ } catch (Exception e) {
+ throw new JedisException(e);
+ }
+ }
+}
diff --git a/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisSentinel.java b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisSentinel.java
new file mode 100644
index 0000000..7e305b0
--- /dev/null
+++ b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisSentinel.java
@@ -0,0 +1,1276 @@
+/*
+ * Copyright 2020 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import redis.clients.jedis.BitPosParams; +import redis.clients.jedis.GeoCoordinate; +import redis.clients.jedis.GeoRadiusResponse; +import redis.clients.jedis.GeoUnit; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPoolAbstract; +import redis.clients.jedis.ListPosition; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.SortingParams; +import redis.clients.jedis.StreamConsumersInfo; +import redis.clients.jedis.StreamEntry; +import redis.clients.jedis.StreamEntryID; +import redis.clients.jedis.StreamGroupInfo; +import redis.clients.jedis.StreamInfo; +import redis.clients.jedis.StreamPendingEntry; +import redis.clients.jedis.Tuple; +import redis.clients.jedis.commands.JedisCommands; +import redis.clients.jedis.params.GeoRadiusParam; +import redis.clients.jedis.params.SetParams; +import redis.clients.jedis.params.ZAddParams; +import redis.clients.jedis.params.ZIncrByParams; + +public class JedisSentinel implements JedisCommands { + + private final JedisPoolAbstract jedisPool; + + public JedisSentinel(JedisPoolAbstract jedisPool) { + this.jedisPool = jedisPool; + } + + @Override + public String set(String key, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.set(key, value); + } + } + + @Override + public String set(String key, String value, SetParams params) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.set(key, value, params); + } + } + + @Override + public String get(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.get(key); + } + } + + @Override + public Boolean exists(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.exists(key); + } + } + + @Override + public Long persist(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.persist(key); + } + } + + @Override + public String type(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.type(key); + } + } + + @Override + public byte[] dump(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.dump(key); + } + } + + @Override + public String restore(String key, int ttl, byte[] serializedValue) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.restore(key, ttl, serializedValue); + } + } + + @Override + public String restoreReplace(String key, int ttl, byte[] serializedValue) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.restoreReplace(key, ttl, serializedValue); + } + } + + @Override + public Long expire(String key, int seconds) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.expire(key, seconds); + } + } + + @Override + public Long pexpire(String key, long milliseconds) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.pexpire(key, milliseconds); + } + } + + @Override + public Long expireAt(String key, long unixTime) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.expireAt(key, unixTime); + } + } + + @Override + public Long pexpireAt(String key, long 
millisecondsTimestamp) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.pexpireAt(key, millisecondsTimestamp); + } + } + + @Override + public Long ttl(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.ttl(key); + } + } + + @Override + public Long pttl(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.pttl(key); + } + } + + @Override + public Long touch(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.touch(key); + } + } + + @Override + public Boolean setbit(String key, long offset, boolean value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.setbit(key, offset, value); + } + } + + @Override + public Boolean setbit(String key, long offset, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.setbit(key, offset, value); + } + } + + @Override + public Boolean getbit(String key, long offset) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.getbit(key, offset); + } + } + + @Override + public Long setrange(String key, long offset, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.setrange(key, offset, value); + } + } + + @Override + public String getrange(String key, long startOffset, long endOffset) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.getrange(key, startOffset, endOffset); + } + } + + @Override + public String getSet(String key, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.getSet(key, value); + } + } + + @Override + public Long setnx(String key, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.setnx(key, value); + } + } + + @Override + public String setex(String key, int seconds, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.setex(key, seconds, value); + } + } + + @Override + public String psetex(String key, long milliseconds, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.psetex(key, milliseconds, value); + } + } + + @Override + public Long decrBy(String key, long integer) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.decrBy(key, integer); + } + } + + @Override + public Long decr(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.decr(key); + } + } + + @Override + public Long incrBy(String key, long integer) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.incrBy(key, integer); + } + } + + @Override + public Double incrByFloat(String key, double value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.incrByFloat(key, value); + } + } + + @Override + public Long incr(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.incr(key); + } + } + + @Override + public Long append(String key, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.append(key, value); + } + } + + @Override + public String substr(String key, int start, int end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.substr(key, start, end); + } + } + + @Override + public Long hset(String key, String field, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hset(key, field, value); + } + } + + @Override + public Long hset(String key, Map hash) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hset(key, hash); + } + } + + @Override + public String hget(String key, String field) { + try (Jedis jedis = 
jedisPool.getResource()) { + return jedis.hget(key, field); + } + } + + @Override + public Long hsetnx(String key, String field, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hsetnx(key, field, value); + } + } + + @Override + public String hmset(String key, Map hash) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hmset(key, hash); + } + } + + @Override + public List hmget(String key, String... fields) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hmget(key, fields); + } + } + + @Override + public Long hincrBy(String key, String field, long value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hincrBy(key, field, value); + } + } + + @Override + public Double hincrByFloat(String key, String field, double value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hincrByFloat(key, field, value); + } + } + + @Override + public Boolean hexists(String key, String field) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hexists(key, field); + } + } + + @Override + public Long hdel(String key, String... field) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hdel(key, field); + } + } + + @Override + public Long hlen(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hlen(key); + } + } + + @Override + public Set hkeys(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hkeys(key); + } + } + + @Override + public List hvals(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hvals(key); + } + } + + @Override + public Map hgetAll(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hgetAll(key); + } + } + + @Override + public Long rpush(String key, String... string) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.rpush(key, string); + } + } + + @Override + public Long lpush(String key, String... string) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.lpush(key, string); + } + } + + @Override + public Long llen(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.llen(key); + } + } + + @Override + public List lrange(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.lrange(key, start, end); + } + } + + @Override + public String ltrim(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.ltrim(key, start, end); + } + } + + @Override + public String lindex(String key, long index) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.lindex(key, index); + } + } + + @Override + public String lset(String key, long index, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.lset(key, index, value); + } + } + + @Override + public Long lrem(String key, long count, String value) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.lrem(key, count, value); + } + } + + @Override + public String lpop(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.lpop(key); + } + } + + @Override + public String rpop(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.rpop(key); + } + } + + @Override + public Long sadd(String key, String... 
member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.sadd(key, member); + } + } + + @Override + public Set smembers(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.smembers(key); + } + } + + @Override + public Long srem(String key, String... member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.srem(key, member); + } + } + + @Override + public String spop(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.spop(key); + } + } + + @Override + public Set spop(String key, long count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.spop(key, count); + } + } + + @Override + public Long scard(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.scard(key); + } + } + + @Override + public Boolean sismember(String key, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.sismember(key, member); + } + } + + @Override + public String srandmember(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.srandmember(key); + } + } + + @Override + public List srandmember(String key, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.srandmember(key, count); + } + } + + @Override + public Long strlen(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.strlen(key); + } + } + + @Override + public Long zadd(String key, double score, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zadd(key, score, member); + } + } + + @Override + public Long zadd(String key, double score, String member, ZAddParams params) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zadd(key, score, member, params); + } + } + + @Override + public Long zadd(String key, Map scoreMembers) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zadd(key, scoreMembers); + } + } + + @Override + public Long zadd(String key, Map scoreMembers, ZAddParams params) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zadd(key, scoreMembers, params); + } + } + + @Override + public Set zrange(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrange(key, start, end); + } + } + + @Override + public Long zrem(String key, String... 
member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrem(key, member); + } + } + + @Override + public Double zincrby(String key, double score, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zincrby(key, score, member); + } + } + + @Override + public Double zincrby(String key, double score, String member, ZIncrByParams params) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zincrby(key, score, member, params); + } + } + + @Override + public Long zrank(String key, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrank(key, member); + } + } + + @Override + public Long zrevrank(String key, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrank(key, member); + } + } + + @Override + public Set zrevrange(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrange(key, start, end); + } + } + + @Override + public Set zrangeWithScores(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeWithScores(key, start, end); + } + } + + @Override + public Set zrevrangeWithScores(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeWithScores(key, start, end); + } + } + + @Override + public Long zcard(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zcard(key); + } + } + + @Override + public Double zscore(String key, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zscore(key, member); + } + } + + @Override + public Tuple zpopmax(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zpopmax(key); + } + } + + @Override + public Set zpopmax(String key, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zpopmax(key, count); + } + } + + @Override + public Tuple zpopmin(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zpopmin(key); + } + } + + @Override + public Set zpopmin(String key, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zpopmin(key, count); + } + } + + @Override + public List sort(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.sort(key); + } + } + + @Override + public List sort(String key, SortingParams sortingParameters) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.sort(key, sortingParameters); + } + } + + @Override + public Long zcount(String key, double min, double max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zcount(key, min, max); + } + } + + @Override + public Long zcount(String key, String min, String max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zcount(key, min, max); + } + } + + @Override + public Set zrangeByScore(String key, double min, double max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScore(key, min, max); + } + } + + @Override + public Set zrangeByScore(String key, String min, String max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScore(key, min, max); + } + } + + @Override + public Set zrevrangeByScore(String key, double max, double min) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScore(key, max, min); + } + } + + @Override + public Set zrangeByScore(String key, double min, double max, int offset, int count) { + try (Jedis jedis = 
jedisPool.getResource()) { + return jedis.zrangeByScore(key, min, max, offset, count); + } + } + + @Override + public Set zrevrangeByScore(String key, String max, String min) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScore(key, max, min); + } + } + + @Override + public Set zrangeByScore(String key, String min, String max, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScore(key, min, max, offset, count); + } + } + + @Override + public Set zrevrangeByScore(String key, double max, double min, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScore(key, max, min, offset, count); + } + } + + @Override + public Set zrangeByScoreWithScores(String key, double min, double max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScoreWithScores(key, min, max); + } + } + + @Override + public Set zrevrangeByScoreWithScores(String key, double max, double min) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScoreWithScores(key, max, min); + } + } + + @Override + public Set zrangeByScoreWithScores( + String key, double min, double max, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScoreWithScores(key, min, max, offset, count); + } + } + + @Override + public Set zrevrangeByScore(String key, String max, String min, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScore(key, max, min, offset, count); + } + } + + @Override + public Set zrangeByScoreWithScores(String key, String min, String max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScoreWithScores(key, min, max); + } + } + + @Override + public Set zrevrangeByScoreWithScores(String key, String max, String min) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScoreWithScores(key, max, min); + } + } + + @Override + public Set zrangeByScoreWithScores( + String key, String min, String max, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrangeByScoreWithScores(key, min, max, offset, count); + } + } + + @Override + public Set zrevrangeByScoreWithScores( + String key, double max, double min, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScoreWithScores(key, max, min, offset, count); + } + } + + @Override + public Set zrevrangeByScoreWithScores( + String key, String max, String min, int offset, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zrevrangeByScoreWithScores(key, max, min, offset, count); + } + } + + @Override + public Long zremrangeByRank(String key, long start, long end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zremrangeByRank(key, start, end); + } + } + + @Override + public Long zremrangeByScore(String key, double start, double end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zremrangeByScore(key, start, end); + } + } + + @Override + public Long zremrangeByScore(String key, String start, String end) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zremrangeByScore(key, start, end); + } + } + + @Override + public Long zlexcount(String key, String min, String max) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zlexcount(key, min, max); + } + } + + @Override + public Set zrangeByLex(String key, String min, String max) { + 
try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.zrangeByLex(key, min, max);
+ }
+ }
+
+ @Override
+ public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.zrangeByLex(key, min, max, offset, count);
+ }
+ }
+
+ @Override
+ public Set<String> zrevrangeByLex(String key, String max, String min) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.zrevrangeByLex(key, max, min);
+ }
+ }
+
+ @Override
+ public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.zrevrangeByLex(key, max, min, offset, count);
+ }
+ }
+
+ @Override
+ public Long zremrangeByLex(String key, String min, String max) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.zremrangeByLex(key, min, max);
+ }
+ }
+
+ @Override
+ public Long linsert(String key, ListPosition where, String pivot, String value) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.linsert(key, where, pivot, value);
+ }
+ }
+
+ @Override
+ public Long lpushx(String key, String... string) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.lpushx(key, string);
+ }
+ }
+
+ @Override
+ public Long rpushx(String key, String... string) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.rpushx(key, string);
+ }
+ }
+
+ @Override
+ public List<String> blpop(int timeout, String key) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.blpop(timeout, key);
+ }
+ }
+
+ @Override
+ public List<String> brpop(int timeout, String key) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.brpop(timeout, key);
+ }
+ }
+
+ @Override
+ public Long del(String key) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.del(key);
+ }
+ }
+
+ @Override
+ public Long unlink(String key) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.unlink(key);
+ }
+ }
+
+ @Override
+ public String echo(String string) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.echo(string);
+ }
+ }
+
+ @Override
+ public Long move(String key, int dbIndex) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.move(key, dbIndex);
+ }
+ }
+
+ @Override
+ public Long bitcount(String key) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.bitcount(key);
+ }
+ }
+
+ @Override
+ public Long bitcount(String key, long start, long end) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.bitcount(key, start, end);
+ }
+ }
+
+ @Override
+ public Long bitpos(String key, boolean value) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.bitpos(key, value);
+ }
+ }
+
+ @Override
+ public Long bitpos(String key, boolean value, BitPosParams params) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.bitpos(key, value, params);
+ }
+ }
+
+ @Override
+ public ScanResult<Entry<String, String>> hscan(String key, String cursor) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.hscan(key, cursor);
+ }
+ }
+
+ @Override
+ public ScanResult<Entry<String, String>> hscan(String key, String cursor, ScanParams params) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.hscan(key, cursor, params);
+ }
+ }
+
+ @Override
+ public ScanResult<String> sscan(String key, String cursor) {
+ try (Jedis jedis = jedisPool.getResource()) {
+ return jedis.sscan(key, cursor);
+ }
+ }
+
+ @Override
+ public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
+ try (Jedis jedis = 
jedisPool.getResource()) { + return jedis.sscan(key, cursor, params); + } + } + + @Override + public ScanResult zscan(String key, String cursor) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zscan(key, cursor); + } + } + + @Override + public ScanResult zscan(String key, String cursor, ScanParams params) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.zscan(key, cursor, params); + } + } + + @Override + public Long pfadd(String key, String... elements) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.pfadd(key, elements); + } + } + + @Override + public long pfcount(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.pfcount(key); + } + } + + @Override + public Long geoadd(String key, double longitude, double latitude, String member) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.geoadd(key, longitude, latitude, member); + } + } + + @Override + public Long geoadd(String key, Map memberCoordinateMap) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.geoadd(key, memberCoordinateMap); + } + } + + @Override + public Double geodist(String key, String member1, String member2) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.geodist(key, member1, member2); + } + } + + @Override + public Double geodist(String key, String member1, String member2, GeoUnit unit) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.geodist(key, member1, member2, unit); + } + } + + @Override + public List geohash(String key, String... members) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.geohash(key, members); + } + } + + @Override + public List geopos(String key, String... members) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.geopos(key, members); + } + } + + @Override + public List georadius( + String key, double longitude, double latitude, double radius, GeoUnit unit) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadius(key, longitude, latitude, radius, unit); + } + } + + @Override + public List georadiusReadonly( + String key, double longitude, double latitude, double radius, GeoUnit unit) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadiusReadonly(key, longitude, latitude, radius, unit); + } + } + + @Override + public List georadius( + String key, + double longitude, + double latitude, + double radius, + GeoUnit unit, + GeoRadiusParam param) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadius(key, longitude, latitude, radius, unit, param); + } + } + + @Override + public List georadiusReadonly( + String key, + double longitude, + double latitude, + double radius, + GeoUnit unit, + GeoRadiusParam param) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadiusReadonly(key, longitude, latitude, radius, unit, param); + } + } + + @Override + public List georadiusByMember( + String key, String member, double radius, GeoUnit unit) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadiusByMember(key, member, radius, unit); + } + } + + @Override + public List georadiusByMemberReadonly( + String key, String member, double radius, GeoUnit unit) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadiusByMemberReadonly(key, member, radius, unit); + } + } + + @Override + public List georadiusByMember( + String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) { + try (Jedis jedis = jedisPool.getResource()) { + return 
jedis.georadiusByMember(key, member, radius, unit, param); + } + } + + @Override + public List georadiusByMemberReadonly( + String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.georadiusByMemberReadonly(key, member, radius, unit, param); + } + } + + @Override + public List bitfield(String key, String... arguments) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.bitfield(key, arguments); + } + } + + @Override + public List bitfieldReadonly(String key, String... arguments) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.bitfieldReadonly(key, arguments); + } + } + + @Override + public Long hstrlen(String key, String field) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.hstrlen(key, field); + } + } + + @Override + public StreamEntryID xadd(String key, StreamEntryID id, Map hash) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xadd(key, id, hash); + } + } + + @Override + public StreamEntryID xadd( + String key, + StreamEntryID id, + Map hash, + long maxLen, + boolean approximateLength) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xadd(key, id, hash, maxLen, approximateLength); + } + } + + @Override + public Long xlen(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xlen(key); + } + } + + @Override + public List xrange(String key, StreamEntryID start, StreamEntryID end, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xrange(key, start, end, count); + } + } + + @Override + public List xrevrange( + String key, StreamEntryID end, StreamEntryID start, int count) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xrevrange(key, end, start, count); + } + } + + @Override + public long xack(String key, String group, StreamEntryID... ids) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xack(key, group, ids); + } + } + + @Override + public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xgroupCreate(key, groupname, id, makeStream); + } + } + + @Override + public String xgroupSetID(String key, String groupname, StreamEntryID id) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xgroupSetID(key, groupname, id); + } + } + + @Override + public long xgroupDestroy(String key, String groupname) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xgroupDestroy(key, groupname); + } + } + + @Override + public Long xgroupDelConsumer(String key, String groupname, String consumername) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xgroupDelConsumer(key, groupname, consumername); + } + } + + @Override + public List xpending( + String key, + String groupname, + StreamEntryID start, + StreamEntryID end, + int count, + String consumername) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xpending(key, groupname, start, end, count, consumername); + } + } + + @Override + public long xdel(String key, StreamEntryID... 
ids) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xdel(key, ids); + } + } + + @Override + public long xtrim(String key, long maxLen, boolean approximate) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xtrim(key, maxLen, approximate); + } + } + + @Override + public List xclaim( + String key, + String group, + String consumername, + long minIdleTime, + long newIdleTime, + int retries, + boolean force, + StreamEntryID... ids) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xclaim( + key, group, consumername, minIdleTime, newIdleTime, retries, force, ids); + } + } + + @Override + public StreamInfo xinfoStream(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xinfoStream(key); + } + } + + @Override + public List xinfoGroup(String key) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xinfoGroup(key); + } + } + + @Override + public List xinfoConsumers(String key, String group) { + try (Jedis jedis = jedisPool.getResource()) { + return jedis.xinfoConsumers(key, group); + } + } +} diff --git a/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisStandalone.java b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisStandalone.java new file mode 100644 index 0000000..12c360a --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisStandalone.java @@ -0,0 +1,975 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Component; + +import redis.clients.jedis.BitPosParams; +import redis.clients.jedis.GeoCoordinate; +import redis.clients.jedis.GeoRadiusResponse; +import redis.clients.jedis.GeoUnit; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.ListPosition; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.SortingParams; +import redis.clients.jedis.StreamConsumersInfo; +import redis.clients.jedis.StreamEntry; +import redis.clients.jedis.StreamEntryID; +import redis.clients.jedis.StreamGroupInfo; +import redis.clients.jedis.StreamInfo; +import redis.clients.jedis.StreamPendingEntry; +import redis.clients.jedis.Tuple; +import redis.clients.jedis.commands.JedisCommands; +import redis.clients.jedis.params.GeoRadiusParam; +import redis.clients.jedis.params.SetParams; +import redis.clients.jedis.params.ZAddParams; +import redis.clients.jedis.params.ZIncrByParams; + +/** A {@link JedisCommands} implementation that delegates to {@link JedisPool}. */ +@Component +@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone") +public class JedisStandalone implements JedisCommands { + + private final JedisPool jedisPool; + + public JedisStandalone(JedisPool jedisPool) { + this.jedisPool = jedisPool; + } + + private R executeInJedis(Function function) { + try (Jedis jedis = jedisPool.getResource()) { + return function.apply(jedis); + } + } + + @Override + public String set(String key, String value) { + return executeInJedis(jedis -> jedis.set(key, value)); + } + + @Override + public String set(String key, String value, SetParams params) { + return executeInJedis(jedis -> jedis.set(key, value, params)); + } + + @Override + public String get(String key) { + return executeInJedis(jedis -> jedis.get(key)); + } + + @Override + public Boolean exists(String key) { + return executeInJedis(jedis -> jedis.exists(key)); + } + + @Override + public Long persist(String key) { + return executeInJedis(jedis -> jedis.persist(key)); + } + + @Override + public String type(String key) { + return executeInJedis(jedis -> jedis.type(key)); + } + + @Override + public byte[] dump(String key) { + return executeInJedis(jedis -> jedis.dump(key)); + } + + @Override + public String restore(String key, int ttl, byte[] serializedValue) { + return executeInJedis(jedis -> jedis.restore(key, ttl, serializedValue)); + } + + @Override + public String restoreReplace(String key, int ttl, byte[] serializedValue) { + return executeInJedis(jedis -> jedis.restoreReplace(key, ttl, serializedValue)); + } + + @Override + public Long expire(String key, int seconds) { + return executeInJedis(jedis -> jedis.expire(key, seconds)); + } + + @Override + public Long pexpire(String key, long milliseconds) { + return executeInJedis(jedis -> jedis.pexpire(key, milliseconds)); + } + + @Override + public Long expireAt(String key, long unixTime) { + return 
executeInJedis(jedis -> jedis.expireAt(key, unixTime)); + } + + @Override + public Long pexpireAt(String key, long millisecondsTimestamp) { + return executeInJedis(jedis -> jedis.pexpireAt(key, millisecondsTimestamp)); + } + + @Override + public Long ttl(String key) { + return executeInJedis(jedis -> jedis.ttl(key)); + } + + @Override + public Long pttl(String key) { + return executeInJedis(jedis -> jedis.pttl(key)); + } + + @Override + public Long touch(String key) { + return executeInJedis(jedis -> jedis.touch(key)); + } + + @Override + public Boolean setbit(String key, long offset, boolean value) { + return executeInJedis(jedis -> jedis.setbit(key, offset, value)); + } + + @Override + public Boolean setbit(String key, long offset, String value) { + return executeInJedis(jedis -> jedis.setbit(key, offset, value)); + } + + @Override + public Boolean getbit(String key, long offset) { + return executeInJedis(jedis -> jedis.getbit(key, offset)); + } + + @Override + public Long setrange(String key, long offset, String value) { + return executeInJedis(jedis -> jedis.setrange(key, offset, value)); + } + + @Override + public String getrange(String key, long startOffset, long endOffset) { + return executeInJedis(jedis -> jedis.getrange(key, startOffset, endOffset)); + } + + @Override + public String getSet(String key, String value) { + return executeInJedis(jedis -> jedis.getSet(key, value)); + } + + @Override + public Long setnx(String key, String value) { + return executeInJedis(jedis -> jedis.setnx(key, value)); + } + + @Override + public String setex(String key, int seconds, String value) { + return executeInJedis(jedis -> jedis.setex(key, seconds, value)); + } + + @Override + public String psetex(String key, long milliseconds, String value) { + return executeInJedis(jedis -> jedis.psetex(key, milliseconds, value)); + } + + @Override + public Long decrBy(String key, long decrement) { + return executeInJedis(jedis -> jedis.decrBy(key, decrement)); + } + + @Override + public Long decr(String key) { + return executeInJedis(jedis -> jedis.decr(key)); + } + + @Override + public Long incrBy(String key, long increment) { + return executeInJedis(jedis -> jedis.incrBy(key, increment)); + } + + @Override + public Double incrByFloat(String key, double increment) { + return executeInJedis(jedis -> jedis.incrByFloat(key, increment)); + } + + @Override + public Long incr(String key) { + return executeInJedis(jedis -> jedis.incr(key)); + } + + @Override + public Long append(String key, String value) { + return executeInJedis(jedis -> jedis.append(key, value)); + } + + @Override + public String substr(String key, int start, int end) { + return executeInJedis(jedis -> jedis.substr(key, start, end)); + } + + @Override + public Long hset(String key, String field, String value) { + return executeInJedis(jedis -> jedis.hset(key, field, value)); + } + + @Override + public Long hset(String key, Map hash) { + return executeInJedis(jedis -> jedis.hset(key, hash)); + } + + @Override + public String hget(String key, String field) { + return executeInJedis(jedis -> jedis.hget(key, field)); + } + + @Override + public Long hsetnx(String key, String field, String value) { + return executeInJedis(jedis -> jedis.hsetnx(key, field, value)); + } + + @Override + public String hmset(String key, Map hash) { + return executeInJedis(jedis -> jedis.hmset(key, hash)); + } + + @Override + public List hmget(String key, String... 
fields) {
+ return executeInJedis(jedis -> jedis.hmget(key, fields));
+ }
+
+ @Override
+ public Long hincrBy(String key, String field, long value) {
+ return executeInJedis(jedis -> jedis.hincrBy(key, field, value));
+ }
+
+ @Override
+ public Double hincrByFloat(String key, String field, double value) {
+ return executeInJedis(jedis -> jedis.hincrByFloat(key, field, value));
+ }
+
+ @Override
+ public Boolean hexists(String key, String field) {
+ return executeInJedis(jedis -> jedis.hexists(key, field));
+ }
+
+ @Override
+ public Long hdel(String key, String... field) {
+ return executeInJedis(jedis -> jedis.hdel(key, field));
+ }
+
+ @Override
+ public Long hlen(String key) {
+ return executeInJedis(jedis -> jedis.hlen(key));
+ }
+
+ @Override
+ public Set<String> hkeys(String key) {
+ return executeInJedis(jedis -> jedis.hkeys(key));
+ }
+
+ @Override
+ public List<String> hvals(String key) {
+ return executeInJedis(jedis -> jedis.hvals(key));
+ }
+
+ @Override
+ public Map<String, String> hgetAll(String key) {
+ return executeInJedis(jedis -> jedis.hgetAll(key));
+ }
+
+ @Override
+ public Long rpush(String key, String... string) {
+ return executeInJedis(jedis -> jedis.rpush(key, string));
+ }
+
+ @Override
+ public Long lpush(String key, String... string) {
+ return executeInJedis(jedis -> jedis.lpush(key, string));
+ }
+
+ @Override
+ public Long llen(String key) {
+ return executeInJedis(jedis -> jedis.llen(key));
+ }
+
+ @Override
+ public List<String> lrange(String key, long start, long stop) {
+ return executeInJedis(jedis -> jedis.lrange(key, start, stop));
+ }
+
+ @Override
+ public String ltrim(String key, long start, long stop) {
+ return executeInJedis(jedis -> jedis.ltrim(key, start, stop));
+ }
+
+ @Override
+ public String lindex(String key, long index) {
+ return executeInJedis(jedis -> jedis.lindex(key, index));
+ }
+
+ @Override
+ public String lset(String key, long index, String value) {
+ return executeInJedis(jedis -> jedis.lset(key, index, value));
+ }
+
+ @Override
+ public Long lrem(String key, long count, String value) {
+ return executeInJedis(jedis -> jedis.lrem(key, count, value));
+ }
+
+ @Override
+ public String lpop(String key) {
+ return executeInJedis(jedis -> jedis.lpop(key));
+ }
+
+ @Override
+ public String rpop(String key) {
+ return executeInJedis(jedis -> jedis.rpop(key));
+ }
+
+ @Override
+ public Long sadd(String key, String... member) {
+ return executeInJedis(jedis -> jedis.sadd(key, member));
+ }
+
+ @Override
+ public Set<String> smembers(String key) {
+ return executeInJedis(jedis -> jedis.smembers(key));
+ }
+
+ @Override
+ public Long srem(String key, String... 
member) { + return executeInJedis(jedis -> jedis.srem(key, member)); + } + + @Override + public String spop(String key) { + return executeInJedis(jedis -> jedis.spop(key)); + } + + @Override + public Set spop(String key, long count) { + return executeInJedis(jedis -> jedis.spop(key, count)); + } + + @Override + public Long scard(String key) { + return executeInJedis(jedis -> jedis.scard(key)); + } + + @Override + public Boolean sismember(String key, String member) { + return executeInJedis(jedis -> jedis.sismember(key, member)); + } + + @Override + public String srandmember(String key) { + return executeInJedis(jedis -> jedis.srandmember(key)); + } + + @Override + public List srandmember(String key, int count) { + return executeInJedis(jedis -> jedis.srandmember(key, count)); + } + + @Override + public Long strlen(String key) { + return executeInJedis(jedis -> jedis.strlen(key)); + } + + @Override + public Long zadd(String key, double score, String member) { + return executeInJedis(jedis -> jedis.zadd(key, score, member)); + } + + @Override + public Long zadd(String key, double score, String member, ZAddParams params) { + return executeInJedis(jedis -> jedis.zadd(key, score, member, params)); + } + + @Override + public Long zadd(String key, Map scoreMembers) { + return executeInJedis(jedis -> jedis.zadd(key, scoreMembers)); + } + + @Override + public Long zadd(String key, Map scoreMembers, ZAddParams params) { + return executeInJedis(jedis -> jedis.zadd(key, scoreMembers, params)); + } + + @Override + public Set zrange(String key, long start, long stop) { + return executeInJedis(jedis -> jedis.zrange(key, start, stop)); + } + + @Override + public Long zrem(String key, String... members) { + return executeInJedis(jedis -> jedis.zrem(key, members)); + } + + @Override + public Double zincrby(String key, double increment, String member) { + return executeInJedis(jedis -> jedis.zincrby(key, increment, member)); + } + + @Override + public Double zincrby(String key, double increment, String member, ZIncrByParams params) { + return executeInJedis(jedis -> jedis.zincrby(key, increment, member, params)); + } + + @Override + public Long zrank(String key, String member) { + return executeInJedis(jedis -> jedis.zrank(key, member)); + } + + @Override + public Long zrevrank(String key, String member) { + return executeInJedis(jedis -> jedis.zrevrank(key, member)); + } + + @Override + public Set zrevrange(String key, long start, long stop) { + return executeInJedis(jedis -> jedis.zrevrange(key, start, stop)); + } + + @Override + public Set zrangeWithScores(String key, long start, long stop) { + return executeInJedis(jedis -> jedis.zrangeWithScores(key, start, stop)); + } + + @Override + public Set zrevrangeWithScores(String key, long start, long stop) { + return executeInJedis(jedis -> jedis.zrevrangeWithScores(key, start, stop)); + } + + @Override + public Long zcard(String key) { + return executeInJedis(jedis -> jedis.zcard(key)); + } + + @Override + public Double zscore(String key, String member) { + return executeInJedis(jedis -> jedis.zscore(key, member)); + } + + @Override + public Tuple zpopmax(String key) { + return executeInJedis(jedis -> jedis.zpopmax(key)); + } + + @Override + public Set zpopmax(String key, int count) { + return executeInJedis(jedis -> jedis.zpopmax(key, count)); + } + + @Override + public Tuple zpopmin(String key) { + return executeInJedis(jedis -> jedis.zpopmin(key)); + } + + @Override + public Set zpopmin(String key, int count) { + return executeInJedis(jedis -> 
jedis.zpopmin(key, count)); + } + + @Override + public List sort(String key) { + return executeInJedis(jedis -> jedis.sort(key)); + } + + @Override + public List sort(String key, SortingParams sortingParameters) { + return executeInJedis(jedis -> jedis.sort(key, sortingParameters)); + } + + @Override + public Long zcount(String key, double min, double max) { + return executeInJedis(jedis -> jedis.zcount(key, min, max)); + } + + @Override + public Long zcount(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zcount(key, min, max)); + } + + @Override + public Set zrangeByScore(String key, double min, double max) { + return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max)); + } + + @Override + public Set zrangeByScore(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max)); + } + + @Override + public Set zrevrangeByScore(String key, double max, double min) { + return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min)); + } + + @Override + public Set zrangeByScore(String key, double min, double max, int offset, int count) { + return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max, offset, count)); + } + + @Override + public Set zrevrangeByScore(String key, String max, String min) { + return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min)); + } + + @Override + public Set zrangeByScore(String key, String min, String max, int offset, int count) { + return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max, offset, count)); + } + + @Override + public Set zrevrangeByScore(String key, double max, double min, int offset, int count) { + return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min, offset, count)); + } + + @Override + public Set zrangeByScoreWithScores(String key, double min, double max) { + return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max)); + } + + @Override + public Set zrevrangeByScoreWithScores(String key, double max, double min) { + return executeInJedis(jedis -> jedis.zrevrangeByScoreWithScores(key, max, min)); + } + + @Override + public Set zrangeByScoreWithScores( + String key, double min, double max, int offset, int count) { + return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max, offset, count)); + } + + @Override + public Set zrevrangeByScore(String key, String max, String min, int offset, int count) { + return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min, offset, count)); + } + + @Override + public Set zrangeByScoreWithScores(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max)); + } + + @Override + public Set zrevrangeByScoreWithScores(String key, String max, String min) { + return executeInJedis(jedis -> jedis.zrevrangeByScoreWithScores(key, max, min)); + } + + @Override + public Set zrangeByScoreWithScores( + String key, String min, String max, int offset, int count) { + return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max, offset, count)); + } + + @Override + public Set zrevrangeByScoreWithScores( + String key, double max, double min, int offset, int count) { + return executeInJedis( + jedis -> jedis.zrevrangeByScoreWithScores(key, max, min, offset, count)); + } + + @Override + public Set zrevrangeByScoreWithScores( + String key, String max, String min, int offset, int count) { + return executeInJedis( + jedis -> jedis.zrevrangeByScoreWithScores(key, max, min, offset, count)); + } + + 
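+ // Every override in this class funnels through executeInJedis, which borrows a
+ // pooled connection, applies exactly one command, and releases the connection
+ // via try-with-resources. A minimal caller-side sketch (host, port, and key
+ // names below are illustrative only, not part of this module):
+ //
+ //   JedisPool pool = new JedisPool(new JedisPoolConfig(), "localhost", 6379);
+ //   JedisCommands commands = new JedisStandalone(pool);
+ //   commands.set("conductor.queue.size", "42");
+ //   Long size = commands.incr("conductor.queue.size"); // 43
+ //   pool.close();
+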
@Override + public Long zremrangeByRank(String key, long start, long stop) { + return executeInJedis(jedis -> jedis.zremrangeByRank(key, start, stop)); + } + + @Override + public Long zremrangeByScore(String key, double min, double max) { + return executeInJedis(jedis -> jedis.zremrangeByScore(key, min, max)); + } + + @Override + public Long zremrangeByScore(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zremrangeByScore(key, min, max)); + } + + @Override + public Long zlexcount(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zlexcount(key, min, max)); + } + + @Override + public Set zrangeByLex(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zrangeByLex(key, min, max)); + } + + @Override + public Set zrangeByLex(String key, String min, String max, int offset, int count) { + return executeInJedis(jedis -> jedis.zrangeByLex(key, min, max, offset, count)); + } + + @Override + public Set zrevrangeByLex(String key, String max, String min) { + return executeInJedis(jedis -> jedis.zrevrangeByLex(key, max, min)); + } + + @Override + public Set zrevrangeByLex(String key, String max, String min, int offset, int count) { + return executeInJedis(jedis -> jedis.zrevrangeByLex(key, max, min, offset, count)); + } + + @Override + public Long zremrangeByLex(String key, String min, String max) { + return executeInJedis(jedis -> jedis.zremrangeByLex(key, min, max)); + } + + @Override + public Long linsert(String key, ListPosition where, String pivot, String value) { + return executeInJedis(jedis -> jedis.linsert(key, where, pivot, value)); + } + + @Override + public Long lpushx(String key, String... string) { + return executeInJedis(jedis -> jedis.lpushx(key, string)); + } + + @Override + public Long rpushx(String key, String... 
string) {
+ return executeInJedis(jedis -> jedis.rpushx(key, string));
+ }
+
+ @Override
+ public List<String> blpop(int timeout, String key) {
+ return executeInJedis(jedis -> jedis.blpop(timeout, key));
+ }
+
+ @Override
+ public List<String> brpop(int timeout, String key) {
+ return executeInJedis(jedis -> jedis.brpop(timeout, key));
+ }
+
+ @Override
+ public Long del(String key) {
+ return executeInJedis(jedis -> jedis.del(key));
+ }
+
+ @Override
+ public Long unlink(String key) {
+ return executeInJedis(jedis -> jedis.unlink(key));
+ }
+
+ @Override
+ public String echo(String string) {
+ return executeInJedis(jedis -> jedis.echo(string));
+ }
+
+ @Override
+ public Long move(String key, int dbIndex) {
+ return executeInJedis(jedis -> jedis.move(key, dbIndex));
+ }
+
+ @Override
+ public Long bitcount(String key) {
+ return executeInJedis(jedis -> jedis.bitcount(key));
+ }
+
+ @Override
+ public Long bitcount(String key, long start, long end) {
+ return executeInJedis(jedis -> jedis.bitcount(key, start, end));
+ }
+
+ @Override
+ public Long bitpos(String key, boolean value) {
+ return executeInJedis(jedis -> jedis.bitpos(key, value));
+ }
+
+ @Override
+ public Long bitpos(String key, boolean value, BitPosParams params) {
+ return executeInJedis(jedis -> jedis.bitpos(key, value, params));
+ }
+
+ @Override
+ public ScanResult<Map.Entry<String, String>> hscan(String key, String cursor) {
+ return executeInJedis(jedis -> jedis.hscan(key, cursor));
+ }
+
+ @Override
+ public ScanResult<Map.Entry<String, String>> hscan(
+ String key, String cursor, ScanParams params) {
+ return executeInJedis(jedis -> jedis.hscan(key, cursor, params));
+ }
+
+ @Override
+ public ScanResult<String> sscan(String key, String cursor) {
+ return executeInJedis(jedis -> jedis.sscan(key, cursor));
+ }
+
+ @Override
+ public ScanResult<Tuple> zscan(String key, String cursor) {
+ return executeInJedis(jedis -> jedis.zscan(key, cursor));
+ }
+
+ @Override
+ public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
+ return executeInJedis(jedis -> jedis.zscan(key, cursor, params));
+ }
+
+ @Override
+ public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
+ return executeInJedis(jedis -> jedis.sscan(key, cursor, params));
+ }
+
+ @Override
+ public Long pfadd(String key, String... elements) {
+ return executeInJedis(jedis -> jedis.pfadd(key, elements));
+ }
+
+ @Override
+ public long pfcount(String key) {
+ return executeInJedis(jedis -> jedis.pfcount(key));
+ }
+
+ @Override
+ public Long geoadd(String key, double longitude, double latitude, String member) {
+ return executeInJedis(jedis -> jedis.geoadd(key, longitude, latitude, member));
+ }
+
+ @Override
+ public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
+ return executeInJedis(jedis -> jedis.geoadd(key, memberCoordinateMap));
+ }
+
+ @Override
+ public Double geodist(String key, String member1, String member2) {
+ return executeInJedis(jedis -> jedis.geodist(key, member1, member2));
+ }
+
+ @Override
+ public Double geodist(String key, String member1, String member2, GeoUnit unit) {
+ return executeInJedis(jedis -> jedis.geodist(key, member1, member2, unit));
+ }
+
+ @Override
+ public List<String> geohash(String key, String... members) {
+ return executeInJedis(jedis -> jedis.geohash(key, members));
+ }
+
+ @Override
+ public List<GeoCoordinate> geopos(String key, String... 
members) { + return executeInJedis(jedis -> jedis.geopos(key, members)); + } + + @Override + public List georadius( + String key, double longitude, double latitude, double radius, GeoUnit unit) { + return executeInJedis(jedis -> jedis.georadius(key, longitude, latitude, radius, unit)); + } + + @Override + public List georadiusReadonly( + String key, double longitude, double latitude, double radius, GeoUnit unit) { + return executeInJedis( + jedis -> jedis.georadiusReadonly(key, longitude, latitude, radius, unit)); + } + + @Override + public List georadius( + String key, + double longitude, + double latitude, + double radius, + GeoUnit unit, + GeoRadiusParam param) { + return executeInJedis( + jedis -> jedis.georadius(key, longitude, latitude, radius, unit, param)); + } + + @Override + public List georadiusReadonly( + String key, + double longitude, + double latitude, + double radius, + GeoUnit unit, + GeoRadiusParam param) { + return executeInJedis( + jedis -> jedis.georadiusReadonly(key, longitude, latitude, radius, unit, param)); + } + + @Override + public List georadiusByMember( + String key, String member, double radius, GeoUnit unit) { + return executeInJedis(jedis -> jedis.georadiusByMember(key, member, radius, unit)); + } + + @Override + public List georadiusByMemberReadonly( + String key, String member, double radius, GeoUnit unit) { + return executeInJedis(jedis -> jedis.georadiusByMemberReadonly(key, member, radius, unit)); + } + + @Override + public List georadiusByMember( + String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) { + return executeInJedis(jedis -> jedis.georadiusByMember(key, member, radius, unit, param)); + } + + @Override + public List georadiusByMemberReadonly( + String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) { + return executeInJedis( + jedis -> jedis.georadiusByMemberReadonly(key, member, radius, unit, param)); + } + + @Override + public List bitfield(String key, String... arguments) { + return executeInJedis(jedis -> jedis.bitfield(key, arguments)); + } + + @Override + public List bitfieldReadonly(String key, String... arguments) { + return executeInJedis(jedis -> jedis.bitfieldReadonly(key, arguments)); + } + + @Override + public Long hstrlen(String key, String field) { + return executeInJedis(jedis -> jedis.hstrlen(key, field)); + } + + @Override + public StreamEntryID xadd(String key, StreamEntryID id, Map hash) { + return executeInJedis(jedis -> jedis.xadd(key, id, hash)); + } + + @Override + public StreamEntryID xadd( + String key, + StreamEntryID id, + Map hash, + long maxLen, + boolean approximateLength) { + return executeInJedis(jedis -> jedis.xadd(key, id, hash, maxLen, approximateLength)); + } + + @Override + public Long xlen(String key) { + return executeInJedis(jedis -> jedis.xlen(key)); + } + + @Override + public List xrange(String key, StreamEntryID start, StreamEntryID end, int count) { + return executeInJedis(jedis -> jedis.xrange(key, start, end, count)); + } + + @Override + public List xrevrange( + String key, StreamEntryID end, StreamEntryID start, int count) { + return executeInJedis(jedis -> jedis.xrevrange(key, end, start, count)); + } + + @Override + public long xack(String key, String group, StreamEntryID... 
ids) { + return executeInJedis(jedis -> jedis.xack(key, group, ids)); + } + + @Override + public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) { + return executeInJedis(jedis -> jedis.xgroupCreate(key, groupname, id, makeStream)); + } + + @Override + public String xgroupSetID(String key, String groupname, StreamEntryID id) { + return executeInJedis(jedis -> jedis.xgroupSetID(key, groupname, id)); + } + + @Override + public long xgroupDestroy(String key, String groupname) { + return executeInJedis(jedis -> jedis.xgroupDestroy(key, groupname)); + } + + @Override + public Long xgroupDelConsumer(String key, String groupname, String consumername) { + return executeInJedis(jedis -> jedis.xgroupDelConsumer(key, groupname, consumername)); + } + + @Override + public List xpending( + String key, + String groupname, + StreamEntryID start, + StreamEntryID end, + int count, + String consumername) { + return executeInJedis( + jedis -> jedis.xpending(key, groupname, start, end, count, consumername)); + } + + @Override + public long xdel(String key, StreamEntryID... ids) { + return executeInJedis(jedis -> jedis.xdel(key, ids)); + } + + @Override + public long xtrim(String key, long maxLen, boolean approximate) { + return executeInJedis(jedis -> jedis.xtrim(key, maxLen, approximate)); + } + + @Override + public List xclaim( + String key, + String group, + String consumername, + long minIdleTime, + long newIdleTime, + int retries, + boolean force, + StreamEntryID... ids) { + return executeInJedis( + jedis -> + jedis.xclaim( + key, + group, + consumername, + minIdleTime, + newIdleTime, + retries, + force, + ids)); + } + + @Override + public StreamInfo xinfoStream(String key) { + return executeInJedis(jedis -> jedis.xinfoStream(key)); + } + + @Override + public List xinfoGroup(String key) { + return executeInJedis(jedis -> jedis.xinfoGroup(key)); + } + + @Override + public List xinfoConsumers(String key, String group) { + return executeInJedis(jedis -> jedis.xinfoConsumers(key, group)); + } + + public String set(byte[] key, byte[] value) { + return executeInJedis(jedis -> jedis.set(key, value)); + } + + public byte[] getBytes(byte[] key) { + return executeInJedis(jedis -> jedis.get(key)); + } +}
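Every override above funnels through a single executeInJedis helper on the enclosing class. The sketch below is a minimal, self-contained illustration of that borrow-and-return pattern, assuming only a JedisPool field; the class name PooledJedisExecutor is hypothetical, and the real helper may add retries or metrics on top of this.

import java.util.function.Function;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;

class PooledJedisExecutor {

    private final JedisPool jedisPool;

    PooledJedisExecutor(JedisPool jedisPool) {
        this.jedisPool = jedisPool;
    }

    // Borrow a connection from the pool, apply the operation, and let
    // try-with-resources return the connection when the call completes.
    <T> T executeInJedis(Function<Jedis, T> operation) {
        try (Jedis jedis = jedisPool.getResource()) {
            return operation.apply(jedis);
        }
    }
}

diff --git a/persistence/src/main/java/com/netflix/conductor/redis/jedis/OrkesJedisProxy.java b/persistence/src/main/java/com/netflix/conductor/redis/jedis/OrkesJedisProxy.java new file mode 100644 index 0000000..451222b --- /dev/null +++ b/persistence/src/main/java/com/netflix/conductor/redis/jedis/OrkesJedisProxy.java @@ -0,0 +1,242 @@ +/* + * Copyright 2020 Orkes, Inc. + *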

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.nio.charset.StandardCharsets; +import java.util.*; +import java.util.Map.Entry; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.annotation.Conditional; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.redis.config.AnyRedisCondition; + +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.Tuple; +import redis.clients.jedis.commands.JedisCommands; +import redis.clients.jedis.params.ZAddParams; + +/** Proxy for the {@link JedisCommands} object. */ +@Component +@Conditional(AnyRedisCondition.class) +public class OrkesJedisProxy { + + private static final Logger LOGGER = LoggerFactory.getLogger(OrkesJedisProxy.class); + + protected JedisStandalone jedisCommands; + + public OrkesJedisProxy(JedisPool jedisPool) { + this.jedisCommands = new JedisStandalone(jedisPool); + } + + public Set zrange(String key, long start, long end) { + return jedisCommands.zrange(key, start, end); + } + + public Set zrangeByScoreWithScores(String key, double maxScore, int count) { + return jedisCommands.zrangeByScoreWithScores(key, 0, maxScore, 0, count); + } + + public Set zrangeByScore(String key, double maxScore, int count) { + return jedisCommands.zrangeByScore(key, 0, maxScore, 0, count); + } + + public Set zrangeByScore(String key, double minScore, double maxScore, int count) { + return jedisCommands.zrangeByScore(key, minScore, maxScore, 0, count); + } + + public ScanResult zscan(String key, int cursor) { + return jedisCommands.zscan(key, "" + cursor); + } + + public String get(String key) { + return jedisCommands.get(key); + } + + public Long zcard(String key) { + return jedisCommands.zcard(key); + } + + public Long del(String key) { + return jedisCommands.del(key); + } + + public Long zrem(String key, String member) { + return jedisCommands.zrem(key, member); + } + + public long zremrangeByScore(String key, String start, String end) { + return jedisCommands.zremrangeByScore(key, start, end); + } + + public long zcount(String key, double min, double max) { + return jedisCommands.zcount(key, min, max); + } + + public String set(String key, String value) { + return jedisCommands.set(key, value); + } + + public Long setnx(String key, String value) { + return jedisCommands.setnx(key, value); + } + + public Long zadd(String key, double score, String member) { + return jedisCommands.zadd(key, score, member); + } + + public Long zaddnx(String key, double score, String member) { + ZAddParams params = ZAddParams.zAddParams().nx(); + return jedisCommands.zadd(key, score, member, params); + } + + public Long hset(String key, String field, String value) { + return jedisCommands.hset(key, field, value); + } + + public Long hsetnx(String key, String field, String value) { + return jedisCommands.hsetnx(key, field, value); + } + + public Long hlen(String key) { + return jedisCommands.hlen(key); + } + + public String hget(String key, String field) { + return jedisCommands.hget(key, field); + } + + public Optional optionalHget(String key, String field) { + return 
Optional.ofNullable(jedisCommands.hget(key, field)); + } + + public Map hscan(String key, int count) { + Map m = new HashMap<>(); + int cursor = 0; + do { + ScanResult> scanResult = jedisCommands.hscan(key, "" + cursor); + cursor = Integer.parseInt(scanResult.getCursor()); + for (Entry r : scanResult.getResult()) { + m.put(r.getKey(), r.getValue()); + } + if (m.size() > count) { + break; + } + } while (cursor > 0); + + return m; + } + + public Map hgetAll(String key) { + Map m = new HashMap<>(); + int cursor = 0; + do { + ScanResult> scanResult = jedisCommands.hscan(key, "" + cursor); + cursor = Integer.parseInt(scanResult.getCursor()); + for (Entry r : scanResult.getResult()) { + m.put(r.getKey(), r.getValue()); + } + } while (cursor > 0); + + return m; + } + + public List hvals(String key) { + LOGGER.trace("hvals {}", key); + return jedisCommands.hvals(key); + } + + public Set hkeys(String key) { + LOGGER.trace("hkeys {}", key); + Set keys = new HashSet<>(); + int cursor = 0; + do { + ScanResult> sr = jedisCommands.hscan(key, "" + cursor); + cursor = Integer.parseInt(sr.getCursor()); + List> result = sr.getResult(); + for (Entry e : result) { + keys.add(e.getKey()); + } + } while (cursor > 0); + + return keys; + } + + public Long hdel(String key, String... fields) { + LOGGER.trace("hdel {} {}", key, fields[0]); + return jedisCommands.hdel(key, fields); + } + + public Long expire(String key, int seconds) { + return jedisCommands.expire(key, seconds); + } + + public Boolean hexists(String key, String field) { + return jedisCommands.hexists(key, field); + } + + public Long sadd(String key, String value) { + LOGGER.trace("sadd {} {}", key, value); + return jedisCommands.sadd(key, value); + } + + public Long srem(String key, String member) { + LOGGER.trace("srem {} {}", key, member); + return jedisCommands.srem(key, member); + } + + public boolean sismember(String key, String member) { + return jedisCommands.sismember(key, member); + } + + public Set smembers(String key) { + LOGGER.trace("smembers {}", key); + Set r = new HashSet<>(); + int cursor = 0; + ScanParams sp = new ScanParams(); + sp.count(50); + + do { + ScanResult scanResult = jedisCommands.sscan(key, "" + cursor, sp); + cursor = Integer.parseInt(scanResult.getCursor()); + r.addAll(scanResult.getResult()); + } while (cursor > 0); + + return r; + } + + // Uses the actual SMEMBERS command - only use this if the set is small enough + public Set smembers2(String key) { + return jedisCommands.smembers(key); + } + + public Long scard(String key) { + return jedisCommands.scard(key); + } + + public String set(String key, byte[] value) { + return jedisCommands.set(key.getBytes(StandardCharsets.UTF_8), value); + } + + public void increment(String key, long value) { + jedisCommands.incrBy(key, value); + } + + public byte[] getBytes(String key) { + return jedisCommands.getBytes(key.getBytes(StandardCharsets.UTF_8)); + } +}
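Note how hgetAll, hkeys, and smembers above avoid the blocking O(N) HGETALL/HKEYS/SMEMBERS commands on potentially large structures and iterate with HSCAN/SSCAN cursors instead (smembers2 is kept for sets known to be small). Below is a standalone sketch of that cursor loop against a plain Jedis connection; the class and method names are illustrative.

import java.util.HashSet;
import java.util.Set;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;

class SscanLoopSketch {

    static Set<String> scanSetMembers(Jedis jedis, String key) {
        Set<String> members = new HashSet<>();
        ScanParams params = new ScanParams().count(50); // same batch-size hint as above
        String cursor = ScanParams.SCAN_POINTER_START;  // "0"
        do {
            ScanResult<String> batch = jedis.sscan(key, cursor, params);
            cursor = batch.getCursor();
            members.addAll(batch.getResult());
        } while (!ScanParams.SCAN_POINTER_START.equals(cursor)); // Redis returns "0" when done
        return members;
    }
}

diff --git a/persistence/src/main/java/com/netflix/dyno/connectionpool/Host.java b/persistence/src/main/java/com/netflix/dyno/connectionpool/Host.java new file mode 100644 index 0000000..1fa48fe --- /dev/null +++ b/persistence/src/main/java/com/netflix/dyno/connectionpool/Host.java @@ -0,0 +1,230 @@ +/* + * Copyright 2011 Orkes, Inc. + *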

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.dyno.connectionpool; + +import java.net.InetSocketAddress; +import java.util.Objects; + +import org.apache.commons.lang3.StringUtils; + +/** + * Class encapsulating information about a host. + * + *

This is immutable except for the host status. Note that the HostSupplier may not know the + * Dynomite port, whereas the Host object created by the load balancer may receive the port from + * the cluster_describe REST call. Hence, we must not use the port in the equality and hashCode + * calculations. + * + * @author poberai + * @author ipapapa + */ +public class Host implements Comparable { + + public static final int DEFAULT_PORT = 8102; + public static final int DEFAULT_DATASTORE_PORT = 22122; + public static final Host NO_HOST = + new HostBuilder() + .setHostname("UNKNOWN") + .setIpAddress("UNKNOWN") + .setPort(0) + .setRack("UNKNOWN") + .createHost(); + + private final String hostname; + private final String ipAddress; + private final int port; + private final int securePort; + private final int datastorePort; + private final InetSocketAddress socketAddress; + private final String rack; + private final String datacenter; + private String hashtag; + private Status status = Status.Down; + private final String password; + + public enum Status { + Up, + Down; + } + + public Host( + String hostname, + String ipAddress, + int port, + int securePort, + int datastorePort, + String rack, + String datacenter, + Status status, + String hashtag, + String password) { + this.hostname = hostname; + this.ipAddress = ipAddress; + this.port = port; + this.securePort = securePort; + this.datastorePort = datastorePort; + this.rack = rack; + this.status = status; + this.datacenter = datacenter; + this.hashtag = hashtag; + this.password = StringUtils.isEmpty(password) ? null : password; + + // Used for the unit tests to prevent host name resolution + if (port != -1) { + this.socketAddress = new InetSocketAddress(hostname, port); + } else { + this.socketAddress = null; + } + } + + public String getHostAddress() { + if (this.ipAddress != null) { + return ipAddress; + } + return hostname; + } + + public String getHostName() { + return hostname; + } + + public String getIpAddress() { + return ipAddress; + } + + public int getPort() { + return port; + } + + public int getSecurePort() { + return securePort; + } + + public int getDatastorePort() { + return datastorePort; + } + + public String getDatacenter() { + return datacenter; + } + + public String getRack() { + return rack; + } + + public String getHashtag() { + return hashtag; + } + + public void setHashtag(String hashtag) { + this.hashtag = hashtag; + } + + public Host setStatus(Status condition) { + status = condition; + return this; + } + + public boolean isUp() { + return status == Status.Up; + } + + public String getPassword() { + return password; + } + + public Status getStatus() { + return status; + } + + public InetSocketAddress getSocketAddress() { + return socketAddress; + } + + /** + * If the port were included, equality checks in collections would fail between Host objects + * created from the HostSupplier, which may not know the Dynomite port, and the Host objects + * created by the token map supplier. Hence only hostname and rack participate. + */ + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((hostname == null) ? 0 : hostname.hashCode()); + result = prime * result + ((rack == null) ? 0 : rack.hashCode()); + + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null) return false; + + if (getClass() != obj.getClass()) return false; + + Host other = (Host) obj; + + boolean equals = true; + + equals &= (hostname != null) ?
hostname.equals(other.hostname) : other.hostname == null; + equals &= (rack != null) ? rack.equals(other.rack) : other.rack == null; + + return equals; + } + + @Override + public int compareTo(Host o) { + int compared = this.hostname.compareTo(o.hostname); + if (compared != 0) { + return compared; + } + return this.rack.compareTo(o.rack); + } + + @Override + public String toString() { + + return "Host [hostname=" + + hostname + + ", ipAddress=" + + ipAddress + + ", port=" + + port + + ", rack: " + + rack + + ", datacenter: " + + datacenter + + ", status: " + + status.name() + + ", hashtag=" + + hashtag + + ", password=" + + (Objects.nonNull(password) ? "masked" : "null") + + "]"; + } + + public static Host clone(Host host) { + return new HostBuilder() + .setHostname(host.getHostName()) + .setIpAddress(host.getIpAddress()) + .setPort(host.getPort()) + .setSecurePort(host.getSecurePort()) + .setRack(host.getRack()) + .setDatastorePort(host.getDatastorePort()) + .setDatacenter(host.getDatacenter()) + .setStatus(host.getStatus()) + .setHashtag(host.getHashtag()) + .setPassword(host.getPassword()) + .createHost(); + } +}
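The equality contract documented on Host above (hostname and rack only, never the port) can be seen with a quick hypothetical check: two hosts that differ only in port compare equal and hash alike, which is exactly what lets supplier-created and token-map-created hosts meet in the same collections.

import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;

class HostEqualitySketch {

    public static void main(String[] args) {
        Host fromSupplier = new HostBuilder()
                .setHostname("redis-1")
                .setRack("rack-a")
                .setPort(-1) // the HostSupplier may not know the Dynomite port
                .createHost();
        Host fromTokenMap = new HostBuilder()
                .setHostname("redis-1")
                .setRack("rack-a")
                .setPort(8102) // learned, e.g., from cluster_describe
                .createHost();
        System.out.println(fromSupplier.equals(fromTokenMap));                  // true
        System.out.println(fromSupplier.hashCode() == fromTokenMap.hashCode()); // true
    }
}

diff --git a/persistence/src/main/java/com/netflix/dyno/connectionpool/HostBuilder.java b/persistence/src/main/java/com/netflix/dyno/connectionpool/HostBuilder.java new file mode 100644 index 0000000..d31f5b9 --- /dev/null +++ b/persistence/src/main/java/com/netflix/dyno/connectionpool/HostBuilder.java @@ -0,0 +1,93 @@ +/* + * Copyright 2022 Orkes, Inc. + *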

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.dyno.connectionpool; + +import static com.netflix.dyno.connectionpool.Host.DEFAULT_DATASTORE_PORT; +import static com.netflix.dyno.connectionpool.Host.DEFAULT_PORT; + +public class HostBuilder { + private String hostname; + private int port = DEFAULT_PORT; + private String rack; + private String ipAddress = null; + private int securePort = DEFAULT_PORT; + private int datastorePort = DEFAULT_DATASTORE_PORT; + private String datacenter = null; + private Host.Status status = Host.Status.Down; + private String hashtag = null; + private String password = null; + + public HostBuilder setPort(int port) { + this.port = port; + return this; + } + + public HostBuilder setRack(String rack) { + this.rack = rack; + return this; + } + + public HostBuilder setHostname(String hostname) { + this.hostname = hostname; + return this; + } + + public HostBuilder setIpAddress(String ipAddress) { + this.ipAddress = ipAddress; + return this; + } + + public HostBuilder setSecurePort(int securePort) { + this.securePort = securePort; + return this; + } + + public HostBuilder setDatacenter(String datacenter) { + this.datacenter = datacenter; + return this; + } + + public HostBuilder setStatus(Host.Status status) { + this.status = status; + return this; + } + + public HostBuilder setHashtag(String hashtag) { + this.hashtag = hashtag; + return this; + } + + public HostBuilder setPassword(String password) { + this.password = password; + return this; + } + + public HostBuilder setDatastorePort(int datastorePort) { + this.datastorePort = datastorePort; + return this; + } + + public Host createHost() { + return new Host( + hostname, + ipAddress, + port, + securePort, + datastorePort, + rack, + datacenter, + status, + hashtag, + password); + } +} diff --git a/persistence/src/main/java/com/netflix/dyno/connectionpool/HostSupplier.java b/persistence/src/main/java/com/netflix/dyno/connectionpool/HostSupplier.java new file mode 100644 index 0000000..bdfed2b --- /dev/null +++ b/persistence/src/main/java/com/netflix/dyno/connectionpool/HostSupplier.java @@ -0,0 +1,20 @@ +/* + * Copyright 2011 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.dyno.connectionpool; + +import java.util.List; + +public interface HostSupplier { + + public List getHosts(); +}
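Because HostSupplier exposes a single getHosts() method, a fixed-list supplier is one lambda away. A hedged sketch using the HostBuilder above (the class and method names are illustrative; real suppliers usually discover hosts dynamically):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;

class StaticHostSupplierSketch {

    static HostSupplier of(String rack, String... hostnames) {
        List<Host> hosts = Arrays.stream(hostnames)
                .map(name -> new HostBuilder()
                        .setHostname(name)
                        .setRack(rack)
                        .setStatus(Host.Status.Up)
                        .createHost())
                .collect(Collectors.toList());
        return () -> hosts; // HostSupplier has a single abstract method
    }
}

diff --git a/persistence/src/test/java/com/netflix/conductor/common/config/TestObjectMapperConfiguration.java b/persistence/src/test/java/com/netflix/conductor/common/config/TestObjectMapperConfiguration.java new file mode 100644 index 0000000..e2b9ee3 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/common/config/TestObjectMapperConfiguration.java @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Orkes, Inc. + *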

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.common.config; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import com.fasterxml.jackson.databind.ObjectMapper; + +/** Supplies the standard Conductor {@link ObjectMapper} for tests that need them. */ +@Configuration +public class TestObjectMapperConfiguration { + + @Bean + public ObjectMapper testObjectMapper() { + return new ObjectMapperProvider().getObjectMapper(); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java b/persistence/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java new file mode 100644 index 0000000..b8f0ba1 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/dao/ExecutionDAOTest.java @@ -0,0 +1,432 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.dao; + +import java.util.*; +import java.util.stream.Collectors; + +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.common.metadata.workflow.WorkflowTask; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.model.WorkflowModel; + +import static org.junit.Assert.*; + +public abstract class ExecutionDAOTest { + + protected abstract ExecutionDAO getExecutionDAO(); + + protected ConcurrentExecutionLimitDAO getConcurrentExecutionLimitDAO() { + return (ConcurrentExecutionLimitDAO) getExecutionDAO(); + } + + @Rule public ExpectedException expectedException = ExpectedException.none(); + + @Test + public void testTaskExceedsLimit() { + TaskDef taskDefinition = new TaskDef(); + taskDefinition.setName("task1"); + taskDefinition.setConcurrentExecLimit(1); + + WorkflowTask workflowTask = new WorkflowTask(); + workflowTask.setName("task1"); + workflowTask.setTaskDefinition(taskDefinition); + workflowTask.setTaskDefinition(taskDefinition); + + List tasks = new LinkedList<>(); + for (int i = 0; i < 15; i++) { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(i + 1); + task.setTaskId("t_" + i); + task.setWorkflowInstanceId("workflow_" + i); + task.setReferenceTaskName("task1"); + task.setTaskDefName("task1"); + tasks.add(task); + task.setStatus(TaskModel.Status.SCHEDULED); + task.setWorkflowTask(workflowTask); + } + + getExecutionDAO().createTasks(tasks); + assertFalse(getConcurrentExecutionLimitDAO().exceedsLimit(tasks.get(0))); + tasks.get(0).setStatus(TaskModel.Status.IN_PROGRESS); + getExecutionDAO().updateTask(tasks.get(0)); + + for (TaskModel task : tasks) { + assertTrue(getConcurrentExecutionLimitDAO().exceedsLimit(task)); + } + } + + @Test + public void testCreateTaskException() { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(UUID.randomUUID().toString()); + task.setTaskDefName("task1"); + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage("Workflow instance id cannot be null"); + getExecutionDAO().createTasks(Collections.singletonList(task)); + + task.setWorkflowInstanceId(UUID.randomUUID().toString()); + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage("Task reference name cannot be null"); + getExecutionDAO().createTasks(Collections.singletonList(task)); + } + + @Test + public void testCreateTaskException2() { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(UUID.randomUUID().toString()); + task.setTaskDefName("task1"); + task.setWorkflowInstanceId(UUID.randomUUID().toString()); + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage("Task reference name cannot be null"); + getExecutionDAO().createTasks(Collections.singletonList(task)); + } + + @Test + public void testTaskCreateDups() { + List tasks = new 
LinkedList<>(); + String workflowId = UUID.randomUUID().toString(); + + for (int i = 0; i < 3; i++) { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(i + 1); + task.setTaskId(workflowId + "_t" + i); + task.setReferenceTaskName("t" + i); + task.setRetryCount(0); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("task" + i); + task.setStatus(TaskModel.Status.IN_PROGRESS); + tasks.add(task); + } + + // Let's insert a retried task + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(workflowId + "_t" + 2); + task.setReferenceTaskName("t" + 2); + task.setRetryCount(1); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("task" + 2); + task.setStatus(TaskModel.Status.IN_PROGRESS); + tasks.add(task); + + // Duplicate task! + task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(workflowId + "_t" + 1); + task.setReferenceTaskName("t" + 1); + task.setRetryCount(0); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("task" + 1); + task.setStatus(TaskModel.Status.IN_PROGRESS); + tasks.add(task); + + List created = getExecutionDAO().createTasks(tasks); + assertEquals(tasks.size() - 1, created.size()); // 1 less + + Set srcIds = + tasks.stream() + .map(t -> t.getReferenceTaskName() + "." + t.getRetryCount()) + .collect(Collectors.toSet()); + Set createdIds = + created.stream() + .map(t -> t.getReferenceTaskName() + "." + t.getRetryCount()) + .collect(Collectors.toSet()); + + assertEquals(srcIds, createdIds); + + List pending = getExecutionDAO().getPendingTasksByWorkflow("task0", workflowId); + assertNotNull(pending); + assertEquals(1, pending.size()); + assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), pending.get(0))); + + List found = getExecutionDAO().getTasks(tasks.get(0).getTaskDefName(), null, 1); + assertNotNull(found); + assertEquals(1, found.size()); + assertTrue(EqualsBuilder.reflectionEquals(tasks.get(0), found.get(0))); + } + + @Test + public void testTaskOps() { + List tasks = new LinkedList<>(); + String workflowId = UUID.randomUUID().toString(); + + for (int i = 0; i < 3; i++) { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(workflowId + "_t" + i); + task.setReferenceTaskName("testTaskOps" + i); + task.setRetryCount(0); + task.setWorkflowInstanceId(workflowId); + task.setTaskDefName("testTaskOps" + i); + task.setStatus(TaskModel.Status.IN_PROGRESS); + tasks.add(task); + } + + for (int i = 0; i < 3; i++) { + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId("x" + workflowId + "_t" + i); + task.setReferenceTaskName("testTaskOps" + i); + task.setRetryCount(0); + task.setWorkflowInstanceId("x" + workflowId); + task.setTaskDefName("testTaskOps" + i); + task.setStatus(TaskModel.Status.IN_PROGRESS); + getExecutionDAO().createTasks(Collections.singletonList(task)); + } + + List created = getExecutionDAO().createTasks(tasks); + assertEquals(tasks.size(), created.size()); + + List pending = + getExecutionDAO().getPendingTasksForTaskType(tasks.get(0).getTaskDefName()); + assertNotNull(pending); + assertEquals(2, pending.size()); + // Pending list can come in any order. 
finding the one we are looking for and then + // comparing + TaskModel matching = + pending.stream() + .filter(task -> task.getTaskId().equals(tasks.get(0).getTaskId())) + .findAny() + .get(); + assertTrue(EqualsBuilder.reflectionEquals(matching, tasks.get(0))); + + for (int i = 0; i < 3; i++) { + TaskModel found = getExecutionDAO().getTask(workflowId + "_t" + i); + assertNotNull(found); + found.getOutputData().put("updated", true); + found.setStatus(TaskModel.Status.COMPLETED); + getExecutionDAO().updateTask(found); + } + + List taskIds = + tasks.stream().map(TaskModel::getTaskId).collect(Collectors.toList()); + List found = getExecutionDAO().getTasks(taskIds); + assertEquals(taskIds.size(), found.size()); + found.forEach( + task -> { + assertTrue(task.getOutputData().containsKey("updated")); + assertEquals(true, task.getOutputData().get("updated")); + boolean removed = getExecutionDAO().removeTask(task.getTaskId()); + assertTrue(removed); + }); + + found = getExecutionDAO().getTasks(taskIds); + assertTrue(found.isEmpty()); + } + + @Test + public void testPending() { + WorkflowDef def = new WorkflowDef(); + def.setName("pending_count_test"); + + WorkflowModel workflow = createTestWorkflow(); + workflow.setWorkflowDefinition(def); + + List workflowIds = generateWorkflows(workflow, 10); + long count = getExecutionDAO().getPendingWorkflowCount(def.getName()); + assertEquals(10, count); + + for (int i = 0; i < 10; i++) { + getExecutionDAO().removeFromPendingWorkflow(def.getName(), workflowIds.get(i)); + } + + count = getExecutionDAO().getPendingWorkflowCount(def.getName()); + assertEquals(0, count); + } + + @Test + public void complexExecutionTest() { + WorkflowModel workflow = createTestWorkflow(); + int numTasks = workflow.getTasks().size(); + + String workflowId = getExecutionDAO().createWorkflow(workflow); + assertEquals(workflow.getWorkflowId(), workflowId); + + List created = getExecutionDAO().createTasks(workflow.getTasks()); + assertEquals(workflow.getTasks().size(), created.size()); + + WorkflowModel workflowWithTasks = + getExecutionDAO().getWorkflow(workflow.getWorkflowId(), true); + assertEquals(workflowId, workflowWithTasks.getWorkflowId()); + assertEquals(numTasks, workflowWithTasks.getTasks().size()); + + WorkflowModel found = getExecutionDAO().getWorkflow(workflowId, false); + assertTrue(found.getTasks().isEmpty()); + + workflow.getTasks().clear(); + assertEquals(workflow, found); + + workflow.getInput().put("updated", true); + getExecutionDAO().updateWorkflow(workflow); + found = getExecutionDAO().getWorkflow(workflowId); + assertNotNull(found); + assertTrue(found.getInput().containsKey("updated")); + assertEquals(true, found.getInput().get("updated")); + + List running = + getExecutionDAO() + .getRunningWorkflowIds( + workflow.getWorkflowName(), workflow.getWorkflowVersion()); + assertNotNull(running); + assertTrue(running.isEmpty()); + + workflow.setStatus(WorkflowModel.Status.RUNNING); + getExecutionDAO().updateWorkflow(workflow); + + running = + getExecutionDAO() + .getRunningWorkflowIds( + workflow.getWorkflowName(), workflow.getWorkflowVersion()); + assertNotNull(running); + assertEquals(1, running.size()); + assertEquals(workflow.getWorkflowId(), running.get(0)); + + List pending = + getExecutionDAO() + .getPendingWorkflowsByType( + workflow.getWorkflowName(), workflow.getWorkflowVersion()); + assertNotNull(pending); + assertEquals(1, pending.size()); + assertEquals(3, pending.get(0).getTasks().size()); + pending.get(0).getTasks().clear(); + assertEquals(workflow, 
pending.get(0)); + + workflow.setStatus(WorkflowModel.Status.COMPLETED); + getExecutionDAO().updateWorkflow(workflow); + running = + getExecutionDAO() + .getRunningWorkflowIds( + workflow.getWorkflowName(), workflow.getWorkflowVersion()); + assertNotNull(running); + assertTrue(running.isEmpty()); + + List bytime = + getExecutionDAO() + .getWorkflowsByType( + workflow.getWorkflowName(), + System.currentTimeMillis(), + System.currentTimeMillis() + 100); + assertNotNull(bytime); + assertTrue(bytime.isEmpty()); + + bytime = + getExecutionDAO() + .getWorkflowsByType( + workflow.getWorkflowName(), + workflow.getCreateTime() - 10, + workflow.getCreateTime() + 10); + assertNotNull(bytime); + assertEquals(1, bytime.size()); + } + + protected WorkflowModel createTestWorkflow() { + WorkflowDef def = new WorkflowDef(); + def.setName("Junit Workflow"); + def.setVersion(3); + def.setSchemaVersion(2); + + WorkflowModel workflow = new WorkflowModel(); + workflow.setWorkflowDefinition(def); + workflow.setCorrelationId("correlationX"); + workflow.setCreatedBy("junit_tester"); + workflow.setEndTime(200L); + + Map input = new HashMap<>(); + input.put("param1", "param1 value"); + input.put("param2", 100); + workflow.setInput(input); + + Map output = new HashMap<>(); + output.put("ouput1", "output 1 value"); + output.put("op2", 300); + workflow.setOutput(output); + + workflow.setOwnerApp("workflow"); + workflow.setParentWorkflowId("parentWorkflowId"); + workflow.setParentWorkflowTaskId("parentWFTaskId"); + workflow.setReasonForIncompletion("missing recipe"); + workflow.setReRunFromWorkflowId("re-run from id1"); + workflow.setCreateTime(90L); + workflow.setStatus(WorkflowModel.Status.FAILED); + workflow.setWorkflowId(UUID.randomUUID().toString()); + + List tasks = new LinkedList<>(); + + TaskModel task = new TaskModel(); + task.setScheduledTime(1L); + task.setSeq(1); + task.setTaskId(UUID.randomUUID().toString()); + task.setReferenceTaskName("t1"); + task.setWorkflowInstanceId(workflow.getWorkflowId()); + task.setTaskDefName("task1"); + + TaskModel task2 = new TaskModel(); + task2.setScheduledTime(2L); + task2.setSeq(2); + task2.setTaskId(UUID.randomUUID().toString()); + task2.setReferenceTaskName("t2"); + task2.setWorkflowInstanceId(workflow.getWorkflowId()); + task2.setTaskDefName("task2"); + + TaskModel task3 = new TaskModel(); + task3.setScheduledTime(2L); + task3.setSeq(3); + task3.setTaskId(UUID.randomUUID().toString()); + task3.setReferenceTaskName("t3"); + task3.setWorkflowInstanceId(workflow.getWorkflowId()); + task3.setTaskDefName("task3"); + + tasks.add(task); + tasks.add(task2); + tasks.add(task3); + + workflow.setTasks(tasks); + + workflow.setUpdatedBy("junit_tester"); + workflow.setUpdatedTime(800L); + + return workflow; + } + + protected List generateWorkflows(WorkflowModel base, int count) { + List workflowIds = new ArrayList<>(); + for (int i = 0; i < count; i++) { + String workflowId = UUID.randomUUID().toString(); + base.setWorkflowId(workflowId); + base.setCorrelationId("corr001"); + base.setStatus(WorkflowModel.Status.RUNNING); + getExecutionDAO().createWorkflow(base); + workflowIds.add(workflowId); + } + return workflowIds; + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/dao/PollDataDAOTest.java b/persistence/src/test/java/com/netflix/conductor/dao/PollDataDAOTest.java new file mode 100644 index 0000000..5906ccc --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/dao/PollDataDAOTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.dao; + +import java.util.List; + +import org.junit.Ignore; +import org.junit.Test; + +import com.netflix.conductor.common.metadata.tasks.PollData; + +import static org.junit.Assert.*; + +public abstract class PollDataDAOTest { + + protected abstract PollDataDAO getPollDataDAO(); + + // TODO review this test + @Ignore + @Test + public void testPollData() { + getPollDataDAO().updateLastPollData("taskDef", null, "workerId1"); + PollData pollData = getPollDataDAO().getPollData("taskDef", null); + assertNotNull(pollData); + assertTrue(pollData.getLastPollTime() > 0); + assertEquals(pollData.getQueueName(), "taskDef"); + assertNull(pollData.getDomain()); + assertEquals(pollData.getWorkerId(), "workerId1"); + + getPollDataDAO().updateLastPollData("taskDef", "domain1", "workerId1"); + pollData = getPollDataDAO().getPollData("taskDef", "domain1"); + assertNotNull(pollData); + assertTrue(pollData.getLastPollTime() > 0); + assertEquals(pollData.getQueueName(), "taskDef"); + assertEquals(pollData.getDomain(), "domain1"); + assertEquals(pollData.getWorkerId(), "workerId1"); + + List pData = getPollDataDAO().getPollData("taskDef"); + assertEquals(pData.size(), 2); + + pollData = getPollDataDAO().getPollData("taskDef", "domain2"); + assertNull(pollData); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/dao/BaseDynoDAOTest.java b/persistence/src/test/java/com/netflix/conductor/redis/dao/BaseDynoDAOTest.java new file mode 100644 index 0000000..af699c4 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/dao/BaseDynoDAOTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; + +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class BaseDynoDAOTest { + + @Mock private OrkesJedisProxy jedisProxy; + + @Mock private ObjectMapper objectMapper; + + private RedisProperties properties; + private ConductorProperties conductorProperties; + + private BaseDynoDAO baseDynoDAO; + + @Before + public void setUp() { + properties = mock(RedisProperties.class); + conductorProperties = mock(ConductorProperties.class); + this.baseDynoDAO = + new BaseDynoDAO(jedisProxy, objectMapper, conductorProperties, properties); + } + + @Test + public void testNsKey() { + assertEquals("", baseDynoDAO.nsKey()); + + String[] keys = {"key1", "key2"}; + assertEquals("key1.key2", baseDynoDAO.nsKey(keys)); + + when(properties.getWorkflowNamespacePrefix()).thenReturn("test"); + assertEquals("test", baseDynoDAO.nsKey()); + + assertEquals("test.key1.key2", baseDynoDAO.nsKey(keys)); + + when(conductorProperties.getStack()).thenReturn("stack"); + assertEquals("test.stack.key1.key2", baseDynoDAO.nsKey(keys)); + } +}
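The assertions above pin down BaseDynoDAO's key-namespacing contract: an optional workflow namespace prefix, then an optional stack, then the key parts, all joined with dots. A hypothetical re-implementation of just that contract (not the actual BaseDynoDAO code) could look like:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class NsKeySketch {

    static String nsKey(String namespacePrefix, String stack, String... keys) {
        List<String> parts = new ArrayList<>();
        if (namespacePrefix != null && !namespacePrefix.isEmpty()) {
            parts.add(namespacePrefix);
        }
        if (stack != null && !stack.isEmpty()) {
            parts.add(stack);
        }
        parts.addAll(Arrays.asList(keys));
        return String.join(".", parts);
    }

    public static void main(String[] args) {
        System.out.println(nsKey(null, null));                      // ""
        System.out.println(nsKey("test", null, "key1", "key2"));    // test.key1.key2
        System.out.println(nsKey("test", "stack", "key1", "key2")); // test.stack.key1.key2
    }
}

diff --git a/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAOTest.java b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAOTest.java new file mode 100644 index 0000000..51f7535 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAOTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2021 Orkes, Inc. + *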

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.time.Duration; +import java.util.List; +import java.util.UUID; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringRunner; + +import com.netflix.conductor.common.config.TestObjectMapperConfiguration; +import com.netflix.conductor.common.metadata.events.EventHandler; +import com.netflix.conductor.common.metadata.events.EventHandler.Action; +import com.netflix.conductor.common.metadata.events.EventHandler.Action.Type; +import com.netflix.conductor.common.metadata.events.EventHandler.StartWorkflow; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.JedisMock; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import redis.clients.jedis.JedisPool; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) +@RunWith(SpringRunner.class) +public class RedisEventHandlerDAOTest { + + private RedisEventHandlerDAO redisEventHandlerDAO; + + @Autowired private ObjectMapper objectMapper; + + @Before + public void init() { + ConductorProperties conductorProperties = mock(ConductorProperties.class); + RedisProperties properties = mock(RedisProperties.class); + when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60)); + JedisPool jedisPool = mock(JedisPool.class); + when(jedisPool.getResource()).thenReturn(new JedisMock()); + OrkesJedisProxy orkesJedisProxy = new OrkesJedisProxy(jedisPool); + + redisEventHandlerDAO = + new RedisEventHandlerDAO( + orkesJedisProxy, objectMapper, conductorProperties, properties); + } + + @Test + public void testEventHandlers() { + String event1 = "SQS::arn:account090:sqstest1"; + String event2 = "SQS::arn:account090:sqstest2"; + + EventHandler eventHandler = new EventHandler(); + eventHandler.setName(UUID.randomUUID().toString()); + eventHandler.setActive(false); + Action action = new Action(); + action.setAction(Type.start_workflow); + action.setStart_workflow(new StartWorkflow()); + action.getStart_workflow().setName("test_workflow"); + eventHandler.getActions().add(action); + eventHandler.setEvent(event1); + + redisEventHandlerDAO.addEventHandler(eventHandler); + List allEventHandlers = redisEventHandlerDAO.getAllEventHandlers(); + assertNotNull(allEventHandlers); + assertEquals(1, allEventHandlers.size()); + assertEquals(eventHandler.getName(), allEventHandlers.get(0).getName()); + assertEquals(eventHandler.getEvent(), allEventHandlers.get(0).getEvent()); + + List byEvents = redisEventHandlerDAO.getEventHandlersForEvent(event1, true); + assertNotNull(byEvents); + assertEquals(0, byEvents.size()); // event is marked as in-active + + eventHandler.setActive(true); + eventHandler.setEvent(event2); + 
redisEventHandlerDAO.updateEventHandler(eventHandler); + + allEventHandlers = redisEventHandlerDAO.getAllEventHandlers(); + assertNotNull(allEventHandlers); + assertEquals(1, allEventHandlers.size()); + + byEvents = redisEventHandlerDAO.getEventHandlersForEvent(event1, true); + assertNotNull(byEvents); + assertEquals(0, byEvents.size()); + + byEvents = redisEventHandlerDAO.getEventHandlersForEvent(event2, true); + assertNotNull(byEvents); + assertEquals(1, byEvents.size()); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisExecutionDAOTest.java b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisExecutionDAOTest.java new file mode 100644 index 0000000..e34224d --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisExecutionDAOTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.time.Duration; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringRunner; + +import com.netflix.conductor.common.config.TestObjectMapperConfiguration; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.ExecutionDAO; +import com.netflix.conductor.dao.ExecutionDAOTest; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.JedisMock; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import redis.clients.jedis.JedisPool; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) +@RunWith(SpringRunner.class) +public class RedisExecutionDAOTest extends ExecutionDAOTest { + + private RedisExecutionDAO executionDAO; + + @Autowired private ObjectMapper objectMapper; + + @Before + public void init() { + ConductorProperties conductorProperties = mock(ConductorProperties.class); + RedisProperties properties = mock(RedisProperties.class); + when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60)); + JedisPool jedisPool = mock(JedisPool.class); + when(jedisPool.getResource()).thenReturn(new JedisMock()); + OrkesJedisProxy orkesJedisProxy = new OrkesJedisProxy(jedisPool); + + executionDAO = + new RedisExecutionDAO( + orkesJedisProxy, objectMapper, conductorProperties, properties); + } + + @Test + public void testCorrelateTaskToWorkflowInDS() { + String workflowId = "workflowId"; + String taskId = "taskId1"; + String taskDefName = "task1"; + + TaskDef def = new TaskDef(); + def.setName("task1"); + def.setConcurrentExecLimit(1); + + TaskModel task = new TaskModel(); + task.setTaskId(taskId); + task.setWorkflowInstanceId(workflowId); + task.setReferenceTaskName("ref_name"); + task.setTaskDefName(taskDefName); + task.setTaskType(taskDefName); + task.setStatus(TaskModel.Status.IN_PROGRESS); + List tasks = executionDAO.createTasks(Collections.singletonList(task)); + assertNotNull(tasks); + assertEquals(1, tasks.size()); + + executionDAO.correlateTaskToWorkflowInDS(taskId, workflowId); + tasks = executionDAO.getTasksForWorkflow(workflowId); + assertNotNull(tasks); + assertEquals(workflowId, tasks.get(0).getWorkflowInstanceId()); + assertEquals(taskId, tasks.get(0).getTaskId()); + } + + @Override + protected ExecutionDAO getExecutionDAO() { + return executionDAO; + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisMetadataDAOTest.java b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisMetadataDAOTest.java new file mode 100644 index 0000000..e7fb871 
--- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisMetadataDAOTest.java @@ -0,0 +1,231 @@ +/* + * Copyright 2021 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.time.Duration; +import java.util.*; +import java.util.stream.Collectors; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringRunner; + +import com.netflix.conductor.common.config.TestObjectMapperConfiguration; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.common.metadata.tasks.TaskDef.RetryLogic; +import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy; +import com.netflix.conductor.common.metadata.workflow.WorkflowDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.core.exception.ConflictException; +import com.netflix.conductor.core.exception.NotFoundException; +import com.netflix.conductor.dao.EventHandlerDAO; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.JedisMock; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import redis.clients.jedis.JedisPool; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) +@RunWith(SpringRunner.class) +public class RedisMetadataDAOTest { + + private RedisMetadataDAO redisMetadataDAO; + + @Autowired private ObjectMapper objectMapper; + + @Before + public void init() { + ConductorProperties conductorProperties = mock(ConductorProperties.class); + RedisProperties properties = mock(RedisProperties.class); + when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60)); + JedisPool jedisPool = mock(JedisPool.class); + when(jedisPool.getResource()).thenReturn(new JedisMock()); + OrkesJedisProxy orkesJedisProxy = new OrkesJedisProxy(jedisPool); + EventHandlerDAO eventHandlerDAO = + new RedisEventHandlerDAO( + orkesJedisProxy, objectMapper, conductorProperties, properties); + redisMetadataDAO = + new OrkesMetadataDAO( + orkesJedisProxy, + objectMapper, + conductorProperties, + properties, + eventHandlerDAO); + } + + @Test(expected = ConflictException.class) + public void testDup() { + WorkflowDef def = new WorkflowDef(); + def.setName("testDup"); + def.setVersion(1); + + redisMetadataDAO.createWorkflowDef(def); + redisMetadataDAO.createWorkflowDef(def); + } + + @Test + public void testWorkflowDefOperations() { + + WorkflowDef def = new WorkflowDef(); + def.setName("test"); + def.setVersion(1); + def.setDescription("description"); + def.setCreatedBy("unit_test"); + def.setCreateTime(1L); + def.setOwnerApp("ownerApp"); + def.setUpdatedBy("unit_test2"); + def.setUpdateTime(2L); + + redisMetadataDAO.createWorkflowDef(def); + + List<WorkflowDef> all = redisMetadataDAO.getAllWorkflowDefs(); + assertNotNull(all); + assertEquals(1, all.size()); + assertEquals("test", all.get(0).getName()); + assertEquals(1, all.get(0).getVersion()); + + WorkflowDef found = redisMetadataDAO.getWorkflowDef("test", 1).get(); +
assertEquals(def, found); + + def.setVersion(2); + redisMetadataDAO.createWorkflowDef(def); + + all = redisMetadataDAO.getAllWorkflowDefs(); + assertNotNull(all); + assertEquals(2, all.size()); + assertEquals("test", all.get(0).getName()); + assertEquals(1, all.get(0).getVersion()); + + found = redisMetadataDAO.getLatestWorkflowDef(def.getName()).get(); + assertEquals(def.getName(), found.getName()); + assertEquals(def.getVersion(), found.getVersion()); + assertEquals(2, found.getVersion()); + + all = redisMetadataDAO.getAllVersions(def.getName()); + assertNotNull(all); + assertEquals(2, all.size()); + assertEquals("test", all.get(0).getName()); + assertEquals("test", all.get(1).getName()); + assertEquals(1, all.get(0).getVersion()); + assertEquals(2, all.get(1).getVersion()); + + def.setDescription("updated"); + redisMetadataDAO.updateWorkflowDef(def); + found = redisMetadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get(); + assertEquals(def.getDescription(), found.getDescription()); + + List<String> allnames = redisMetadataDAO.findAll(); + assertNotNull(allnames); + assertEquals(1, allnames.size()); + assertEquals(def.getName(), allnames.get(0)); + + redisMetadataDAO.removeWorkflowDef("test", 1); + Optional<WorkflowDef> deleted = redisMetadataDAO.getWorkflowDef("test", 1); + assertFalse(deleted.isPresent()); + redisMetadataDAO.removeWorkflowDef("test", 2); + Optional<WorkflowDef> latestDef = redisMetadataDAO.getLatestWorkflowDef("test"); + assertFalse(latestDef.isPresent()); + + WorkflowDef[] workflowDefsArray = new WorkflowDef[3]; + for (int i = 1; i <= 3; i++) { + workflowDefsArray[i - 1] = new WorkflowDef(); + workflowDefsArray[i - 1].setName("test"); + workflowDefsArray[i - 1].setVersion(i); + workflowDefsArray[i - 1].setDescription("description"); + workflowDefsArray[i - 1].setCreatedBy("unit_test"); + workflowDefsArray[i - 1].setCreateTime(1L); + workflowDefsArray[i - 1].setOwnerApp("ownerApp"); + workflowDefsArray[i - 1].setUpdatedBy("unit_test2"); + workflowDefsArray[i - 1].setUpdateTime(2L); + redisMetadataDAO.createWorkflowDef(workflowDefsArray[i - 1]); + } + redisMetadataDAO.removeWorkflowDef("test", 1); + redisMetadataDAO.removeWorkflowDef("test", 2); + WorkflowDef workflow = redisMetadataDAO.getLatestWorkflowDef("test").get(); + assertEquals(workflow.getVersion(), 3); + } + + @Test(expected = NotFoundException.class) + public void removeInvalidWorkflowDef() { + redisMetadataDAO.removeWorkflowDef("hello", 1); + } + + @Test + public void testTaskDefOperations() { + + TaskDef def = new TaskDef("taskA"); + def.setDescription("description"); + def.setCreatedBy("unit_test"); + def.setCreateTime(1L); + def.setInputKeys(Arrays.asList("a", "b", "c")); + def.setOutputKeys(Arrays.asList("01", "o2")); + def.setOwnerApp("ownerApp"); + def.setRetryCount(3); + def.setRetryDelaySeconds(100); + def.setRetryLogic(RetryLogic.FIXED); + def.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY); + def.setUpdatedBy("unit_test2"); + def.setUpdateTime(2L); + def.setRateLimitPerFrequency(50); + def.setRateLimitFrequencyInSeconds(1); + + redisMetadataDAO.createTaskDef(def); + + TaskDef found = redisMetadataDAO.getTaskDef(def.getName()); + assertEquals(def, found); + + def.setDescription("updated description"); + redisMetadataDAO.updateTaskDef(def); + found = redisMetadataDAO.getTaskDef(def.getName()); + assertEquals(def, found); + assertEquals("updated description", found.getDescription()); + + for (int i = 0; i < 9; i++) { + TaskDef tdf = new TaskDef("taskA" + i); + redisMetadataDAO.createTaskDef(tdf); + } + + List<TaskDef> all =
redisMetadataDAO.getAllTaskDefs(); + assertNotNull(all); + assertEquals(10, all.size()); + Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet()); + assertEquals(10, allnames.size()); + List<String> sorted = allnames.stream().sorted().collect(Collectors.toList()); + assertEquals(def.getName(), sorted.get(0)); + + for (int i = 0; i < 9; i++) { + assertEquals(def.getName() + i, sorted.get(i + 1)); + } + + for (int i = 0; i < 9; i++) { + redisMetadataDAO.removeTaskDef(def.getName() + i); + } + all = redisMetadataDAO.getAllTaskDefs(); + assertNotNull(all); + assertEquals(1, all.size()); + assertEquals(def.getName(), all.get(0).getName()); + } + + @Test(expected = NotFoundException.class) + public void testRemoveTaskDef() { + redisMetadataDAO.removeTaskDef("test" + UUID.randomUUID().toString()); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisPollDataDAOTest.java b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisPollDataDAOTest.java new file mode 100644 index 0000000..0324f9d --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisPollDataDAOTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2021 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.time.Duration; + +import org.junit.Before; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringRunner; + +import com.netflix.conductor.common.config.TestObjectMapperConfiguration; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.PollDataDAO; +import com.netflix.conductor.dao.PollDataDAOTest; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.JedisMock; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import redis.clients.jedis.JedisPool; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) +@RunWith(SpringRunner.class) +public class RedisPollDataDAOTest extends PollDataDAOTest { + + private PollDataDAO redisPollDataDAO; + + @Autowired private ObjectMapper objectMapper; + + @Before + public void init() { + ConductorProperties conductorProperties = mock(ConductorProperties.class); + RedisProperties properties = mock(RedisProperties.class); + when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60)); + JedisPool jedisPool = mock(JedisPool.class); + when(jedisPool.getResource()).thenReturn(new JedisMock()); + OrkesJedisProxy orkesJedisProxy = new OrkesJedisProxy(jedisPool); + + redisPollDataDAO = + new RedisPollDataDAO( + orkesJedisProxy, objectMapper, conductorProperties, properties); + } + + @Override + protected PollDataDAO getPollDataDAO() { + return redisPollDataDAO; + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisRateLimitDAOTest.java b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisRateLimitDAOTest.java new file mode 100644 index 0000000..a8fec64 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/dao/RedisRateLimitDAOTest.java @@ -0,0 +1,94 @@ +/* + * Copyright 2021 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.dao; + +import java.time.Duration; +import java.util.UUID; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringRunner; + +import com.netflix.conductor.common.config.TestObjectMapperConfiguration; +import com.netflix.conductor.common.metadata.tasks.TaskDef; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.jedis.JedisMock; +import com.netflix.conductor.redis.jedis.OrkesJedisProxy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import redis.clients.jedis.JedisPool; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) +@RunWith(SpringRunner.class) +public class RedisRateLimitDAOTest { + + private RedisRateLimitingDAO rateLimitingDao; + + @Autowired private ObjectMapper objectMapper; + + @Before + public void init() { + ConductorProperties conductorProperties = mock(ConductorProperties.class); + RedisProperties properties = mock(RedisProperties.class); + when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60)); + JedisPool jedisPool = mock(JedisPool.class); + when(jedisPool.getResource()).thenReturn(new JedisMock()); + OrkesJedisProxy orkesJedisProxy = new OrkesJedisProxy(jedisPool); + + rateLimitingDao = + new RedisRateLimitingDAO( + orkesJedisProxy, objectMapper, conductorProperties, properties); + } + + @Test + public void testExceedsRateLimitWhenNoRateLimitSet() { + TaskDef taskDef = new TaskDef("TestTaskDefinition"); + TaskModel task = new TaskModel(); + task.setTaskId(UUID.randomUUID().toString()); + task.setTaskDefName(taskDef.getName()); + assertFalse(rateLimitingDao.exceedsRateLimitPerFrequency(task, taskDef)); + } + + @Test + public void testExceedsRateLimitWithinLimit() { + TaskDef taskDef = new TaskDef("TestTaskDefinition"); + taskDef.setRateLimitFrequencyInSeconds(60); + taskDef.setRateLimitPerFrequency(20); + TaskModel task = new TaskModel(); + task.setTaskId(UUID.randomUUID().toString()); + task.setTaskDefName(taskDef.getName()); + assertFalse(rateLimitingDao.exceedsRateLimitPerFrequency(task, taskDef)); + } + + @Test + public void testExceedsRateLimitOutOfLimit() { + TaskDef taskDef = new TaskDef("TestTaskDefinition"); + taskDef.setRateLimitFrequencyInSeconds(60); + taskDef.setRateLimitPerFrequency(1); + TaskModel task = new TaskModel(); + task.setTaskId(UUID.randomUUID().toString()); + task.setTaskDefName(taskDef.getName()); + assertFalse(rateLimitingDao.exceedsRateLimitPerFrequency(task, taskDef)); + assertTrue(rateLimitingDao.exceedsRateLimitPerFrequency(task, taskDef)); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/jedis/ConfigurationHostSupplierTest.java 
b/persistence/src/test/java/com/netflix/conductor/redis/jedis/ConfigurationHostSupplierTest.java new file mode 100644 index 0000000..e7c8126 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/jedis/ConfigurationHostSupplierTest.java @@ -0,0 +1,89 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.List; + +import org.junit.Before; +import org.junit.Test; + +import com.netflix.conductor.redis.config.RedisProperties; +import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier; +import com.netflix.dyno.connectionpool.Host; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ConfigurationHostSupplierTest { + + private RedisProperties properties; + + private ConfigurationHostSupplier configurationHostSupplier; + + @Before + public void setUp() { + properties = mock(RedisProperties.class); + configurationHostSupplier = new ConfigurationHostSupplier(properties); + } + + @Test + public void getHost() { + when(properties.getHosts()).thenReturn("dyno1:8102:us-east-1c"); + + List<Host> hosts = configurationHostSupplier.getHosts(); + assertEquals(1, hosts.size()); + + Host firstHost = hosts.get(0); + assertEquals("dyno1", firstHost.getHostName()); + assertEquals(8102, firstHost.getPort()); + assertEquals("us-east-1c", firstHost.getRack()); + assertTrue(firstHost.isUp()); + } + + @Test + public void getMultipleHosts() { + when(properties.getHosts()).thenReturn("dyno1:8102:us-east-1c;dyno2:8103:us-east-1c"); + + List<Host> hosts = configurationHostSupplier.getHosts(); + assertEquals(2, hosts.size()); + + Host firstHost = hosts.get(0); + assertEquals("dyno1", firstHost.getHostName()); + assertEquals(8102, firstHost.getPort()); + assertEquals("us-east-1c", firstHost.getRack()); + assertTrue(firstHost.isUp()); + + Host secondHost = hosts.get(1); + assertEquals("dyno2", secondHost.getHostName()); + assertEquals(8103, secondHost.getPort()); + assertEquals("us-east-1c", secondHost.getRack()); + assertTrue(secondHost.isUp()); + } + + @Test + public void getAuthenticatedHost() { + when(properties.getHosts()).thenReturn("redis1:6432:us-east-1c:password"); + + List<Host> hosts = configurationHostSupplier.getHosts(); + assertEquals(1, hosts.size()); + + Host firstHost = hosts.get(0); + assertEquals("redis1", firstHost.getHostName()); + assertEquals(6432, firstHost.getPort()); + assertEquals("us-east-1c", firstHost.getRack()); + assertEquals("password", firstHost.getPassword()); + assertTrue(firstHost.isUp()); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisClusterTest.java b/persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisClusterTest.java new file mode 100644 index 0000000..8467113 --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisClusterTest.java @@ -0,0 +1,614 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.AbstractMap; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; + +import org.junit.Test; +import org.mockito.Mockito; + +import redis.clients.jedis.GeoUnit; +import redis.clients.jedis.ListPosition; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.ScanResult; +import redis.clients.jedis.SortingParams; +import redis.clients.jedis.params.GeoRadiusParam; +import redis.clients.jedis.params.SetParams; +import redis.clients.jedis.params.ZAddParams; +import redis.clients.jedis.params.ZIncrByParams; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class JedisClusterTest { + + private final redis.clients.jedis.JedisCluster mockCluster = + mock(redis.clients.jedis.JedisCluster.class); + private final JedisCluster jedisCluster = new JedisCluster(mockCluster); + + @Test + public void testSet() { + jedisCluster.set("key", "value"); + jedisCluster.set("key", "value", SetParams.setParams()); + } + + @Test + public void testGet() { + jedisCluster.get("key"); + } + + @Test + public void testExists() { + jedisCluster.exists("key"); + } + + @Test + public void testPersist() { + jedisCluster.persist("key"); + } + + @Test + public void testType() { + jedisCluster.type("key"); + } + + @Test + public void testExpire() { + jedisCluster.expire("key", 1337); + } + + @Test + public void testPexpire() { + jedisCluster.pexpire("key", 1337); + } + + @Test + public void testExpireAt() { + jedisCluster.expireAt("key", 1337); + } + + @Test + public void testPexpireAt() { + jedisCluster.pexpireAt("key", 1337); + } + + @Test + public void testTtl() { + jedisCluster.ttl("key"); + } + + @Test + public void testPttl() { + jedisCluster.pttl("key"); + } + + @Test + public void testSetbit() { + jedisCluster.setbit("key", 1337, "value"); + jedisCluster.setbit("key", 1337, true); + } + + @Test + public void testGetbit() { + jedisCluster.getbit("key", 1337); + } + + @Test + public void testSetrange() { + jedisCluster.setrange("key", 1337, "value"); + } + + @Test + public void testGetrange() { + jedisCluster.getrange("key", 1337, 1338); + } + + @Test + public void testGetSet() { + jedisCluster.getSet("key", "value"); + } + + @Test + public void testSetnx() { + jedisCluster.setnx("test", "value"); + } + + @Test + public void testSetex() { + jedisCluster.setex("key", 1337, "value"); + } + + @Test + public void testPsetex() { + jedisCluster.psetex("key", 1337, "value"); + } + + @Test + public void testDecrBy() { + jedisCluster.decrBy("key", 1337); + } + + @Test + public void testDecr() { + jedisCluster.decr("key"); + } + + @Test + public void testIncrBy() { + jedisCluster.incrBy("key", 1337); + } + + @Test + public void testIncrByFloat() { + jedisCluster.incrByFloat("key", 1337); + } + + @Test + public void testIncr() { + jedisCluster.incr("key"); + } + + @Test + public void testAppend() { + jedisCluster.append("key", "value"); + } + + @Test + public void testSubstr() { + jedisCluster.substr("key", 1337, 1338); + } + + @Test + public void testHset() { + jedisCluster.hset("key", "field", 
"value"); + } + + @Test + public void testHget() { + jedisCluster.hget("key", "field"); + } + + @Test + public void testHsetnx() { + jedisCluster.hsetnx("key", "field", "value"); + } + + @Test + public void testHmset() { + jedisCluster.hmset("key", new HashMap<>()); + } + + @Test + public void testHmget() { + jedisCluster.hmget("key", "fields"); + } + + @Test + public void testHincrBy() { + jedisCluster.hincrBy("key", "field", 1337); + } + + @Test + public void testHincrByFloat() { + jedisCluster.hincrByFloat("key", "field", 1337); + } + + @Test + public void testHexists() { + jedisCluster.hexists("key", "field"); + } + + @Test + public void testHdel() { + jedisCluster.hdel("key", "field"); + } + + @Test + public void testHlen() { + jedisCluster.hlen("key"); + } + + @Test + public void testHkeys() { + jedisCluster.hkeys("key"); + } + + @Test + public void testHvals() { + jedisCluster.hvals("key"); + } + + @Test + public void testGgetAll() { + jedisCluster.hgetAll("key"); + } + + @Test + public void testRpush() { + jedisCluster.rpush("key", "string"); + } + + @Test + public void testLpush() { + jedisCluster.lpush("key", "string"); + } + + @Test + public void testLlen() { + jedisCluster.llen("key"); + } + + @Test + public void testLrange() { + jedisCluster.lrange("key", 1337, 1338); + } + + @Test + public void testLtrim() { + jedisCluster.ltrim("key", 1337, 1338); + } + + @Test + public void testLindex() { + jedisCluster.lindex("key", 1337); + } + + @Test + public void testLset() { + jedisCluster.lset("key", 1337, "value"); + } + + @Test + public void testLrem() { + jedisCluster.lrem("key", 1337, "value"); + } + + @Test + public void testLpop() { + jedisCluster.lpop("key"); + } + + @Test + public void testRpop() { + jedisCluster.rpop("key"); + } + + @Test + public void testSadd() { + jedisCluster.sadd("key", "member"); + } + + @Test + public void testSmembers() { + jedisCluster.smembers("key"); + } + + @Test + public void testSrem() { + jedisCluster.srem("key", "member"); + } + + @Test + public void testSpop() { + jedisCluster.spop("key"); + jedisCluster.spop("key", 1337); + } + + @Test + public void testScard() { + jedisCluster.scard("key"); + } + + @Test + public void testSismember() { + jedisCluster.sismember("key", "member"); + } + + @Test + public void testSrandmember() { + jedisCluster.srandmember("key"); + jedisCluster.srandmember("key", 1337); + } + + @Test + public void testStrlen() { + jedisCluster.strlen("key"); + } + + @Test + public void testZadd() { + jedisCluster.zadd("key", new HashMap<>()); + jedisCluster.zadd("key", new HashMap<>(), ZAddParams.zAddParams()); + jedisCluster.zadd("key", 1337, "members"); + jedisCluster.zadd("key", 1337, "members", ZAddParams.zAddParams()); + } + + @Test + public void testZrange() { + jedisCluster.zrange("key", 1337, 1338); + } + + @Test + public void testZrem() { + jedisCluster.zrem("key", "member"); + } + + @Test + public void testZincrby() { + jedisCluster.zincrby("key", 1337, "member"); + jedisCluster.zincrby("key", 1337, "member", ZIncrByParams.zIncrByParams()); + } + + @Test + public void testZrank() { + jedisCluster.zrank("key", "member"); + } + + @Test + public void testZrevrank() { + jedisCluster.zrevrank("key", "member"); + } + + @Test + public void testZrevrange() { + jedisCluster.zrevrange("key", 1337, 1338); + } + + @Test + public void testZrangeWithScores() { + jedisCluster.zrangeWithScores("key", 1337, 1338); + } + + @Test + public void testZrevrangeWithScores() { + jedisCluster.zrevrangeWithScores("key", 1337, 1338); + } + + 
@Test + public void testZcard() { + jedisCluster.zcard("key"); + } + + @Test + public void testZscore() { + jedisCluster.zscore("key", "member"); + } + + @Test + public void testSort() { + jedisCluster.sort("key"); + jedisCluster.sort("key", new SortingParams()); + } + + @Test + public void testZcount() { + jedisCluster.zcount("key", "min", "max"); + jedisCluster.zcount("key", 1337, 1338); + } + + @Test + public void testZrangeByScore() { + jedisCluster.zrangeByScore("key", "min", "max"); + jedisCluster.zrangeByScore("key", 1337, 1338); + jedisCluster.zrangeByScore("key", "min", "max", 1337, 1338); + jedisCluster.zrangeByScore("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZrevrangeByScore() { + jedisCluster.zrevrangeByScore("key", "max", "min"); + jedisCluster.zrevrangeByScore("key", 1337, 1338); + jedisCluster.zrevrangeByScore("key", "max", "min", 1337, 1338); + jedisCluster.zrevrangeByScore("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZrangeByScoreWithScores() { + jedisCluster.zrangeByScoreWithScores("key", "min", "max"); + jedisCluster.zrangeByScoreWithScores("key", "min", "max", 1337, 1338); + jedisCluster.zrangeByScoreWithScores("key", 1337, 1338); + jedisCluster.zrangeByScoreWithScores("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZrevrangeByScoreWithScores() { + jedisCluster.zrevrangeByScoreWithScores("key", "max", "min"); + jedisCluster.zrevrangeByScoreWithScores("key", "max", "min", 1337, 1338); + jedisCluster.zrevrangeByScoreWithScores("key", 1337, 1338); + jedisCluster.zrevrangeByScoreWithScores("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZremrangeByRank() { + jedisCluster.zremrangeByRank("key", 1337, 1338); + } + + @Test + public void testZremrangeByScore() { + jedisCluster.zremrangeByScore("key", "start", "end"); + jedisCluster.zremrangeByScore("key", 1337, 1338); + } + + @Test + public void testZlexcount() { + jedisCluster.zlexcount("key", "min", "max"); + } + + @Test + public void testZrangeByLex() { + jedisCluster.zrangeByLex("key", "min", "max"); + jedisCluster.zrangeByLex("key", "min", "max", 1337, 1338); + } + + @Test + public void testZrevrangeByLex() { + jedisCluster.zrevrangeByLex("key", "max", "min"); + jedisCluster.zrevrangeByLex("key", "max", "min", 1337, 1338); + } + + @Test + public void testZremrangeByLex() { + jedisCluster.zremrangeByLex("key", "min", "max"); + } + + @Test + public void testLinsert() { + jedisCluster.linsert("key", ListPosition.AFTER, "pivot", "value"); + } + + @Test + public void testLpushx() { + jedisCluster.lpushx("key", "string"); + } + + @Test + public void testRpushx() { + jedisCluster.rpushx("key", "string"); + } + + @Test + public void testBlpop() { + jedisCluster.blpop(1337, "arg"); + } + + @Test + public void testBrpop() { + jedisCluster.brpop(1337, "arg"); + } + + @Test + public void testDel() { + jedisCluster.del("key"); + } + + @Test + public void testEcho() { + jedisCluster.echo("string"); + } + + @Test(expected = UnsupportedOperationException.class) + public void testMove() { + jedisCluster.move("key", 1337); + } + + @Test + public void testBitcount() { + jedisCluster.bitcount("key"); + jedisCluster.bitcount("key", 1337, 1338); + } + + @Test(expected = UnsupportedOperationException.class) + public void testBitpos() { + jedisCluster.bitpos("key", true); + } + + @Test + public void testHscan() { + jedisCluster.hscan("key", "cursor"); + + ScanResult<Entry<byte[], byte[]>> scanResult = + new ScanResult<>( + "cursor".getBytes(), + Arrays.asList( + new AbstractMap.SimpleEntry<>("key1".getBytes(), "val1".getBytes()), + new AbstractMap.SimpleEntry<>( + "key2".getBytes(), "val2".getBytes()))); + + when(mockCluster.hscan(Mockito.any(), Mockito.any(), Mockito.any(ScanParams.class))) + .thenReturn(scanResult); + ScanResult<Entry<String, String>> result = + jedisCluster.hscan("key", "cursor", new ScanParams()); + + assertEquals("cursor", result.getCursor()); + assertEquals(2, result.getResult().size()); + assertEquals("val1", result.getResult().get(0).getValue()); + } + + @Test + public void testSscan() { + jedisCluster.sscan("key", "cursor"); + + ScanResult<byte[]> scanResult = + new ScanResult<>( + "sscursor".getBytes(), Arrays.asList("val1".getBytes(), "val2".getBytes())); + + when(mockCluster.sscan(Mockito.any(), Mockito.any(), Mockito.any(ScanParams.class))) + .thenReturn(scanResult); + + ScanResult<String> result = jedisCluster.sscan("key", "cursor", new ScanParams()); + assertEquals("sscursor", result.getCursor()); + assertEquals(2, result.getResult().size()); + assertEquals("val1", result.getResult().get(0)); + } + + @Test + public void testZscan() { + jedisCluster.zscan("key", "cursor"); + jedisCluster.zscan("key", "cursor", new ScanParams()); + } + + @Test + public void testPfadd() { + jedisCluster.pfadd("key", "elements"); + } + + @Test + public void testPfcount() { + jedisCluster.pfcount("key"); + } + + @Test + public void testGeoadd() { + jedisCluster.geoadd("key", new HashMap<>()); + jedisCluster.geoadd("key", 1337, 1338, "member"); + } + + @Test + public void testGeodist() { + jedisCluster.geodist("key", "member1", "member2"); + jedisCluster.geodist("key", "member1", "member2", GeoUnit.KM); + } + + @Test + public void testGeohash() { + jedisCluster.geohash("key", "members"); + } + + @Test + public void testGeopos() { + jedisCluster.geopos("key", "members"); + } + + @Test + public void testGeoradius() { + jedisCluster.georadius("key", 1337, 1338, 32, GeoUnit.KM); + jedisCluster.georadius("key", 1337, 1338, 32, GeoUnit.KM, GeoRadiusParam.geoRadiusParam()); + } + + @Test + public void testGeoradiusByMember() { + jedisCluster.georadiusByMember("key", "member", 1337, GeoUnit.KM); + jedisCluster.georadiusByMember( + "key", "member", 1337, GeoUnit.KM, GeoRadiusParam.geoRadiusParam()); + } + + @Test + public void testBitfield() { + jedisCluster.bitfield("key", "arguments"); + } +} diff --git a/persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisSentinelTest.java b/persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisSentinelTest.java new file mode 100644 index 0000000..7ea0fcc --- /dev/null +++ b/persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisSentinelTest.java @@ -0,0 +1,588 @@ +/* + * Copyright 2020 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.redis.jedis; + +import java.util.HashMap; + +import org.junit.Before; +import org.junit.Test; + +import redis.clients.jedis.GeoUnit; +import redis.clients.jedis.Jedis; +import redis.clients.jedis.JedisSentinelPool; +import redis.clients.jedis.ListPosition; +import redis.clients.jedis.ScanParams; +import redis.clients.jedis.SortingParams; +import redis.clients.jedis.params.GeoRadiusParam; +import redis.clients.jedis.params.SetParams; +import redis.clients.jedis.params.ZAddParams; +import redis.clients.jedis.params.ZIncrByParams; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class JedisSentinelTest { + + private final Jedis jedis = mock(Jedis.class); + private final JedisSentinelPool jedisPool = mock(JedisSentinelPool.class); + private final JedisSentinel jedisSentinel = new JedisSentinel(jedisPool); + + @Before + public void init() { + when(this.jedisPool.getResource()).thenReturn(this.jedis); + } + + @Test + public void testSet() { + jedisSentinel.set("key", "value"); + jedisSentinel.set("key", "value", SetParams.setParams()); + } + + @Test + public void testGet() { + jedisSentinel.get("key"); + } + + @Test + public void testExists() { + jedisSentinel.exists("key"); + } + + @Test + public void testPersist() { + jedisSentinel.persist("key"); + } + + @Test + public void testType() { + jedisSentinel.type("key"); + } + + @Test + public void testExpire() { + jedisSentinel.expire("key", 1337); + } + + @Test + public void testPexpire() { + jedisSentinel.pexpire("key", 1337); + } + + @Test + public void testExpireAt() { + jedisSentinel.expireAt("key", 1337); + } + + @Test + public void testPexpireAt() { + jedisSentinel.pexpireAt("key", 1337); + } + + @Test + public void testTtl() { + jedisSentinel.ttl("key"); + } + + @Test + public void testPttl() { + jedisSentinel.pttl("key"); + } + + @Test + public void testSetbit() { + jedisSentinel.setbit("key", 1337, "value"); + jedisSentinel.setbit("key", 1337, true); + } + + @Test + public void testGetbit() { + jedisSentinel.getbit("key", 1337); + } + + @Test + public void testSetrange() { + jedisSentinel.setrange("key", 1337, "value"); + } + + @Test + public void testGetrange() { + jedisSentinel.getrange("key", 1337, 1338); + } + + @Test + public void testGetSet() { + jedisSentinel.getSet("key", "value"); + } + + @Test + public void testSetnx() { + jedisSentinel.setnx("test", "value"); + } + + @Test + public void testSetex() { + jedisSentinel.setex("key", 1337, "value"); + } + + @Test + public void testPsetex() { + jedisSentinel.psetex("key", 1337, "value"); + } + + @Test + public void testDecrBy() { + jedisSentinel.decrBy("key", 1337); + } + + @Test + public void testDecr() { + jedisSentinel.decr("key"); + } + + @Test + public void testIncrBy() { + jedisSentinel.incrBy("key", 1337); + } + + @Test + public void testIncrByFloat() { + jedisSentinel.incrByFloat("key", 1337); + } + + @Test + public void testIncr() { + jedisSentinel.incr("key"); + } + + @Test + public void testAppend() { + jedisSentinel.append("key", "value"); + } + + @Test + public void testSubstr() { + jedisSentinel.substr("key", 1337, 1338); + } + + @Test + public void testHset() { + 
jedisSentinel.hset("key", "field", "value"); + } + + @Test + public void testHget() { + jedisSentinel.hget("key", "field"); + } + + @Test + public void testHsetnx() { + jedisSentinel.hsetnx("key", "field", "value"); + } + + @Test + public void testHmset() { + jedisSentinel.hmset("key", new HashMap<>()); + } + + @Test + public void testHmget() { + jedisSentinel.hmget("key", "fields"); + } + + @Test + public void testHincrBy() { + jedisSentinel.hincrBy("key", "field", 1337); + } + + @Test + public void testHincrByFloat() { + jedisSentinel.hincrByFloat("key", "field", 1337); + } + + @Test + public void testHexists() { + jedisSentinel.hexists("key", "field"); + } + + @Test + public void testHdel() { + jedisSentinel.hdel("key", "field"); + } + + @Test + public void testHlen() { + jedisSentinel.hlen("key"); + } + + @Test + public void testHkeys() { + jedisSentinel.hkeys("key"); + } + + @Test + public void testHvals() { + jedisSentinel.hvals("key"); + } + + @Test + public void testGgetAll() { + jedisSentinel.hgetAll("key"); + } + + @Test + public void testRpush() { + jedisSentinel.rpush("key", "string"); + } + + @Test + public void testLpush() { + jedisSentinel.lpush("key", "string"); + } + + @Test + public void testLlen() { + jedisSentinel.llen("key"); + } + + @Test + public void testLrange() { + jedisSentinel.lrange("key", 1337, 1338); + } + + @Test + public void testLtrim() { + jedisSentinel.ltrim("key", 1337, 1338); + } + + @Test + public void testLindex() { + jedisSentinel.lindex("key", 1337); + } + + @Test + public void testLset() { + jedisSentinel.lset("key", 1337, "value"); + } + + @Test + public void testLrem() { + jedisSentinel.lrem("key", 1337, "value"); + } + + @Test + public void testLpop() { + jedisSentinel.lpop("key"); + } + + @Test + public void testRpop() { + jedisSentinel.rpop("key"); + } + + @Test + public void testSadd() { + jedisSentinel.sadd("key", "member"); + } + + @Test + public void testSmembers() { + jedisSentinel.smembers("key"); + } + + @Test + public void testSrem() { + jedisSentinel.srem("key", "member"); + } + + @Test + public void testSpop() { + jedisSentinel.spop("key"); + jedisSentinel.spop("key", 1337); + } + + @Test + public void testScard() { + jedisSentinel.scard("key"); + } + + @Test + public void testSismember() { + jedisSentinel.sismember("key", "member"); + } + + @Test + public void testSrandmember() { + jedisSentinel.srandmember("key"); + jedisSentinel.srandmember("key", 1337); + } + + @Test + public void testStrlen() { + jedisSentinel.strlen("key"); + } + + @Test + public void testZadd() { + jedisSentinel.zadd("key", new HashMap<>()); + jedisSentinel.zadd("key", new HashMap<>(), ZAddParams.zAddParams()); + jedisSentinel.zadd("key", 1337, "members"); + jedisSentinel.zadd("key", 1337, "members", ZAddParams.zAddParams()); + } + + @Test + public void testZrange() { + jedisSentinel.zrange("key", 1337, 1338); + } + + @Test + public void testZrem() { + jedisSentinel.zrem("key", "member"); + } + + @Test + public void testZincrby() { + jedisSentinel.zincrby("key", 1337, "member"); + jedisSentinel.zincrby("key", 1337, "member", ZIncrByParams.zIncrByParams()); + } + + @Test + public void testZrank() { + jedisSentinel.zrank("key", "member"); + } + + @Test + public void testZrevrank() { + jedisSentinel.zrevrank("key", "member"); + } + + @Test + public void testZrevrange() { + jedisSentinel.zrevrange("key", 1337, 1338); + } + + @Test + public void testZrangeWithScores() { + jedisSentinel.zrangeWithScores("key", 1337, 1338); + } + + @Test + public void 
testZrevrangeWithScores() { + jedisSentinel.zrevrangeWithScores("key", 1337, 1338); + } + + @Test + public void testZcard() { + jedisSentinel.zcard("key"); + } + + @Test + public void testZscore() { + jedisSentinel.zscore("key", "member"); + } + + @Test + public void testSort() { + jedisSentinel.sort("key"); + jedisSentinel.sort("key", new SortingParams()); + } + + @Test + public void testZcount() { + jedisSentinel.zcount("key", "min", "max"); + jedisSentinel.zcount("key", 1337, 1338); + } + + @Test + public void testZrangeByScore() { + jedisSentinel.zrangeByScore("key", "min", "max"); + jedisSentinel.zrangeByScore("key", 1337, 1338); + jedisSentinel.zrangeByScore("key", "min", "max", 1337, 1338); + jedisSentinel.zrangeByScore("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZrevrangeByScore() { + jedisSentinel.zrevrangeByScore("key", "max", "min"); + jedisSentinel.zrevrangeByScore("key", 1337, 1338); + jedisSentinel.zrevrangeByScore("key", "max", "min", 1337, 1338); + jedisSentinel.zrevrangeByScore("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZrangeByScoreWithScores() { + jedisSentinel.zrangeByScoreWithScores("key", "min", "max"); + jedisSentinel.zrangeByScoreWithScores("key", "min", "max", 1337, 1338); + jedisSentinel.zrangeByScoreWithScores("key", 1337, 1338); + jedisSentinel.zrangeByScoreWithScores("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZrevrangeByScoreWithScores() { + jedisSentinel.zrevrangeByScoreWithScores("key", "max", "min"); + jedisSentinel.zrevrangeByScoreWithScores("key", "max", "min", 1337, 1338); + jedisSentinel.zrevrangeByScoreWithScores("key", 1337, 1338); + jedisSentinel.zrevrangeByScoreWithScores("key", 1337, 1338, 1339, 1340); + } + + @Test + public void testZremrangeByRank() { + jedisSentinel.zremrangeByRank("key", 1337, 1338); + } + + @Test + public void testZremrangeByScore() { + jedisSentinel.zremrangeByScore("key", "start", "end"); + jedisSentinel.zremrangeByScore("key", 1337, 1338); + } + + @Test + public void testZlexcount() { + jedisSentinel.zlexcount("key", "min", "max"); + } + + @Test + public void testZrangeByLex() { + jedisSentinel.zrangeByLex("key", "min", "max"); + jedisSentinel.zrangeByLex("key", "min", "max", 1337, 1338); + } + + @Test + public void testZrevrangeByLex() { + jedisSentinel.zrevrangeByLex("key", "max", "min"); + jedisSentinel.zrevrangeByLex("key", "max", "min", 1337, 1338); + } + + @Test + public void testZremrangeByLex() { + jedisSentinel.zremrangeByLex("key", "min", "max"); + } + + @Test + public void testLinsert() { + jedisSentinel.linsert("key", ListPosition.AFTER, "pivot", "value"); + } + + @Test + public void testLpushx() { + jedisSentinel.lpushx("key", "string"); + } + + @Test + public void testRpushx() { + jedisSentinel.rpushx("key", "string"); + } + + @Test + public void testBlpop() { + jedisSentinel.blpop(1337, "arg"); + } + + @Test + public void testBrpop() { + jedisSentinel.brpop(1337, "arg"); + } + + @Test + public void testDel() { + jedisSentinel.del("key"); + } + + @Test + public void testEcho() { + jedisSentinel.echo("string"); + } + + @Test + public void testMove() { + jedisSentinel.move("key", 1337); + } + + @Test + public void testBitcount() { + jedisSentinel.bitcount("key"); + jedisSentinel.bitcount("key", 1337, 1338); + } + + @Test + public void testBitpos() { + jedisSentinel.bitpos("key", true); + } + + @Test + public void testHscan() { + jedisSentinel.hscan("key", "cursor"); + jedisSentinel.hscan("key", "cursor", new ScanParams()); + } + + @Test + public void 
testSscan() { + jedisSentinel.sscan("key", "cursor"); + jedisSentinel.sscan("key", "cursor", new ScanParams()); + } + + @Test + public void testZscan() { + jedisSentinel.zscan("key", "cursor"); + jedisSentinel.zscan("key", "cursor", new ScanParams()); + } + + @Test + public void testPfadd() { + jedisSentinel.pfadd("key", "elements"); + } + + @Test + public void testPfcount() { + jedisSentinel.pfcount("key"); + } + + @Test + public void testGeoadd() { + jedisSentinel.geoadd("key", new HashMap<>()); + jedisSentinel.geoadd("key", 1337, 1338, "member"); + } + + @Test + public void testGeodist() { + jedisSentinel.geodist("key", "member1", "member2"); + jedisSentinel.geodist("key", "member1", "member2", GeoUnit.KM); + } + + @Test + public void testGeohash() { + jedisSentinel.geohash("key", "members"); + } + + @Test + public void testGeopos() { + jedisSentinel.geopos("key", "members"); + } + + @Test + public void testGeoradius() { + jedisSentinel.georadius("key", 1337, 1338, 32, GeoUnit.KM); + jedisSentinel.georadius("key", 1337, 1338, 32, GeoUnit.KM, GeoRadiusParam.geoRadiusParam()); + } + + @Test + public void testGeoradiusByMember() { + jedisSentinel.georadiusByMember("key", "member", 1337, GeoUnit.KM); + jedisSentinel.georadiusByMember( + "key", "member", 1337, GeoUnit.KM, GeoRadiusParam.geoRadiusParam()); + } + + @Test + public void testBitfield() { + jedisSentinel.bitfield("key", "arguments"); + } +} diff --git a/persistence/src/test/resources/wf.json b/persistence/src/test/resources/wf.json new file mode 100644 index 0000000..f035bd0 --- /dev/null +++ b/persistence/src/test/resources/wf.json @@ -0,0 +1,391 @@ +{ + "updateTime": 1653589985475, + "name": "performance_test", + "version": 1, + "tasks": [ + { + "name": "call_remote", + "taskReferenceName": "call_remote", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 1653589944218, + "createdBy": "", + "name": "call_remote", + "description": "Edit or extend this sample task. 
Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "set_state", + "taskReferenceName": "set_state", + "inputParameters": { + "call_made": true, + "length": "${call_remote.output.response.body.length}" + }, + "type": "SET_VARIABLE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "sub_flow", + "taskReferenceName": "sub_flow", + "inputParameters": {}, + "type": "SUB_WORKFLOW", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "subWorkflowParam": { + "name": "PopulationMinMax", + "version": 1 + }, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "dynamic_fork_prep", + "taskReferenceName": "dynamic_fork_prep", + "inputParameters": {}, + "type": "SIMPLE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 1653589944141, + "createdBy": "", + "name": "dynamic_fork_prep", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "dynamic_fork", + "taskReferenceName": "dynamic_fork", + "inputParameters": { + "forkedTasks": "${dynamic_fork_prep.output.forkedTasks}", + "forkedTasksInputs": "${dynamic_fork_prep.output.forkedTasksInputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "decisionCases": {}, + "dynamicForkTasksParam": "forkedTasks", + "dynamicForkTasksInputParamName": "forkedTasksInputs", + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "dynamic_fork_join", + "taskReferenceName": "dynamic_fork_join", + "inputParameters": {}, + "type": "JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "fork", + "taskReferenceName": "fork", + "inputParameters": {}, + "type": "FORK_JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [ + [ + { + "name": "loop_until_success", + "taskReferenceName": "loop_until_success", + "inputParameters": { + "value": 2 + }, + "type": "DO_WHILE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": true, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopCondition": "if ( 
$.loop_until_success['iteration'] < $.value) { true; } else { false; }", + "loopOver": [ + { + "name": "fact_length", + "taskReferenceName": "fact_length", + "description": "Fail if the fact is too short", + "inputParameters": { + "fact_length": "${call_remote.output.response.body.length}" + }, + "type": "SWITCH", + "decisionCases": { + "LONG": [ + { + "name": "call_remote_again", + "taskReferenceName": "call_remote_again", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 1653589944187, + "createdBy": "", + "name": "call_remote_again", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "SHORT": [ + { + "name": "too_short", + "taskReferenceName": "too_short", + "inputParameters": { + "terminationReason": "value too short", + "terminationStatus": "FAILED" + }, + "type": "TERMINATE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ] + }, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [], + "evaluatorType": "javascript", + "expression": "$.fact_length < 30 ? 
'SHORT':'LONG'" + } + ] + }, + { + "name": "sub_flow_inline", + "taskReferenceName": "sub_flow_inline", + "inputParameters": {}, + "type": "SUB_WORKFLOW", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "subWorkflowParam": { + "name": "inline_sub", + "version": 1, + "workflowDefinition": { + "name": "inline_sub", + "version": 1, + "tasks": [ + { + "name": "call_something", + "taskReferenceName": "call_something", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} + } + }, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + [ + { + "name": "another", + "taskReferenceName": "another", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 1653589944143, + "createdBy": "", + "name": "another", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 0, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ] + ], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "fork_join", + "taskReferenceName": "fork_join", + "inputParameters": {}, + "type": "JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "ownerEmail": "boney@orkes.io", + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} +} \ No newline at end of file diff --git a/persistence/src/test/resources/wf2.json b/persistence/src/test/resources/wf2.json new file mode 100644 index 0000000..83b2b3d --- /dev/null +++ b/persistence/src/test/resources/wf2.json @@ -0,0 +1,391 @@ +{ + "updateTime": 16535899854, + "name": "performance_test", + "version": 1, + "tasks": [ + { + "name": "call_remote", + "taskReferenceName": "call_remote", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 16535899854, + "createdBy": "", + "name": 
"call_remote", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "set_state", + "taskReferenceName": "set_state", + "inputParameters": { + "call_made": true, + "length": "${call_remote.output.response.body.length}" + }, + "type": "SET_VARIABLE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "sub_flow", + "taskReferenceName": "sub_flow", + "inputParameters": {}, + "type": "SUB_WORKFLOW", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "subWorkflowParam": { + "name": "PopulationMinMax", + "version": 1 + }, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "dynamic_fork_prep", + "taskReferenceName": "dynamic_fork_prep", + "inputParameters": {}, + "type": "SIMPLE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 16535944141, + "createdBy": "", + "name": "dynamic_fork_prep", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "dynamic_fork", + "taskReferenceName": "dynamic_fork", + "inputParameters": { + "forkedTasks": "${dynamic_fork_prep.output.forkedTasks}", + "forkedTasksInputs": "${dynamic_fork_prep.output.forkedTasksInputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "decisionCases": {}, + "dynamicForkTasksParam": "forkedTasks", + "dynamicForkTasksInputParamName": "forkedTasksInputs", + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "dynamic_fork_join", + "taskReferenceName": "dynamic_fork_join", + "inputParameters": {}, + "type": "JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "fork", + "taskReferenceName": "fork", + "inputParameters": {}, + "type": "FORK_JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [ + [ + { + "name": "loop_until_success", + "taskReferenceName": "loop_until_success", + "inputParameters": { + "value": 2 + }, + "type": "DO_WHILE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": true, + "defaultExclusiveJoinTask": [], + "asyncComplete": 
false, + "loopCondition": "if ( $.loop_until_success['iteration'] < $.value) { true; } else { false; }", + "loopOver": [ + { + "name": "fact_length", + "taskReferenceName": "fact_length", + "description": "Fail if the fact is too short", + "inputParameters": { + "fact_length": "${call_remote.output.response.body.length}" + }, + "type": "SWITCH", + "decisionCases": { + "LONG": [ + { + "name": "call_remote_again", + "taskReferenceName": "call_remote_again", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 1653589944187, + "createdBy": "", + "name": "call_remote_again", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "SHORT": [ + { + "name": "too_short", + "taskReferenceName": "too_short", + "inputParameters": { + "terminationReason": "value too short", + "terminationStatus": "FAILED" + }, + "type": "TERMINATE", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ] + }, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [], + "evaluatorType": "javascript", + "expression": "$.fact_length < 30 ? 
'SHORT':'LONG'" + } + ] + }, + { + "name": "sub_flow_inline", + "taskReferenceName": "sub_flow_inline", + "inputParameters": {}, + "type": "SUB_WORKFLOW", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "subWorkflowParam": { + "name": "inline_sub", + "version": 1, + "workflowDefinition": { + "name": "inline_sub", + "version": 1, + "tasks": [ + { + "name": "call_something", + "taskReferenceName": "call_something", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} + } + }, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + [ + { + "name": "another", + "taskReferenceName": "another", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://catfact.ninja/fact" + } + }, + "type": "HTTPv3", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "taskDefinition": { + "createTime": 1653589944143, + "createdBy": "", + "name": "another", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 0, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "boney@orkes.io", + "backoffScaleFactor": 1 + }, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ] + ], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "fork_join", + "taskReferenceName": "fork_join", + "inputParameters": {}, + "type": "JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "ownerEmail": "boney@orkes.io", + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} +} \ No newline at end of file diff --git a/redis-queues/build.gradle b/redis-queues/build.gradle new file mode 100644 index 0000000..9598775 --- /dev/null +++ b/redis-queues/build.gradle @@ -0,0 +1,21 @@ +dependencies { + + implementation "com.netflix.conductor:conductor-core:${versions.conductorfork}" + implementation 'org.springframework.boot:spring-boot-starter' + implementation "io.micrometer:micrometer-core:1.7.5" + + implementation "redis.clients:jedis:${versions.revJedis}" + implementation 'org.slf4j:slf4j-api' + + //Guava + implementation "com.google.guava:guava:${versions.revGuava}" + + testImplementation "junit:junit:4.11" + testImplementation 'org.junit.jupiter:junit-jupiter-api:5.7.0' + testRuntimeOnly 
'org.junit.jupiter:junit-jupiter-engine:5.7.0' + testImplementation "org.testcontainers:testcontainers:1.17.2" +} + +test { + useJUnitPlatform() +} \ No newline at end of file diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/ConductorQueue.java b/redis-queues/src/main/java/io/orkes/conductor/mq/ConductorQueue.java new file mode 100644 index 0000000..0f45bb4 --- /dev/null +++ b/redis-queues/src/main/java/io/orkes/conductor/mq/ConductorQueue.java @@ -0,0 +1,75 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq;
+
+import java.math.BigDecimal;
+import java.math.MathContext;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+public interface ConductorQueue {
+
+    static final BigDecimal HUNDRED = new BigDecimal(100);
+
+    static final BigDecimal MILLION = new BigDecimal(1000_000);
+
+    static final MathContext PRECISION_MC = new MathContext(20);
+
+    String getName();
+
+    List<QueueMessage> pop(int count, int waitTime, TimeUnit timeUnit);
+
+    boolean ack(String messageId);
+
+    void push(List<QueueMessage> messages);
+
+    boolean setUnacktimeout(String messageId, long unackTimeout);
+
+    boolean exists(String messageId);
+
+    void remove(String messageId);
+
+    QueueMessage get(String messageId);
+
+    void flush();
+
+    long size();
+
+    int getQueueUnackTime();
+
+    void setQueueUnackTime(int queueUnackTime);
+
+    String getShardName();
+
+    default double getScore(long now, QueueMessage msg) {
+        double score = 0;
+        if (msg.getTimeout() > 0) {
+
+            // Encode the priority as the fractional part of the score so that messages
+            // due in the same millisecond are still ordered by priority
+            BigDecimal timeout = new BigDecimal(now + msg.getTimeout());
+            BigDecimal divideByOne =
+                    BigDecimal.ONE.divide(new BigDecimal(msg.getPriority() + 1), PRECISION_MC);
+            BigDecimal oneMinusDivByOne = BigDecimal.ONE.subtract(divideByOne);
+            BigDecimal bd = timeout.add(oneMinusDivByOne);
+            score = bd.doubleValue();
+
+        } else {
+            // Old logic, kept for reference: score = now + msg.getTimeout() + priority;
+            score = msg.getPriority() > 0 ? msg.getPriority() : now;
+        }
+
+        return score;
+    }
+}
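The fractional encoding in getScore is easiest to see with concrete numbers. A standalone sketch (class name and values are illustrative, not part of this diff); note that what the monitor later decodes is the priority bucket 100 * (1 - 1/(priority + 1)), not the original priority itself:

    import java.math.BigDecimal;

    public class ScoreEncodingDemo {
        public static void main(String[] args) {
            long now = 1_700_000_000_000L; // hypothetical clock reading
            long timeout = 5_000;          // message becomes due 5 seconds from now
            int priority = 3;

            // Same shape as ConductorQueue.getScore: integer part = due time,
            // fractional part = 1 - 1/(priority + 1)
            double score = (now + timeout) + (1.0 - 1.0 / (priority + 1));

            // QueueMonitor recovers the bucket from the fractional part
            int bucket = new BigDecimal(score)
                    .remainder(BigDecimal.ONE)
                    .multiply(new BigDecimal(100))
                    .intValue();

            System.out.println(score);  // 1.70000000500075E12
            System.out.println(bucket); // 75
        }
    }

Because the bucket is a monotone function of the priority, relative ordering within one millisecond is preserved even though the exact priority value is not recoverable.

diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/QueueMessage.java b/redis-queues/src/main/java/io/orkes/conductor/mq/QueueMessage.java
new file mode 100644
index 0000000..90fffff
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/mq/QueueMessage.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *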

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq;
+
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+public class QueueMessage {
+
+    private String id;
+
+    private String payload;
+
+    /** Delay, in milliseconds, before the message becomes eligible for pop */
+    private long timeout;
+
+    /** Priority - 0 being the highest priority */
+    private int priority;
+
+    private long expiry;
+
+    public QueueMessage(String id, String payload) {
+        this(id, payload, 0, 100);
+    }
+
+    public QueueMessage(String id, String payload, long timeout) {
+        this(id, payload, timeout, 100);
+    }
+
+    public QueueMessage(String id, String payload, long timeout, int priority) {
+        this.id = id;
+        this.payload = payload;
+        this.timeout = timeout;
+        this.priority = priority;
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getPayload() {
+        return payload;
+    }
+
+    public void setPayload(String payload) {
+        this.payload = payload;
+    }
+
+    public long getTimeout() {
+        return timeout;
+    }
+
+    public void setTimeout(long timeout) {
+        this.timeout = timeout;
+    }
+
+    public void setTimeout(long timeout, TimeUnit timeUnit) {
+        // normalize the given duration to milliseconds, the unit used internally
+        this.timeout = timeUnit.toMillis(timeout);
+    }
+
+    public int getPriority() {
+        return priority;
+    }
+
+    public void setPriority(int priority) {
+        this.priority = priority;
+    }
+
+    public long getExpiry() {
+        return expiry;
+    }
+
+    public void setExpiry(long expiry) {
+        this.expiry = expiry;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        QueueMessage message = (QueueMessage) o;
+        return Objects.equals(id, message.id);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id);
+    }
+}
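A brief usage sketch of the message contract above (identifiers and payload are illustrative; `queue` can be any ConductorQueue implementation from this module):

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import io.orkes.conductor.mq.ConductorQueue;
    import io.orkes.conductor.mq.QueueMessage;

    class EnqueueSketch {
        // Enqueue a message that becomes visible to pop() after 30 seconds.
        static void enqueueDelayed(ConductorQueue queue) {
            QueueMessage msg = new QueueMessage("task-123", "{\"name\":\"demo\"}");
            msg.setTimeout(30, TimeUnit.SECONDS); // stored as 30_000 ms
            msg.setPriority(1);                   // 0 is the highest priority
            queue.push(List.of(msg));
        }
    }

diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/redis/QueueMonitor.java b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/QueueMonitor.java
new file mode 100644
index 0000000..6784e51
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/QueueMonitor.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *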

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq.redis;
+
+import java.math.BigDecimal;
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import io.orkes.conductor.mq.QueueMessage;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class QueueMonitor {
+
+    private static final BigDecimal HUNDRED = new BigDecimal(100);
+
+    private final Clock clock;
+
+    private final LinkedBlockingDeque<QueueMessage> peekedMessages;
+
+    private final ExecutorService executorService;
+
+    private final AtomicInteger pollCount = new AtomicInteger(0);
+    private final String queueName;
+
+    private int queueUnackTime = 30_000;
+
+    private long nextUpdate = 0;
+
+    private long size = 0;
+
+    private int maxPollCount = 100;
+
+    public QueueMonitor(String queueName) {
+        this.queueName = queueName;
+        this.clock = Clock.systemDefaultZone();
+        this.peekedMessages = new LinkedBlockingDeque<>();
+        this.executorService =
+                new ThreadPoolExecutor(
+                        1, 1, 0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(maxPollCount));
+    }
+
+    public List<QueueMessage> pop(int count, int waitTime, TimeUnit timeUnit) {
+
+        List<QueueMessage> messages = new ArrayList<>();
+        int pendingCount = pollCount.addAndGet(count);
+        if (peekedMessages.isEmpty()) {
+            __peekedMessages();
+        } else if (peekedMessages.size() < pendingCount) {
+            try {
+                executorService.submit(() -> __peekedMessages());
+            } catch (RejectedExecutionException rejectedExecutionException) {
+                // The single-threaded pool's queue is full, so a refill is already
+                // pending; safe to ignore and serve from what was peeked
+            }
+        }
+
+        long now = clock.millis();
+        boolean waited = false;
+        for (int i = 0; i < count; i++) {
+            try {
+                // Why not poll with a timeout?
+                // The timed poll appears to spin-wait, which burns more CPU.
+                // The sleep below simply parks the thread, which is more CPU friendly.
+                QueueMessage message = peekedMessages.poll();
+                if (message == null) {
+                    if (!waited) {
+                        Uninterruptibles.sleepUninterruptibly(waitTime, timeUnit);
+                        waited = true;
+                        continue;
+                    } else {
+                        return messages;
+                    }
+                }
+                if (now > message.getExpiry()) {
+                    // The peeked message's lease has already expired; skip it
+                    continue;
+                }
+                messages.add(message);
+            } catch (Exception e) {
+                log.error(e.getMessage(), e);
+            }
+        }
+        return messages;
+    }
+
+    public int getQueueUnackTime() {
+        return queueUnackTime;
+    }
+
+    public void setQueueUnackTime(int queueUnackTime) {
+        this.queueUnackTime = queueUnackTime;
+    }
+
+    protected abstract List<String> pollMessages(double now, double maxTime, int batchSize);
+
+    protected abstract long queueSize();
+
+    private synchronized void __peekedMessages() {
+        try {
+
+            int count = Math.min(maxPollCount, pollCount.get());
+            if (count <= 0) {
+                if (count < 0) {
+                    log.warn("Negative poll count {}", pollCount.get());
+                    pollCount.set(0);
+                }
+                // A negative count shouldn't happen; zero can, and in that case there is
+                // nothing to do
+                return;
+            }
+            if (getQueuedMessagesLen() == 0) {
+                pollCount.set(0); // There isn't anything in the queue
+                return;
+            }
+
+            log.trace("Polling {} messages from {} with size {}", count, queueName, size);
+
+            double now = Long.valueOf(clock.millis() + 1).doubleValue();
+            double maxTime = now + queueUnackTime;
+            long messageExpiry = (long) now + (queueUnackTime);
+            List<String> response = pollMessages(now, maxTime, count);
+            if (response == null) {
+                return;
+            }
+            for (int i = 0; i < response.size(); i += 2) {
+
+                long timeout = 0;
+                String id = response.get(i);
+                String scoreString = response.get(i + 1);
+
+                // The fractional part of the score carries the priority bucket
+                // (see ConductorQueue.getScore)
+                int priority =
+                        new BigDecimal(scoreString)
+                                .remainder(BigDecimal.ONE)
+                                .multiply(HUNDRED)
+                                .intValue();
+                QueueMessage message = new QueueMessage(id, "", timeout, priority);
+                message.setExpiry(messageExpiry);
+                peekedMessages.add(message);
+            }
+            pollCount.addAndGet(-1 * (response.size() / 2));
+        } catch (Throwable t) {
+            log.warn(t.getMessage(), t);
+        }
+    }
+
+    private long getQueuedMessagesLen() {
+        long now = clock.millis();
+        if (now > nextUpdate) {
+            size = queueSize();
+            nextUpdate = now + 1000; // Cache for 1000 ms
+        }
+        return size;
+    }
+}
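The pollMessages contract is a flat list alternating message id and score, the WITHSCORES layout produced by the Lua script the concrete monitors load. An illustrative stub, not part of the module, that makes the contract visible:

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import io.orkes.conductor.mq.QueueMessage;
    import io.orkes.conductor.mq.redis.QueueMonitor;

    class FakeQueueMonitor extends QueueMonitor {
        FakeQueueMonitor() {
            super("demo");
        }

        @Override
        protected List<String> pollMessages(double now, double maxTime, int batchSize) {
            // Scores arrive from Redis as strings; the fractional .75 decodes to bucket 75.
            return List.of("msg-1", "1700000005000.75", "msg-2", "1700000005001.0");
        }

        @Override
        protected long queueSize() {
            return 2;
        }
    }

    // Usage: new FakeQueueMonitor().pop(2, 10, TimeUnit.MILLISECONDS) yields both ids,
    // with priorities decoded from the fractional parts of the scores.

diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/redis/cluster/ClusteredQueueMonitor.java b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/cluster/ClusteredQueueMonitor.java
new file mode 100644
index 0000000..6efc5a9
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/cluster/ClusteredQueueMonitor.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *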

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq.redis.cluster;
+
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+import java.util.concurrent.*;
+
+import io.orkes.conductor.mq.redis.QueueMonitor;
+
+import lombok.extern.slf4j.Slf4j;
+import redis.clients.jedis.JedisCluster;
+import redis.clients.jedis.exceptions.JedisNoScriptException;
+
+@Slf4j
+public class ClusteredQueueMonitor extends QueueMonitor {
+
+    private final JedisCluster jedisCluster;
+
+    private final String scriptSha;
+
+    private final String queueName;
+
+    public ClusteredQueueMonitor(JedisCluster jedisCluster, String queueName) {
+        super(queueName);
+        this.queueName = queueName;
+        this.jedisCluster = jedisCluster;
+        this.scriptSha = loadScript();
+    }
+
+    private String loadScript() {
+        try (InputStream stream = getClass().getResourceAsStream("/pop_batch.lua")) {
+            byte[] script = stream.readAllBytes();
+            // Load against the queue's key so the script lands on the owning cluster node
+            byte[] response =
+                    jedisCluster.scriptLoad(script, queueName.getBytes(StandardCharsets.UTF_8));
+            return new String(response);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    protected long queueSize() {
+        return jedisCluster.zcard(queueName);
+    }
+
+    @Override
+    protected List<String> pollMessages(double now, double maxTime, int batchSize) {
+        try {
+
+            Object popResponse =
+                    jedisCluster.evalsha(
+                            scriptSha,
+                            Arrays.asList(queueName),
+                            Arrays.asList("" + now, "" + maxTime, "" + batchSize));
+
+            if (popResponse == null) {
+                return null;
+            }
+
+            return (List<String>) popResponse;
+
+        } catch (JedisNoScriptException jedisNoScriptException) {
+            // The script cache was flushed (e.g. the redis server restarted). Re-register
+            // the script; the SHA is deterministic, so scriptSha stays valid and the next
+            // poll cycle retries.
+            loadScript();
+            return null;
+        }
+    }
+}
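Both monitor flavors register /pop_batch.lua once and then run it by SHA. The reload-on-NOSCRIPT pattern they rely on can be distilled as follows (a sketch against plain Jedis; class and method names are illustrative):

    import java.util.List;

    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.exceptions.JedisNoScriptException;

    class EvalShaSketch {
        static Object evalWithReload(Jedis jedis, String script, List<String> keys, List<String> args) {
            String sha = jedis.scriptLoad(script); // SHA1 of the script body, deterministic
            try {
                return jedis.evalsha(sha, keys, args);
            } catch (JedisNoScriptException e) {
                // Script cache was flushed (e.g. restart): re-register and retry once.
                jedis.scriptLoad(script);
                return jedis.evalsha(sha, keys, args);
            }
        }
    }

diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/redis/cluster/ConductorRedisClusterQueue.java b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/cluster/ConductorRedisClusterQueue.java
new file mode 100644
index 0000000..b86fb82
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/cluster/ConductorRedisClusterQueue.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *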

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq.redis.cluster;
+
+import java.math.BigDecimal;
+import java.time.Clock;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import io.orkes.conductor.mq.ConductorQueue;
+import io.orkes.conductor.mq.QueueMessage;
+
+import lombok.extern.slf4j.Slf4j;
+import redis.clients.jedis.JedisCluster;
+import redis.clients.jedis.params.ZAddParams;
+
+@Slf4j
+public class ConductorRedisClusterQueue implements ConductorQueue {
+
+    private int queueUnackTime = 30_000;
+
+    private final JedisCluster jedis;
+
+    private final Clock clock;
+
+    private final String queueName;
+
+    private static final BigDecimal HUNDRED = new BigDecimal(100);
+
+    private final ClusteredQueueMonitor queueMonitor;
+
+    public ConductorRedisClusterQueue(String queueName, JedisCluster jedisCluster) {
+        this.jedis = jedisCluster;
+        this.clock = Clock.systemDefaultZone();
+        this.queueName = queueName;
+        this.queueMonitor = new ClusteredQueueMonitor(jedisCluster, queueName);
+
+        log.info("ConductorRedisClusterQueue started serving {}", queueName);
+    }
+
+    @Override
+    public String getName() {
+        return queueName;
+    }
+
+    @Override
+    public List<QueueMessage> pop(int count, int waitTime, TimeUnit timeUnit) {
+        return queueMonitor.pop(count, waitTime, timeUnit);
+    }
+
+    @Override
+    public boolean ack(String messageId) {
+        Long removed = jedis.zrem(queueName, messageId);
+        return removed > 0;
+    }
+
+    @Override
+    public void push(List<QueueMessage> messages) {
+
+        long now = clock.millis();
+        for (QueueMessage msg : messages) {
+            double score = getScore(now, msg);
+            String messageId = msg.getId();
+            jedis.zadd(queueName, score, messageId);
+        }
+    }
+
+    @Override
+    public boolean setUnacktimeout(String messageId, long unackTimeout) {
+        double score = clock.millis() + unackTimeout;
+        ZAddParams params =
+                ZAddParams.zAddParams()
+                        .xx() // only update, do NOT add
+                        .ch(); // return modified elements count
+        Long modified = jedis.zadd(queueName, score, messageId, params);
+        return modified != null && modified > 0;
+    }
+
+    @Override
+    public boolean exists(String messageId) {
+        Double score = jedis.zscore(queueName, messageId);
+        return score != null;
+    }
+
+    @Override
+    public void remove(String messageId) {
+        jedis.zrem(queueName, messageId);
+    }
+
+    @Override
+    public QueueMessage get(String messageId) {
+
+        Double score = jedis.zscore(queueName, messageId);
+        if (score == null) {
+            return null;
+        }
+        int priority =
+                new BigDecimal(score.doubleValue())
+                        .remainder(BigDecimal.ONE)
+                        .multiply(HUNDRED)
+                        .intValue();
+        QueueMessage message = new QueueMessage(messageId, "", score.longValue(), priority);
+        return message;
+    }
+
+    @Override
+    public void flush() {
+        jedis.del(queueName);
+    }
+
+    @Override
+    public long size() {
+        return jedis.zcard(queueName);
+    }
+
+    @Override
+    public int getQueueUnackTime() {
+        return queueUnackTime;
+    }
+
+    @Override
+    public void setQueueUnackTime(int queueUnackTime) {
+        this.queueUnackTime = queueUnackTime;
+    }
+
+    @Override
+    public String getShardName() {
+        return null;
+    }
+}
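setUnacktimeout above leans on two ZADD flags, XX and CH. A minimal sketch of the same call outside the class (assumes a reachable Redis; names are illustrative):

    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.params.ZAddParams;

    class UnackTimeoutSketch {
        static boolean extendLease(Jedis jedis, String queue, String messageId, long unackMillis) {
            double newScore = System.currentTimeMillis() + unackMillis;
            // XX: only update an existing member, never insert a new one.
            // CH: return the number of changed members instead of the number added.
            Long changed = jedis.zadd(queue, newScore, messageId, ZAddParams.zAddParams().xx().ch());
            return changed != null && changed > 0; // false => message no longer in the queue
        }
    }
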
diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/redis/single/ConductorRedisQueue.java b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/single/ConductorRedisQueue.java
new file mode 100644
index 0000000..cfd42c6
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/single/ConductorRedisQueue.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq.redis.single;
+
+import java.math.BigDecimal;
+import java.time.Clock;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import io.orkes.conductor.mq.ConductorQueue;
+import io.orkes.conductor.mq.QueueMessage;
+import io.orkes.conductor.mq.redis.QueueMonitor;
+
+import lombok.extern.slf4j.Slf4j;
+import redis.clients.jedis.Jedis;
+import redis.clients.jedis.JedisPoolAbstract;
+import redis.clients.jedis.Pipeline;
+import redis.clients.jedis.params.ZAddParams;
+
+@Slf4j
+public class ConductorRedisQueue implements ConductorQueue {
+
+    private final JedisPoolAbstract jedisPool;
+
+    private final Clock clock;
+
+    private String queueName;
+
+    private final QueueMonitor queueMonitor;
+
+    public ConductorRedisQueue(String queueName, JedisPoolAbstract jedisPool) {
+        this.jedisPool = jedisPool;
+        this.clock = Clock.systemDefaultZone();
+        this.queueName = queueName;
+        this.queueMonitor = new RedisQueueMonitor(jedisPool, queueName);
+        log.info("ConductorRedisQueue started serving {}", queueName);
+    }
+
+    @Override
+    public String getName() {
+        return queueName;
+    }
+
+    @Override
+    public List<QueueMessage> pop(int count, int waitTime, TimeUnit timeUnit) {
+        return queueMonitor.pop(count, waitTime, timeUnit);
+    }
+
+    @Override
+    public boolean ack(String messageId) {
+        Long removed;
+        try (Jedis jedis = jedisPool.getResource()) {
+            removed = jedis.zrem(queueName, messageId);
+        }
+        return removed > 0;
+    }
+
+    @Override
+    public void remove(String messageId) {
+        try (Jedis jedis = jedisPool.getResource()) {
+            jedis.zrem(queueName, messageId);
+        }
+    }
+
+    @Override
+    public void push(List<QueueMessage> messages) {
+
+        long now = clock.millis();
+
+        try (Jedis jedis = jedisPool.getResource()) {
+            Pipeline pipe = jedis.pipelined();
+            for (QueueMessage msg : messages) {
+                double score = getScore(now, msg);
+                String messageId = msg.getId();
+                pipe.zadd(queueName, score, messageId);
+            }
+
+            pipe.sync();
+            pipe.close();
+        }
+    }
+
+    @Override
+    public boolean setUnacktimeout(String messageId, long unackTimeout) {
+        double score = clock.millis() + unackTimeout;
+        try (Jedis jedis = jedisPool.getResource()) {
+            ZAddParams params =
+                    ZAddParams.zAddParams()
+                            .xx() // only update, do NOT add
+                            .ch(); // return modified elements count
+            Long modified = jedis.zadd(queueName, score, messageId, params);
+            return modified != null && modified > 0;
+        }
+    }
+
+    @Override
+    public boolean exists(String messageId) {
+        try (Jedis jedis = jedisPool.getResource()) {
+            Double score = jedis.zscore(queueName, messageId);
+            return score != null;
+        }
+    }
+
+    @Override
+    public QueueMessage get(String messageId) {
+
+        try (Jedis jedis = jedisPool.getResource()) {
+            Double score = jedis.zscore(queueName, messageId);
+            if (score == null) {
+                return null;
+            }
+            int priority =
+                    new BigDecimal(score).remainder(BigDecimal.ONE).multiply(HUNDRED).intValue();
+            QueueMessage message = new QueueMessage(messageId, "", score.longValue(), priority);
+            return message;
+        }
+    }
+
+    @Override
+    public void flush() {
+        try (Jedis jedis = jedisPool.getResource()) {
+            jedis.del(queueName);
+        }
+    }
+
+    @Override
+    public long size() {
+        try (Jedis jedis = jedisPool.getResource()) {
+            return jedis.zcard(queueName);
+        }
+    }
+
+    @Override
+    public int getQueueUnackTime() {
+        return queueMonitor.getQueueUnackTime();
+    }
+
+    @Override
+    public void setQueueUnackTime(int queueUnackTime) {
+        queueMonitor.setQueueUnackTime(queueUnackTime);
+    }
+
+    @Override
+    public String getShardName() {
+        return null;
+    }
+}
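A usage sketch for the single-node queue above (assumes a Redis reachable on localhost:6379, mirroring the Testcontainers setup in the tests later in this diff):

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import io.orkes.conductor.mq.QueueMessage;
    import io.orkes.conductor.mq.redis.single.ConductorRedisQueue;

    import redis.clients.jedis.JedisPool;

    class SingleQueueDemo {
        public static void main(String[] args) {
            JedisPool pool = new JedisPool("localhost", 6379);
            ConductorRedisQueue queue = new ConductorRedisQueue("demo_queue", pool);

            queue.push(List.of(new QueueMessage("id-1", "payload")));
            List<QueueMessage> popped = queue.pop(1, 100, TimeUnit.MILLISECONDS);
            popped.forEach(m -> queue.ack(m.getId())); // ack removes the lease entry
        }
    }

diff --git a/redis-queues/src/main/java/io/orkes/conductor/mq/redis/single/RedisQueueMonitor.java b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/single/RedisQueueMonitor.java
new file mode 100644
index 0000000..405351e
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/mq/redis/single/RedisQueueMonitor.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *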

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.mq.redis.single;
+
+import java.io.InputStream;
+import java.util.*;
+
+import io.orkes.conductor.mq.redis.QueueMonitor;
+
+import lombok.extern.slf4j.Slf4j;
+import redis.clients.jedis.Jedis;
+import redis.clients.jedis.JedisPoolAbstract;
+import redis.clients.jedis.exceptions.JedisNoScriptException;
+
+@Slf4j
+public class RedisQueueMonitor extends QueueMonitor {
+
+    private final JedisPoolAbstract jedisPool;
+
+    private final String queueName;
+
+    private final String scriptSha;
+
+    public RedisQueueMonitor(JedisPoolAbstract jedisPool, String queueName) {
+        super(queueName);
+        this.jedisPool = jedisPool;
+        this.queueName = queueName;
+        this.scriptSha = loadScript();
+    }
+
+    @Override
+    protected List<String> pollMessages(double now, double maxTime, int batchSize) {
+        try (Jedis jedis = jedisPool.getResource()) {
+            Object popResponse =
+                    jedis.evalsha(
+                            scriptSha,
+                            Arrays.asList(queueName),
+                            Arrays.asList("" + now, "" + maxTime, "" + batchSize));
+
+            if (popResponse == null) {
+                return null;
+            }
+
+            return (List<String>) popResponse;
+        } catch (JedisNoScriptException noScriptException) {
+            // The script cache was flushed (e.g. the redis server restarted). Re-register
+            // the script; the SHA is deterministic, so scriptSha stays valid and the next
+            // poll cycle retries.
+            loadScript();
+            return null;
+        }
+    }
+
+    @Override
+    protected long queueSize() {
+        try (Jedis jedis = jedisPool.getResource()) {
+            return jedis.zcard(queueName);
+        }
+    }
+
+    private String loadScript() {
+        try (InputStream stream = getClass().getResourceAsStream("/pop_batch.lua")) {
+            byte[] script = stream.readAllBytes();
+            try (Jedis jedis = jedisPool.getResource()) {
+                byte[] response = jedis.scriptLoad(script);
+                return new String(response);
+            }
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/redis-queues/src/main/java/io/orkes/conductor/queue/config/RedisQueueConfiguration.java b/redis-queues/src/main/java/io/orkes/conductor/queue/config/RedisQueueConfiguration.java
new file mode 100644
index 0000000..7fa1aa9
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/queue/config/RedisQueueConfiguration.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.queue.config; + +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Primary; + +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.QueueDAO; + +import io.orkes.conductor.queue.dao.ClusteredRedisQueueDAO; +import io.orkes.conductor.queue.dao.QueueRedisProperties; +import io.orkes.conductor.queue.dao.RedisQueueDAO; + +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; +import redis.clients.jedis.JedisCluster; +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.JedisSentinelPool; + +@Configuration +@Slf4j +public class RedisQueueConfiguration { + + @Bean + @Primary + @ConditionalOnProperty(name = "conductor.queue.type", havingValue = "redis_standalone") + public QueueDAO getQueueDAOStandalone( + JedisPool jedisPool, + MeterRegistry registry, + QueueRedisProperties queueRedisProperties, + ConductorProperties properties) { + return new RedisQueueDAO(registry, jedisPool, queueRedisProperties, properties); + } + + @Bean + @Primary + @ConditionalOnProperty(name = "conductor.queue.type", havingValue = "redis_sentinel") + public QueueDAO getQueueDAOSentinel( + JedisSentinelPool jedisSentinelPool, + MeterRegistry registry, + QueueRedisProperties queueRedisProperties, + ConductorProperties properties) { + return new RedisQueueDAO(registry, jedisSentinelPool, queueRedisProperties, properties); + } + + @Bean + @Primary + @ConditionalOnProperty(name = "conductor.queue.type", havingValue = "redis_cluster") + public QueueDAO getQueueDAOCluster( + JedisCluster jedisCluster, + MeterRegistry registry, + QueueRedisProperties queueRedisProperties, + ConductorProperties properties) { + return new ClusteredRedisQueueDAO(registry, jedisCluster, queueRedisProperties, properties); + } +} diff --git a/redis-queues/src/main/java/io/orkes/conductor/queue/dao/BaseRedisQueueDAO.java b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/BaseRedisQueueDAO.java new file mode 100644 index 0000000..47aafdf --- /dev/null +++ b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/BaseRedisQueueDAO.java @@ -0,0 +1,191 @@ +/* + * Copyright 2022 Orkes, Inc. + *
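Wiring one of the conditional beans in RedisQueueConfiguration above only takes the matching property. An illustrative application.properties for the standalone flavor; the conductor.queue.type value comes from the conditionals above, the conductor.redis.* keys follow Spring's relaxed binding of QueueRedisProperties shown later in this diff, and the conductor.db.type line is an assumption about how the JedisPool bean is provided elsewhere:

    # Illustrative values only.
    conductor.queue.type=redis_standalone
    # Assumption: the persistence module configured here supplies the JedisPool bean.
    conductor.db.type=redis_standalone
    # host:port:rack entries, semicolon separated (see QueueRedisProperties)
    conductor.redis.hosts=localhost:6379:us-east-1c
    conductor.redis.queue-namespace-prefix=conductor_queues
    conductor.redis.availability-zone=us-east-1c
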

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.queue.dao;
+
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import com.netflix.conductor.core.config.ConductorProperties;
+import com.netflix.conductor.core.events.queue.Message;
+import com.netflix.conductor.dao.QueueDAO;
+
+import io.orkes.conductor.mq.ConductorQueue;
+import io.orkes.conductor.mq.QueueMessage;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class BaseRedisQueueDAO implements QueueDAO {
+
+    private final String queueNamespace;
+
+    private final String queueShard;
+
+    private final ConcurrentHashMap<String, ConductorQueue> queues;
+
+    public BaseRedisQueueDAO(
+            QueueRedisProperties queueRedisProperties, ConductorProperties properties) {
+
+        // The stack is baked into the namespace for backward compatibility with DynoQueues
+        this.queueNamespace =
+                queueRedisProperties.getQueueNamespacePrefix() + "." + properties.getStack();
+
+        String az = queueRedisProperties.getAvailabilityZone();
+        this.queueShard = az.substring(az.length() - 1);
+        this.queues = new ConcurrentHashMap<>();
+    }
+
+    protected abstract ConductorQueue getConductorQueue(String queueKey);
+
+    private final ConductorQueue get(String queueName) {
+        // This key scheme is fully compatible with existing DynoQueues, which makes this
+        // DAO a drop-in replacement
+        String queueKey = queueNamespace + ".QUEUE." + queueName + "." + queueShard;
+        return queues.computeIfAbsent(queueName, (keyToCompute) -> getConductorQueue(queueKey));
+    }
+
+    @Override
+    public final void push(String queueName, String id, long offsetTimeInSecond) {
+        QueueMessage message = new QueueMessage(id, "", offsetTimeInSecond * 1000);
+        get(queueName).push(Arrays.asList(message));
+    }
+
+    @Override
+    public final void push(String queueName, String id, int priority, long offsetTimeInSecond) {
+        QueueMessage message = new QueueMessage(id, "", offsetTimeInSecond * 1000, priority);
+        get(queueName).push(Arrays.asList(message));
+    }
+
+    @Override
+    public final void push(String queueName, List<Message> messages) {
+        List<QueueMessage> queueMessages = new ArrayList<>();
+        for (Message message : messages) {
+            queueMessages.add(
+                    new QueueMessage(
+                            message.getId(), message.getPayload(), 0, message.getPriority()));
+        }
+        get(queueName).push(queueMessages);
+    }
+
+    @Override
+    public final boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) {
+        if (get(queueName).exists(id)) {
+            return false;
+        }
+        push(queueName, id, offsetTimeInSecond);
+        return true;
+    }
+
+    @Override
+    public final boolean pushIfNotExists(
+            String queueName, String id, int priority, long offsetTimeInSecond) {
+        if (get(queueName).exists(id)) {
+            return false;
+        }
+        push(queueName, id, priority, offsetTimeInSecond);
+        return true;
+    }
+
+    @Override
+    public final List<String> pop(String queueName, int count, int timeout) {
+        // Keep the timeout to a minimum of 100ms
+        if (timeout < 100) {
+            timeout = 100;
+        }
+        List<QueueMessage> messages = get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS);
+        return messages.stream().map(msg -> msg.getId()).collect(Collectors.toList());
+    }
+
+    @Override
+    public final List<Message> pollMessages(String queueName, int count, int timeout) {
+        List<QueueMessage> queueMessages =
+                get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS);
+        return queueMessages.stream()
+                .map(
+                        msg ->
+                                new Message(
+                                        msg.getId(),
+                                        msg.getPayload(),
+                                        msg.getId(),
+                                        msg.getPriority()))
+                .collect(Collectors.toList());
+    }
+
+    @Override
+    public final void remove(String queueName, String messageId) {
+        get(queueName).remove(messageId);
+    }
+
+    @Override
+    public final int getSize(String queueName) {
+        return (int) get(queueName).size();
+    }
+
+    @Override
+    public final boolean ack(String queueName, String messageId) {
+        return get(queueName).ack(messageId);
+    }
+
+    @Override
+    public final boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) {
+        return get(queueName).setUnacktimeout(messageId, unackTimeout);
+    }
+
+    @Override
+    public final void flush(String queueName) {
+        get(queueName).flush();
+    }
+
+    @Override
+    public final Map<String, Long> queuesDetail() {
+        Map<String, Long> sizes = new HashMap<>();
+        for (Map.Entry<String, ConductorQueue> entry : queues.entrySet()) {
+            sizes.put(entry.getKey(), entry.getValue().size());
+        }
+        return sizes;
+    }
+
+    @Override
+    public final Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
+        Map<String, Map<String, Map<String, Long>>> queueDetails = new HashMap<>();
+        for (ConductorQueue conductorRedisQueue : queues.values()) {
+            Map<String, Map<String, Long>> verbose = new HashMap<>();
+
+            Map<String, Long> sizes = new HashMap<>();
+            sizes.put("size", conductorRedisQueue.size());
+            sizes.put("uacked", 0L); // we do not keep a separate queue
+            verbose.put(conductorRedisQueue.getShardName(), sizes);
+            queueDetails.put(conductorRedisQueue.getName(), verbose);
+        }
+        return queueDetails;
+    }
+
+    @Override
+    public final boolean resetOffsetTime(String queueName, String id) {
+        return get(queueName).setUnacktimeout(id, 0);
+    }
+
+    @Override
+    public final boolean containsMessage(String queueName, String messageId) {
+        return get(queueName).exists(messageId);
+    }
+
+    public boolean postpone(
+            String queueName, String messageId, int priority, long postponeDurationInSeconds) {
+        push(queueName, messageId, priority, postponeDurationInSeconds);
+        return true;
+    }
+}
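To make the compatibility claim concrete: for a queue named _deciderQueue (the DECIDER_QUEUE constant the executor later in this diff relies on), the derivation in get() yields keys like the one below. Prefix, stack, and zone values are illustrative:

    class QueueKeyExample {
        public static void main(String[] args) {
            String prefix = "conductor_queues"; // conductor.redis.queue-namespace-prefix
            String stack = "test";              // ConductorProperties stack
            String az = "us-east-1c";           // conductor.redis.availability-zone

            String queueNamespace = prefix + "." + stack;
            String queueShard = az.substring(az.length() - 1); // "c"
            String queueKey = queueNamespace + ".QUEUE." + "_deciderQueue" + "." + queueShard;

            System.out.println(queueKey); // conductor_queues.test.QUEUE._deciderQueue.c
        }
    }

diff --git a/redis-queues/src/main/java/io/orkes/conductor/queue/dao/ClusteredRedisQueueDAO.java b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/ClusteredRedisQueueDAO.java
new file mode 100644
index 0000000..b796e60
--- /dev/null
+++ b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/ClusteredRedisQueueDAO.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *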

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.queue.dao; + +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.QueueDAO; + +import io.orkes.conductor.mq.ConductorQueue; +import io.orkes.conductor.mq.redis.cluster.ConductorRedisClusterQueue; + +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; +import redis.clients.jedis.JedisCluster; + +@Slf4j +public class ClusteredRedisQueueDAO extends BaseRedisQueueDAO implements QueueDAO { + + private final JedisCluster jedisCluster; + + private final MeterRegistry registry; + + public ClusteredRedisQueueDAO( + MeterRegistry registry, + JedisCluster jedisCluster, + QueueRedisProperties queueRedisProperties, + ConductorProperties conductorProperties) { + + super(queueRedisProperties, conductorProperties); + this.registry = registry; + this.jedisCluster = jedisCluster; + log.info("Queues initialized using {}", ClusteredRedisQueueDAO.class.getName()); + } + + @Override + protected ConductorQueue getConductorQueue(String queueKey) { + ConductorRedisClusterQueue queue = new ConductorRedisClusterQueue(queueKey, jedisCluster); + return queue; + } +} diff --git a/redis-queues/src/main/java/io/orkes/conductor/queue/dao/QueueRedisProperties.java b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/QueueRedisProperties.java new file mode 100644 index 0000000..6e18b85 --- /dev/null +++ b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/QueueRedisProperties.java @@ -0,0 +1,69 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.queue.dao; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +import com.netflix.conductor.core.config.ConductorProperties; + +import lombok.Data; + +@Configuration +@ConfigurationProperties("conductor.redis") +@Data +public class QueueRedisProperties { + + private final ConductorProperties conductorProperties; + + @Autowired + public QueueRedisProperties(ConductorProperties conductorProperties) { + this.conductorProperties = conductorProperties; + } + + /** + * Local rack / availability zone. For AWS deployments, the value is something like us-east-1a, + * etc. + */ + private String availabilityZone = "us-east-1c"; + + /** Redis Cluster details. Format is host:port:rack separated by semicolon */ + private String hosts = null; + + /** The prefix used to prepend workflow data in redis */ + private String workflowNamespacePrefix = null; + + /** The prefix used to prepend keys for queues in redis */ + private String queueNamespacePrefix = null; + + /** + * The domain name to be used in the key prefix for logical separation of workflow data and + * queues in a shared redis setup + */ + private String keyspaceDomain = null; + + /** + * The maximum number of connections that can be managed by the connection pool on a given + * instance + */ + private int maxConnectionsPerHost = 10; + + public String getQueuePrefix() { + String prefix = getQueueNamespacePrefix() + "." + conductorProperties.getStack(); + if (getKeyspaceDomain() != null) { + prefix = prefix + "." + getKeyspaceDomain(); + } + return prefix; + } +} diff --git a/redis-queues/src/main/java/io/orkes/conductor/queue/dao/RedisQueueDAO.java b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/RedisQueueDAO.java new file mode 100644 index 0000000..980695e --- /dev/null +++ b/redis-queues/src/main/java/io/orkes/conductor/queue/dao/RedisQueueDAO.java @@ -0,0 +1,48 @@ +/* + * Copyright 2022 Orkes, Inc. + *
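Since getQueuePrefix() composes three settings, a tiny sketch of the resulting keyspace strings (values are illustrative; the setters come from Lombok's @Data):

    import com.netflix.conductor.core.config.ConductorProperties;
    import io.orkes.conductor.queue.dao.QueueRedisProperties;

    class QueuePrefixDemo {
        public static void main(String[] args) {
            ConductorProperties app = new ConductorProperties();
            app.setStack("test");

            QueueRedisProperties props = new QueueRedisProperties(app);
            props.setQueueNamespacePrefix("conductor_queues");
            System.out.println(props.getQueuePrefix()); // conductor_queues.test

            props.setKeyspaceDomain("tenantA");
            System.out.println(props.getQueuePrefix()); // conductor_queues.test.tenantA
        }
    }
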

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.queue.dao; + +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.dao.QueueDAO; + +import io.orkes.conductor.mq.ConductorQueue; +import io.orkes.conductor.mq.redis.single.ConductorRedisQueue; + +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; +import redis.clients.jedis.JedisPoolAbstract; + +@Slf4j +public class RedisQueueDAO extends BaseRedisQueueDAO implements QueueDAO { + + private final JedisPoolAbstract jedisPool; + + private final MeterRegistry registry; + + public RedisQueueDAO( + MeterRegistry registry, + JedisPoolAbstract jedisPool, + QueueRedisProperties queueRedisProperties, + ConductorProperties conductorProperties) { + + super(queueRedisProperties, conductorProperties); + this.registry = registry; + this.jedisPool = jedisPool; + log.info("Queues initialized using {}", RedisQueueDAO.class.getName()); + } + + @Override + protected ConductorQueue getConductorQueue(String queueKey) { + return new ConductorRedisQueue(queueKey, jedisPool); + } +} diff --git a/redis-queues/src/main/resources/pop.lua b/redis-queues/src/main/resources/pop.lua new file mode 100644 index 0000000..0a91cd4 --- /dev/null +++ b/redis-queues/src/main/resources/pop.lua @@ -0,0 +1,16 @@ +local message_queue = KEYS[1] +local timeout = ARGV[1] +local new_timeout = ARGV[2] + +local msg_array = redis.call("ZRANGEBYSCORE", message_queue, 0, timeout, "LIMIT", 0, 1) +local msg = msg_array[1] +if msg == nil or #msg_array == 0 then + return nil +end + +local added = redis.call("ZADD", message_queue, "XX", "CH", new_timeout, msg) +if added == 0 then + return nil +end + +return msg \ No newline at end of file diff --git a/redis-queues/src/main/resources/pop_batch.lua b/redis-queues/src/main/resources/pop_batch.lua new file mode 100644 index 0000000..cd06cf6 --- /dev/null +++ b/redis-queues/src/main/resources/pop_batch.lua @@ -0,0 +1,18 @@ +local message_queue = KEYS[1] +local timeout = ARGV[1] +local new_timeout = ARGV[2] +local batch_size = ARGV[3] + +local msg_array = redis.call("ZRANGEBYSCORE", message_queue, 0, timeout, "WITHSCORES", "LIMIT", 0, batch_size) +if #msg_array == 0 then + return nil +end + +local j +j = 1 +for i = 1, #msg_array/2 do + local added = redis.call("ZADD", message_queue, "XX", "CH", new_timeout, msg_array[j]) + j = j + 2 +end + +return msg_array \ No newline at end of file diff --git a/redis-queues/src/test/java/io/orkes/conductor/mq/benchmark/ConductorRedisQueueTest.java b/redis-queues/src/test/java/io/orkes/conductor/mq/benchmark/ConductorRedisQueueTest.java new file mode 100644 index 0000000..648d911 --- /dev/null +++ b/redis-queues/src/test/java/io/orkes/conductor/mq/benchmark/ConductorRedisQueueTest.java @@ -0,0 +1,458 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.mq.benchmark; + +import java.time.Clock; +import java.time.Duration; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import org.junit.Assert; +import org.junit.Rule; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.utility.DockerImageName; + +import io.orkes.conductor.mq.QueueMessage; +import io.orkes.conductor.mq.redis.single.ConductorRedisQueue; + +import com.google.common.util.concurrent.Uninterruptibles; +import redis.clients.jedis.JedisPool; +import redis.clients.jedis.JedisPoolConfig; + +import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +public class ConductorRedisQueueTest { + + private static final String redisKeyPrefix = "test_queue"; + + private static final String queueName = "test"; + + @Rule + public static GenericContainer redis = + new GenericContainer(DockerImageName.parse("redis:6.2.6-alpine")) + .withExposedPorts(6379); + + @Rule static ConductorRedisQueue redisQueue; + + private static JedisPool jedisPool; + + @BeforeAll + public static void setUp() { + + redis.start(); + + JedisPoolConfig config = new JedisPoolConfig(); + config.setMinIdle(2); + config.setMaxTotal(10); + + jedisPool = new JedisPool(config, redis.getHost(), redis.getFirstMappedPort()); + redisQueue = new ConductorRedisQueue(queueName, jedisPool); + } + + private QueueMessage popOne() { + List messages = redisQueue.pop(1, 10, TimeUnit.MILLISECONDS); + if (messages.isEmpty()) { + return null; + } + return messages.get(0); + } + + @Test + public void testEmptyPoll() { + redisQueue.flush(); + ConductorRedisQueue redisQueue2 = new ConductorRedisQueue(queueName + "X", jedisPool); + int count = 0; + for (int i = 0; i < 10; i++) { + QueueMessage message = popOne(); + if (message != null) { + count++; + } + } + assertEquals(0, count); + } + + @Test + public void testExists() { + redisQueue.flush(); + String id = UUID.randomUUID().toString(); + QueueMessage msg = new QueueMessage(id, "Hello World-" + id); + msg.setTimeout(100, TimeUnit.MILLISECONDS); + redisQueue.push(Arrays.asList(msg)); + + assertTrue(redisQueue.exists(id)); + } + + @Test + public void testTimeoutUpdate() { + + redisQueue.flush(); + + String id = UUID.randomUUID().toString(); + QueueMessage msg = new QueueMessage(id, "Hello World-" + id); + msg.setTimeout(100, TimeUnit.MILLISECONDS); + redisQueue.push(Arrays.asList(msg)); + + QueueMessage popped = popOne(); + assertNull(popped); + + Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS); + + popped = popOne(); + Assert.assertNotNull(popped); + assertEquals(id, popped.getId()); + + boolean updated = redisQueue.setUnacktimeout(id, 500); + assertTrue(updated); + popped = popOne(); + assertNull(popped); + + Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS); + popped = popOne(); + Assert.assertNotNull(popped); + + redisQueue.ack(id); + popped = popOne(); + assertNull(popped); + + QueueMessage found = redisQueue.get(id); + assertNull(found); + } + + @Test + public void 
testConcurrency() throws InterruptedException, ExecutionException { + + redisQueue.flush(); + + final int count = 100; + final AtomicInteger published = new AtomicInteger(0); + + ScheduledExecutorService ses = Executors.newScheduledThreadPool(6); + CountDownLatch publishLatch = new CountDownLatch(1); + Runnable publisher = + new Runnable() { + + @Override + public void run() { + List messages = new LinkedList<>(); + for (int i = 0; i < 10; i++) { + QueueMessage msg = + new QueueMessage( + UUID.randomUUID().toString(), "Hello World-" + i); + msg.setPriority(new Random().nextInt(98)); + messages.add(msg); + } + if (published.get() >= count) { + publishLatch.countDown(); + return; + } + + published.addAndGet(messages.size()); + redisQueue.push(messages); + } + }; + + for (int p = 0; p < 3; p++) { + ses.scheduleWithFixedDelay(publisher, 1, 1, TimeUnit.MILLISECONDS); + } + publishLatch.await(); + CountDownLatch latch = new CountDownLatch(count); + List allMsgs = new CopyOnWriteArrayList<>(); + AtomicInteger consumed = new AtomicInteger(0); + AtomicInteger counter = new AtomicInteger(0); + Runnable consumer = + () -> { + if (consumed.get() >= count) { + return; + } + List popped = redisQueue.pop(100, 1, TimeUnit.MILLISECONDS); + allMsgs.addAll(popped); + consumed.addAndGet(popped.size()); + popped.stream().forEach(p -> latch.countDown()); + counter.incrementAndGet(); + }; + for (int c = 0; c < 2; c++) { + ses.scheduleWithFixedDelay(consumer, 1, 10, TimeUnit.MILLISECONDS); + } + Uninterruptibles.awaitUninterruptibly(latch); + System.out.println( + "Consumed: " + + consumed.get() + + ", all: " + + allMsgs.size() + + " counter: " + + counter.get()); + Set uniqueMessages = allMsgs.stream().collect(Collectors.toSet()); + + assertEquals(count, allMsgs.size()); + assertEquals(count, uniqueMessages.size()); + List more = redisQueue.pop(1, 1, TimeUnit.SECONDS); + // If we published more than we consumed since we could've published more than we consumed + // in which case this + // will not be empty + if (published.get() == consumed.get()) assertEquals(0, more.size()); + else assertEquals(1, more.size()); + + ses.shutdownNow(); + } + + @Test + public void testSetTimeout() { + + redisQueue.flush(); + + QueueMessage msg = new QueueMessage("x001yx", "Hello World"); + msg.setPriority(3); + msg.setTimeout(10_000); + redisQueue.push(Arrays.asList(msg)); + + List popped = redisQueue.pop(1, 1, TimeUnit.SECONDS); + assertTrue(popped.isEmpty()); + + boolean updated = redisQueue.setUnacktimeout(msg.getId(), 0); + assertTrue(updated); + popped = redisQueue.pop(2, 1, TimeUnit.SECONDS); + assertEquals(1, popped.size()); + } + + @Test + public void testPushAgain() { + + redisQueue.flush(); + + QueueMessage msg = new QueueMessage(UUID.randomUUID().toString(), null); + msg.setTimeout(100); + msg.setPriority(0); + redisQueue.push(Arrays.asList(msg)); + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + + List popped = redisQueue.pop(1, 100, TimeUnit.MILLISECONDS); + assertEquals(1, popped.size()); + + msg.setTimeout(10_000); + redisQueue.push(Arrays.asList(msg)); // push again! + Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); + popped = redisQueue.pop(1, 100, TimeUnit.MILLISECONDS); + assertEquals(0, popped.size()); // Nothing should come out + + msg.setTimeout(1); + redisQueue.push(Arrays.asList(msg)); // push again! 
+ Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS); + popped = redisQueue.pop(1, 10, TimeUnit.MILLISECONDS); + assertEquals(1, popped.size()); // Now it should come out + } + + @Test + public void testClearQueues() { + redisQueue.flush(); + int count = 10; + List messages = new LinkedList<>(); + for (int i = 0; i < count; i++) { + QueueMessage msg = new QueueMessage("x" + i, "Hello World-" + i); + msg.setPriority(count - i); + messages.add(msg); + } + + redisQueue.push(messages); + assertEquals(count, redisQueue.size()); + redisQueue.flush(); + assertEquals(0, redisQueue.size()); + } + + @Test + public void testPriority() { + redisQueue.flush(); + int count = 10; + List messages = new LinkedList<>(); + for (int i = 0; i < count; i++) { + int priority = new Random().nextInt(20); + QueueMessage msg = + new QueueMessage( + "x" + UUID.randomUUID().toString() + "-" + priority, + "Hello World-" + i); + msg.setPriority(priority); + messages.add(msg); + } + redisQueue.push(messages); + assertEquals(count, redisQueue.size()); + + List popped = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped); + assertEquals(count, popped.size()); + for (int i = 0; i < popped.size(); i++) { + QueueMessage msg = popped.get(i); + // assertEquals(msg.getPriority(), i); + System.out.println(msg.getId()); + } + } + + @Test + public void testDelayedPriority() { + + redisQueue.flush(); + int count = 100; + List messages = new LinkedList<>(); + for (int i = 0; i < count; i++) { + int priority = new Random().nextInt(1000) + 1; + priority = i + 1; + QueueMessage msg = + new QueueMessage("x" + UUID.randomUUID() + "-" + priority, "Hello World-" + i); + msg.setPriority(priority); + msg.setTimeout(1000); + messages.add(msg); + } + redisQueue.push(messages); + assertEquals(count, redisQueue.size()); + + Uninterruptibles.sleepUninterruptibly(Duration.ofMillis(1100)); + + List popped = redisQueue.pop(count, 1000, TimeUnit.MILLISECONDS); + assertNotNull(popped); + assertEquals(count, popped.size()); + int last = 0; + for (int i = 0; i < popped.size(); i++) { + QueueMessage msg = popped.get(i); + System.out.println(msg.getId()); + int priority = + Integer.parseInt(msg.getId().substring(msg.getId().lastIndexOf('-') + 1)); + // assertTrue("Priority " + priority + " not greater than " + last, priority >= last ); + last = priority; + } + } + + @Test + public void testScoreCalculation() { + Clock clock = Clock.systemDefaultZone(); + long now = clock.millis(); + QueueMessage msg = new QueueMessage("a", null, 30, 44553333); + double score = redisQueue.getScore(now, msg); + System.out.println("diff: " + (score - now)); + System.out.printf("%.5f\n", score); + } + + @Test + public void testAck() { + redisQueue.flush(); + redisQueue.setQueueUnackTime(1000); // 1 sec + assertEquals(1000, redisQueue.getQueueUnackTime()); + + int count = 10; + List messages = new LinkedList<>(); + for (int i = 0; i < count; i++) { + QueueMessage msg = new QueueMessage("x" + i, "Hello World-" + i); + msg.setPriority(count - i); + messages.add(msg); + } + redisQueue.push(messages); + + assertEquals(count, redisQueue.size()); + List popped = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped); + assertEquals(count, popped.size()); + + // Wait for time longer than queue unack and messages should be available again! 
+ Uninterruptibles.sleepUninterruptibly(1200, TimeUnit.MILLISECONDS); + popped = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped); + assertEquals(count, popped.size()); + + // One more time, just to confirm! + Uninterruptibles.sleepUninterruptibly(1200, TimeUnit.MILLISECONDS); + List popped2 = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped2); + assertEquals(count, popped2.size()); + popped2.stream().forEach(msg -> redisQueue.ack(msg.getId())); + + popped2 = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped2); + assertEquals(0, popped2.size()); + + // try to ack again + for (QueueMessage message : popped) { + assertFalse(redisQueue.ack(message.getId())); + } + + assertEquals(0, redisQueue.size()); + + // reset it back + redisQueue.setQueueUnackTime(30_000); + } + + @Test + public void testRemove() { + redisQueue.flush(); + + int count = 10; + List messages = new LinkedList<>(); + for (int i = 0; i < count; i++) { + QueueMessage msg = new QueueMessage("x" + i, "Hello World-" + i); + msg.setPriority(count - i); + messages.add(msg); + } + redisQueue.push(messages); + + assertEquals(count, redisQueue.size()); + List popped = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped); + assertEquals(count, popped.size()); + + popped.stream().forEach(msg -> redisQueue.remove(msg.getId())); + assertEquals(0, redisQueue.size()); + popped = redisQueue.pop(count, 100, TimeUnit.MILLISECONDS); + assertNotNull(popped); + assertEquals(0, popped.size()); + } + + @Test + public void testAll() { + + redisQueue.flush(); + assertEquals(0, redisQueue.size()); + + int count = 10; + List messages = new LinkedList<>(); + for (int i = 0; i < count; i++) { + QueueMessage msg = new QueueMessage("" + i, "Hello World-" + i); + msg.setPriority(count - 1); + messages.add(msg); + } + redisQueue.push(messages); + + long size = redisQueue.size(); + assertEquals(count, size); + + List poped = redisQueue.pop(count, 1, TimeUnit.SECONDS); + assertNotNull(poped); + assertEquals(count, poped.size()); + assertEquals(messages, poped); + + Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); + + for (QueueMessage msg : messages) { + QueueMessage found = redisQueue.get(msg.getId()); + assertNotNull(found); + assertEquals(msg.getId(), found.getId()); + } + assertNull(redisQueue.get("some fake id")); + assertEquals(count, redisQueue.size()); + List messages3 = redisQueue.pop(count, 1, TimeUnit.SECONDS); + if (messages3.size() < count) { + List messages4 = redisQueue.pop(count, 1, TimeUnit.SECONDS); + messages3.addAll(messages4); + } + } +} diff --git a/scripts/run_local.sh b/scripts/run_local.sh new file mode 100755 index 0000000..7037f3a --- /dev/null +++ b/scripts/run_local.sh @@ -0,0 +1,27 @@ +#!/bin/sh +UI_PORT=1234 +read -p "Enter the port for UI [1234]: " UI_PORT + * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *
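The OrkesWorkflowExecutor defined below routes every post-update decide() through a small fixed pool whose work queue blocks briefly instead of rejecting immediately. A distilled sketch of that back-pressure idiom (class and method names here are illustrative):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    class BoundedExecutorSketch {
        // offer() waits up to 100 ms before giving up, so a full queue slows
        // producers down instead of failing them immediately.
        static ThreadPoolExecutor create(int threads) {
            return new ThreadPoolExecutor(
                    threads, threads, 0L, TimeUnit.SECONDS,
                    new ArrayBlockingQueue<Runnable>(threads) {
                        @Override
                        public boolean offer(Runnable runnable) {
                            try {
                                return super.offer(runnable, 100, TimeUnit.MILLISECONDS);
                            } catch (InterruptedException ie) {
                                Thread.currentThread().interrupt(); // restore the flag
                                return false;
                            }
                        }
                    });
        }
    }

When the grace period expires, offer() returns false and the pool throws RejectedExecutionException, which updateTask() below turns into a deferred decide by pushing the workflow onto the decider queue.
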

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.core.execution; + +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.Optional; +import java.util.concurrent.*; + +import org.apache.commons.lang3.StringUtils; +import org.springframework.context.annotation.Lazy; +import org.springframework.context.annotation.Primary; +import org.springframework.stereotype.Component; + +import com.netflix.conductor.common.metadata.tasks.TaskResult; +import com.netflix.conductor.core.config.ConductorProperties; +import com.netflix.conductor.core.dal.ExecutionDAOFacade; +import com.netflix.conductor.core.exception.NotFoundException; +import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry; +import com.netflix.conductor.core.listener.WorkflowStatusListener; +import com.netflix.conductor.core.metadata.MetadataMapperService; +import com.netflix.conductor.core.utils.IDGenerator; +import com.netflix.conductor.core.utils.ParametersUtils; +import com.netflix.conductor.core.utils.QueueUtils; +import com.netflix.conductor.core.utils.Utils; +import com.netflix.conductor.dao.MetadataDAO; +import com.netflix.conductor.dao.QueueDAO; +import com.netflix.conductor.metrics.Monitors; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.service.ExecutionLockService; + +import io.orkes.conductor.id.TimeBasedUUIDGenerator; +import io.orkes.conductor.metrics.MetricsCollector; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import lombok.extern.slf4j.Slf4j; + +import static com.netflix.conductor.core.utils.Utils.DECIDER_QUEUE; +import static com.netflix.conductor.model.TaskModel.Status.SCHEDULED; + +@Component +@Slf4j +@Primary +public class OrkesWorkflowExecutor extends WorkflowExecutor { + + private static final LocalDateTime ORKES_EPOCH_TIME = LocalDateTime.of(2021, 1, 1, 0, 0); + + + private final QueueDAO queueDAO; + + + + private final ExecutionDAOFacade orkesExecutionDAOFacade; + private final SystemTaskRegistry systemTaskRegistry; + private final ExecutorService taskUpdateExecutor; + + private final MetricsCollector metricsCollector; + + public OrkesWorkflowExecutor( + DeciderService deciderService, + MetadataDAO metadataDAO, + QueueDAO queueDAO, + MetadataMapperService metadataMapperService, + WorkflowStatusListener workflowStatusListener, + ExecutionDAOFacade executionDAOFacade, + ConductorProperties properties, + ExecutionLockService executionLockService, + @Lazy SystemTaskRegistry systemTaskRegistry, + ParametersUtils parametersUtils, + IDGenerator idGenerator, + MetricsCollector metricsCollector) { + super( + deciderService, + metadataDAO, + queueDAO, + metadataMapperService, + workflowStatusListener, + executionDAOFacade, + properties, + executionLockService, + systemTaskRegistry, + parametersUtils, + idGenerator); + + this.queueDAO = queueDAO; + this.orkesExecutionDAOFacade = executionDAOFacade; + this.systemTaskRegistry = systemTaskRegistry; + this.metricsCollector = metricsCollector; + + int threadPoolSize = Runtime.getRuntime().availableProcessors() * 10; + this.taskUpdateExecutor = + new ThreadPoolExecutor( + threadPoolSize, + threadPoolSize, + 0, + TimeUnit.SECONDS, + new 
ArrayBlockingQueue<>(threadPoolSize) { + @Override + public boolean offer(Runnable runnable) { + try { + return super.offer(runnable, 100, TimeUnit.MILLISECONDS); + } catch (InterruptedException ie) { + return false; + } + } + }, + new ThreadFactoryBuilder().setNameFormat("task-update-thread-%d").build()); + + log.info("OrkesWorkflowExecutor initialized"); + } + + public void updateTask(TaskResult taskResult) { + if (taskResult == null) { + throw new RuntimeException("Task object is null"); + } + + log.trace("Update Task {} - {}", taskResult.getTaskId(), taskResult.getStatus()); + + String workflowId = taskResult.getWorkflowInstanceId(); + + TaskModel task = + Optional.ofNullable(orkesExecutionDAOFacade.getTaskModel(taskResult.getTaskId())) + .orElseThrow( + () -> + new NotFoundException( + "No such task found by id: " + + taskResult.getTaskId())); + + log.trace( + "Task: {} belonging to Workflow {} being updated", + task, + task.getWorkflowInstanceId()); + + String taskQueueName = QueueUtils.getQueueName(task); + + if (task.getStatus().isTerminal()) { + // Task was already updated.... + queueDAO.remove(taskQueueName, taskResult.getTaskId()); + log.debug( + "Task: {} has already finished execution with status: {} within workflow: {}. Removed task from queue: {}", + task.getTaskId(), + task.getStatus(), + task.getWorkflowInstanceId(), + taskQueueName); + Monitors.recordUpdateConflict(task.getTaskType(), "", task.getStatus()); + return; + } + + // for system tasks, setting to SCHEDULED would mean restarting the task which is + // undesirable + // for worker tasks, set status to SCHEDULED and push to the queue + if (!systemTaskRegistry.isSystemTask(task.getTaskType()) + && taskResult.getStatus() == TaskResult.Status.IN_PROGRESS) { + task.setStatus(SCHEDULED); + } else { + task.setStatus(TaskModel.Status.valueOf(taskResult.getStatus().name())); + } + task.setOutputMessage(taskResult.getOutputMessage()); + task.setReasonForIncompletion(taskResult.getReasonForIncompletion()); + task.setWorkerId(taskResult.getWorkerId()); + task.setCallbackAfterSeconds(taskResult.getCallbackAfterSeconds()); + task.setOutputData(taskResult.getOutputData()); + task.setSubWorkflowId(taskResult.getSubWorkflowId()); + + if (StringUtils.isNotBlank(taskResult.getExternalOutputPayloadStoragePath())) { + task.setExternalOutputPayloadStoragePath( + taskResult.getExternalOutputPayloadStoragePath()); + } + + if (task.getStatus().isTerminal()) { + task.setEndTime(System.currentTimeMillis()); + } + + // Update message in Task queue based on Task status + switch (task.getStatus()) { + case COMPLETED: + case CANCELED: + case FAILED: + case FAILED_WITH_TERMINAL_ERROR: + case TIMED_OUT: + try { + queueDAO.remove(taskQueueName, taskResult.getTaskId()); + log.debug( + "Task: {} removed from taskQueue: {} since the task status is {}", + task, + taskQueueName, + task.getStatus().name()); + } catch (Exception e) { + // Ignore exceptions on queue remove as it wouldn't impact task and workflow + // execution, and will be cleaned up eventually + String errorMsg = + String.format( + "Error removing the message in queue for task: %s for workflow: %s", + task.getTaskId(), workflowId); + log.warn(errorMsg, e); + Monitors.recordTaskQueueOpError(task.getTaskType(), ""); + } + break; + case IN_PROGRESS: + case SCHEDULED: + try { + long callBack = taskResult.getCallbackAfterSeconds(); + queueDAO.postpone( + taskQueueName, task.getTaskId(), task.getWorkflowPriority(), callBack); + log.debug( + "Task: {} postponed in taskQueue: {} since the task 
status is {} with callbackAfterSeconds: {}",
+                            task,
+                            taskQueueName,
+                            task.getStatus().name(),
+                            callBack);
+                } catch (Exception e) {
+                    // Throw exceptions on queue postpone, this would impact task execution
+                    String errorMsg =
+                            String.format(
+                                    "Error postponing the message in queue for task: %s for workflow: %s",
+                                    task.getTaskId(), workflowId);
+                    log.error(errorMsg, e);
+                    Monitors.recordTaskQueueOpError(task.getTaskType(), "");
+                    throw new RuntimeException(e);
+                }
+                break;
+            default:
+                break;
+        }
+
+        // Throw an exception if the operations below fail, to avoid workflow inconsistencies.
+        try {
+            orkesExecutionDAOFacade.updateTask(task);
+        } catch (Exception e) {
+            String errorMsg =
+                    String.format(
+                            "Error updating task: %s for workflow: %s",
+                            task.getTaskId(), workflowId);
+            log.error(errorMsg, e);
+            Monitors.recordTaskUpdateError(task.getTaskType(), "");
+            throw new RuntimeException(e);
+        }
+
+        taskResult.getLogs().forEach(taskExecLog -> taskExecLog.setTaskId(task.getTaskId()));
+        orkesExecutionDAOFacade.addTaskExecLog(taskResult.getLogs());
+
+        if (task.getStatus().isTerminal()) {
+            long duration = getTaskDuration(0, task);
+            long lastDuration = task.getEndTime() - task.getStartTime();
+            Monitors.recordTaskExecutionTime(
+                    task.getTaskDefName(), duration, true, task.getStatus());
+            Monitors.recordTaskExecutionTime(
+                    task.getTaskDefName(), lastDuration, false, task.getStatus());
+        }
+
+        try {
+            taskUpdateExecutor.submit(() -> decide(workflowId));
+        } catch (RejectedExecutionException ree) {
+            metricsCollector.getCounter("task_update_deferred").increment();
+            queueDAO.push(
+                    Utils.DECIDER_QUEUE,
+                    taskResult.getWorkflowInstanceId(),
+                    getWorkflowFIFOPriority(
+                            taskResult.getWorkflowInstanceId(), task.getWorkflowPriority()),
+                    0);
+        }
+    }
+
+    private long getTaskDuration(long s, TaskModel task) {
+        long duration = task.getEndTime() - task.getStartTime();
+        s += duration;
+        if (task.getRetriedTaskId() == null) {
+            return s;
+        }
+        return s
+                + getTaskDuration(s, orkesExecutionDAOFacade.getTaskModel(task.getRetriedTaskId()));
+    }
+
+    public void addTaskToQueue(TaskModel task) {
+        // put in queue
+        String taskQueueName = QueueUtils.getQueueName(task);
+        if (task.getCallbackAfterSeconds() > 0) {
+            queueDAO.push(
+                    taskQueueName,
+                    task.getTaskId(),
+                    task.getWorkflowPriority(),
+                    task.getCallbackAfterSeconds());
+        } else {
+            // Tasks should be prioritized based on the start time of the workflow
+            int priority =
+                    getWorkflowFIFOPriority(
+                            task.getWorkflowInstanceId(), task.getWorkflowPriority());
+            queueDAO.push(taskQueueName, task.getTaskId(), priority, 0);
+        }
+        log.trace(
+                "Added task {} with priority {} to queue {} with call back seconds {}",
+                task,
+                task.getWorkflowPriority(),
+                taskQueueName,
+                task.getCallbackAfterSeconds());
+    }
+
+    static int getWorkflowFIFOPriority(String workflowId, int priority) {
+        if (priority != 0) {
+            return priority;
+        }
+        long workflowCreationTime = TimeBasedUUIDGenerator.getDate(workflowId);
+        LocalDateTime creationTime =
+                LocalDateTime.ofInstant(
+                        Instant.ofEpochMilli(workflowCreationTime), ZoneId.systemDefault());
+        long secondsFromOrkesEpoch = Duration.between(ORKES_EPOCH_TIME, creationTime).getSeconds();
+        return Long.valueOf(secondsFromOrkesEpoch).intValue();
+    }
+}
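The decider hand-off above has a fallback worth noting: when the bounded taskUpdateExecutor rejects the decide job, the workflow id is pushed back onto the decider queue with a FIFO priority derived from the workflow's creation time, so older workflows are evaluated first. A minimal sketch of that priority arithmetic, with the TimeBasedUUIDGenerator lookup replaced by an assumed creation timestamp in epoch millis:

    // Sketch only: mirrors the fallback branch of getWorkflowFIFOPriority.
    static int fifoPriority(long workflowCreationTimeMillis, int priority) {
        if (priority != 0) {
            return priority; // an explicit workflow priority always wins
        }
        LocalDateTime creationTime =
                LocalDateTime.ofInstant(
                        Instant.ofEpochMilli(workflowCreationTimeMillis), ZoneId.systemDefault());
        // Seconds elapsed since 2021-01-01 fit comfortably in an int for decades.
        return (int)
                Duration.between(LocalDateTime.of(2021, 1, 1, 0, 0), creationTime).getSeconds();
    }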
diff --git a/server/src/main/java/com/netflix/conductor/core/execution/mapper/OrkesForkJoinDynamicTaskMapper.java b/server/src/main/java/com/netflix/conductor/core/execution/mapper/OrkesForkJoinDynamicTaskMapper.java
new file mode 100644
index 0000000..27aa3fc
--- /dev/null
+++ b/server/src/main/java/com/netflix/conductor/core/execution/mapper/OrkesForkJoinDynamicTaskMapper.java
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.core.execution.mapper;
+
+import java.util.*;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+import com.netflix.conductor.annotations.VisibleForTesting;
+import com.netflix.conductor.common.metadata.tasks.TaskType;
+import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList;
+import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
+import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
+import com.netflix.conductor.core.exception.TerminateWorkflowException;
+import com.netflix.conductor.core.utils.IDGenerator;
+import com.netflix.conductor.core.utils.ParametersUtils;
+import com.netflix.conductor.dao.MetadataDAO;
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+/**
+ * An implementation of {@link TaskMapper} to map a {@link WorkflowTask} of type {@link
+ * TaskType#FORK_JOIN_DYNAMIC} to a LinkedList of {@link TaskModel} beginning with a {@link
+ * TaskType#TASK_TYPE_FORK}, followed by the user defined dynamic tasks and a {@link TaskType#JOIN}
+ * at the end
+ */
+@Component
+public class OrkesForkJoinDynamicTaskMapper implements TaskMapper {
+
+    public static final Logger LOGGER =
+            LoggerFactory.getLogger(OrkesForkJoinDynamicTaskMapper.class);
+
+    private final IDGenerator idGenerator;
+    private final ParametersUtils parametersUtils;
+    private final ObjectMapper objectMapper;
+    private final MetadataDAO metadataDAO;
+    private static final TypeReference<List<WorkflowTask>> ListOfWorkflowTasks =
+            new TypeReference<>() {};
+
+    @Autowired
+    public OrkesForkJoinDynamicTaskMapper(
+            IDGenerator idGenerator,
+            ParametersUtils parametersUtils,
+            ObjectMapper objectMapper,
+            MetadataDAO metadataDAO) {
+        this.idGenerator = idGenerator;
+        this.parametersUtils = parametersUtils;
+        this.objectMapper = objectMapper;
+        this.metadataDAO = metadataDAO;
+        LOGGER.info("OrkesForkJoinDynamicTaskMapper initialized");
+    }
+
+    @Override
+    public TaskType getTaskType() {
+        return TaskType.FORK_JOIN_DYNAMIC;
+    }
+
+    /**
+     * This method gets the list of tasks that need to be scheduled when the task to be scheduled
+     * is of type {@link TaskType#FORK_JOIN_DYNAMIC}. Creates a Fork Task, followed by the Dynamic
+     * tasks and a final JOIN task.
+     *

+     * The definitions of the dynamic forks that need to be scheduled are available in the
+     * {@link WorkflowTask#getInputParameters()} which are accessed using the {@link
+     * TaskMapperContext#getWorkflowTask()}. The dynamic fork task definitions are referred to by
+     * a key value, either by {@link WorkflowTask#getDynamicForkTasksParam()} or by {@link
+     * WorkflowTask#getDynamicForkJoinTasksParam()}. When creating the list of tasks to be
+     * scheduled, a set of preconditions is validated:
+     *
+     *   • If the input parameter representing the Dynamic fork tasks is available as part of
+     *     {@link WorkflowTask#getDynamicForkTasksParam()} then the input for the dynamic task is
+     *     validated to be a map by using {@link WorkflowTask#getDynamicForkTasksInputParamName()}
+     *   • If the input parameter representing the Dynamic fork tasks is available as part of
+     *     {@link WorkflowTask#getDynamicForkJoinTasksParam()} then the input for the dynamic
+     *     tasks is available in the payload of the tasks definition.
+     *   • A check is performed that the task following the dynamic fork in the {@link
+     *     WorkflowDef} is a {@link TaskType#JOIN}
+     *
+     * @param taskMapperContext: A wrapper class containing the {@link WorkflowTask}, {@link
+     *     WorkflowDef}, {@link WorkflowModel} and a string representation of the TaskId
+     * @return List of tasks in the following order:
+     *   • {@link TaskType#TASK_TYPE_FORK} with {@link TaskModel.Status#COMPLETED}
+     *   • Might be any kind of task, but in most cases is a UserDefinedTask with {@link
+     *     TaskModel.Status#SCHEDULED}
+     *   • {@link TaskType#JOIN} with {@link TaskModel.Status#IN_PROGRESS}
+     * @throws TerminateWorkflowException In case of:
+     *   • When the task after {@link TaskType#FORK_JOIN_DYNAMIC} is not a {@link TaskType#JOIN}
+     *   • When the input parameters for the dynamic tasks are not of type {@link Map}
+     */
+    @Override
+    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
+            throws TerminateWorkflowException {
+        LOGGER.debug("TaskMapperContext {} in ForkJoinDynamicTaskMapper", taskMapperContext);
+
+        WorkflowTask workflowTask = taskMapperContext.getWorkflowTask();
+        WorkflowModel workflowModel = taskMapperContext.getWorkflowModel();
+        int retryCount = taskMapperContext.getRetryCount();
+
+        List<TaskModel> mappedTasks = new LinkedList<>();
+        // Get the list of dynamic tasks and the input for the tasks
+        Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> workflowTasksAndInputPair =
+                Optional.ofNullable(workflowTask.getDynamicForkTasksParam())
+                        .map(
+                                dynamicForkTaskParam ->
+                                        getDynamicForkTasksAndInput(
+                                                workflowTask, workflowModel, dynamicForkTaskParam))
+                        .orElseGet(
+                                () -> getDynamicForkJoinTasksAndInput(workflowTask, workflowModel));
+
+        List<WorkflowTask> dynForkTasks = workflowTasksAndInputPair.getLeft();
+        Map<String, Map<String, Object>> tasksInput = workflowTasksAndInputPair.getRight();
+
+        // Create Fork Task which needs to be followed by the dynamic tasks
+        TaskModel forkDynamicTask = createDynamicForkTask(taskMapperContext, dynForkTasks);
+
+        mappedTasks.add(forkDynamicTask);
+
+        Optional<TaskModel> exists =
+                workflowModel.getTasks().stream()
+                        .filter(
+                                task ->
+                                        task.getReferenceTaskName()
+                                                .equals(
+                                                        taskMapperContext
+                                                                .getWorkflowTask()
+                                                                .getTaskReferenceName()))
+                        .findAny();
+        List<String> joinOnTaskRefs = new LinkedList<>();
+
+        if (!exists.isPresent()) {
+            // Add each dynamic task to the mapped tasks and also get the last dynamic task in the
+            // list, which indicates that the following task after that needs to be a join task
+            for (WorkflowTask dynForkTask :
+                    dynForkTasks) { // TODO this is a cyclic dependency, break it out using
+                // function composition
+                List<TaskModel> forkedTasks =
+                        taskMapperContext
+                                .getDeciderService()
+                                .getTasksToBeScheduled(workflowModel, dynForkTask, retryCount);
+
+                for (TaskModel forkedTask : forkedTasks) {
+                    Map<String, Object> forkedTaskInput =
+                            tasksInput.get(forkedTask.getReferenceTaskName());
+                    forkedTask.getInputData().putAll(forkedTaskInput);
+                }
+                mappedTasks.addAll(forkedTasks);
+                // Get the last of the dynamic tasks so that the join can be performed once this
+                // task is done
+                TaskModel last = forkedTasks.get(forkedTasks.size() - 1);
+                joinOnTaskRefs.add(last.getReferenceTaskName());
+            }
+        }
+
+        // From the workflow definition get the next task and make sure that it is a JOIN task.
+        // The dynamic fork tasks need to be followed by a join task
+        WorkflowTask joinWorkflowTask =
+                workflowModel
+                        .getWorkflowDefinition()
+                        .getNextTask(workflowTask.getTaskReferenceName());
+
+        if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) {
+            throw new TerminateWorkflowException(
+                    "Dynamic join definition is not followed by a join task. Check the workflow definition.");
+        }
+
+        // Create Join task
+        HashMap<String, Object> joinInput = new HashMap<>();
+        joinInput.put("joinOn", joinOnTaskRefs);
+        TaskModel joinTask = createJoinTask(workflowModel, joinWorkflowTask, joinInput);
+        mappedTasks.add(joinTask);
+
+        return mappedTasks;
+    }
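As a concrete reference for the contract above, the `dynamic_fork` task in the workflows.json sample later in this diff names its two keys `forkedTasks` and `forkedTasksInputs`. A hedged sketch of an upstream task output that this mapper could consume (task names and reference names are illustrative):

    // Sketch only: the shape expected behind dynamicForkTasksParam /
    // dynamicForkTasksInputParamName.
    Map<String, Object> buildDynamicForkPrepOutput() {
        List<Map<String, Object>> forkedTasks =
                List.of(
                        Map.of("name", "simple_task_1", "taskReferenceName", "st1", "type", "SIMPLE"),
                        Map.of("name", "simple_task_2", "taskReferenceName", "st2", "type", "SIMPLE"));
        // Per-task input, keyed by task reference name.
        Map<String, Object> forkedTasksInputs =
                Map.of("st1", Map.of("value", 1), "st2", Map.of("value", 2));
        return Map.of("forkedTasks", forkedTasks, "forkedTasksInputs", forkedTasksInputs);
    }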
+
+    /**
+     * This method creates a FORK task and adds the list of dynamic fork tasks keyed by
+     * "forkedTaskDefs" and their names keyed by "forkedTasks" into {@link TaskModel#getInputData()}
+     *
+     * @param taskMapperContext: The {@link TaskMapperContext} which wraps workflowTask, workflowDef
+     *     and workflowModel
+     * @param dynForkTasks: The list of dynamic forked tasks, the reference names of these tasks
+     *     will be added to the forkDynamicTask
+     * @return A new instance of {@link TaskModel} representing a {@link TaskType#TASK_TYPE_FORK}
+     */
+    @VisibleForTesting
+    TaskModel createDynamicForkTask(
+            TaskMapperContext taskMapperContext, List<WorkflowTask> dynForkTasks) {
+        TaskModel forkDynamicTask = taskMapperContext.createTaskModel();
+        forkDynamicTask.setTaskType(TaskType.TASK_TYPE_FORK);
+        forkDynamicTask.setTaskDefName(TaskType.TASK_TYPE_FORK);
+        forkDynamicTask.setStartTime(System.currentTimeMillis());
+        forkDynamicTask.setEndTime(System.currentTimeMillis());
+        forkDynamicTask.setExecuted(true);
+        List<String> forkedTaskNames =
+                dynForkTasks.stream()
+                        .map(WorkflowTask::getTaskReferenceName)
+                        .collect(Collectors.toList());
+        forkDynamicTask.getInputData().put("forkedTasks", forkedTaskNames);
+        forkDynamicTask
+                .getInputData()
+                .put(
+                        "forkedTaskDefs",
+                        dynForkTasks); // TODO: Remove this parameter in the later releases
+        forkDynamicTask.setStatus(TaskModel.Status.COMPLETED);
+        return forkDynamicTask;
+    }
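The FORK task built above is bookkeeping: it is created already COMPLETED and marked executed, so the decider never runs it; its input simply records what was forked. Under the illustrative names from the previous sketch, the recorded input would look roughly like:

    // Sketch only: the inputData written by createDynamicForkTask.
    Map<String, Object> forkInput =
            Map.of(
                    "forkedTasks", List.of("st1", "st2"), // task reference names
                    "forkedTaskDefs", dynForkTasks); // full definitions, slated for removal per the TODO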
+
+    /**
+     * This method creates a JOIN task that is used in the {@link
+     * this#getMappedTasks(TaskMapperContext)} at the end to add a join task to be scheduled after
+     * all the fork tasks
+     *
+     * @param workflowModel: An instance of the {@link WorkflowModel} which represents the workflow
+     *     being executed.
+     * @param joinWorkflowTask: An instance of {@link WorkflowTask} which is of type {@link
+     *     TaskType#JOIN}
+     * @param joinInput: The input which is set in the {@link TaskModel#setInputData(Map)}
+     * @return a new instance of {@link TaskModel} representing a {@link TaskType#JOIN}
+     */
+    @VisibleForTesting
+    TaskModel createJoinTask(
+            WorkflowModel workflowModel,
+            WorkflowTask joinWorkflowTask,
+            HashMap<String, Object> joinInput) {
+        TaskModel joinTask = new TaskModel();
+        joinTask.setTaskType(TaskType.TASK_TYPE_JOIN);
+        joinTask.setTaskDefName(TaskType.TASK_TYPE_JOIN);
+        joinTask.setReferenceTaskName(joinWorkflowTask.getTaskReferenceName());
+        joinTask.setWorkflowInstanceId(workflowModel.getWorkflowId());
+        joinTask.setWorkflowType(workflowModel.getWorkflowName());
+        joinTask.setCorrelationId(workflowModel.getCorrelationId());
+        joinTask.setScheduledTime(System.currentTimeMillis());
+        joinTask.setStartTime(System.currentTimeMillis());
+        joinTask.setInputData(joinInput);
+        joinTask.setTaskId(idGenerator.generate());
+        joinTask.setStatus(TaskModel.Status.IN_PROGRESS);
+        joinTask.setWorkflowTask(joinWorkflowTask);
+        joinTask.setWorkflowPriority(workflowModel.getPriority());
+        return joinTask;
+    }
+
+    /**
+     * This method is used to get the List of dynamic workflow tasks and their input based on the
+     * {@link WorkflowTask#getDynamicForkTasksParam()}
+     *
+     * @param workflowTask: The Task of type FORK_JOIN_DYNAMIC that needs to be scheduled, which
+     *     has the input parameters
+     * @param workflowModel: The instance of the {@link WorkflowModel} which represents the workflow
+     *     being executed.
+     * @param dynamicForkTaskParam: The key representing the dynamic fork join json payload which is
+     *     available in {@link WorkflowTask#getInputParameters()}
+     * @return a {@link Pair} representing the list of dynamic fork tasks in {@link Pair#getLeft()}
+     *     and the input for the dynamic fork tasks in {@link Pair#getRight()}
+     * @throws TerminateWorkflowException : In case the input parameters of the dynamic fork tasks
+     *     are not represented as a {@link Map}
+     */
+    @SuppressWarnings("unchecked")
+    @VisibleForTesting
+    Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> getDynamicForkTasksAndInput(
+            WorkflowTask workflowTask, WorkflowModel workflowModel, String dynamicForkTaskParam)
+            throws TerminateWorkflowException {
+
+        Map<String, Object> input =
+                parametersUtils.getTaskInput(
+                        workflowTask.getInputParameters(), workflowModel, null, null);
+        Object dynamicForkTasksJson = input.get(dynamicForkTaskParam);
+        List<WorkflowTask> dynamicForkWorkflowTasks =
+                objectMapper.convertValue(dynamicForkTasksJson, ListOfWorkflowTasks);
+        if (dynamicForkWorkflowTasks == null) {
+            dynamicForkWorkflowTasks = new ArrayList<>();
+        }
+        for (WorkflowTask dynamicForkWorkflowTask : dynamicForkWorkflowTasks) {
+            if ((dynamicForkWorkflowTask.getTaskDefinition() == null)
+                    && StringUtils.isNotBlank(dynamicForkWorkflowTask.getName())) {
+                dynamicForkWorkflowTask.setTaskDefinition(
+                        metadataDAO.getTaskDef(dynamicForkWorkflowTask.getName()));
+            }
+        }
+        Object dynamicForkTasksInput = input.get(workflowTask.getDynamicForkTasksInputParamName());
+        if (!(dynamicForkTasksInput instanceof Map)) {
+            throw new TerminateWorkflowException(
+                    "Input to the dynamically forked tasks is not a map -> expecting a map of K,V but found "
                            + dynamicForkTasksInput);
+        }
+        return new ImmutablePair<>(
+                dynamicForkWorkflowTasks, (Map<String, Map<String, Object>>) dynamicForkTasksInput);
+    }
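The dynamicForkTaskParam key is an indirection: the FORK_JOIN_DYNAMIC task's inputParameters map that key to an expression that ParametersUtils evaluates against the running workflow, exactly as the `dynamic_fork` entry in the workflows.json sample later in this diff does. A minimal wiring sketch (expressions and names are illustrative):

    WorkflowTask dynamicFork = new WorkflowTask();
    dynamicFork.setType(TaskType.FORK_JOIN_DYNAMIC.name());
    dynamicFork.setTaskReferenceName("dynamic_fork");
    dynamicFork.setDynamicForkTasksParam("forkedTasks");
    dynamicFork.setDynamicForkTasksInputParamName("forkedTasksInputs");
    dynamicFork.setInputParameters(
            Map.of(
                    "forkedTasks", "${dynamic_fork_prep.output.forkedTasks}",
                    "forkedTasksInputs", "${dynamic_fork_prep.output.forkedTasksInputs}"));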

+
+    /**
+     * This method is used to get the List of dynamic workflow tasks and their input based on the
+     * {@link WorkflowTask#getDynamicForkJoinTasksParam()}
+     *
+     * NOTE: This method is kept for legacy reasons; new workflows should use the {@link
+     * #getDynamicForkTasksAndInput}
+     *
+     * @param workflowTask: The Task of type FORK_JOIN_DYNAMIC that needs to be scheduled, which
+     *     has the input parameters
+     * @param workflowModel: The instance of the {@link WorkflowModel} which represents the workflow
+     *     being executed.
+     * @return {@link Pair} representing the list of dynamic fork tasks in {@link Pair#getLeft()}
+     *     and the input for the dynamic fork tasks in {@link Pair#getRight()}
+     * @throws TerminateWorkflowException : In case the {@link WorkflowTask#getInputParameters()}
+     *     does not have a payload that contains the list of the dynamic tasks
+     */
+    @VisibleForTesting
+    Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> getDynamicForkJoinTasksAndInput(
+            WorkflowTask workflowTask, WorkflowModel workflowModel)
+            throws TerminateWorkflowException {
+        String dynamicForkJoinTaskParam = workflowTask.getDynamicForkJoinTasksParam();
+        Map<String, Object> input =
+                parametersUtils.getTaskInput(
+                        workflowTask.getInputParameters(), workflowModel, null, null);
+        Object paramValue = input.get(dynamicForkJoinTaskParam);
+        DynamicForkJoinTaskList dynamicForkJoinTaskList =
+                objectMapper.convertValue(paramValue, DynamicForkJoinTaskList.class);
+
+        if (dynamicForkJoinTaskList == null) {
+            String reason =
+                    String.format(
+                            "Dynamic tasks could not be created. The value of %s from task's input %s has no dynamic tasks to be scheduled",
+                            dynamicForkJoinTaskParam, input);
+            LOGGER.error(reason);
+            throw new TerminateWorkflowException(reason);
+        }
+
+        Map<String, Map<String, Object>> dynamicForkJoinTasksInput = new HashMap<>();
+
+        List<WorkflowTask> dynamicForkJoinWorkflowTasks =
+                dynamicForkJoinTaskList.getDynamicTasks().stream()
+                        .peek(
+                                dynamicForkJoinTask ->
+                                        dynamicForkJoinTasksInput.put(
+                                                dynamicForkJoinTask.getReferenceName(),
+                                                dynamicForkJoinTask
+                                                        .getInput())) // TODO create a custom pair
+                        // collector
+                        .map(
+                                dynamicForkJoinTask -> {
+                                    WorkflowTask dynamicForkJoinWorkflowTask = new WorkflowTask();
+                                    dynamicForkJoinWorkflowTask.setTaskReferenceName(
+                                            dynamicForkJoinTask.getReferenceName());
+                                    dynamicForkJoinWorkflowTask.setName(
+                                            dynamicForkJoinTask.getTaskName());
+                                    dynamicForkJoinWorkflowTask.setType(
+                                            dynamicForkJoinTask.getType());
+                                    if (dynamicForkJoinWorkflowTask.getTaskDefinition() == null
+                                            && StringUtils.isNotBlank(
+                                                    dynamicForkJoinWorkflowTask.getName())) {
+                                        dynamicForkJoinWorkflowTask.setTaskDefinition(
+                                                metadataDAO.getTaskDef(
+                                                        dynamicForkJoinTask.getTaskName()));
+                                    }
+                                    return dynamicForkJoinWorkflowTask;
+                                })
+                        .collect(Collectors.toCollection(LinkedList::new));
+
+        return new ImmutablePair<>(dynamicForkJoinWorkflowTasks, dynamicForkJoinTasksInput);
+    }
+}
diff --git a/server/src/main/java/com/netflix/conductor/core/execution/tasks/HttpSync.java b/server/src/main/java/com/netflix/conductor/core/execution/tasks/HttpSync.java
new file mode 100644
index 0000000..dc53c55
--- /dev/null
+++ b/server/src/main/java/com/netflix/conductor/core/execution/tasks/HttpSync.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.core.execution.tasks; + +import org.springframework.stereotype.Component; + +import com.netflix.conductor.core.execution.WorkflowExecutor; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.model.WorkflowModel; +import com.netflix.conductor.tasks.http.HttpTask; +import com.netflix.conductor.tasks.http.providers.RestTemplateProvider; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.extern.slf4j.Slf4j; + +import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_HTTP; + +@Slf4j +@Component(TASK_TYPE_HTTP) +public class HttpSync extends WorkflowSystemTask { + + private HttpTask httpTask; + + public HttpSync(RestTemplateProvider restTemplateProvider, ObjectMapper objectMapper) { + super(TASK_TYPE_HTTP); + httpTask = new HttpTask(restTemplateProvider, objectMapper); + } + + @SuppressWarnings("unchecked") + @Override + public boolean isAsync() { + return false; + } + + @Override + public boolean execute(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) { + httpTask.execute(workflow, task, executor); + return true; + } + + public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) { + try { + httpTask.start(workflow, task, executor); + } catch (Exception e) { + log.error(e.getMessage(), e); + } + } +} diff --git a/server/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflowSync.java b/server/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflowSync.java new file mode 100644 index 0000000..4bf9492 --- /dev/null +++ b/server/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflowSync.java @@ -0,0 +1,72 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package com.netflix.conductor.core.execution.tasks; + +import org.springframework.stereotype.Component; + +import com.netflix.conductor.core.execution.WorkflowExecutor; +import com.netflix.conductor.model.TaskModel; +import com.netflix.conductor.model.WorkflowModel; + +import com.fasterxml.jackson.databind.ObjectMapper; +import lombok.extern.slf4j.Slf4j; + +import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SUB_WORKFLOW; + +@Component(TASK_TYPE_SUB_WORKFLOW) +@Slf4j +public class SubWorkflowSync extends WorkflowSystemTask { + + private static final String SUB_WORKFLOW_ID = "subWorkflowId"; + + private final SubWorkflow subWorkflow; + private final ObjectMapper objectMapper; + + public SubWorkflowSync(ObjectMapper objectMapper) { + super(TASK_TYPE_SUB_WORKFLOW); + this.subWorkflow = new SubWorkflow(objectMapper); + this.objectMapper = objectMapper; + } + + @SuppressWarnings("unchecked") + @Override + public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) { + subWorkflow.start(workflow, task, workflowExecutor); + } + + @Override + public boolean execute( + WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) { + return subWorkflow.execute(workflow, task, workflowExecutor); + } + + @Override + public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) { + subWorkflow.cancel(workflow, task, workflowExecutor); + } + + @Override + public boolean isAsync() { + return false; + } + + @Override + public boolean isAsyncComplete(TaskModel task) { + return true; + } + + @Override + public String toString() { + return subWorkflow.toString(); + } +} diff --git a/server/src/main/java/io/orkes/conductor/OrkesConductorApplication.java b/server/src/main/java/io/orkes/conductor/OrkesConductorApplication.java new file mode 100644 index 0000000..8041104 --- /dev/null +++ b/server/src/main/java/io/orkes/conductor/OrkesConductorApplication.java @@ -0,0 +1,173 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import javax.annotation.PostConstruct;
+
+import org.apache.commons.lang3.StringUtils;
+import org.springdoc.core.customizers.OpenApiCustomiser;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.FilterType;
+import org.springframework.core.env.Environment;
+import org.springframework.core.io.FileSystemResource;
+
+import com.netflix.conductor.common.config.ObjectMapperProvider;
+import com.netflix.conductor.common.metadata.tasks.TaskDef;
+import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
+import com.netflix.conductor.core.execution.mapper.ForkJoinDynamicTaskMapper;
+import com.netflix.conductor.core.execution.tasks.SubWorkflow;
+import com.netflix.conductor.dao.MetadataDAO;
+import com.netflix.conductor.tasks.http.HttpTask;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.swagger.v3.oas.models.OpenAPI;
+import io.swagger.v3.oas.models.info.Info;
+import io.swagger.v3.oas.models.servers.Server;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@SpringBootApplication(exclude = {DataSourceAutoConfiguration.class})
+@ComponentScan(
+        basePackages = {"com.netflix.conductor", "io.orkes.conductor"},
+        excludeFilters =
+                @ComponentScan.Filter(
+                        type = FilterType.ASSIGNABLE_TYPE,
+                        classes = {
+                            ForkJoinDynamicTaskMapper.class,
+                            HttpTask.class,
+                            SubWorkflow.class
+                        }))
+@RequiredArgsConstructor
+public class OrkesConductorApplication {
+
+    @Autowired private MetadataDAO metadataDAO;
+
+    public static void main(String[] args) throws IOException {
+        System.setProperty("spring.devtools.restart.enabled", "false");
+        loadExternalConfig();
+
+        log.info("Completed loading external configuration");
+        System.setProperty("es.set.netty.runtime.available.processors", "false");
+
+        SpringApplication.run(OrkesConductorApplication.class, args);
+    }
+
+    @Bean
+    public OpenApiCustomiser openApiCustomiser(Environment environment) {
+        List<Server> servers = new ArrayList<>();
+        Server server = new Server();
+        server.setDescription("Conductor API Server");
+        server.setUrl(environment.getProperty("conductor.swagger.url"));
+        servers.add(server);
+        return openApi ->
+                openApi.servers(servers).getPaths().values().stream()
+                        .flatMap(pathItem -> pathItem.readOperations().stream());
+    }
+
+    @Bean
+    public OpenAPI openAPI() {
+        log.info("openAPI Configuration....");
+        return new OpenAPI()
+                .info(
+                        new Info()
+                                .title("Orkes Conductor API Server")
+                                .description("Orkes Conductor API Server")
+                                .version("v2"));
+    }
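The external-config hook below resolves CONDUCTOR_CONFIG_FILE from the JVM system property first and falls back to the environment variable, and properties already set on the JVM win over entries read from the file. The lookup order, restated as a sketch (a launch would pass, e.g., -DCONDUCTOR_CONFIG_FILE=/path/to/conductor.properties; the path is illustrative):

    // Sketch only: mirrors the lookup order in loadExternalConfig.
    static String resolveConfigFile() {
        String fromProperty = System.getProperty("CONDUCTOR_CONFIG_FILE");
        return fromProperty != null ? fromProperty : System.getenv("CONDUCTOR_CONFIG_FILE");
    }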
+
+    private static void loadExternalConfig() throws IOException {
+        String configFile = System.getProperty("CONDUCTOR_CONFIG_FILE");
+        if (configFile == null) {
+            configFile = System.getenv("CONDUCTOR_CONFIG_FILE");
+        }
+        log.info("\nUsing {} as the configuration file", configFile);
+        if (!StringUtils.isEmpty(configFile)) {
+            FileSystemResource resource = new FileSystemResource(configFile);
+            if (resource.exists()) {
+                System.getenv()
+                        .forEach(
+                                (k, v) -> {
+                                    log.info("System Env Props - Key: {}, Value: {}", k, v);
+                                    if (k.startsWith("conductor")) {
+                                        log.info(
+                                                "\n\tSetting env property to system property: {}",
+                                                k);
+                                        System.setProperty(k, v);
+                                    }
+                                });
+                Properties existingProperties = System.getProperties();
+                existingProperties.forEach(
+                        (k, v) -> log.info("Env Props - Key: {}, Value: {}", k, v));
+                Properties properties = new Properties();
+                properties.load(resource.getInputStream());
+                properties.forEach(
+                        (key, value) -> {
+                            String keyString = (String) key;
+                            if (existingProperties.getProperty(keyString) != null) {
+                                log.info(
+                                        "Property : {} already exists with value: {}",
+                                        keyString,
+                                        value);
+                            } else {
+                                log.info("Setting {} - {}", keyString, value);
+                                System.setProperty(keyString, (String) value);
+                            }
+                        });
+                log.info("Loaded {} properties from {}", properties.size(), configFile);
+            } else {
+                log.warn("Ignoring {} since it does not exist", configFile);
+            }
+        }
+    }
+
+    @PostConstruct
+    public void loadSample() {
+        try {
+
+            log.info("Loading samples {}", metadataDAO);
+
+            ObjectMapper om = new ObjectMapperProvider().getObjectMapper();
+            InputStream tasksInputStream =
+                    OrkesConductorApplication.class.getResourceAsStream("/tasks.json");
+            InputStream workflowsInputStream =
+                    OrkesConductorApplication.class.getResourceAsStream("/workflows.json");
+
+            TypeReference<List<TaskDef>> tasks = new TypeReference<>() {};
+            TypeReference<List<WorkflowDef>> workflows = new TypeReference<>() {};
+
+            List<TaskDef> taskDefs = om.readValue(new InputStreamReader(tasksInputStream), tasks);
+            List<WorkflowDef> workflowDefs =
+                    om.readValue(new InputStreamReader(workflowsInputStream), workflows);
+
+            taskDefs.forEach(taskDef -> metadataDAO.updateTaskDef(taskDef));
+            workflowDefs.forEach(workflowDef -> metadataDAO.updateWorkflowDef(workflowDef));
+
+        } catch (Exception e) {
+            log.error("Error while loading sample workflows and tasks {}", e.getMessage(), e);
+        }
+    }
+}
diff --git a/server/src/main/java/io/orkes/conductor/config/AWSCredentialsConfiguration.java b/server/src/main/java/io/orkes/conductor/config/AWSCredentialsConfiguration.java
new file mode 100644
index 0000000..d942669
--- /dev/null
+++ b/server/src/main/java/io/orkes/conductor/config/AWSCredentialsConfiguration.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.config; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; + +@Configuration +public class AWSCredentialsConfiguration { + + @Bean + AWSCredentialsProvider createAWSCredentialsProvider() { + return new DefaultAWSCredentialsProviderChain(); + } +} diff --git a/server/src/main/java/io/orkes/conductor/config/SecurityDisabledConfig.java b/server/src/main/java/io/orkes/conductor/config/SecurityDisabledConfig.java new file mode 100644 index 0000000..4384f1e --- /dev/null +++ b/server/src/main/java/io/orkes/conductor/config/SecurityDisabledConfig.java @@ -0,0 +1,28 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.config; + +import org.springframework.context.annotation.Configuration; +import org.springframework.security.config.annotation.web.builders.HttpSecurity; +import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter; + +@Configuration +public class SecurityDisabledConfig extends WebSecurityConfigurerAdapter { + + public SecurityDisabledConfig() { + super(true); // Disable defaults + } + + @Override + protected void configure(HttpSecurity http) throws Exception {} +} diff --git a/server/src/main/java/io/orkes/conductor/rest/VersionResource.java b/server/src/main/java/io/orkes/conductor/rest/VersionResource.java new file mode 100644 index 0000000..6bb7248 --- /dev/null +++ b/server/src/main/java/io/orkes/conductor/rest/VersionResource.java @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.rest; + +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import io.swagger.v3.oas.annotations.Operation; +import lombok.RequiredArgsConstructor; + +import static org.springframework.http.MediaType.TEXT_PLAIN_VALUE; + +@RestController +@RequiredArgsConstructor +public class VersionResource { + + @GetMapping(value = "/api/version", produces = TEXT_PLAIN_VALUE) + @Operation(summary = "Get the server's version") + public String getVersion() { + String version = getClass().getPackage().getImplementationVersion(); + return version == null ? "n/a" : version; + } +} diff --git a/server/src/main/java/io/orkes/conductor/server/service/OrkesSweeperProperties.java b/server/src/main/java/io/orkes/conductor/server/service/OrkesSweeperProperties.java new file mode 100644 index 0000000..9cec440 --- /dev/null +++ b/server/src/main/java/io/orkes/conductor/server/service/OrkesSweeperProperties.java @@ -0,0 +1,30 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.server.service; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +import lombok.Getter; +import lombok.Setter; +import lombok.ToString; + +@Configuration +@ConfigurationProperties("conductor.app.sweeper") +@Getter +@Setter +@ToString +public class OrkesSweeperProperties { + private int sweepBatchSize = 5; + private int queuePopTimeout = 100; +} diff --git a/server/src/main/java/io/orkes/conductor/server/service/OrkesWorkflowSweeper.java b/server/src/main/java/io/orkes/conductor/server/service/OrkesWorkflowSweeper.java new file mode 100644 index 0000000..21df5c8 --- /dev/null +++ b/server/src/main/java/io/orkes/conductor/server/service/OrkesWorkflowSweeper.java @@ -0,0 +1,256 @@ +/* + * Copyright 2022 Orkes, Inc. + *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package io.orkes.conductor.server.service;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.stereotype.Component;
+
+import com.netflix.conductor.core.LifecycleAwareComponent;
+import com.netflix.conductor.core.config.ConductorProperties;
+import com.netflix.conductor.core.exception.NotFoundException;
+import com.netflix.conductor.core.execution.OrkesWorkflowExecutor;
+import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
+import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
+import com.netflix.conductor.core.utils.QueueUtils;
+import com.netflix.conductor.core.utils.Utils;
+import com.netflix.conductor.dao.ExecutionDAO;
+import com.netflix.conductor.dao.QueueDAO;
+import com.netflix.conductor.metrics.Monitors;
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+
+import io.orkes.conductor.metrics.MetricsCollector;
+
+import com.google.common.util.concurrent.Uninterruptibles;
+import lombok.extern.slf4j.Slf4j;
+
+import static com.netflix.conductor.core.config.SchedulerConfiguration.SWEEPER_EXECUTOR_NAME;
+import static com.netflix.conductor.core.utils.Utils.DECIDER_QUEUE;
+
+@Component
+@ConditionalOnProperty(name = "conductor.orkes.sweeper.enabled", havingValue = "true")
+@Slf4j
+public class OrkesWorkflowSweeper extends LifecycleAwareComponent {
+
+    private final QueueDAO queueDAO;
+    private final ConductorProperties properties;
+    private final OrkesSweeperProperties sweeperProperties;
+    private final OrkesWorkflowExecutor workflowExecutor;
+    private final ExecutionDAO executionDAO;
+    private final MetricsCollector metricsCollector;
+    private final SystemTaskRegistry systemTaskRegistry;
+
+    public OrkesWorkflowSweeper(
+            @Qualifier(SWEEPER_EXECUTOR_NAME) Executor sweeperExecutor,
+            QueueDAO queueDAO,
+            OrkesWorkflowExecutor workflowExecutor,
+            ExecutionDAO executionDAO,
+            MetricsCollector metricsCollector,
+            SystemTaskRegistry systemTaskRegistry,
+            ConductorProperties properties,
+            OrkesSweeperProperties sweeperProperties) {
+        this.queueDAO = queueDAO;
+        this.executionDAO = executionDAO;
+        this.metricsCollector = metricsCollector;
+        this.systemTaskRegistry = systemTaskRegistry;
+        this.properties = properties;
+        this.sweeperProperties = sweeperProperties;
+        this.workflowExecutor = workflowExecutor;
+        log.info("Initializing sweeper with {} threads", properties.getSweeperThreadCount());
+        for (int i = 0; i < properties.getSweeperThreadCount(); i++) {
+            sweeperExecutor.execute(this::pollAndSweep);
+        }
+    }
+
+    private void pollAndSweep() {
+        try {
+            while (true) {
+                try {
+                    if (!isRunning()) {
+                        log.trace("Component stopped, skip workflow sweep");
+                    } else {
+                        List<String> workflowIds =
+                                queueDAO.pop(
+                                        DECIDER_QUEUE,
+                                        sweeperProperties.getSweepBatchSize(),
+                                        sweeperProperties.getQueuePopTimeout());
+                        if (workflowIds != null) {
+                            workflowIds.forEach(
+                                    workflowId ->
+                                            metricsCollector
+                                                    .getTimer("workflowSweeper")
+                                                    .record(() -> sweep(workflowId)));
+                        } else {
+                            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+                        }
+                    }
+                } catch (Exception e) {
+                    log.warn("Error while processing sweeper - ", e);
+                    Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+                }
+            }
+        } catch (Exception e) {
+            log.error("Error polling for sweep entries", e);
+        }
+    }
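The membership check that follows encodes three conditions for system tasks: the task must be async, it must either not be async-complete or be async-complete but still SCHEDULED, and it must be in an active (IN_PROGRESS or SCHEDULED) state. Since (!b || (b && c)) is equivalent to (!b || c), the middle clause can be read more simply; an equivalent sketch:

    // Sketch only: a flattened, logically equivalent form of shouldTaskExistInQueue
    // for system tasks.
    boolean shouldExistInQueue(WorkflowSystemTask systemTask, TaskModel task) {
        boolean active =
                task.getStatus() == TaskModel.Status.IN_PROGRESS
                        || task.getStatus() == TaskModel.Status.SCHEDULED;
        return systemTask.isAsync()
                && (!systemTask.isAsyncComplete(task)
                        || task.getStatus() == TaskModel.Status.SCHEDULED)
                && active;
    }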
+
+    private boolean shouldTaskExistInQueue(TaskModel task) {
+        if (systemTaskRegistry.isSystemTask(task.getTaskType())) {
+            WorkflowSystemTask workflowSystemTask = systemTaskRegistry.get(task.getTaskType());
+            return workflowSystemTask.isAsync() // Is Async
+                    // Not async complete OR is async complete, but in scheduled state
+                    && (!workflowSystemTask.isAsyncComplete(task)
+                            || (workflowSystemTask.isAsyncComplete(task)
+                                    && task.getStatus() == TaskModel.Status.SCHEDULED))
+                    // Status is IN_PROGRESS or SCHEDULED
+                    && (task.getStatus() == TaskModel.Status.IN_PROGRESS
+                            || task.getStatus() == TaskModel.Status.SCHEDULED);
+        }
+        return task.getStatus() == TaskModel.Status.SCHEDULED;
+    }
+
+    public void sweep(String workflowId) {
+        try {
+            log.info("Running sweeper for workflow {}", workflowId);
+            // 1. Run decide on the workflow
+            WorkflowModel workflow = decideAndRemove(workflowId);
+            if (workflow == null || workflow.getStatus().isTerminal()) {
+                return;
+            }
+
+            // 2. If decide returns false
+            //    - Check if the workflow has at least one scheduled or in progress task?
+            //    - If scheduled or in progress - Check if it exists in its corresponding queue,
+            //      if not add it back
+            //    - If no scheduled or in progress task exists
+            //      1. Set the last task as isExecuted = false to force a re-evaluation
+            //      2. Call decide
+            if (workflow == null) {
+                // The workflow does not exist anymore, possible if it was completed and archived
+                queueDAO.remove(DECIDER_QUEUE, workflowId);
+                return;
+            }
+
+            if (System.currentTimeMillis() - workflow.getUpdatedTime() < 60) {
+                // Only do this once every 60 seconds
+                return;
+            }
+            List<TaskModel> pendingTasks = getAllPendingTasks(workflow);
+            if (pendingTasks.size() > 0) {
+                pendingTasks.forEach(this::ensurePendingTaskIsInQueue);
+            } else {
+                log.warn(
+                        "Workflow {} doesn't have an open pending task, requires force evaluation",
+                        workflow.getWorkflowId());
+                forceSetLastTaskAsNotExecuted(workflow);
+                workflow = decideAndRemove(workflowId);
+                log.debug(
+                        "Force evaluation result for workflow {} - {}",
+                        workflowId,
+                        workflow.getStatus());
+                if (workflow == null || workflow.getStatus().isTerminal()) {
+                    return;
+                }
+            }
+            // 3. If parent workflow exists, call repair on that too - meaning ensure the parent
+            //    is in the decider queue
+            if (workflow.getParentWorkflowId() != null) {
+                ensureWorkflowExistsInDecider(workflow.getParentWorkflowId());
+            }
+        } catch (NotFoundException e) {
+            queueDAO.remove(DECIDER_QUEUE, workflowId);
+            log.info("Workflow NOT found for id:{}. Removed it from decider queue", workflowId, e);
+            return;
+        } catch (Exception e) {
+            log.error("Error running sweep for " + workflowId, e);
+        }
+
+        // 4. TODO: Don't do this now - Check the min timeout for all running tasks and set
+        //    Math.min(minTime, 1 hour) for decider queue
+        queueDAO.setUnackTimeout(
+                DECIDER_QUEUE, workflowId, properties.getWorkflowOffsetTimeout().toMillis());
+    }
+
+    private void forceSetLastTaskAsNotExecuted(WorkflowModel workflow) {
+        if (workflow.getTasks() != null && workflow.getTasks().size() > 0) {
+            TaskModel taskModel = workflow.getTasks().get(workflow.getTasks().size() - 1);
+            log.warn(
+                    "Force setting isExecuted to false for last task - {} for workflow {}",
+                    taskModel.getTaskId(),
+                    taskModel.getWorkflowInstanceId());
+            taskModel.setExecuted(false);
+            executionDAO.updateTask(taskModel);
+        }
+    }
+
+    private List<TaskModel> getAllPendingTasks(WorkflowModel workflow) {
+        if (workflow.getTasks() != null && workflow.getTasks().size() > 0) {
+            return workflow.getTasks().stream()
+                    .filter(taskModel -> !taskModel.isExecuted())
+                    .collect(Collectors.toList());
+        }
+        return Collections.emptyList();
+    }
+
+    private WorkflowModel decideAndRemove(String workflowId) {
+        WorkflowModel workflowModel = workflowExecutor.decide(workflowId);
+        if (workflowModel == null) {
+            return null;
+        }
+        if (workflowModel.getStatus().isTerminal()) {
+            queueDAO.remove(DECIDER_QUEUE, workflowId);
+        }
+        return workflowModel;
+    }
+
+    boolean ensurePendingTaskIsInQueue(TaskModel task) {
+        if (shouldTaskExistInQueue(task)) {
+            // Ensure QueueDAO contains this taskId
+            String taskQueueName = QueueUtils.getQueueName(task);
+            if (!queueDAO.containsMessage(taskQueueName, task.getTaskId())) {
+                queueDAO.push(taskQueueName, task.getTaskId(), task.getCallbackAfterSeconds());
+                log.info(
+                        "Task {} in workflow {} re-queued for repairs",
+                        task.getTaskId(),
+                        task.getWorkflowInstanceId());
+                metricsCollector
+                        .getCounter("repairTaskReQueued", task.getTaskDefName())
+                        .increment();
+                return true;
+            }
+        }
+        return false;
+    }
+
+    private boolean ensureWorkflowExistsInDecider(String workflowId) {
+        if (StringUtils.isNotEmpty(workflowId)) {
+            String queueName = Utils.DECIDER_QUEUE;
+            if (!queueDAO.containsMessage(queueName, workflowId)) {
+                queueDAO.push(
+                        queueName, workflowId, properties.getWorkflowOffsetTimeout().getSeconds());
+                log.info("Workflow {} re-queued for repairs", workflowId);
+                Monitors.recordQueueMessageRepushFromRepairService(queueName);
+                return true;
+            }
+        }
+        return false;
+    }
+}
diff --git a/server/src/main/java/io/orkes/conductor/ui/UIContextGenerator.java b/server/src/main/java/io/orkes/conductor/ui/UIContextGenerator.java
new file mode 100644
index 0000000..e7eb9a0
--- /dev/null
+++ b/server/src/main/java/io/orkes/conductor/ui/UIContextGenerator.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2022 Orkes, Inc.
+ *

+ * Licensed under the Orkes Community License (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *

+ * https://github.com/orkes-io/licenses/blob/main/community/LICENSE.txt + *

+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package io.orkes.conductor.ui; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Map; + +import org.springframework.beans.factory.annotation.Value; +import org.springframework.core.env.Environment; +import org.springframework.stereotype.Component; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectWriter; +import lombok.extern.slf4j.Slf4j; + +@Component +@Slf4j +public class UIContextGenerator { + + private final Environment environment; + private final ObjectMapper objectMapper; + private final boolean isSecurityEnabled; + private final String contextFilePath; + + public UIContextGenerator( + Environment environment, + ObjectMapper objectMapper, + @Value("${conductor.ui.context.file:/usr/share/nginx/html/context.js}") + String contextFilePath) { + this.environment = environment; + this.objectMapper = objectMapper; + this.isSecurityEnabled = false; + this.contextFilePath = contextFilePath; + } + + public boolean writeUIContextFile() throws IOException { + log.info("Writing UI Context file: {}", contextFilePath); + File contextFile = new File(contextFilePath); + if (Files.notExists(Paths.get(contextFile.toURI()))) { + log.info("UI Context File doesn't exist, skipping"); + return false; + } + + try (FileWriter fileWriter = new FileWriter(contextFile)) { + ObjectWriter jsonWriter = objectMapper.writerWithDefaultPrettyPrinter(); + fileWriter.write( + String.format( + "window.conductor = %s;\n\n", conductorFeaturesAsJson(jsonWriter))); + fileWriter.write( + String.format( + "\nwindow.auth0Identifiers = %s;\n", + jsonWriter.writeValueAsString( + Map.of( + "clientId", "NOT_ENABLED", + "domain", "NOT_ENABLED")))); + + fileWriter.flush(); + } + + return true; + } + + private String conductorFeaturesAsJson(ObjectWriter jsonWriter) throws JsonProcessingException { + return jsonWriter.writeValueAsString( + Map.of( + "TASK_VISIBILITY", + environment.getProperty( + "conductor.security.default.taskVisibility", "READ"), + "ACCESS_MANAGEMENT", isSecurityEnabled, + "CREATOR_ENABLE_CREATOR", + environment.getProperty( + "conductor.creatorUi.featuresEnabled", "false"), + "CREATOR_ENABLE_REAFLOW_DIAGRAM", + environment.getProperty( + "conductor.creatorUi.reaflowDiagramEnabled", "false"))); + } +} diff --git a/server/src/main/resources/application.properties b/server/src/main/resources/application.properties new file mode 100644 index 0000000..0c03ff1 --- /dev/null +++ b/server/src/main/resources/application.properties @@ -0,0 +1,84 @@ +conductor.app.workflow-execution-lock-enabled=true +conductor.workflow-execution-lock.type=redis +conductor.redis-lock.serverAddress=redis://localhost:6379 +#in millisecond the amount of time to wait to obtain the lock on workflow +conductor.app.lockTimeToTry=50 + +conductor.db.type=redis_standalone +conductor.queue.type=redis_standalone +conductor.id.generator=time_based + +conductor.redis.hosts=localhost:6379:us-east-1c + +#Misc conductor server configuration +conductor.default-event-queue.type=sqs +conductor.metrics-logger.enabled=false 
+conductor.app.owner-email-mandatory=false + +#Redis Properties +conductor.redis.queueNamespacePrefix=conductor_queues +conductor.redis.workflowNamespacePrefix=conductor +conductor.redis.taskDefCacheRefreshInterval=1 + +#Workflow archival and indexing +conductor.archive.db.enabled=true +conductor.archive.db.type=postgres +conductor.archive.db.indexer.threadCount=4 +conductor.archive.db.indexer.pollingInterval=10 + +#postgres database +spring.datasource.url=jdbc:postgresql://localhost:5432/postgres +spring.datasource.username=postgres +spring.datasource.password=postgres + +#JDBC datasource configuration +spring.datasource.hikari.connection-init-sql=SET statement_timeout = '30s' +spring.datasource.hikari.maximum-pool-size=8 +spring.datasource.hikari.auto-commit=true +spring.search-datasource.hikari.maximum-pool-size=8 +spring.search-datasource.hikari.auto-commit=true + + +#Background sweeper job +conductor.workflow-monitor.enabled=true +#Disable default +conductor.workflow-reconciler.enabled=false +conductor.workflow-repair-service.enabled=false + +#System Task Workers +conductor.app.systemTaskWorkerPollInterval=1 +conductor.app.systemTaskMaxPollCount=10 +conductor.app.systemTaskWorkerThreadCount=10 + +#Enable the Orkes version +conductor.orkes.sweeper.enabled=true +conductor.app.sweeperThreadCount=10 +conductor.sweep-frequency.millis=1 + +#metrics -- only enable what is necessary +management.endpoints.web.exposure.include=prometheus,health +management.metrics.web.server.request.autotime.percentiles=0.50,0.75,0.90,0.95,0.99 + +# MAX Payload configuration +conductor.app.maxTaskOutputPayloadSizeThreshold=102400 +conductor.app.maxTaskInputPayloadSizeThreshold=102400 +conductor.app.taskOutputPayloadSizeThreshold=102400 +conductor.app.taskInputPayloadSizeThreshold=102400 + +# Additional modules for metrics collection exposed to Datadog (optional) +management.metrics.export.datadog.enabled=${conductor.metrics-datadog.enabled:false} +management.metrics.export.datadog.api-key=${conductor.metrics-datadog.api-key:} + +#Swagger - OpenAPI configuration +springdoc.swagger-ui.tagsSorter=alpha +springdoc.swagger-ui.operationsSorter=alpha +springdoc.writer-with-order-by-keys=true +springdoc.api-docs.path=/api-docs +springdoc.swagger-ui.disable-swagger-default-url=true +springdoc.swagger-ui.queryConfigEnabled=false +springdoc.swagger-ui.filter=true + +conductor.swagger.url=http://localhost:8080/ + + + diff --git a/server/src/main/resources/banner.txt b/server/src/main/resources/banner.txt new file mode 100644 index 0000000..aa0c0b4 --- /dev/null +++ b/server/src/main/resources/banner.txt @@ -0,0 +1,16 @@ + + ______ .______ __ ___ _______ _______. + / __ \ | _ \ | |/ / | ____| / | +| | | | | |_) | | ' / | |__ | (----` +| | | | | / | < | __| \ \ +| `--' | | |\ \----.| . \ | |____.----) | + \______/ | _| `._____||__|\__\ |_______|_______/ + + ______ ______ .__ __. _______ __ __ ______ .___________. ______ .______ + / | / __ \ | \ | | | \ | | | | / || | / __ \ | _ \ +| ,----'| | | | | \| | | .--. || | | | | ,----'`---| |----`| | | | | |_) | +| | | | | | | . ` | | | | || | | | | | | | | | | | | / +| `----.| `--' | | |\ | | '--' || `--' | | `----. | | | `--' | | |\ \----. 
+ \______| \______/  |__| \__| |_______/  \______/   \______|   |__|      \______/  | _| `._____|
+
+Orkes Conductor Community Licensed
diff --git a/server/src/main/resources/logback-spring.xml b/server/src/main/resources/logback-spring.xml
new file mode 100644
index 0000000..778efe0
--- /dev/null
+++ b/server/src/main/resources/logback-spring.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <appender name="Console" class="ch.qos.logback.core.ConsoleAppender">
+        <layout class="ch.qos.logback.classic.PatternLayout">
+            <Pattern>
+                %black(%d{ISO8601}) %highlight(%-5level) [%blue(%t)] %yellow(%C{1.}): %msg%n%throwable
+            </Pattern>
+        </layout>
+    </appender>
+
+    <appender name="RollingFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>${LOG_FILE}</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+            <!-- rotate daily, or as soon as a file reaches 200MB -->
+            <fileNamePattern>${LOG_FILE}.%d{yyyy-MM-dd}.%i</fileNamePattern>
+            <maxFileSize>200MB</maxFileSize>
+            <maxHistory>10</maxHistory>
+            <totalSizeCap>10GB</totalSizeCap>
+        </rollingPolicy>
+        <encoder>
+            <pattern>%d{ISO8601} %-5level [%t]: %msg%n%throwable</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="RollingFile"/>
+        <appender-ref ref="Console"/>
+    </root>
+
+</configuration>
\ No newline at end of file
diff --git a/server/src/main/resources/tasks.json b/server/src/main/resources/tasks.json
new file mode 100644
index 0000000..8347d71
--- /dev/null
+++ b/server/src/main/resources/tasks.json
@@ -0,0 +1,116 @@
+[
+  {
+    "createTime": 1662241150448,
+    "createdBy": "",
+    "name": "simple_task_5",
+    "description": "Edit or extend this sample task. Set the task name to get started",
+    "retryCount": 3,
+    "timeoutSeconds": 3600,
+    "inputKeys": [],
+    "outputKeys": [],
+    "timeoutPolicy": "TIME_OUT_WF",
+    "retryLogic": "FIXED",
+    "retryDelaySeconds": 60,
+    "responseTimeoutSeconds": 600,
+    "inputTemplate": {},
+    "rateLimitPerFrequency": 0,
+    "rateLimitFrequencyInSeconds": 1,
+    "ownerEmail": "viren@orkes.io",
+    "backoffScaleFactor": 1
+  },
+  {
+    "createTime": 1662241156390,
+    "createdBy": "",
+    "name": "simple_task_6",
+    "description": "Edit or extend this sample task. Set the task name to get started",
+    "retryCount": 3,
+    "timeoutSeconds": 3600,
+    "inputKeys": [],
+    "outputKeys": [],
+    "timeoutPolicy": "TIME_OUT_WF",
+    "retryLogic": "FIXED",
+    "retryDelaySeconds": 60,
+    "responseTimeoutSeconds": 600,
+    "inputTemplate": {},
+    "rateLimitPerFrequency": 0,
+    "rateLimitFrequencyInSeconds": 1,
+    "ownerEmail": "viren@orkes.io",
+    "backoffScaleFactor": 1
+  },
+  {
+    "createTime": 1662241120549,
+    "createdBy": "",
+    "name": "simple_task_3",
+    "description": "Edit or extend this sample task. Set the task name to get started",
+    "retryCount": 3,
+    "timeoutSeconds": 3600,
+    "inputKeys": [],
+    "outputKeys": [],
+    "timeoutPolicy": "TIME_OUT_WF",
+    "retryLogic": "FIXED",
+    "retryDelaySeconds": 60,
+    "responseTimeoutSeconds": 600,
+    "inputTemplate": {},
+    "rateLimitPerFrequency": 0,
+    "rateLimitFrequencyInSeconds": 1,
+    "ownerEmail": "viren@orkes.io",
+    "backoffScaleFactor": 1
+  },
+  {
+    "createTime": 1662241125214,
+    "createdBy": "",
+    "name": "simple_task_4",
+    "description": "Edit or extend this sample task. Set the task name to get started",
+    "retryCount": 3,
+    "timeoutSeconds": 3600,
+    "inputKeys": [],
+    "outputKeys": [],
+    "timeoutPolicy": "TIME_OUT_WF",
+    "retryLogic": "FIXED",
+    "retryDelaySeconds": 60,
+    "responseTimeoutSeconds": 600,
+    "inputTemplate": {},
+    "rateLimitPerFrequency": 0,
+    "rateLimitFrequencyInSeconds": 1,
+    "ownerEmail": "viren@orkes.io",
+    "backoffScaleFactor": 1
+  },
+  {
+    "createTime": 1662241109699,
+    "createdBy": "",
+    "name": "simple_task_1",
+    "description": "Edit or extend this sample task. 
Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "viren@orkes.io", + "backoffScaleFactor": 1 + }, + { + "createTime": 1662241116155, + "createdBy": "", + "name": "simple_task_2", + "description": "Edit or extend this sample task. Set the task name to get started", + "retryCount": 3, + "timeoutSeconds": 3600, + "inputKeys": [], + "outputKeys": [], + "timeoutPolicy": "TIME_OUT_WF", + "retryLogic": "FIXED", + "retryDelaySeconds": 60, + "responseTimeoutSeconds": 600, + "inputTemplate": {}, + "rateLimitPerFrequency": 0, + "rateLimitFrequencyInSeconds": 1, + "ownerEmail": "viren@orkes.io", + "backoffScaleFactor": 1 + } +] \ No newline at end of file diff --git a/server/src/main/resources/workflows.json b/server/src/main/resources/workflows.json new file mode 100644 index 0000000..56b4c80 --- /dev/null +++ b/server/src/main/resources/workflows.json @@ -0,0 +1,524 @@ +[{ + "createTime": 1662240965151, + "updateTime": 1656532352405, + "name": "load_test", + "version": 1, + "tasks": [ + { + "name": "simple_task_0", + "taskReferenceName": "call_remote", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "http://10.1.12.67/test.json" + } + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false, + "asyncComplete": false + }, + { + "name": "sub_flow", + "taskReferenceName": "sub_flow", + "inputParameters": {}, + "type": "SET_VARIABLE", + "startDelay": 0, + "subWorkflowParam": { + "name": "PopulationMinMax" + }, + "optional": false, + "asyncComplete": false + }, + { + "name": "dynamic_fork_prep", + "taskReferenceName": "dynamic_fork_prep", + "inputParameters": {}, + "type": "SIMPLE", + "startDelay": 0, + "optional": false, + "asyncComplete": false + }, + { + "name": "dynamic_fork", + "taskReferenceName": "dynamic_fork", + "inputParameters": { + "forkedTasks": "${dynamic_fork_prep.output.forkedTasks}", + "forkedTasksInputs": "${dynamic_fork_prep.output.forkedTasksInputs}" + }, + "type": "FORK_JOIN_DYNAMIC", + "dynamicForkTasksParam": "forkedTasks", + "dynamicForkTasksInputParamName": "forkedTasksInputs", + "startDelay": 0, + "optional": false, + "asyncComplete": false + }, + { + "name": "dynamic_fork_join", + "taskReferenceName": "dynamic_fork_join", + "inputParameters": {}, + "type": "JOIN", + "startDelay": 0, + "optional": false, + "asyncComplete": false + }, + { + "name": "fork", + "taskReferenceName": "fork", + "inputParameters": {}, + "type": "FORK_JOIN", + "forkTasks": [ + [ + { + "name": "fact_length", + "taskReferenceName": "fact_length", + "description": "Fail if the fact is too short", + "inputParameters": { + "fact_length": "${call_remote.output.number}", + "switchCaseValue": "LONG" + }, + "type": "SWITCH", + "decisionCases": { + "LONG": [ + { + "name": "simple_task_3", + "taskReferenceName": "simple_task_3", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "http://10.1.12.67/test.json" + } + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false, + "asyncComplete": false + } + ], + "SHORT": [ + { + "name": "too_short", + "taskReferenceName": "too_short", + "inputParameters": { + "terminationReason": "value too short", + "terminationStatus": "FAILED" + }, + "type": "TERMINATE", + "startDelay": 0, + "optional": false, + "asyncComplete": false + } + ] + 
}, + "startDelay": 0, + "optional": false, + "asyncComplete": false, + "evaluatorType": "value-param", + "expression": "switchCaseValue" + }, + { + "name": "sub_flow_inline", + "taskReferenceName": "sub_flow_inline", + "inputParameters": {}, + "type": "SUB_WORKFLOW", + "startDelay": 0, + "subWorkflowParam": { + "name": "inline_sub", + "version": 1, + "workflowDefinition": { + "name": "inline_sub", + "version": 1, + "tasks": [ + { + "name": "simple_task_5", + "taskReferenceName": "simple_task_5", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "http://10.1.12.67/test.json" + } + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false, + "asyncComplete": false + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} + } + }, + "optional": false, + "asyncComplete": false + } + ], + [ + { + "name": "simple_task_1", + "taskReferenceName": "simple_task_1", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "http://10.1.12.67/test.json" + } + }, + "type": "SIMPLE", + "startDelay": 0, + "optional": false, + "asyncComplete": false + } + ] + ], + "startDelay": 0, + "optional": false, + "asyncComplete": false + }, + { + "name": "fork_join", + "taskReferenceName": "fork_join", + "inputParameters": {}, + "type": "JOIN", + "startDelay": 0, + "joinOn": [ + "sub_flow_inline", + "simple_task_1" + ], + "optional": false, + "asyncComplete": false + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "ownerEmail": "viren@orkes.io", + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} +}, + { + "ownerApp": null, + "createTime": 1662278776157, + "updateTime": null, + "createdBy": null, + "updatedBy": null, + "name": "http", + "description": "Edit or extend this sample workflow. 
Set the workflow name to get started", + "version": 1, + "tasks": [ + { + "name": "get_population_data", + "taskReferenceName": "get_population_data", + "description": null, + "inputParameters": { + "http_request": { + "uri": "https://orkes-api-tester.orkesconductor.com/get", + "method": "GET", + "readTimeout": 60000, + "connectTimeout": 60000 + } + }, + "type": "HTTP", + "dynamicTaskNameParam": null, + "caseValueParam": null, + "caseExpression": null, + "scriptExpression": null, + "dynamicForkJoinTasksParam": null, + "dynamicForkTasksParam": null, + "dynamicForkTasksInputParamName": null, + "startDelay": 0, + "subWorkflowParam": null, + "sink": null, + "optional": false, + "taskDefinition": null, + "rateLimited": null, + "asyncComplete": false, + "loopCondition": null, + "retryCount": null, + "evaluatorType": null, + "expression": null + } + ], + "inputParameters": [], + "outputParameters": { + "data": "${get_population_data.output.response.body.data}", + "source": "${get_population_data.output.response.body.source}" + }, + "failureWorkflow": null, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "ownerEmail": "example@email.com", + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} + }, { + "updateTime": 1662336222118, + "name": "http_perf_test", + "version": 1, + "tasks": [ + { + "name": "http", + "taskReferenceName": "http_0", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "http", + "taskReferenceName": "http_1", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "http", + "taskReferenceName": "http_2", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "http", + "taskReferenceName": "http_20", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "fork", + "taskReferenceName": "fork", + "inputParameters": {}, + "type": "FORK_JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [ + [ + { + "name": "http", + "taskReferenceName": "http_11", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + 
"loopOver": [] + } + ], + [ + { + "name": "http", + "taskReferenceName": "http_13", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + [ + { + "name": "http", + "taskReferenceName": "http_12", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + [ + { + "name": "http", + "taskReferenceName": "http_15", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + [ + { + "name": "http", + "taskReferenceName": "http_21", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + [ + { + "name": "http", + "taskReferenceName": "http_22", + "inputParameters": { + "http_request": { + "method": "GET", + "uri": "https://orkes-api-tester.orkesconductor.com/get" + } + }, + "type": "HTTP", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ] + ], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + }, + { + "name": "join", + "taskReferenceName": "join", + "inputParameters": {}, + "type": "JOIN", + "decisionCases": {}, + "defaultCase": [], + "forkTasks": [], + "startDelay": 0, + "joinOn": [], + "optional": false, + "defaultExclusiveJoinTask": [], + "asyncComplete": false, + "loopOver": [] + } + ], + "inputParameters": [], + "outputParameters": {}, + "schemaVersion": 2, + "restartable": true, + "workflowStatusListenerEnabled": false, + "ownerEmail": "viren@orkes.io", + "timeoutPolicy": "ALERT_ONLY", + "timeoutSeconds": 0, + "variables": {}, + "inputTemplate": {} +}] \ No newline at end of file diff --git a/server/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/server/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker new file mode 100644 index 0000000..ca6ee9c --- /dev/null +++ b/server/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -0,0 +1 @@ +mock-maker-inline \ No newline at end of file diff --git a/settings.gradle b/settings.gradle new file mode 100644 index 0000000..c907adc --- /dev/null +++ b/settings.gradle @@ -0,0 +1,7 @@ +rootProject.name = 'orkes-conductor' +include 'redis-queues' +include 'archive' +include 'persistence' +include 'server' + +rootProject.children.each { it.name = "${rootProject.name}-${it.name}" } \ No newline at 
end of file
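
Reviewer note on UIContextGenerator (not part of the diff): a minimal sketch of how the class above can be exercised in a unit test. It assumes JUnit 5 and spring-test (for MockEnvironment) are available on the test classpath; the test class name and assertions are illustrative only.

package io.orkes.conductor.ui;

import java.io.File;
import java.nio.file.Files;

import org.junit.jupiter.api.Test;
import org.springframework.mock.env.MockEnvironment;

import com.fasterxml.jackson.databind.ObjectMapper;

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class UIContextGeneratorTest {

    @Test
    public void writesContextFileWhenTargetExists() throws Exception {
        // The generator only overwrites an existing file, so create one first.
        File contextFile = File.createTempFile("context", ".js");
        contextFile.deleteOnExit();

        MockEnvironment env = new MockEnvironment();
        env.setProperty("conductor.security.default.taskVisibility", "READ");

        UIContextGenerator generator =
                new UIContextGenerator(env, new ObjectMapper(), contextFile.getAbsolutePath());

        assertTrue(generator.writeUIContextFile());
        String written = Files.readString(contextFile.toPath());
        assertTrue(written.contains("window.conductor"));
        assertTrue(written.contains("window.auth0Identifiers"));
    }

    @Test
    public void skipsWhenTargetIsMissing() throws Exception {
        // A missing target file is treated as "UI not installed" and skipped.
        UIContextGenerator generator =
                new UIContextGenerator(
                        new MockEnvironment(), new ObjectMapper(), "/nonexistent/context.js");
        assertFalse(generator.writeUIContextFile());
    }
}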
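
Reviewer note on the seed data (not part of the diff): tasks.json and workflows.json ship on the server classpath as sample metadata. Below is a sketch of how such a file can be parsed with Jackson into Conductor metadata objects, assuming conductor-common is on the classpath; the SeedDataLoader class is hypothetical, and the server's actual seeding code is outside this diff.

import java.io.InputStream;
import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.conductor.common.metadata.tasks.TaskDef;

public class SeedDataLoader {

    public static List<TaskDef> loadTaskDefs() throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Be lenient: the JSON may carry fields a given conductor-common version lacks.
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        // tasks.json is bundled under server/src/main/resources, i.e. the classpath root.
        try (InputStream in = SeedDataLoader.class.getResourceAsStream("/tasks.json")) {
            return mapper.readValue(in, new TypeReference<List<TaskDef>>() {});
        }
    }
}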
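
Reviewer note on trying the samples (not part of the diff): with the server running locally on port 8080 (matching conductor.swagger.url above), the bundled "http" workflow can be started through the standard Conductor REST endpoint POST /api/workflow/{name}. A sketch using the JDK 11 HttpClient follows; the endpoint path is the stock Conductor API, not something introduced by this diff.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class StartSampleWorkflow {

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/api/workflow/http"))
                .header("Content-Type", "application/json")
                // The sample "http" workflow needs no input, so send an empty object.
                .POST(HttpRequest.BodyPublishers.ofString("{}"))
                .build();
        HttpResponse<String> response =
                client.send(request, HttpResponse.BodyHandlers.ofString());
        // Conductor responds with the id of the newly started workflow.
        System.out.println("Started workflow: " + response.body());
    }
}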