diff --git a/.github/workflows/nebula-ci.yml b/.github/workflows/nebula-ci.yml new file mode 100644 index 00000000..d04d2851 --- /dev/null +++ b/.github/workflows/nebula-ci.yml @@ -0,0 +1,49 @@ +name: "CI" +on: + push: + branches: + - '*' + tags-ignore: + - '*' + pull_request: + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + # test against JDK 8 + java: [ 8 ] + name: CI with Java ${{ matrix.java }} + steps: + - uses: actions/checkout@v1 + - name: Setup git user + run: | + git config --global user.name "Netflix OSS Maintainers" + git config --global user.email "netflixoss@netflix.com" + - name: Setup jdk + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + - uses: actions/cache@v1 + id: gradle-cache + with: + path: ~/.gradle/caches + key: ${{ runner.os }}-gradle-${{ hashFiles('**/gradle/dependency-locks/*.lockfile') }} + restore-keys: | + - ${{ runner.os }}-gradle- + - uses: actions/cache@v1 + id: gradle-wrapper-cache + with: + path: ~/.gradle/wrapper + key: ${{ runner.os }}-gradlewrapper-${{ hashFiles('gradle/wrapper/*') }} + restore-keys: | + - ${{ runner.os }}-gradlewrapper- + - name: Build with Gradle + run: ./gradlew --info --stacktrace build + env: + CI_NAME: github_actions + CI_BUILD_NUMBER: ${{ github.sha }} + CI_BUILD_URL: 'https://github.com/${{ github.repository }}' + CI_BRANCH: ${{ github.ref }} + COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/nebula-publish.yml b/.github/workflows/nebula-publish.yml new file mode 100644 index 00000000..cbcc261c --- /dev/null +++ b/.github/workflows/nebula-publish.yml @@ -0,0 +1,52 @@ +name: "Publish candidate/release to NetflixOSS and Maven Central" +on: + push: + tags: + - v*.*.* + - v*.*.*-rc.* + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Setup git user + run: | + git config --global user.name "Netflix OSS Maintainers" + git config --global user.email "netflixoss@netflix.com" + - name: Setup jdk 8 + uses: actions/setup-java@v1 + with: + java-version: 1.8 + - uses: actions/cache@v1 + id: gradle-cache + with: + path: ~/.gradle/caches + key: ${{ runner.os }}-gradle-${{ hashFiles('**/gradle/dependency-locks/*.lockfile') }} + restore-keys: | + - ${{ runner.os }}-gradle- + - uses: actions/cache@v1 + id: gradle-wrapper-cache + with: + path: ~/.gradle/wrapper + key: ${{ runner.os }}-gradlewrapper-${{ hashFiles('gradle/wrapper/*') }} + restore-keys: | + - ${{ runner.os }}-gradlewrapper- + - name: Publish candidate + if: contains(github.ref, '-rc.') + run: ./gradlew --info --stacktrace -Prelease.useLastTag=true candidate + env: + NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }} + NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }} + NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }} + NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }} + - name: Publish release + if: (!contains(github.ref, '-rc.')) + run: ./gradlew --info -Prelease.useLastTag=true final + env: + NETFLIX_OSS_SONATYPE_USERNAME: ${{ secrets.ORG_SONATYPE_USERNAME }} + NETFLIX_OSS_SONATYPE_PASSWORD: ${{ secrets.ORG_SONATYPE_PASSWORD }} + NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }} + NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }} + NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }} + NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }} diff --git a/.github/workflows/nebula-snapshot.yml b/.github/workflows/nebula-snapshot.yml new file mode 100644 index 
00000000..2f60ba6c --- /dev/null +++ b/.github/workflows/nebula-snapshot.yml @@ -0,0 +1,41 @@ +name: "Publish snapshot to NetflixOSS and Maven Central" + +on: + push: + branches: + - main + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Setup git user + run: | + git config --global user.name "Netflix OSS Maintainers" + git config --global user.email "netflixoss@netflix.com" + - name: Set up JDK + uses: actions/setup-java@v1 + with: + java-version: 8 + - uses: actions/cache@v2 + id: gradle-cache + with: + path: | + ~/.gradle/caches + key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle') }} + - uses: actions/cache@v2 + id: gradle-wrapper-cache + with: + path: | + ~/.gradle/wrapper + key: ${{ runner.os }}-gradlewrapper-${{ hashFiles('gradle/wrapper/*') }} + - name: Build + run: ./gradlew build snapshot + env: + NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }} + NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }} + NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }} + NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }} diff --git a/.gitignore b/.gitignore index 6a85b5a3..707e3f36 100644 --- a/.gitignore +++ b/.gitignore @@ -48,3 +48,5 @@ atlassian-ide-plugin.xml .settings .metadata +# publishing secrets +secrets/signing-key diff --git a/.travis.yml b/.travis.yml deleted file mode 100755 index fcad80f7..00000000 --- a/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: java -jdk: -- openjdk8 -sudo: false -install: "./installViaTravis.sh" -script: "./buildViaTravis.sh" -cache: - directories: - - "$HOME/.gradle/caches/" -env: - global: - - secure: iiM3vM0du9afTc+GaB7Yisx2Tll6uIewOIHAeyM14X4b/sJFSa7uP0kw6BcLw153KqaXj6hLuYS/xAZS91RwAkCyUxVFh5WgAYcUqkynmYcyVrfRTW2ZGpcgMtCUL2ZC00ROxDsYOmYZvW3mS3KirDSi4Exzpu/rqKBlgNlGpAfT2k9Rq0bpw4wbI5mOmO2ZbEw8cihi+ECo+Vs4uIdazNVP0Ra9CvBATviLSFrTZlTHoNiqTxN6a+2Lk7Lg7sAuVrh+oODl3Tez3LYQ23/GbC3uNiKb3CfmDQgZNP6QiSGZ2AnYwIASkCpffycndQpRww4CkHrbc9yDrpOV8xS97PW0/QjhuphPv694hApZxmJTaDQlUuWC05nMXPVhu4t7qhjalVD/8L+Gh03CN1ma5zsMY13gWk4cPPkPbIXgeAev8JYtvo3SDCA7D4Ti48PVwomxcKa2gkvZHVkArzNzAbHqQPB/enxbmdugLU6x0LCQvCynxcLmXLJOBKnrTfA5N/Upt1yWtZMNOt2eFxd0S26WQXM8+Q6x33hoDN3aV2hdtALgxq8+8qUVeynkltJZozeyPTOiXakOh6PwjiP15zsBnYKpM77FByWj7rIHoTcrXPY7OHKm3CvhaXlgg0btlA+uYuZlMWT3SlRUnOSrwvz8vhEs8ewivOiddhWl5fQ= - - secure: t3TDEBk6+vK10rItQMrzX6A/MbMByKInZVLmwxJHOjxk/q1rqpircDgwthvJk1qLhak/TqJN7CCZJ3Aax7owLWe3anMRzr0RHHpGPSc2gyv9SKBOrIqMlHer4mA08msDXCakaF1yqFamLmc0G0pqT9blWXZjqaX6Ow0bCsQHrlg+e4LBYGAbILlalOxUTwgtdBo05WOwSaFzInUKRe7nL17aM1zA42DZaPQWMUdky9a4jogk0jQ5OHUJtk+Db4QXKyX0Xf2OKUx99Le/7VGGYtP9F/PVm3u8Um5dteXzAPzaqSTOOdhQrjLtkHMOu1/dn/JpvCc4iiNBsy1yXX037jR19m2+hkPqm9x6e39uiUx0OZ4hVCVWEK2H6xiYXrIneyh9C7f5m6ppuDuLTWKRfBW1ZjRRav6qsSdXiieVQHZ3Cy0XwCKImxwYg8+WiHGyoJeGAWLecWJ66hT90uRNVyfy4Tr2eCByGDFM42BKhdlUwN6A52rDIeSL1l+DG5ZAGgzfEazstnPbe6K3bhUsRtaCXst90rdakV+zymoHOvcUW2/Exaib6ozp8UOJd+ZnK0i4RaFwkTdLwY4YE631ZOPRKA7mDIRWLsoFj0UnpZe28BK/L+vFSz1av2Ild/n9eeNCsIPX7/S+Pv2VEKpnz91mlR/+0xSre4gyX1knbzI= - - secure: 
FThW2CPBT9YnbsaJu4v4/N1YiVCxBI/3ZYlMO6zfeAalBBjhSwhovhK0wYTsJd+Qb+QxyLX935q2EQfjsXpbMayII0w0ONlFcQBHmAt9xDcaGBOrLYEmn7SlgaSQXa0OgP9JEOuVnY77G46CQ4OupkvexKfKKgfEgurhqV+9nwbPqUm3KGEBdXft9b1a32GvUbSOtIH9LzeXIFcQaVYv+z6rgX7exXy0jc/vefWbpr0e6AyavGHs8bf4T0kgxYDyBgITmQSf1el0oAKxn/UZV4vXH8efdI/mzYM6DAj8s0QryZqqUFlapE4yz6KSy6MQrSbH3WTp4qqXoTECVHVl4e+EP5o4NBNpv2MJr20EtBV31PrSdtaD5yIx0x5P0haa08KubgEzfXVWIBKLIWZLC93GhQsCzGPt1M8u3dHgSMdNJiD3VuWrpF5PYD1vlb1zzDXk9sAgKDO0X2SN9DYn7+O78BnARQNlndp4K+t954dl3NfnTtrDuHnxSDXnCVkZuEHwkD0bSDxh3duypiqaBYD9mgx4wffiQMAQEA/l1UgKGIUNtjRGhZeFDBXdnJ64qOqZ+oKAmL538zvJI85R0NgUbQQJKNnc3d8k3lsy2qOy3hQgglEIUKKf7OoRTIqOcCQf/qSEEwQUsLk4Vqu4REDznQKvm0wU6eEWaeNrW1E= - - secure: EHMFNebGZ22YIIw+pMTZMOaAS0LXnAAG2/zV3Lk0t5EQPqgxpoWIZd9JFAt5yQPBk6IxD+rbAhd8D6QA0RNxfdN1LQBdZkAL+m7I8XGftDIQTecaGxZpjmw0OwC+gBz6p7sFHlkNw54DO5dh+waW5ro3yd0dvNHRDzUzD7VNq41joKFqTSnKdZMiBDGBgbvPBG6MrNldl1Cpl5LhtGDmSdG2DSKuKJAuWUhwOC0ZJGruFHhJO/iHef6RwQR3BTBKqsiH4KeN5lit+UetmYZOhswJIghRpCIcgBNG7V873agKeAf1WbHB0OlQw/FqIzLfs51SHF+8easL2Jrcuk+T4Ritct6CE826Cp4JILn6NUpOWLMpS/1pyr39IFwmRO+pTDhaZ6Ff7jXztx4QY1nZi1hhDy/VvOzeeGy2PiYdUO88dq5lW2sfjvv6BUvrRR7DAiMDen+KxogDpf3jDQJ8etsVqRY28ln3Ewu+I6cW4+AGZ45+Lbg2LwJBiTIqfQa8kHmg2j0TWPk7L8EgVFveZWGlxdhnmco7obZ3J5UhrDSczLR667LfitDdDX49nEsYhM3mF0W28Bf1n6RzRO8ujZZItt5WEKQSTRqs6+5LrX0uGbQy2AQzxGB+SSOxk+zEIcIuQrmZ4U5ZLPSkmo1USdRvd19zHLuBBs0uGPHh4zM= diff --git a/README.md b/README.md index 9be89ae9..d7444309 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ In this example a GRPC server is configured with a single adaptive limiter that // Create and configure a server builder ServerBuilder builder = ...; -builder.addService(ServerInterceptor.intercept(service, +builder.addService(ServerInterceptors.intercept(service, ConcurrencyLimitServerInterceptor.newBuilder( new GrpcServerLimiterBuilder() .partitionByHeader(GROUP_HEADER) diff --git a/build.gradle b/build.gradle index 5fe762ee..b431ed41 100644 --- a/build.gradle +++ b/build.gradle @@ -1,6 +1,11 @@ +import org.gradle.api.tasks.testing.Test + buildscript { repositories { - jcenter() + mavenCentral() + maven { + url = 'https://plugins.gradle.org/m2' + } } dependencies { classpath 'org.junit.platform:junit-platform-gradle-plugin:1.0.2' @@ -8,7 +13,7 @@ buildscript { } plugins { - id 'nebula.netflixoss' version '6.1.0' + id 'nebula.netflixoss' version '11.5.0' } // Establish version and status @@ -16,10 +21,26 @@ ext.githubProjectName = rootProject.name subprojects { apply plugin: 'nebula.netflixoss' + apply plugin: 'java-library' group = "com.netflix.${githubProjectName}" repositories { - jcenter() + mavenCentral() maven { url 'https://jitpack.io' } } + + + ext { + grpcVersion = "1.9.0" + jUnitVersion = "5.+" + jUnitLegacyVersion = "4.+" + mockitoVersion = "4.+" + slf4jVersion = "1.7.+" + spectatorVersion = "1.+" + springVersion = "5.+" + } + + tasks.withType(Test) { + useJUnitPlatform() + } } diff --git a/buildViaTravis.sh b/buildViaTravis.sh deleted file mode 100755 index 879e34c6..00000000 --- a/buildViaTravis.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# This script will build the project. 
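For reference, the tasks.withType(Test) { useJUnitPlatform() } block added to build.gradle above switches every subproject's test task to the JUnit Platform, so Jupiter tests and (through the vintage engine declared in the module builds) existing JUnit 4 tests both keep running. A minimal Jupiter-style test that this configuration would pick up could look like the sketch below; the class name and assertion are illustrative only.

    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class JUnitPlatformSmokeTest {
        @Test
        void runsOnTheJupiterEngine() {
            // Trivial assertion; the point is that @Test here is the Jupiter annotation,
            // which only executes once useJUnitPlatform() is enabled on the Test task.
            assertEquals(4, 2 + 2);
        }
    }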
- -if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - echo -e "Build Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]" - ./gradlew build -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then - echo -e 'Build Branch with Snapshot => Branch ['$TRAVIS_BRANCH']' - ./gradlew -Prelease.travisci=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" build snapshot --info --stacktrace -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then - echo -e 'Build Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']' - case "$TRAVIS_TAG" in - *-rc\.*) - ./gradlew -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" candidate --info --stacktrace - ;; - *) - ./gradlew -Prelease.travisci=true -Prelease.useLastTag=true -PbintrayUser="${bintrayUser}" -PbintrayKey="${bintrayKey}" -PsonatypeUsername="${sonatypeUsername}" -PsonatypePassword="${sonatypePassword}" final --info --stacktrace - ;; - esac -else - echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']' - ./gradlew build -fi diff --git a/concurrency-limits-core/build.gradle b/concurrency-limits-core/build.gradle index 411a453c..b21985ee 100644 --- a/concurrency-limits-core/build.gradle +++ b/concurrency-limits-core/build.gradle @@ -5,10 +5,10 @@ plugins { sourceCompatibility = JavaVersion.VERSION_1_8 dependencies { - compile "org.slf4j:slf4j-api:1.7.+" - - testCompile 'junit:junit-dep:4.10' - testCompile "org.slf4j:slf4j-log4j12:1.7.+" + implementation "org.slf4j:slf4j-api:${slf4jVersion}" - testCompile 'com.github.kevinmost:junit-retry-rule:cbdd972d7c' + testCompileOnly "junit:junit:${jUnitLegacyVersion}" + testImplementation "org.slf4j:slf4j-log4j12:${slf4jVersion}" + testRuntimeOnly "org.junit.vintage:junit-vintage-engine:${jUnitVersion}" + testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine:${jUnitVersion}" } diff --git a/concurrency-limits-core/dependencies.lock b/concurrency-limits-core/dependencies.lock new file mode 100644 index 00000000..37efc834 --- /dev/null +++ b/concurrency-limits-core/dependencies.lock @@ -0,0 +1,37 @@ +{ + "compileClasspath": { + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "runtimeClasspath": { + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "testCompileClasspath": { + "junit:junit": { + "locked": "4.13.2" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + } + }, + "testRuntimeClasspath": { + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.10.2" + }, + "org.junit.vintage:junit-vintage-engine": { + "locked": "5.10.2" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + } + } +} \ No newline at end of file diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/MetricRegistry.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/MetricRegistry.java index e01b1b78..058da789 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/MetricRegistry.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/MetricRegistry.java @@ -27,6 +27,14 @@ public interface MetricRegistry { */ interface SampleListener { void 
addSample(Number value); + + default void addLongSample(long value) { + addSample(value); + } + + default void addDoubleSample(double value) { + addSample(value); + } } interface Counter { @@ -59,7 +67,7 @@ default SampleListener distribution(String id, String... tagNameValuePairs) { */ @Deprecated default void registerGauge(String id, Supplier supplier, String... tagNameValuePairs) { - throw new UnsupportedOperationException("registerDistribution is deprecated"); + throw new UnsupportedOperationException("registerGauge is deprecated"); } /** diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/AIMDLimit.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/AIMDLimit.java index 940d13d6..1506f374 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/AIMDLimit.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/AIMDLimit.java @@ -17,6 +17,8 @@ import com.netflix.concurrency.limits.Limit; import com.netflix.concurrency.limits.internal.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.concurrent.TimeUnit; @@ -26,6 +28,7 @@ */ public final class AIMDLimit extends AbstractLimit { private static final long DEFAULT_TIMEOUT = TimeUnit.SECONDS.toNanos(5); + private static final Logger LOG = LoggerFactory.getLogger(AIMDLimit.class); public static class Builder { private int minLimit = 20; @@ -68,6 +71,12 @@ public Builder timeout(long timeout, TimeUnit units) { } public AIMDLimit build() { + if (initialLimit > maxLimit) { + LOG.warn("Initial limit {} exceeded maximum limit {}", initialLimit, maxLimit); + } + if (initialLimit < minLimit) { + LOG.warn("Initial limit {} is less than minimum limit {}", initialLimit, minLimit); + } return new AIMDLimit(this); } } @@ -99,10 +108,6 @@ protected int _update(long startTime, long rtt, int inflight, boolean didDrop) { currentLimit = currentLimit + 1; } - if (currentLimit >= maxLimit) { - currentLimit = currentLimit / 2; - } - return Math.min(maxLimit, Math.max(minLimit, currentLimit)); } diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/Gradient2Limit.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/Gradient2Limit.java index 11ec7d57..08ea57f7 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/Gradient2Limit.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/Gradient2Limit.java @@ -22,13 +22,12 @@ import com.netflix.concurrency.limits.internal.Preconditions; import com.netflix.concurrency.limits.limit.measurement.ExpAvgMeasurement; import com.netflix.concurrency.limits.limit.measurement.Measurement; -import com.netflix.concurrency.limits.limit.measurement.SingleMeasurement; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import java.util.function.IntUnaryOperator; /** * Concurrency limit algorithm that adjusts the limit based on the gradient of change of the current average RTT and @@ -78,7 +77,7 @@ public static class Builder { private int maxConcurrency = 200; private double smoothing = 0.2; - private Function queueSize = concurrency -> 4; + private IntUnaryOperator queueSize = concurrency -> 4; private MetricRegistry registry = EmptyMetricRegistry.INSTANCE; private int longWindow = 600; private double rttTolerance = 1.5; @@ -129,10 
+128,23 @@ public Builder queueSize(int queueSize) { /** * Function to dynamically determine the amount the estimated limit can grow while * latencies remain low as a function of the current limit. - * @param queueSize + * @param queueSize the queue size function * @return Chainable builder + * @deprecated use {@link #queueSizeFunction(IntUnaryOperator)} */ + @Deprecated public Builder queueSize(Function queueSize) { + this.queueSize = queueSize::apply; + return this; + } + + /** + * Function to dynamically determine the amount the estimated limit can grow while + * latencies remain low as a function of the current limit. + * @param queueSize the queue size function + * @return Chainable builder + */ + public Builder queueSizeFunction(IntUnaryOperator queueSize) { this.queueSize = queueSize; return this; } @@ -192,6 +204,12 @@ public Builder longWindow(int n) { } public Gradient2Limit build() { + if (initialLimit > maxConcurrency) { + LOG.warn("Initial limit {} exceeded maximum limit {}", initialLimit, maxConcurrency); + } + if (initialLimit < minLimit) { + LOG.warn("Initial limit {} is less than minimum limit {}", initialLimit, minLimit); + } return new Gradient2Limit(this); } } @@ -227,7 +245,7 @@ public static Gradient2Limit newDefault() { private final int minLimit; - private final Function queueSize; + private final IntUnaryOperator queueSize; private final double smoothing; @@ -258,15 +276,16 @@ private Gradient2Limit(Builder builder) { @Override public int _update(final long startTime, final long rtt, final int inflight, final boolean didDrop) { - final double queueSize = this.queueSize.apply((int)this.estimatedLimit); + double estimatedLimit = this.estimatedLimit; + final double queueSize = this.queueSize.applyAsInt((int) estimatedLimit); this.lastRtt = rtt; - final double shortRtt = (double)rtt; + final double shortRtt = (double) rtt; final double longRtt = this.longRtt.add(rtt).doubleValue(); - shortRttSampleListener.addSample(shortRtt); - longRttSampleListener.addSample(longRtt); - queueSizeSampleListener.addSample(queueSize); + shortRttSampleListener.addDoubleSample(shortRtt); + longRttSampleListener.addDoubleSample(longRtt); + queueSizeSampleListener.addDoubleSample(queueSize); // If the long RTT is substantially larger than the short RTT then reduce the long RTT measurement. // This can happen when latency returns to normal after a prolonged prior of excessive load. 
Reducing the @@ -289,7 +308,7 @@ public int _update(final long startTime, final long rtt, final int inflight, fin newLimit = estimatedLimit * (1 - smoothing) + newLimit * smoothing; newLimit = Math.max(minLimit, Math.min(maxLimit, newLimit)); - if ((int)estimatedLimit != newLimit) { + if ((int) estimatedLimit != newLimit && LOG.isDebugEnabled()) { LOG.debug("New limit={} shortRtt={} ms longRtt={} ms queueSize={} gradient={}", (int)newLimit, getLastRtt(TimeUnit.MICROSECONDS) / 1000.0, @@ -298,9 +317,9 @@ public int _update(final long startTime, final long rtt, final int inflight, fin gradient); } - estimatedLimit = newLimit; + this.estimatedLimit = newLimit; - return (int)estimatedLimit; + return (int) newLimit; } public long getLastRtt(TimeUnit units) { diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/GradientLimit.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/GradientLimit.java index 1cdb142d..3961dac0 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/GradientLimit.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/GradientLimit.java @@ -184,6 +184,12 @@ public Builder probeInterval(int probeInterval) { } public GradientLimit build() { + if (initialLimit > maxConcurrency) { + LOG.warn("Initial limit {} exceeded maximum limit {}", initialLimit, maxConcurrency); + } + if (initialLimit < minLimit) { + LOG.warn("Initial limit {} is less than minimum limit {}", initialLimit, minLimit); + } return new GradientLimit(this); } } @@ -258,10 +264,10 @@ private int nextProbeCountdown() { @Override public int _update(final long startTime, final long rtt, final int inflight, final boolean didDrop) { lastRtt = rtt; - minWindowRttSampleListener.addSample(rtt); + minWindowRttSampleListener.addLongSample(rtt); final double queueSize = this.queueSize.apply((int)this.estimatedLimit); - queueSizeSampleListener.addSample(queueSize); + queueSizeSampleListener.addDoubleSample(queueSize); // Reset or probe for a new noload RTT and a new estimatedLimit. It's necessary to cut the limit // in half to avoid having the limit drift upwards when the RTT is probed during heavy load. @@ -277,7 +283,7 @@ public int _update(final long startTime, final long rtt, final int inflight, fin } final long rttNoLoad = rttNoLoadMeasurement.add(rtt).longValue(); - minRttSampleListener.addSample(rttNoLoad); + minRttSampleListener.addLongSample(rttNoLoad); // Rtt could be higher than rtt_noload because of smoothing rtt noload updates // so set to 1.0 to indicate no queuing. 
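The addLongSample and addDoubleSample calls above rely on the new default methods added to MetricRegistry.SampleListener earlier in this diff; by default they box and forward to addSample(Number), but a registry implementation may override them to skip the boxing. A minimal sketch, assuming a hypothetical listener class that is not part of the library:

    import java.util.concurrent.atomic.DoubleAdder;
    import java.util.concurrent.atomic.LongAdder;

    import com.netflix.concurrency.limits.MetricRegistry;

    // Hypothetical listener: the primitive overloads avoid allocating a Number per sample.
    class PrimitiveSampleListener implements MetricRegistry.SampleListener {
        private final LongAdder count = new LongAdder();
        private final DoubleAdder sum = new DoubleAdder();

        @Override
        public void addSample(Number value) {
            // Boxed path still required by the interface; funnel it into the primitive path.
            addDoubleSample(value.doubleValue());
        }

        @Override
        public void addLongSample(long value) {
            count.increment();
            sum.add(value);
        }

        @Override
        public void addDoubleSample(double value) {
            count.increment();
            sum.add(value);
        }
    }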
Otherwise calculate the slope and don't diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/VegasLimit.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/VegasLimit.java index 8dd72b10..17ce2f4a 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/VegasLimit.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/VegasLimit.java @@ -19,14 +19,15 @@ import com.netflix.concurrency.limits.MetricRegistry; import com.netflix.concurrency.limits.MetricRegistry.SampleListener; import com.netflix.concurrency.limits.internal.EmptyMetricRegistry; -import com.netflix.concurrency.limits.internal.Preconditions; -import com.netflix.concurrency.limits.limit.functions.Log10RootFunction; +import com.netflix.concurrency.limits.limit.functions.Log10RootIntFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; +import java.util.function.DoubleUnaryOperator; import java.util.function.Function; +import java.util.function.IntUnaryOperator; /** * Limiter based on TCP Vegas where the limit increases by alpha if the queue_use is small ({@literal <} alpha) @@ -41,7 +42,7 @@ public class VegasLimit extends AbstractLimit { private static final Logger LOG = LoggerFactory.getLogger(VegasLimit.class); - private static final Function LOG10 = Log10RootFunction.create(0); + private static final IntUnaryOperator LOG10 = Log10RootIntFunction.create(0); public static class Builder { private int initialLimit = 20; @@ -49,11 +50,11 @@ public static class Builder { private MetricRegistry registry = EmptyMetricRegistry.INSTANCE; private double smoothing = 1.0; - private Function alphaFunc = (limit) -> 3 * LOG10.apply(limit.intValue()); - private Function betaFunc = (limit) -> 6 * LOG10.apply(limit.intValue()); - private Function thresholdFunc = (limit) -> LOG10.apply(limit.intValue()); - private Function increaseFunc = (limit) -> limit + LOG10.apply(limit.intValue()); - private Function decreaseFunc = (limit) -> limit - LOG10.apply(limit.intValue()); + private IntUnaryOperator alphaFunc = (limit) -> 3 * LOG10.applyAsInt(limit); + private IntUnaryOperator betaFunc = (limit) -> 6 * LOG10.applyAsInt(limit); + private IntUnaryOperator thresholdFunc = LOG10; + private DoubleUnaryOperator increaseFunc = (limit) -> limit + LOG10.applyAsInt((int) limit); + private DoubleUnaryOperator decreaseFunc = (limit) -> limit - LOG10.applyAsInt((int) limit); private int probeMultiplier = 30; private Builder() { @@ -74,13 +75,31 @@ public Builder alpha(int alpha) { this.alphaFunc = (ignore) -> alpha; return this; } - + + /** + * @deprecated use {@link #thresholdFunction(IntUnaryOperator)} + */ + @Deprecated public Builder threshold(Function threshold) { + this.thresholdFunc = threshold::apply; + return this; + } + + public Builder thresholdFunction(IntUnaryOperator threshold) { this.thresholdFunc = threshold; return this; } - + + /** + * @deprecated use {@link #alphaFunction(IntUnaryOperator)} + */ + @Deprecated public Builder alpha(Function alpha) { + this.alphaFunc = alpha::apply; + return this; + } + + public Builder alphaFunction(IntUnaryOperator alpha) { this.alphaFunc = alpha; return this; } @@ -89,18 +108,45 @@ public Builder beta(int beta) { this.betaFunc = (ignore) -> beta; return this; } - + + /** + * @deprecated use {@link #betaFunction(IntUnaryOperator)} + */ + @Deprecated public Builder beta(Function beta) { + 
this.betaFunc = beta::apply; + return this; + } + + public Builder betaFunction(IntUnaryOperator beta) { this.betaFunc = beta; return this; } - + + /** + * @deprecated use {@link #increaseFunction(DoubleUnaryOperator)} + */ + @Deprecated public Builder increase(Function increase) { + this.increaseFunc = increase::apply; + return this; + } + + public Builder increaseFunction(DoubleUnaryOperator increase) { this.increaseFunc = increase; return this; } - + + /** + * @deprecated use {@link #decreaseFunction(DoubleUnaryOperator)} + */ + @Deprecated public Builder decrease(Function decrease) { + this.decreaseFunc = decrease::apply; + return this; + } + + public Builder decreaseFunction(DoubleUnaryOperator decrease) { this.decreaseFunc = decrease; return this; } @@ -136,6 +182,9 @@ public Builder metricRegistry(MetricRegistry registry) { } public VegasLimit build() { + if (initialLimit > maxConcurrency) { + LOG.warn("Initial limit {} exceeded maximum limit {}", initialLimit, maxConcurrency); + } return new VegasLimit(this); } } @@ -161,11 +210,11 @@ public static VegasLimit newDefault() { private final int maxLimit; private final double smoothing; - private final Function alphaFunc; - private final Function betaFunc; - private final Function thresholdFunc; - private final Function increaseFunc; - private final Function decreaseFunc; + private final IntUnaryOperator alphaFunc; + private final IntUnaryOperator betaFunc; + private final IntUnaryOperator thresholdFunc; + private final DoubleUnaryOperator increaseFunc; + private final DoubleUnaryOperator decreaseFunc; private final SampleListener rttSampleListener; private final int probeMultiplier; private int probeCount = 0; @@ -198,69 +247,77 @@ private boolean shouldProbe() { @Override protected int _update(long startTime, long rtt, int inflight, boolean didDrop) { - Preconditions.checkArgument(rtt > 0, "rtt must be >0 but got " + rtt); + if (rtt <= 0) { + throw new IllegalArgumentException("rtt must be >0 but got " + rtt); + } probeCount++; if (shouldProbe()) { - LOG.debug("Probe MinRTT {}", TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0); + if (LOG.isDebugEnabled()) { + LOG.debug("Probe MinRTT {}", TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0); + } resetProbeJitter(); probeCount = 0; rtt_noload = rtt; - return (int)estimatedLimit; + return (int) estimatedLimit; } - + + long rtt_noload = this.rtt_noload; if (rtt_noload == 0 || rtt < rtt_noload) { - LOG.debug("New MinRTT {}", TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0); - rtt_noload = rtt; - return (int)estimatedLimit; + if (LOG.isDebugEnabled()) { + LOG.debug("New MinRTT {}", TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0); + } + this.rtt_noload = rtt; + return (int) estimatedLimit; } - - rttSampleListener.addSample(rtt_noload); - return updateEstimatedLimit(rtt, inflight, didDrop); + rttSampleListener.addLongSample(rtt_noload); + + return updateEstimatedLimit(rtt, rtt_noload, inflight, didDrop); } - private int updateEstimatedLimit(long rtt, int inflight, boolean didDrop) { - final int queueSize = (int) Math.ceil(estimatedLimit * (1 - (double)rtt_noload / rtt)); + private int updateEstimatedLimit(long rtt, long rtt_noload, int inflight, boolean didDrop) { + double estimatedLimit = this.estimatedLimit; + final int queueSize = (int) Math.ceil(estimatedLimit * (1 - (double) rtt_noload / rtt)); double newLimit; // Treat any drop (i.e timeout) as needing to reduce the limit if (didDrop) { - newLimit = decreaseFunc.apply(estimatedLimit); + newLimit = decreaseFunc.applyAsDouble(estimatedLimit); // Prevent 
upward drift if not close to the limit } else if (inflight * 2 < estimatedLimit) { - return (int)estimatedLimit; + return (int) estimatedLimit; } else { - int alpha = alphaFunc.apply((int)estimatedLimit); - int beta = betaFunc.apply((int)estimatedLimit); - int threshold = this.thresholdFunc.apply((int)estimatedLimit); + int alpha = alphaFunc.applyAsInt((int) estimatedLimit); + int beta = betaFunc.applyAsInt((int) estimatedLimit); + int threshold = thresholdFunc.applyAsInt((int) estimatedLimit); // Aggressive increase when no queuing if (queueSize <= threshold) { newLimit = estimatedLimit + beta; // Increase the limit if queue is still manageable } else if (queueSize < alpha) { - newLimit = increaseFunc.apply(estimatedLimit); + newLimit = increaseFunc.applyAsDouble(estimatedLimit); // Detecting latency so decrease } else if (queueSize > beta) { - newLimit = decreaseFunc.apply(estimatedLimit); + newLimit = decreaseFunc.applyAsDouble(estimatedLimit); // We're within he sweet spot so nothing to do } else { - return (int)estimatedLimit; + return (int) estimatedLimit; } } newLimit = Math.max(1, Math.min(maxLimit, newLimit)); newLimit = (1 - smoothing) * estimatedLimit + smoothing * newLimit; - if ((int)newLimit != (int)estimatedLimit && LOG.isDebugEnabled()) { + if ((int) newLimit != (int) estimatedLimit && LOG.isDebugEnabled()) { LOG.debug("New limit={} minRtt={} ms winRtt={} ms queueSize={}", - (int)newLimit, + (int) newLimit, TimeUnit.NANOSECONDS.toMicros(rtt_noload) / 1000.0, TimeUnit.NANOSECONDS.toMicros(rtt) / 1000.0, queueSize); } - estimatedLimit = newLimit; - return (int)estimatedLimit; + this.estimatedLimit = newLimit; + return (int) newLimit; } @Override diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/WindowedLimit.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/WindowedLimit.java index c9d0ac88..cf86082f 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/WindowedLimit.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/WindowedLimit.java @@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.Consumer; public class WindowedLimit implements Limit { @@ -103,7 +104,7 @@ public WindowedLimit build(Limit delegate) { private final long minRttThreshold; - private final Object lock = new Object(); + private final ReentrantLock lock = new ReentrantLock(); private final SampleWindowFactory sampleWindowFactory; @@ -129,7 +130,7 @@ public void notifyOnChange(Consumer consumer) { @Override public void onSample(long startTime, long rtt, int inflight, boolean didDrop) { - long endTime = startTime + rtt; + final long endTime = startTime + rtt; if (rtt < minRttThreshold) { return; @@ -138,15 +139,25 @@ public void onSample(long startTime, long rtt, int inflight, boolean didDrop) { sample.updateAndGet(current -> current.addSample(rtt, inflight, didDrop)); if (endTime > nextUpdateTime) { - synchronized (lock) { - // Double check under the lock - if (endTime > nextUpdateTime) { - SampleWindow current = sample.getAndSet(sampleWindowFactory.newInstance()); - nextUpdateTime = endTime + Math.min(Math.max(current.getCandidateRttNanos() * 2, minWindowTime), maxWindowTime); - - if (isWindowReady(current)) { - delegate.onSample(startTime, current.getTrackedRttNanos(), current.getMaxInFlight(), current.didDrop()); + // Only allow one thread at 
propagate the sample to the delegate. + // Other threads are free to continue. They will continue to accumulate + // against 'sample'. + boolean haveLock = lock.tryLock(); + if (haveLock) { + try { + // Double check under the lock, in case of the flow: + // A : check end time , lock , , set nextUpdateTime , unlock , + // B : check end time , lock , [[nextUpdateTime has changed!]] + if (endTime > nextUpdateTime) { + SampleWindow current = sample.getAndSet(sampleWindowFactory.newInstance()); + nextUpdateTime = endTime + Math.min(Math.max(current.getCandidateRttNanos() * 2, minWindowTime), maxWindowTime); + + if (isWindowReady(current)) { + delegate.onSample(startTime, current.getTrackedRttNanos(), current.getMaxInFlight(), current.didDrop()); + } } + } finally { + lock.unlock(); } } } diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootFunction.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootFunction.java index 78acbe84..4c1079ff 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootFunction.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootFunction.java @@ -19,30 +19,33 @@ import java.util.stream.IntStream; /** - * Function used by limiters to calculate thredsholds using log10 of the current limit. + * Function used by limiters to calculate thresholds using log10 of the current limit. * Here we pre-compute the log10 of numbers up to 1000 as an optimization. + * + * @deprecated use {@link Log10RootIntFunction} */ +@Deprecated public final class Log10RootFunction implements Function { static final int[] lookup = new int[1000]; - + static { IntStream.range(0, 1000).forEach(i -> lookup[i] = Math.max(1, (int)Math.log10(i))); } - + private static final Log10RootFunction INSTANCE = new Log10RootFunction(); - + /** * Create an instance of a function that returns : baseline + sqrt(limit) - * + * * @param baseline * @return */ public static Function create(int baseline) { return INSTANCE.andThen(t -> t + baseline); } - + @Override public Integer apply(Integer t) { return t < 1000 ? lookup[t] : (int)Math.log10(t); } -} +} \ No newline at end of file diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootIntFunction.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootIntFunction.java new file mode 100644 index 00000000..356cf270 --- /dev/null +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limit/functions/Log10RootIntFunction.java @@ -0,0 +1,37 @@ +package com.netflix.concurrency.limits.limit.functions; + +import java.util.function.IntUnaryOperator; + +/** + * Function used by limiters to calculate thresholds using log10 of the current limit. + * Here we pre-compute the log10 of numbers up to 1000 as an optimization. 
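The WindowedLimit.onSample change above replaces the synchronized double-checked block with ReentrantLock.tryLock(), so a thread that loses the race skips propagation instead of blocking, while its sample still accumulates in the window. The locking pattern in isolation, as a sketch with illustrative names:

    import java.util.concurrent.locks.ReentrantLock;

    // Illustrative single-propagator pattern: only the thread that wins tryLock()
    // advances the window; everyone else returns immediately without blocking.
    class SingleFlightPropagator {
        private final ReentrantLock lock = new ReentrantLock();
        private volatile long nextUpdateTime = 0;

        void maybePropagate(long endTime, Runnable propagate) {
            if (endTime > nextUpdateTime && lock.tryLock()) {
                try {
                    // Re-check under the lock: another thread may have advanced
                    // nextUpdateTime between the first check and acquiring the lock.
                    if (endTime > nextUpdateTime) {
                        nextUpdateTime = endTime + 1_000_000L; // illustrative window length
                        propagate.run();
                    }
                } finally {
                    lock.unlock();
                }
            }
        }
    }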
+ */ +public final class Log10RootIntFunction implements IntUnaryOperator { + + private Log10RootIntFunction() {} + + private static final int[] lookup = new int[1000]; + + static { + for (int i = 0; i < lookup.length; i++) { + lookup[i] = Math.max(1, (int) Math.log10(i)); + } + } + + private static final Log10RootIntFunction INSTANCE = new Log10RootIntFunction(); + + /** + * Create an instance of a function that returns : baseline + sqrt(limit) + * + * @param baseline + * @return + */ + public static IntUnaryOperator create(int baseline) { + return baseline == 0 ? INSTANCE : INSTANCE.andThen(t -> t + baseline); + } + + @Override + public int applyAsInt(int t) { + return t < 1000 ? lookup[t] : (int) Math.log10(t); + } +} diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractLimiter.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractLimiter.java index 08b1a25a..8013d71f 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractLimiter.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractLimiter.java @@ -24,21 +24,41 @@ import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongSupplier; +import java.util.function.Predicate; import java.util.function.Supplier; public abstract class AbstractLimiter implements Limiter { public static final String ID_TAG = "id"; public static final String STATUS_TAG = "status"; + private static final Listener NOOP_LISTENER = new Listener() { + @Override + public void onSuccess() { + } + @Override + public void onIgnore() { + } + @Override + public void onDropped() { + } + @Override + public String toString() { + return "{NoopListener}"; + } + }; public abstract static class Builder> { private static final AtomicInteger idCounter = new AtomicInteger(); private Limit limit = VegasLimit.newDefault(); - private Supplier clock = System::nanoTime; + private LongSupplier clock = System::nanoTime; protected String name = "unnamed-" + idCounter.incrementAndGet(); protected MetricRegistry registry = EmptyMetricRegistry.INSTANCE; + private final Predicate ALWAYS_FALSE = (context) -> false; + private Predicate bypassResolver = ALWAYS_FALSE; + public BuilderT named(String name) { this.name = name; return self(); @@ -49,7 +69,16 @@ public BuilderT limit(Limit limit) { return self(); } + /** + * @deprecated use {@link #nanoClock(LongSupplier)} + */ + @Deprecated public BuilderT clock(Supplier clock) { + this.clock = clock::get; + return self(); + } + + public BuilderT nanoClock(LongSupplier clock) { this.clock = clock; return self(); } @@ -60,15 +89,41 @@ public BuilderT metricRegistry(MetricRegistry registry) { } protected abstract BuilderT self(); + + /** + * Add a chainable bypass resolver predicate from context. Multiple resolvers may be added and if any of the + * predicate condition returns true the call is bypassed without increasing the limiter inflight count and + * affecting the algorithm. Will not bypass any calls by default if no resolvers are added. + * + * Due to the builders not having access to the ContextT, it is the duty of subclasses to ensure that + * implementations are type safe. + * + * Predicates should not rely strictly on state of the Limiter (such as inflight count) when evaluating + * whether to bypass. 
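For reference, the new Log10RootIntFunction above is a drop-in primitive replacement for the deprecated boxed function; a few illustrative values, consistent with the unit test added later in this diff:

    import java.util.function.IntUnaryOperator;

    import com.netflix.concurrency.limits.limit.functions.Log10RootIntFunction;

    class Log10RootExample {
        public static void main(String[] args) {
            IntUnaryOperator fn = Log10RootIntFunction.create(0);
            System.out.println(fn.applyAsInt(0));      // 1 (the lookup table clamps to a minimum of 1)
            System.out.println(fn.applyAsInt(100));    // 2 (pre-computed lookup)
            System.out.println(fn.applyAsInt(10_000)); // 4 (outside the table, Math.log10 fallback)
        }
    }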
There is no guarantee that the state will be synchronized or consistent with respect to + * the bypass predicate, and the bypass predicate may be called by multiple threads concurrently. + * + * @param shouldBypass Predicate condition to bypass limit + * @return Chainable builder + */ + protected final BuilderT bypassLimitResolverInternal(Predicate shouldBypass) { + if (this.bypassResolver == ALWAYS_FALSE) { + this.bypassResolver = (Predicate) shouldBypass; + } else { + this.bypassResolver = bypassResolver.or((Predicate) shouldBypass); + } + return self(); + } } private final AtomicInteger inFlight = new AtomicInteger(); - private final Supplier clock; + private final LongSupplier clock; private final Limit limitAlgorithm; private final MetricRegistry.Counter successCounter; private final MetricRegistry.Counter droppedCounter; private final MetricRegistry.Counter ignoredCounter; private final MetricRegistry.Counter rejectedCounter; + private final MetricRegistry.Counter bypassCounter; + private final Predicate bypassResolver; private volatile int limit; @@ -77,12 +132,18 @@ protected AbstractLimiter(Builder builder) { this.limitAlgorithm = builder.limit; this.limit = limitAlgorithm.getLimit(); this.limitAlgorithm.notifyOnChange(this::onNewLimit); + this.bypassResolver = (Predicate) builder.bypassResolver; builder.registry.gauge(MetricIds.LIMIT_NAME, this::getLimit); this.successCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "success"); this.droppedCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "dropped"); this.ignoredCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "ignored"); this.rejectedCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "rejected"); + this.bypassCounter = builder.registry.counter(MetricIds.CALL_NAME, ID_TAG, builder.name, STATUS_TAG, "bypassed"); + } + + protected boolean shouldBypass(ContextT context){ + return bypassResolver.test(context); } protected Optional createRejectedListener() { @@ -90,8 +151,13 @@ protected Optional createRejectedListener() { return Optional.empty(); } + protected Optional createBypassListener() { + this.bypassCounter.increment(); + return Optional.of(NOOP_LISTENER); + } + protected Listener createListener() { - final long startTime = clock.get(); + final long startTime = clock.getAsLong(); final int currentInflight = inFlight.incrementAndGet(); return new Listener() { @Override @@ -99,7 +165,7 @@ public void onSuccess() { inFlight.decrementAndGet(); successCounter.increment(); - limitAlgorithm.onSample(startTime, clock.get() - startTime, currentInflight, false); + limitAlgorithm.onSample(startTime, clock.getAsLong() - startTime, currentInflight, false); } @Override @@ -113,7 +179,7 @@ public void onDropped() { inFlight.decrementAndGet(); droppedCounter.increment(); - limitAlgorithm.onSample(startTime, clock.get() - startTime, currentInflight, true); + limitAlgorithm.onSample(startTime, clock.getAsLong() - startTime, currentInflight, true); } }; } diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiter.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiter.java index 57d1e8f1..53109512 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiter.java +++ 
b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiter.java @@ -30,7 +30,6 @@ import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; public abstract class AbstractPartitionedLimiter extends AbstractLimiter { @@ -105,10 +104,10 @@ public Limiter build() { static class Partition { private final String name; + private final AtomicInteger busy = new AtomicInteger(0); private double percent = 0.0; - private int limit = 0; - private int busy = 0; + private volatile int limit = 0; private long backoffMillis = 0; private MetricRegistry.SampleListener inflightDistribution; @@ -134,17 +133,33 @@ void updateLimit(int totalLimit) { } boolean isLimitExceeded() { - return busy >= limit; + return busy.get() >= limit; } void acquire() { - busy++; - inflightDistribution.addSample(busy); + int nowBusy = busy.incrementAndGet(); + inflightDistribution.addLongSample(nowBusy); + } + + /** + * Try to acquire a slot, returning false if the limit is exceeded. + * @return + */ + boolean tryAcquire() { + int current = busy.get(); + while (current < limit) { + if (busy.compareAndSet(current, current + 1)) { + inflightDistribution.addLongSample(current + 1); + return true; + } + current = busy.get(); + } + return false; } void release() { - busy--; + busy.decrementAndGet(); } int getLimit() { @@ -152,7 +167,7 @@ int getLimit() { } public int getInflight() { - return busy; + return busy.get(); } double getPercent() { @@ -166,14 +181,13 @@ void createMetrics(MetricRegistry registry) { @Override public String toString() { - return "Partition [pct=" + percent + ", limit=" + limit + ", busy=" + busy + "]"; + return "Partition [pct=" + percent + ", limit=" + limit + ", busy=" + busy.get() + "]"; } } private final Map partitions; private final Partition unknownPartition; private final List> partitionResolvers; - private final ReentrantLock lock = new ReentrantLock(); private final AtomicInteger delayedThreads = new AtomicInteger(); private final int maxDelayedThreads; @@ -211,60 +225,67 @@ private Partition resolvePartition(ContextT context) { @Override public Optional acquire(ContextT context) { - final Partition partition = resolvePartition(context); - - try { - lock.lock(); - if (getInflight() >= getLimit() && partition.isLimitExceeded()) { - lock.unlock(); - if (partition.backoffMillis > 0 && delayedThreads.get() < maxDelayedThreads) { - try { - delayedThreads.incrementAndGet(); - TimeUnit.MILLISECONDS.sleep(partition.backoffMillis); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } finally { - delayedThreads.decrementAndGet(); - } - } + if (shouldBypass(context)){ + return createBypassListener(); + } - return createRejectedListener(); - } + final Partition partition = resolvePartition(context); + // This is a little unusual in that the partition is not a hard limit. It is + // only a limit that it is applied if the global limit is exceeded. This allows + // for excess capacity in each partition to allow for bursting over the limit, + // but only if there is spare global capacity. 
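The comment above summarizes the new partition semantics: a partition's share is only enforced once the global limit is exhausted, so a partition may burst into spare global capacity. A sketch of that behaviour, written in the style of AbstractPartitionedLimiterTest further down this diff and using its TestPartitionedLimiter helper and FixedLimit; the specific numbers are illustrative.

    // 10 total permits; "batch" is entitled to 1 of them, "live" to 9.
    @Test
    public void partitionsOnlyConstrainWhenGlobalLimitIsReached() {
        AbstractPartitionedLimiter limiter = (AbstractPartitionedLimiter) TestPartitionedLimiter.newBuilder()
                .partitionResolver(Function.identity())
                .partition("batch", 0.1)
                .partition("live", 0.9)
                .limit(FixedLimit.of(10))
                .build();

        // While the global limit has headroom, "batch" can burst far past its 10% share.
        for (int i = 0; i < 10; i++) {
            Assert.assertTrue(limiter.acquire("batch").isPresent());
        }

        // Global limit reached: partitions are now held to their own shares, so "batch"
        // (already past its single permit) is rejected while "live" is still admitted.
        Assert.assertFalse(limiter.acquire("batch").isPresent());
        Assert.assertTrue(limiter.acquire("live").isPresent());
    }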
+ + final boolean overLimit; + if (getInflight() >= getLimit()) { + // over global limit, so respect partition limit + boolean couldAcquire = partition.tryAcquire(); + overLimit = !couldAcquire; + } else { + // we are below global limit, so no need to respect partition limit partition.acquire(); - final Listener listener = createListener(); - return Optional.of(new Listener() { - @Override - public void onSuccess() { - listener.onSuccess(); - releasePartition(partition); - } + overLimit = false; + } - @Override - public void onIgnore() { - listener.onIgnore(); - releasePartition(partition); + if (overLimit) { + if (partition.backoffMillis > 0 && delayedThreads.get() < maxDelayedThreads) { + try { + delayedThreads.incrementAndGet(); + TimeUnit.MILLISECONDS.sleep(partition.backoffMillis); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + delayedThreads.decrementAndGet(); } + } - @Override - public void onDropped() { - listener.onDropped(); - releasePartition(partition); - } - }); - } finally { - if (lock.isHeldByCurrentThread()) - lock.unlock(); + return createRejectedListener(); } + + final Listener listener = createListener(); + return Optional.of(new Listener() { + @Override + public void onSuccess() { + listener.onSuccess(); + releasePartition(partition); + } + + @Override + public void onIgnore() { + listener.onIgnore(); + releasePartition(partition); + } + + @Override + public void onDropped() { + listener.onDropped(); + releasePartition(partition); + } + }); } private void releasePartition(Partition partition) { - try { - lock.lock(); - partition.release(); - } finally { - lock.unlock(); - } + partition.release(); } @Override diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiter.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiter.java index a8b71fb5..45fa4034 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiter.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiter.java @@ -26,29 +26,29 @@ import com.netflix.concurrency.limits.Limiter; /** - * {@link Limiter} decorator that blocks the caller when the limit has been reached. This + * {@link Limiter} decorator that blocks the caller when the limit has been reached. This * strategy ensures the resource is properly protected but favors availability over latency * by not fast failing requests when the limit has been reached. To help keep success latencies - * low and minimize timeouts any blocked requests are processed in last in/first out order. - * - * Use this limiter only when the threading model allows the limiter to be blocked. - * + * low and minimize timeouts any blocked requests are processed in last in/first out order. + * + * Use this limiter only when the threading model allows the limiter to be blocked. + * * @param */ public final class LifoBlockingLimiter implements Limiter { public static class Builder { - + private final Limiter delegate; private int maxBacklogSize = 100; private Function maxBacklogTimeoutMillis = context -> 1_000L; - + private Builder(Limiter delegate) { this.delegate = delegate; } /** * Set maximum number of blocked threads - * + * * @param size New max size. Default is 100. 
* @return Chainable builder */ @@ -60,6 +60,7 @@ public Builder backlogSize(int size) { /** * @deprecated Use {@link #backlogSize} */ + @Deprecated public Builder maxBacklogSize(int size) { this.maxBacklogSize = size; return this; @@ -68,7 +69,7 @@ public Builder maxBacklogSize(int size) { /** * Set maximum timeout for threads blocked on the limiter. * Default is 1 second. - * + * * @param timeout * @param units * @return Chainable builder @@ -76,11 +77,11 @@ public Builder maxBacklogSize(int size) { public Builder backlogTimeout(long timeout, TimeUnit units) { return backlogTimeoutMillis(units.toMillis(timeout)); } - + /** * Set maximum timeout for threads blocked on the limiter. * Default is 1 second. - * + * * @param timeout * @return Chainable builder */ @@ -91,7 +92,7 @@ public Builder backlogTimeoutMillis(long timeout) { /** * Function to derive the backlog timeout from the request context. This allows timeouts - * to be set dynamically based on things like request deadlines. + * to be set dynamically based on things like request deadlines. * @param mapper * @param units * @return @@ -105,18 +106,18 @@ public LifoBlockingLimiter build() { return new LifoBlockingLimiter(this); } } - + public static Builder newBuilder(Limiter delegate) { return new Builder(delegate); } - + private final Limiter delegate; - + private static class ListenerHolder { - private volatile Optional listener; + private volatile Optional listener = Optional.empty(); private final CountDownLatch latch = new CountDownLatch(1); private ContextT context; - + public ListenerHolder(ContextT context) { this.context = context; } @@ -124,40 +125,40 @@ public ListenerHolder(ContextT context) { public boolean await(long timeout, TimeUnit unit) throws InterruptedException { return latch.await(timeout, unit); } - + public void set(Optional listener) { this.listener = listener; latch.countDown(); } - + } - + /** * Lock used to block and unblock callers as the limit is reached */ private final Deque> backlog = new LinkedList<>(); - + private final AtomicInteger backlogCounter = new AtomicInteger(); - + private final int backlogSize; - + private final Function backlogTimeoutMillis; - + private final Object lock = new Object(); - + private LifoBlockingLimiter(Builder builder) { this.delegate = builder.delegate; this.backlogSize = builder.maxBacklogSize; this.backlogTimeoutMillis = builder.maxBacklogTimeoutMillis; } - + private Optional tryAcquire(ContextT context) { // Try to acquire a token and return immediately if successful final Optional listener = delegate.acquire(context); if (listener.isPresent()) { return listener; } - + // Restrict backlog size so the queue doesn't grow unbounded during an outage if (backlogCounter.get() >= this.backlogSize) { return Optional.empty(); @@ -172,14 +173,16 @@ private Optional tryAcquire(ContextT context) { synchronized (lock) { backlog.addFirst(event); } - + if (!event.await(backlogTimeoutMillis.apply(context), TimeUnit.MILLISECONDS)) { // Remove the holder from the backlog. 
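LifoBlockingLimiter is a decorator, so the changes in this file are exercised simply by wrapping another limiter. A minimal wiring sketch; the helper method name and the chosen backlog size and timeout are illustrative:

    import java.util.concurrent.TimeUnit;

    import com.netflix.concurrency.limits.Limiter;
    import com.netflix.concurrency.limits.limiter.LifoBlockingLimiter;

    final class Limiters {
        // Wrap any limiter so that, once the delegate is saturated, up to 50 callers
        // block for at most 500 ms each, with the backlog served last-in/first-out.
        static <ContextT> Limiter<ContextT> withLifoBacklog(Limiter<ContextT> delegate) {
            return LifoBlockingLimiter.newBuilder(delegate)
                    .backlogSize(50)
                    .backlogTimeout(500, TimeUnit.MILLISECONDS)
                    .build();
        }
    }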
This item is likely to be at the end of the // list so do a removeLastOccurance to minimize the number of items to traverse synchronized (lock) { backlog.removeLastOccurrence(event); } - return Optional.empty(); + // if we acquired a token just as we were timing out then return it, otherwise the + // token would get lost + return event.listener; } return event.listener; } catch (InterruptedException e) { @@ -187,12 +190,13 @@ private Optional tryAcquire(ContextT context) { backlog.removeFirstOccurrence(event); } Thread.currentThread().interrupt(); - return Optional.empty(); + // if we acquired a token just as we were interrupted, then return it + return event.listener; } finally { backlogCounter.decrementAndGet(); } } - + private void unblock() { synchronized (lock) { if (!backlog.isEmpty()) { @@ -209,7 +213,7 @@ private void unblock() { } } } - + @Override public Optional acquire(ContextT context) { return tryAcquire(context).map(delegate -> new Listener() { @@ -232,7 +236,7 @@ public void onDropped() { } }); } - + @Override public String toString() { return "BlockingLimiter [" + delegate + "]"; diff --git a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/SimpleLimiter.java b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/SimpleLimiter.java index 9cafea52..0b6cd382 100644 --- a/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/SimpleLimiter.java +++ b/concurrency-limits-core/src/main/java/com/netflix/concurrency/limits/limiter/SimpleLimiter.java @@ -15,10 +15,12 @@ */ package com.netflix.concurrency.limits.limiter; +import com.netflix.concurrency.limits.Limiter; import com.netflix.concurrency.limits.MetricIds; import com.netflix.concurrency.limits.MetricRegistry; import java.util.Optional; +import java.util.concurrent.Semaphore; public class SimpleLimiter extends AbstractLimiter { public static class Builder extends AbstractLimiter.Builder { @@ -35,22 +37,81 @@ protected Builder self() { public static Builder newBuilder() { return new Builder(); } - private final MetricRegistry.SampleListener inflightDistribution; + private final AdjustableSemaphore semaphore; public SimpleLimiter(AbstractLimiter.Builder builder) { super(builder); this.inflightDistribution = builder.registry.distribution(MetricIds.INFLIGHT_NAME); + this.semaphore = new AdjustableSemaphore(getLimit()); + } + + @Override + public Optional acquire(ContextT context) { + Optional listener; + if (shouldBypass(context)) { + listener = createBypassListener(); + } + else if (!semaphore.tryAcquire()) { + listener = createRejectedListener(); + } + else { + listener = Optional.of(new Listener(createListener())); + } + inflightDistribution.addLongSample(getInflight()); + return listener; } @Override - public Optional acquire(ContextT context) { - int currentInFlight = getInflight(); - inflightDistribution.addSample(currentInFlight); - if (currentInFlight >= getLimit()) { - return createRejectedListener(); + protected void onNewLimit(int newLimit) { + int oldLimit = this.getLimit(); + super.onNewLimit(newLimit); + + if (newLimit > oldLimit) { + semaphore.release(newLimit - oldLimit); + } else { + semaphore.reducePermits(oldLimit - newLimit); + } + } + + /** + * Simple Semaphore subclass that allows access to its reducePermits method. 
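The onNewLimit override above keeps the semaphore in step with the limit algorithm: extra permits are released when the limit grows, and permits are withdrawn with reducePermits when it shrinks. The bookkeeping in isolation, as a sketch with an illustrative class name rather than the library's private AdjustableSemaphore:

    import java.util.concurrent.Semaphore;

    // Illustrative stand-in: Semaphore#reducePermits is protected, so a small subclass
    // is what makes the permit count adjustable downward as well as upward.
    class ResizablePermits extends Semaphore {
        ResizablePermits(int permits) {
            super(permits);
        }

        void onLimitChange(int oldLimit, int newLimit) {
            if (newLimit > oldLimit) {
                // Limit grew: hand the extra permits to waiting or new callers.
                release(newLimit - oldLimit);
            } else {
                // Limit shrank: withdraw permits; the available count may go negative
                // until enough in-flight calls complete and release.
                reducePermits(oldLimit - newLimit);
            }
        }
    }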
+ */ + private static final class AdjustableSemaphore extends Semaphore { + AdjustableSemaphore(int permits) { + super(permits); + } + + @Override + public void reducePermits(int reduction) { + super.reducePermits(reduction); + } + } + + private class Listener implements Limiter.Listener { + private final Limiter.Listener delegate; + + Listener(Limiter.Listener delegate) { + this.delegate = delegate; + } + + @Override + public void onSuccess() { + delegate.onSuccess(); + semaphore.release(); + } + + @Override + public void onIgnore() { + delegate.onIgnore(); + semaphore.release(); + } + + @Override + public void onDropped() { + delegate.onDropped(); + semaphore.release(); } - return Optional.of(createListener()); } } diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/AIMDLimitTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/AIMDLimitTest.java index d4379c57..20fa2159 100644 --- a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/AIMDLimitTest.java +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/AIMDLimitTest.java @@ -1,6 +1,6 @@ package com.netflix.concurrency.limits.limit; -import junit.framework.Assert; +import org.junit.Assert; import org.junit.Test; import java.util.concurrent.TimeUnit; @@ -25,4 +25,12 @@ public void decreaseOnDrops() { limiter.onSample(0, 0, 0, true); Assert.assertEquals(27, limiter.getLimit()); } + + @Test + public void successOverflow() { + AIMDLimit limiter = AIMDLimit.newBuilder().initialLimit(21).maxLimit(21).minLimit(0).build(); + limiter.onSample(0, TimeUnit.MILLISECONDS.toNanos(1), 10, false); + // after success limit should still be at the max. + Assert.assertEquals(21, limiter.getLimit()); + } } diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/ExpAvgMeasurementTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/ExpAvgMeasurementTest.java index f9582ec9..28a9c1ae 100644 --- a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/ExpAvgMeasurementTest.java +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/ExpAvgMeasurementTest.java @@ -1,12 +1,10 @@ package com.netflix.concurrency.limits.limit; import com.netflix.concurrency.limits.limit.measurement.ExpAvgMeasurement; + import org.junit.Assert; import org.junit.Test; -import java.util.Arrays; -import java.util.List; - public class ExpAvgMeasurementTest { @Test public void testWarmup() { diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/VegasLimitTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/VegasLimitTest.java index 521becf3..acb4d7d5 100644 --- a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/VegasLimitTest.java +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/VegasLimitTest.java @@ -1,6 +1,6 @@ package com.netflix.concurrency.limits.limit; -import junit.framework.Assert; +import org.junit.Assert; import org.junit.Test; import java.util.concurrent.TimeUnit; @@ -58,7 +58,7 @@ public void noChangeIfWithinThresholds() { @Test public void decreaseSmoothing() { VegasLimit limit = VegasLimit.newBuilder() - .decrease(current -> current / 2) + .decreaseFunction(current -> current / 2) .smoothing(0.5) .initialLimit(100) .maxConcurrency(200) @@ -77,10 +77,33 @@ public void decreaseSmoothing() { Assert.assertEquals(56, 
limit.getLimit()); } + @Test + public void decreaseSmoothingDeprecatedBuilderMethod() { + @SuppressWarnings("deprecation") + VegasLimit limit = VegasLimit.newBuilder() + .decrease(current -> current / 2) + .smoothing(0.5) + .initialLimit(100) + .maxConcurrency(200) + .build(); + + // Pick up first min-rtt + limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 100, false); + Assert.assertEquals(100, limit.getLimit()); + + // First decrease + limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false); + Assert.assertEquals(75, limit.getLimit()); + + // Second decrease + limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false); + Assert.assertEquals(56, limit.getLimit()); + } + @Test public void decreaseWithoutSmoothing() { VegasLimit limit = VegasLimit.newBuilder() - .decrease(current -> current / 2) + .decreaseFunction(current -> current / 2) .initialLimit(100) .maxConcurrency(200) .build(); @@ -97,4 +120,26 @@ public void decreaseWithoutSmoothing() { limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false); Assert.assertEquals(25, limit.getLimit()); } + + @Test + public void decreaseWithoutSmoothingDeprecatedBuilderMethod() { + @SuppressWarnings("deprecation") + VegasLimit limit = VegasLimit.newBuilder() + .decrease(current -> current / 2) + .initialLimit(100) + .maxConcurrency(200) + .build(); + + // Pick up first min-rtt + limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(10), 100, false); + Assert.assertEquals(100, limit.getLimit()); + + // First decrease + limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false); + Assert.assertEquals(50, limit.getLimit()); + + // Second decrease + limit.onSample(0, TimeUnit.MILLISECONDS.toNanos(20), 100, false); + Assert.assertEquals(25, limit.getLimit()); + } } diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/functions/Log10RootIntFunctionTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/functions/Log10RootIntFunctionTest.java new file mode 100644 index 00000000..cbcd04b8 --- /dev/null +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limit/functions/Log10RootIntFunctionTest.java @@ -0,0 +1,26 @@ +package com.netflix.concurrency.limits.limit.functions; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.function.IntUnaryOperator; + +public class Log10RootIntFunctionTest { + @Test + public void test0Index() { + IntUnaryOperator func = Log10RootIntFunction.create(0); + Assert.assertEquals(1, func.applyAsInt(0)); + } + + @Test + public void testInRange() { + IntUnaryOperator func = Log10RootIntFunction.create(0); + Assert.assertEquals(2, func.applyAsInt(100)); + } + + @Test + public void testOutofLookupRange() { + IntUnaryOperator func = Log10RootIntFunction.create(0); + Assert.assertEquals(4, func.applyAsInt(10000)); + } +} \ No newline at end of file diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiterTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiterTest.java index cf81b191..505a7f8a 100644 --- a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiterTest.java +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/AbstractPartitionedLimiterTest.java @@ -1,13 +1,23 @@ package com.netflix.concurrency.limits.limiter; import com.netflix.concurrency.limits.Limiter; +import 
com.netflix.concurrency.limits.Limiter.Listener; import com.netflix.concurrency.limits.limit.FixedLimit; import com.netflix.concurrency.limits.limit.SettableLimit; import org.junit.Assert; import org.junit.Test; +import java.util.Arrays; +import java.util.Map; import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import java.util.function.Predicate; public class AbstractPartitionedLimiterTest { public static class TestPartitionedLimiter extends AbstractPartitionedLimiter { @@ -27,6 +37,13 @@ public TestPartitionedLimiter(Builder builder) { } } + public static class ShouldBypassPredicate implements Predicate { + @Override + public boolean test(String s) { + return s.contains("admin"); + } + } + @Test public void limitAllocatedToBins() { AbstractPartitionedLimiter limiter = (AbstractPartitionedLimiter) TestPartitionedLimiter.newBuilder() @@ -156,4 +173,149 @@ public void setLimitReservesBusy() { Assert.assertEquals(1, limiter.getPartition("batch").getInflight()); Assert.assertEquals(1, limiter.getInflight()); } + + @Test + public void testBypassPartitionedLimiter() { + + AbstractPartitionedLimiter limiter = (AbstractPartitionedLimiter) TestPartitionedLimiter.newBuilder() + .partitionResolver(Function.identity()) + .partition("batch", 0.1) + .partition("live", 0.9) + .limit(FixedLimit.of(10)) + .bypassLimitResolverInternal(new ShouldBypassPredicate()) + .build(); + + Assert.assertTrue(limiter.acquire("batch").isPresent()); + Assert.assertEquals(1, limiter.getPartition("batch").getInflight()); + Assert.assertTrue(limiter.acquire("admin").isPresent()); + + for (int i = 0; i < 9; i++) { + Assert.assertTrue(limiter.acquire("live").isPresent()); + Assert.assertEquals(i+1, limiter.getPartition("live").getInflight()); + Assert.assertTrue(limiter.acquire("admin").isPresent()); + } + + // Verify that bypassed requests are able to proceed even when the limiter is full + Assert.assertFalse(limiter.acquire("batch").isPresent()); + Assert.assertEquals(1, limiter.getPartition("batch").getInflight()); + Assert.assertFalse(limiter.acquire("live").isPresent()); + Assert.assertEquals(9, limiter.getPartition("live").getInflight()); + Assert.assertEquals(10, limiter.getInflight()); + Assert.assertTrue(limiter.acquire("admin").isPresent()); + } + + @Test + public void testBypassSimpleLimiter() { + + SimpleLimiter limiter = (SimpleLimiter) TestPartitionedLimiter.newBuilder() + .limit(FixedLimit.of(10)) + .bypassLimitResolverInternal(new ShouldBypassPredicate()) + .build(); + + int inflightCount = 0; + for (int i = 0; i < 5; i++) { + Assert.assertTrue(limiter.acquire("request").isPresent()); + Assert.assertEquals(i+1, limiter.getInflight()); + inflightCount++; + } + + for (int i = 0; i < 15; i++) { + Assert.assertTrue(limiter.acquire("admin").isPresent()); + Assert.assertEquals(inflightCount, limiter.getInflight()); + } + + for (int i = 0; i < 5; i++) { + Assert.assertTrue(limiter.acquire("request").isPresent()); + Assert.assertEquals(inflightCount+i+1, limiter.getInflight()); + } + + // Calls with passing bypass condition will return a token + // whereas remaining calls will be throttled since inflight count is greater than the limit + for (int i = 0; i < 10; i++) { + Assert.assertFalse(limiter.acquire("request").isPresent()); + 
Assert.assertTrue(limiter.acquire("admin").isPresent()); + } + } + + @Test + public void testConcurrentPartitions() throws InterruptedException { + final int THREAD_COUNT = 5; + final int ITERATIONS = 500; + final int LIMIT = 20; + + AbstractPartitionedLimiter limiter = (AbstractPartitionedLimiter) TestPartitionedLimiter.newBuilder() + .limit(FixedLimit.of(LIMIT)) + .partitionResolver(Function.identity()) + .partition("A", 0.5) + .partition("B", 0.3) + .partition("C", 0.2) + .build(); + + ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT * 3); + CountDownLatch startLatch = new CountDownLatch(1); + CountDownLatch endLatch = new CountDownLatch(THREAD_COUNT * 3); + Map successCounts = new ConcurrentHashMap<>(); + Map rejectionCounts = new ConcurrentHashMap<>(); + Map maxConcurrents = new ConcurrentHashMap<>(); + AtomicInteger globalMaxInflight = new AtomicInteger(0); + + for (String partition : Arrays.asList("A", "B", "C")) { + successCounts.put(partition, new AtomicInteger(0)); + rejectionCounts.put(partition, new AtomicInteger(0)); + maxConcurrents.put(partition, new AtomicInteger(0)); + + for (int i = 0; i < THREAD_COUNT; i++) { + executor.submit(() -> { + try { + startLatch.await(); + for (int j = 0; j < ITERATIONS; j++) { + Optional listener = limiter.acquire(partition); + if (listener.isPresent()) { + try { + int current = limiter.getPartition(partition).getInflight(); + maxConcurrents.get(partition).updateAndGet(max -> Math.max(max, current)); + successCounts.get(partition).incrementAndGet(); + globalMaxInflight.updateAndGet(max -> Math.max(max, limiter.getInflight())); + Thread.sleep(1); // Simulate some work + } finally { + listener.get().onSuccess(); + } + } else { + rejectionCounts.get(partition).incrementAndGet(); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + endLatch.countDown(); + } + }); + } + } + + startLatch.countDown(); + endLatch.await(); + executor.shutdown(); + executor.awaitTermination(10, TimeUnit.SECONDS); + + StringBuilder resultSummary = new StringBuilder(); + for (String partition : Arrays.asList("A", "B", "C")) { + int successCount = successCounts.get(partition).get(); + int rejectionCount = rejectionCounts.get(partition).get(); + int maxConcurrent = maxConcurrents.get(partition).get(); + + resultSummary.append(String.format("%s(success=%d,reject=%d,maxConcurrent=%d) ", + partition, successCount, rejectionCount, maxConcurrent)); + + Assert.assertTrue("Max concurrent for " + partition + " should not exceed global limit. " + resultSummary, + maxConcurrent <= LIMIT); + Assert.assertEquals("Total attempts for " + partition + " should equal success + rejections. " + resultSummary, + THREAD_COUNT * ITERATIONS, + successCount + rejectionCount); + } + + Assert.assertTrue("Global max inflight should not exceed total limit. 
" + resultSummary, + globalMaxInflight.get() <= LIMIT + THREAD_COUNT); + } + } diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiterTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiterTest.java index f0ee1d23..6b651437 100644 --- a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiterTest.java +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/LifoBlockingLimiterTest.java @@ -1,22 +1,23 @@ package com.netflix.concurrency.limits.limiter; -import com.kevinmost.junit_retry_rule.Retry; -import com.kevinmost.junit_retry_rule.RetryRule; import com.netflix.concurrency.limits.Limiter; import com.netflix.concurrency.limits.limit.SettableLimit; import org.junit.Assert; -import org.junit.Rule; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Arrays; +import java.util.ArrayList; import java.util.List; import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.Executor; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -24,10 +25,7 @@ public class LifoBlockingLimiterTest { private static final Logger LOGGER = LoggerFactory.getLogger(LifoBlockingLimiterTest.class); - @Rule - public final RetryRule retry = new RetryRule(); - - final Executor executor = Executors.newCachedThreadPool(); + final ExecutorService executor = Executors.newCachedThreadPool(); final SettableLimit limit = SettableLimit.startingAt(4); @@ -69,7 +67,7 @@ public void unblockWhenFullBeforeTimeout() { long start = System.nanoTime(); Optional listener = blockingLimiter.acquire(null); long duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start); - Assert.assertTrue(duration >= 250); + Assert.assertTrue("Duration = " + duration, duration >= 200); Assert.assertTrue(listener.isPresent()); } @@ -119,32 +117,31 @@ public void adaptWhenLimitDecreases() { } @Test - @Retry(times=5) public void verifyFifoOrder() { // Make sure all tokens are acquired List> firstBatch = acquireN(blockingLimiter, 4); // Kick off 5 requests with a small delay to ensure futures are created in the correct order List values = new CopyOnWriteArrayList<>(); - List> futures = IntStream.range(0, 5) + List> futures = IntStream.range(0, 5) .peek(i -> { try { TimeUnit.MILLISECONDS.sleep(50); } catch (InterruptedException e) { } }) - .mapToObj(i -> CompletableFuture.supplyAsync(() -> { + .mapToObj(i -> CompletableFuture.supplyAsync(() -> { Optional listener = blockingLimiter.acquire(null); if (!listener.isPresent()) { - return -1; + values.add(-1); } try { - return i; + values.add(i); } finally { listener.get().onSuccess(); } + return null; }, executor)) - .peek(future -> future.whenComplete((value, error) -> values.add(value))) .collect(Collectors.toList()); // Release the first batch of tokens @@ -168,6 +165,51 @@ public void verifyFifoOrder() { Assert.assertEquals(Arrays.asList(4, 3, 2, 1, 0), values); } + // this test reproduces the condition where a thread acquires a token just as it is timing out. 
+ // before that was fixed, it would lead to a token getting lost. + @Test + public void timeoutAcquireRaceCondition() throws InterruptedException, ExecutionException { + // a limiter with a short timeout, and large backlog (we don't want it to hit that limit) + LifoBlockingLimiter limiter = LifoBlockingLimiter.newBuilder(simpleLimiter) + .backlogSize(1000) + .backlogTimeout(10, TimeUnit.MILLISECONDS) + .build(); + + // acquire all except one token + acquireN(limiter, 3); + + // try to reproduce the problem a couple of times + for (int round = 0; round < 10; round++) { + // indicates if there has already been a timeout + AtomicBoolean firstTimeout = new AtomicBoolean(false); + // take the last token + Limiter.Listener one = limiter.acquire(null).get(); + // in a bunch of threads in parallel, try to take one more. all of these will start to + // time out at around the same time + List> futures = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + futures.add(executor.submit(() -> { + Optional listener = limiter.acquire(null); + if (listener.isPresent()) { + // if we got the last token, release it again. this might give it to a + // thread that is in the process of timing out + listener.get().onSuccess(); + } else if (firstTimeout.compareAndSet(false, true)) { + // if this is the first one that times out, then other threads are going to + // start timing out soon too, so it's time to release a token + one.onSuccess(); + } + return null; + })); + } + // wait for this round to finish + for (Future future : futures) { + future.get(); + } + Assert.assertEquals(3, simpleLimiter.getInflight()); + } + } + private List> acquireN(Limiter limiter, int N) { return IntStream.range(0, N) .mapToObj(i -> limiter.acquire(null)) diff --git a/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/SimpleLimiterTest.java b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/SimpleLimiterTest.java new file mode 100644 index 00000000..f6065c67 --- /dev/null +++ b/concurrency-limits-core/src/test/java/com/netflix/concurrency/limits/limiter/SimpleLimiterTest.java @@ -0,0 +1,155 @@ +package com.netflix.concurrency.limits.limiter; + +import com.netflix.concurrency.limits.Limiter; +import com.netflix.concurrency.limits.Limiter.Listener; +import com.netflix.concurrency.limits.limit.FixedLimit; +import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiterTest.TestPartitionedLimiter; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +public class SimpleLimiterTest { + + @Test + public void useLimiterCapacityUntilTotalLimit() { + SimpleLimiter limiter = SimpleLimiter.newBuilder() + .limit(FixedLimit.of(10)) + .build(); + + for (int i = 0; i < 10; i++) { + Assert.assertTrue(limiter.acquire("live").isPresent()); + } + + // Rejected call after total limit is utilized + Assert.assertFalse(limiter.acquire("live").isPresent()); + Assert.assertEquals(10, limiter.getInflight()); + } + + @Test + public void testReleaseLimit() { + SimpleLimiter limiter = SimpleLimiter.newBuilder() + .limit(FixedLimit.of(10)) + .build(); + + Optional completion = limiter.acquire("live"); + for (int i = 1; i < 10; i++) { + Assert.assertTrue(limiter.acquire("live").isPresent()); + } + + Assert.assertEquals(10, limiter.getInflight()); + 
Assert.assertFalse(limiter.acquire("live").isPresent()); + + // Release token + completion.get().onSuccess(); + Assert.assertEquals(9, limiter.getInflight()); + + Assert.assertTrue(limiter.acquire("live").isPresent()); + Assert.assertEquals(10, limiter.getInflight()); + } + + @Test + public void testSimpleBypassLimiter() { + SimpleLimiter limiter = SimpleLimiter.newBuilder() + .limit(FixedLimit.of(10)) + .bypassLimitResolverInternal((context) -> context.equals("admin")) + .build(); + + for (int i = 0; i < 10; i++) { + Assert.assertTrue(limiter.acquire("live").isPresent()); + Assert.assertEquals(i+1, limiter.getInflight()); + } + + // Verify calls with passing bypass condition will return a token + // whereas remaining calls will be throttled since inflight count is greater than the limit + for (int i = 0; i < 10; i++) { + Assert.assertFalse(limiter.acquire("live").isPresent()); + Assert.assertTrue(limiter.acquire("admin").isPresent()); + } + } + + @Test + public void testSimpleBypassLimiterDefault() { + SimpleLimiter limiter = SimpleLimiter.newBuilder() + .limit(FixedLimit.of(10)) + .build(); + + for (int i = 0; i < 10; i++) { + Assert.assertTrue(limiter.acquire("live").isPresent()); + Assert.assertEquals(i+1, limiter.getInflight()); + } + + // Verify that no calls are bypassed by default + Assert.assertFalse(limiter.acquire("live").isPresent()); + Assert.assertFalse(limiter.acquire("admin").isPresent()); + } + + @Test + public void testConcurrentSimple() throws InterruptedException { + final int THREAD_COUNT = 100; + final int ITERATIONS = 1000; + final int LIMIT = 10; + + SimpleLimiter limiter = (SimpleLimiter) TestPartitionedLimiter.newBuilder() + .limit(FixedLimit.of(LIMIT)) + .partition("default", 1.0) + .build(); + + ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT); + CountDownLatch startLatch = new CountDownLatch(1); + CountDownLatch endLatch = new CountDownLatch(THREAD_COUNT); + AtomicInteger successCount = new AtomicInteger(0); + AtomicInteger rejectionCount = new AtomicInteger(0); + AtomicInteger maxConcurrent = new AtomicInteger(0); + + for (int i = 0; i < THREAD_COUNT; i++) { + executor.submit(() -> { + try { + startLatch.await(); + for (int j = 0; j < ITERATIONS; j++) { + Optional listener = limiter.acquire("default"); + if (listener.isPresent()) { + try { + int current = limiter.getInflight(); + maxConcurrent.updateAndGet(max -> Math.max(max, current)); + successCount.incrementAndGet(); + Thread.sleep(1); // Simulate some work + } finally { + listener.get().onSuccess(); + } + } else { + rejectionCount.incrementAndGet(); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + endLatch.countDown(); + } + }); + } + + startLatch.countDown(); + endLatch.await(); + executor.shutdown(); + executor.awaitTermination(10, TimeUnit.SECONDS); + + StringBuilder resultBuilder = new StringBuilder(); + resultBuilder.append("Success count: ").append(successCount.get()) + .append(" | Rejection count: ").append(rejectionCount.get()) + .append(" | Max concurrent: ").append(maxConcurrent.get()); + String results = resultBuilder.toString(); + + Assert.assertTrue("Max concurrent should not exceed limit. " + results, + maxConcurrent.get() <= LIMIT); + Assert.assertEquals("Total attempts should equal success + rejections. 
" + results, + THREAD_COUNT * ITERATIONS, successCount.get() + rejectionCount.get()); + } + +} diff --git a/concurrency-limits-grpc/build.gradle b/concurrency-limits-grpc/build.gradle index be045bd4..26640bc2 100644 --- a/concurrency-limits-grpc/build.gradle +++ b/concurrency-limits-grpc/build.gradle @@ -5,16 +5,19 @@ plugins { sourceCompatibility = JavaVersion.VERSION_1_8 dependencies { - compile project(":concurrency-limits-core") + implementation "org.slf4j:slf4j-api:${slf4jVersion}" + implementation project(":concurrency-limits-core") - compileOnly "io.grpc:grpc-core:1.9.0" + compileOnly "io.grpc:grpc-core:${grpcVersion}" - testCompile project(":concurrency-limits-spectator") + testImplementation project(":concurrency-limits-spectator") - testCompile "org.mockito:mockito-core:1.+" - testCompile "io.grpc:grpc-netty:1.9.0" - testCompile "io.grpc:grpc-stub:1.9.0" - testCompile "junit:junit-dep:4.10" - testCompile "org.slf4j:slf4j-log4j12:1.7.+" - testCompile "org.apache.commons:commons-math3:3.6.1" + testCompileOnly "junit:junit:${jUnitLegacyVersion}" + testImplementation "org.mockito:mockito-core:${mockitoVersion}" + testImplementation "io.grpc:grpc-netty:${grpcVersion}" + testImplementation "io.grpc:grpc-stub:${grpcVersion}" + testImplementation "org.slf4j:slf4j-log4j12:${slf4jVersion}" + testImplementation "org.apache.commons:commons-math3:3+" + testRuntimeOnly "org.junit.vintage:junit-vintage-engine:${jUnitVersion}" + testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine:${jUnitVersion}" } diff --git a/concurrency-limits-grpc/dependencies.lock b/concurrency-limits-grpc/dependencies.lock new file mode 100644 index 00000000..54d3e6b2 --- /dev/null +++ b/concurrency-limits-grpc/dependencies.lock @@ -0,0 +1,104 @@ +{ + "compileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "io.grpc:grpc-core": { + "locked": "1.9.0" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "runtimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core" + ], + "locked": "1.7.36" + } + }, + "testCompileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.concurrency-limits:concurrency-limits-spectator": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.9" + }, + "io.grpc:grpc-netty": { + "locked": "1.9.0" + }, + "io.grpc:grpc-stub": { + "locked": "1.9.0" + }, + "junit:junit": { + "locked": "4.13.2" + }, + "org.apache.commons:commons-math3": { + "locked": "3.6.1" + }, + "org.mockito:mockito-core": { + "locked": "4.11.0" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + } + }, + "testRuntimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "project": true + }, + "com.netflix.concurrency-limits:concurrency-limits-spectator": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.9" + }, + "io.grpc:grpc-netty": { + "locked": "1.9.0" + }, + "io.grpc:grpc-stub": { + "locked": "1.9.0" + }, + 
"org.apache.commons:commons-math3": { + "locked": "3.6.1" + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.10.2" + }, + "org.junit.vintage:junit-vintage-engine": { + "locked": "5.10.2" + }, + "org.mockito:mockito-core": { + "locked": "4.11.0" + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core", + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.36" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + } + } +} \ No newline at end of file diff --git a/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/GrpcClientLimiterBuilder.java b/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/GrpcClientLimiterBuilder.java index 672ffe88..5beff578 100644 --- a/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/GrpcClientLimiterBuilder.java +++ b/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/client/GrpcClientLimiterBuilder.java @@ -18,8 +18,8 @@ import com.netflix.concurrency.limits.Limiter; import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiter; import com.netflix.concurrency.limits.limiter.BlockingLimiter; -import com.netflix.concurrency.limits.limiter.SimpleLimiter; import io.grpc.CallOptions; +import java.util.function.Predicate; /** * Builder to simplify creating a {@link Limiter} specific to GRPC clients. @@ -34,6 +34,40 @@ public GrpcClientLimiterBuilder partitionByMethod() { public GrpcClientLimiterBuilder partitionByCallOption(CallOptions.Key option) { return partitionResolver(context -> context.getCallOptions().getOption(option)); } + + /** + * Add a chainable bypass resolver predicate from context. Multiple resolvers may be added and if any of the + * predicate condition returns true the call is bypassed without increasing the limiter inflight count and + * affecting the algorithm. Will not bypass any calls by default if no resolvers are added. + * + * @param shouldBypass Predicate condition to bypass limit + * @return Chainable builder + */ + public GrpcClientLimiterBuilder bypassLimitResolver(Predicate shouldBypass) { + return bypassLimitResolverInternal(shouldBypass); + } + + /** + * Bypass limit if the request's full method name matches the specified gRPC method's full name. + * @param fullMethodName The full method name to check against the {@link GrpcClientRequestContext}'s method. + * If the request's method matches this fullMethodName, the limit will be bypassed. + * @return Chainable builder + */ + public GrpcClientLimiterBuilder bypassLimitByMethod(String fullMethodName) { + return bypassLimitResolver(context -> fullMethodName.equals(context.getMethod().getFullMethodName())); + } + + /** + * Bypass limit if the value of the specified call option matches the provided value. + * @param option The call option key to check against the {@link GrpcClientRequestContext}'s call options. + * @param value The value to compare against the value of the specified call option in the request. + * If they match, the limit will be bypassed. + * @param The type of the call option value. 
+ * @return Chainable builder + */ + public GrpcClientLimiterBuilder bypassLimitByCallOption(CallOptions.Key option, T value) { + return bypassLimitResolver(context -> value.equals(context.getCallOptions().getOption(option))); + } /** * When set to true new calls to the channel will block when the limit has been reached instead diff --git a/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/GrpcServerLimiterBuilder.java b/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/GrpcServerLimiterBuilder.java index 03cf9f64..c82ffc6c 100644 --- a/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/GrpcServerLimiterBuilder.java +++ b/concurrency-limits-grpc/src/main/java/com/netflix/concurrency/limits/grpc/server/GrpcServerLimiterBuilder.java @@ -18,6 +18,7 @@ import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiter; import io.grpc.Attributes; import io.grpc.Metadata; +import java.util.function.Predicate; public class GrpcServerLimiterBuilder extends AbstractPartitionedLimiter.Builder { /** @@ -44,6 +45,53 @@ public GrpcServerLimiterBuilder partitionByAttribute(Attributes.Key attr return partitionResolver(context -> context.getCall().getAttributes().get(attribute)); } + + /** + * Add a chainable bypass resolver predicate from context. Multiple resolvers may be added and if any of the + * predicate condition returns true the call is bypassed without increasing the limiter inflight count and + * affecting the algorithm. Will not bypass any calls by default if no resolvers are added. + * + * @param shouldBypass Predicate condition to bypass limit + * @return Chainable builder + */ + public GrpcServerLimiterBuilder bypassLimitResolver(Predicate shouldBypass) { + return bypassLimitResolverInternal(shouldBypass); + } + + /** + * Bypass limit if the request's full method name matches the specified gRPC method's full name. + * @param fullMethodName The full method name to check against the {@link GrpcServerRequestContext}'s method. + * If the request's method matches this fullMethodName, the limit will be bypassed. + * @return Chainable builder + */ + public GrpcServerLimiterBuilder bypassLimitByMethod(String fullMethodName) { + return bypassLimitResolver(context -> fullMethodName.equals(context.getCall().getMethodDescriptor().getFullMethodName())); + } + + /** + * Bypass limit if the value of the specified header matches the provided value. + * @param header The header key to check against the {@link GrpcServerRequestContext}'s headers. + * @param value The value to compare against the value of the specified header in the request. + * If they match, the limit will be bypassed. + * @param The type of the header value. + * @return Chainable builder + */ + public GrpcServerLimiterBuilder bypassLimitByHeader(Metadata.Key header, T value) { + return bypassLimitResolver(context -> value.equals(context.getHeaders().get(header))); + } + + /** + * Bypass limit if the value of the specified attribute matches the provided value. + * @param attribute The attribute key to check against the {@link GrpcServerRequestContext}'s attributes. + * @param value The value to compare against the value of the specified attribute in the request. + * If they match, the limit will be bypassed. + * @param The type of the attribute value. 
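Reviewer note, not part of the patch: the bypassLimit* builder methods added here let selected calls skip the limiter entirely, so they neither consume a token nor feed samples to the limit algorithm. Below is a hedged usage sketch using only methods that appear in this diff; the health-check method name and the FixedLimit value are placeholders, and an adaptive limit would normally be configured instead.

    import com.netflix.concurrency.limits.Limiter;
    import com.netflix.concurrency.limits.grpc.server.ConcurrencyLimitServerInterceptor;
    import com.netflix.concurrency.limits.grpc.server.GrpcServerLimiterBuilder;
    import com.netflix.concurrency.limits.grpc.server.GrpcServerRequestContext;
    import com.netflix.concurrency.limits.limit.FixedLimit;

    public final class BypassInterceptorExample {
        // Builds a server interceptor that never throttles health checks.
        public static ConcurrencyLimitServerInterceptor healthExemptInterceptor() {
            Limiter<GrpcServerRequestContext> limiter = new GrpcServerLimiterBuilder()
                    .bypassLimitByMethod("grpc.health.v1.Health/Check") // placeholder method name
                    .limit(FixedLimit.of(100))                          // placeholder limit
                    .build();
            return ConcurrencyLimitServerInterceptor.newBuilder(limiter).build();
        }
    }

The GrpcClientLimiterBuilder above gains the analogous bypassLimitByMethod and bypassLimitByCallOption hooks, and its limiter is handed to ConcurrencyLimitClientInterceptor in the same way the interceptor tests later in this diff do.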
+ * @return Chainable builder + */ + public GrpcServerLimiterBuilder bypassLimitByAttribute(Attributes.Key attribute, T value) { + return bypassLimitResolver(context -> value.equals(context.getCall().getAttributes().get(attribute))); + } + @Override protected GrpcServerLimiterBuilder self() { return this; diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/client/ConcurrencyLimitClientInterceptorTest.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/client/ConcurrencyLimitClientInterceptorTest.java index 30fbbab6..6772e357 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/client/ConcurrencyLimitClientInterceptorTest.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/client/ConcurrencyLimitClientInterceptorTest.java @@ -1,16 +1,18 @@ package com.netflix.concurrency.limits.grpc.client; import com.netflix.concurrency.limits.Limiter; -import com.netflix.concurrency.limits.grpc.StringMarshaller; +import com.netflix.concurrency.limits.grpc.util.OptionalResultCaptor; +import com.netflix.concurrency.limits.limiter.SimpleLimiter; +import com.netflix.concurrency.limits.spectator.SpectatorMetricRegistry; +import com.netflix.spectator.api.DefaultRegistry; import io.grpc.CallOptions; import io.grpc.Channel; -import io.grpc.MethodDescriptor; -import io.grpc.MethodDescriptor.MethodType; -import io.grpc.Server; -import io.grpc.ServerServiceDefinition; import io.grpc.netty.NettyChannelBuilder; import io.grpc.netty.NettyServerBuilder; +import io.grpc.Server; +import io.grpc.ServerInterceptors; +import io.grpc.ServerServiceDefinition; import io.grpc.stub.ClientCalls; import io.grpc.stub.ServerCalls; @@ -20,16 +22,138 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import org.junit.Before; import org.junit.Ignore; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; + +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.BYPASS_METHOD_DESCRIPTOR; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.METHOD_DESCRIPTOR; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.TEST_METRIC_NAME; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.verifyCounts; public class ConcurrencyLimitClientInterceptorTest { - private static final MethodDescriptor METHOD_DESCRIPTOR = MethodDescriptor.newBuilder() - .setType(MethodType.UNARY) - .setFullMethodName("service/method") - .setRequestMarshaller(StringMarshaller.INSTANCE) - .setResponseMarshaller(StringMarshaller.INSTANCE) - .build(); + + @Rule + public TestName testName = new TestName(); + + Limiter limiter; + Limiter bypassEnabledLimiter; + OptionalResultCaptor listener; + DefaultRegistry registry = new DefaultRegistry(); + SpectatorMetricRegistry spectatorMetricRegistry = new SpectatorMetricRegistry(registry, registry.createId(TEST_METRIC_NAME)); + private Server server; + private Channel channel; + + @Before + public void beforeEachTest() { + limiter = Mockito.spy(SimpleLimiter.newBuilder() + .named(testName.getMethodName()) + .metricRegistry(spectatorMetricRegistry) + .build()); + + bypassEnabledLimiter = Mockito.spy(new GrpcClientLimiterBuilder() + .bypassLimitByMethod("service/bypass") + .named(testName.getMethodName()) + .metricRegistry(spectatorMetricRegistry) + .build()); + + listener = OptionalResultCaptor.forClass(Limiter.Listener.class); + 
Mockito.doAnswer(listener).when(limiter).acquire(Mockito.any()); + } + + private void startServer(Limiter limiter) { + + ServerCalls.UnaryMethod method = (request, responseObserver) -> { + responseObserver.onNext("response"); + responseObserver.onCompleted(); + }; + + try { + server = NettyServerBuilder.forPort(0) + .addService(ServerInterceptors.intercept( + ServerServiceDefinition.builder("service") + .addMethod(METHOD_DESCRIPTOR, ServerCalls.asyncUnaryCall(method)) + .addMethod(BYPASS_METHOD_DESCRIPTOR, ServerCalls.asyncUnaryCall(method)) + .build()) + ) + .build() + .start(); + + channel = NettyChannelBuilder.forAddress("localhost", server.getPort()) + .usePlaintext(true) + .intercept(new ConcurrencyLimitClientInterceptor(limiter)) + .build(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Test + public void verifySuccessCountOnRelease() { + // Setup server + startServer(limiter); + + ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + + Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcClientRequestContext.class)); + Mockito.verify(listener.getResult().get(), Mockito.timeout(1000).times(1)).onSuccess(); + + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); + } + + @Test + public void verifyBypassCountWhenBypassConditionAdded() { + // Setup server with a bypass condition enabled limiter + startServer(bypassEnabledLimiter); + + // Calling a method for which the predicate condition passes + ClientCalls.blockingUnaryCall(channel, BYPASS_METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + Mockito.verify(bypassEnabledLimiter, Mockito.times(1)).acquire(Mockito.isA(GrpcClientRequestContext.class)); + + verifyCounts(0, 0, 0, 0, 1, registry, testName.getMethodName()); + } + + @Test + public void verifyBypassCountWhenBypassConditionFailed() { + // Setup server with a bypass condition enabled limiter + startServer(bypassEnabledLimiter); + + // Calling a method for which the predicate condition fails + ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + Mockito.verify(bypassEnabledLimiter, Mockito.times(1)).acquire(Mockito.isA(GrpcClientRequestContext.class)); + + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); + } + + @Test + public void verifyBypassCountWhenNoBypassConditionAdded() { + // Setup server with no bypass condition enabled + startServer(limiter); + + ClientCalls.blockingUnaryCall(channel, BYPASS_METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcClientRequestContext.class)); + + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); + } + + @Test + public void testMultipleCalls() { + // Setup server with a bypass condition enabled limiter + startServer(bypassEnabledLimiter); + + // Calling both method types + ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + ClientCalls.blockingUnaryCall(channel, BYPASS_METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + ClientCalls.blockingUnaryCall(channel, BYPASS_METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + + Mockito.verify(bypassEnabledLimiter, Mockito.times(4)).acquire(Mockito.isA(GrpcClientRequestContext.class)); + + verifyCounts(0, 0, 2, 0, 2, registry, testName.getMethodName()); + } @Test @Ignore @@ -72,4 +196,5 @@ public void simulation() throws IOException { 
ClientCalls.futureUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT), "request"); } } + } diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/ConcurrencyLimitServerInterceptorTest.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/ConcurrencyLimitServerInterceptorTest.java index a15aa88a..be87389f 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/ConcurrencyLimitServerInterceptorTest.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/ConcurrencyLimitServerInterceptorTest.java @@ -3,19 +3,12 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; import com.netflix.concurrency.limits.Limiter; -import com.netflix.concurrency.limits.grpc.StringMarshaller; -import com.netflix.concurrency.limits.grpc.mockito.OptionalResultCaptor; +import com.netflix.concurrency.limits.grpc.util.OptionalResultCaptor; import com.netflix.concurrency.limits.limiter.SimpleLimiter; import com.netflix.concurrency.limits.spectator.SpectatorMetricRegistry; import com.netflix.spectator.api.DefaultRegistry; -import com.netflix.spectator.api.Meter; -import com.netflix.spectator.api.Timer; import io.grpc.CallOptions; import io.grpc.Channel; -import io.grpc.ClientCall; -import io.grpc.Metadata; -import io.grpc.MethodDescriptor; -import io.grpc.MethodDescriptor.MethodType; import io.grpc.Server; import io.grpc.ServerInterceptors; import io.grpc.ServerServiceDefinition; @@ -34,37 +27,39 @@ import org.mockito.Mockito; import java.io.IOException; -import java.util.Comparator; import java.util.concurrent.TimeUnit; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.BYPASS_METHOD_DESCRIPTOR; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.METHOD_DESCRIPTOR; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.TEST_METRIC_NAME; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.verifyCounts; + public class ConcurrencyLimitServerInterceptorTest { @Rule public TestName testName = new TestName(); - private static final MethodDescriptor METHOD_DESCRIPTOR = MethodDescriptor.newBuilder() - .setType(MethodType.UNARY) - .setFullMethodName("service/method") - .setRequestMarshaller(StringMarshaller.INSTANCE) - .setResponseMarshaller(StringMarshaller.INSTANCE) - .build(); - - private DefaultRegistry registry = new DefaultRegistry(); - - private Server server; - private Channel channel; - Limiter limiter; + Limiter bypassEnabledLimiter; OptionalResultCaptor listener; + DefaultRegistry registry = new DefaultRegistry(); + SpectatorMetricRegistry spectatorMetricRegistry = new SpectatorMetricRegistry(registry, registry.createId(TEST_METRIC_NAME)); + private Server server; + private Channel channel; @Before public void beforeEachTest() { limiter = Mockito.spy(SimpleLimiter.newBuilder() .named(testName.getMethodName()) - .metricRegistry(new SpectatorMetricRegistry(registry, registry.createId("unit.test.limiter"))) + .metricRegistry(spectatorMetricRegistry) .build()); - listener = OptionalResultCaptor.forClass(Limiter.Listener.class); + bypassEnabledLimiter = Mockito.spy(new GrpcServerLimiterBuilder() + .bypassLimitByMethod("service/bypass") + .named(testName.getMethodName()) + .metricRegistry(spectatorMetricRegistry) + .build()); + listener = OptionalResultCaptor.forClass(Limiter.Listener.class); 
Mockito.doAnswer(listener).when(limiter).acquire(Mockito.any()); } @@ -80,12 +75,13 @@ public void afterEachTest() { registry.distributionSummaries().forEach(t -> System.out.println(" " + t.id() + " " + t.count() + " " + t.totalAmount())); } - private void startServer(ServerCalls.UnaryMethod method) { + private void startServer(ServerCalls.UnaryMethod method, Limiter limiter) { try { server = NettyServerBuilder.forPort(0) .addService(ServerInterceptors.intercept( ServerServiceDefinition.builder("service") .addMethod(METHOD_DESCRIPTOR, ServerCalls.asyncUnaryCall(method)) + .addMethod(BYPASS_METHOD_DESCRIPTOR, ServerCalls.asyncUnaryCall(method)) .build(), ConcurrencyLimitServerInterceptor.newBuilder(limiter) .build()) @@ -107,13 +103,13 @@ public void releaseOnSuccess() { startServer((req, observer) -> { observer.onNext("response"); observer.onCompleted(); - }); + }, limiter); ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class)); Mockito.verify(listener.getResult().get(), Mockito.timeout(1000).times(1)).onSuccess(); - verifyCounts(0, 0, 1, 0); + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); } @Test @@ -121,7 +117,7 @@ public void releaseOnError() { // Setup server startServer((req, observer) -> { observer.onError(Status.INVALID_ARGUMENT.asRuntimeException()); - }); + }, limiter); try { ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); @@ -132,7 +128,7 @@ public void releaseOnError() { // Verify Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class)); - verifyCounts(0, 0, 1, 0); + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); } @Test @@ -140,7 +136,7 @@ public void releaseOnUncaughtException() throws IOException { // Setup server startServer((req, observer) -> { throw new RuntimeException("failure"); - }); + }, limiter); try { ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); @@ -152,7 +148,7 @@ public void releaseOnUncaughtException() throws IOException { Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class)); Mockito.verify(listener.getResult().get(), Mockito.timeout(1000).times(1)).onIgnore(); - verifyCounts(0, 1, 0, 0); + verifyCounts(0, 1, 0, 0, 0, registry, testName.getMethodName()); } @Test @@ -162,7 +158,7 @@ public void releaseOnCancellation() { Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); observer.onNext("delayed_response"); observer.onCompleted(); - }); + }, limiter); ListenableFuture future = ClientCalls.futureUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT), "foo"); Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS); @@ -174,7 +170,7 @@ public void releaseOnCancellation() { Mockito.verify(listener.getResult().get(), Mockito.timeout(2000).times(1)).onSuccess(); - verifyCounts(0, 0, 1, 0); + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); } @Test @@ -184,7 +180,7 @@ public void releaseOnDeadlineExceeded() { Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); observer.onNext("delayed_response"); observer.onCompleted(); - }); + }, limiter); try { ClientCalls.blockingUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT.withDeadlineAfter(1, TimeUnit.SECONDS)), "foo"); @@ -197,17 +193,51 @@ public void releaseOnDeadlineExceeded() { Mockito.verify(listener.getResult().get(), 
Mockito.timeout(2000).times(1)).onSuccess(); - verifyCounts(0, 0, 1, 0); + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); } - public void verifyCounts(int dropped, int ignored, int success, int rejected) { - try { - TimeUnit.SECONDS.sleep(1); - } catch (InterruptedException e) { - } - Assert.assertEquals(dropped, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "dropped").count()); - Assert.assertEquals(ignored, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "ignored").count()); - Assert.assertEquals(success, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "success").count()); - Assert.assertEquals(rejected, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "rejected").count()); + @Test + public void verifyBypassCountWhenBypassConditionAdded() { + // Setup server with a bypass condition enabled limiter + startServer((req, observer) -> { + observer.onNext("response"); + observer.onCompleted(); + }, bypassEnabledLimiter); + + // Calling a method for which the predicate condition passes + ClientCalls.blockingUnaryCall(channel, BYPASS_METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + Mockito.verify(bypassEnabledLimiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class)); + + verifyCounts(0, 0, 0, 0, 1, registry, testName.getMethodName()); } + + @Test + public void verifyBypassCountWhenBypassConditionFailed() { + // Setup server with a bypass condition enabled limiter + startServer((req, observer) -> { + observer.onNext("response"); + observer.onCompleted(); + }, bypassEnabledLimiter); + + // Calling a method for which the predicate condition fails + ClientCalls.blockingUnaryCall(channel, METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + Mockito.verify(bypassEnabledLimiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class)); + + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); + } + + @Test + public void verifyBypassCountWhenNoBypassConditionAdded() { + // Setup server with no bypass condition enabled + startServer((req, observer) -> { + observer.onNext("response"); + observer.onCompleted(); + }, limiter); + + ClientCalls.blockingUnaryCall(channel, BYPASS_METHOD_DESCRIPTOR, CallOptions.DEFAULT, "foo"); + Mockito.verify(limiter, Mockito.times(1)).acquire(Mockito.isA(GrpcServerRequestContext.class)); + + verifyCounts(0, 0, 1, 0, 0, registry, testName.getMethodName()); + } + } diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Driver.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Driver.java index 8862b206..ab00500a 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Driver.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Driver.java @@ -1,9 +1,6 @@ package com.netflix.concurrency.limits.grpc.server.example; import com.google.common.util.concurrent.Uninterruptibles; -import com.netflix.concurrency.limits.grpc.client.ConcurrencyLimitClientInterceptor; -import com.netflix.concurrency.limits.limit.FixedLimit; -import com.netflix.concurrency.limits.limiter.SimpleLimiter; import io.grpc.CallOptions; import io.grpc.Channel; import io.grpc.ClientInterceptors; @@ -25,6 +22,8 @@ import java.util.function.Consumer; import java.util.function.Supplier; +import static 
com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.METHOD_DESCRIPTOR; + public class Driver { public static final Metadata.Key ID_HEADER = Metadata.Key.of("id", Metadata.ASCII_STRING_MARSHALLER); @@ -158,7 +157,7 @@ public void run() { long startTime = System.nanoTime(); Uninterruptibles.sleepUninterruptibly(Math.max(0, segment.nextDelay()), TimeUnit.MILLISECONDS); - ClientCalls.asyncUnaryCall(channel.newCall(TestServer.METHOD_DESCRIPTOR, CallOptions.DEFAULT.withWaitForReady()), "request", + ClientCalls.asyncUnaryCall(channel.newCall(METHOD_DESCRIPTOR, CallOptions.DEFAULT.withWaitForReady()), "request", new StreamObserver() { @Override public void onNext(String value) { diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Example.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Example.java index abdc52b5..68a1685c 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Example.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/Example.java @@ -2,7 +2,6 @@ import com.netflix.concurrency.limits.grpc.server.GrpcServerLimiterBuilder; import com.netflix.concurrency.limits.limit.Gradient2Limit; -import com.netflix.concurrency.limits.limit.GradientLimit; import com.netflix.concurrency.limits.limit.WindowedLimit; import java.io.IOException; @@ -10,8 +9,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; public class Example { public static void main(String[] args) throws IOException { @@ -61,7 +58,7 @@ public static void main(String[] args) throws IOException { e.printStackTrace(); } }, 1, 1, TimeUnit.SECONDS); - + // Create a client driver.run(); } diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/TestServer.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/TestServer.java index 21a52948..3d43a124 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/TestServer.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/server/example/TestServer.java @@ -13,12 +13,9 @@ import com.google.common.util.concurrent.Uninterruptibles; import com.netflix.concurrency.limits.Limiter; -import com.netflix.concurrency.limits.grpc.StringMarshaller; import com.netflix.concurrency.limits.grpc.server.ConcurrencyLimitServerInterceptor; import com.netflix.concurrency.limits.grpc.server.GrpcServerRequestContext; -import io.grpc.MethodDescriptor; -import io.grpc.MethodDescriptor.MethodType; import io.grpc.Server; import io.grpc.ServerCallHandler; import io.grpc.ServerInterceptors; @@ -31,16 +28,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static com.netflix.concurrency.limits.grpc.util.InterceptorTestUtil.METHOD_DESCRIPTOR; + public class TestServer { private static final Logger LOG = LoggerFactory.getLogger(TestServer.class); - public static final MethodDescriptor METHOD_DESCRIPTOR = MethodDescriptor.newBuilder() - .setType(MethodType.UNARY) - .setFullMethodName("service/method") - .setRequestMarshaller(StringMarshaller.INSTANCE) - .setResponseMarshaller(StringMarshaller.INSTANCE) - .build(); - private interface Segment { long duration(); long latency(); diff 
--git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/InterceptorTestUtil.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/InterceptorTestUtil.java new file mode 100644 index 00000000..92e83647 --- /dev/null +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/InterceptorTestUtil.java @@ -0,0 +1,41 @@ +package com.netflix.concurrency.limits.grpc.util; + +import com.netflix.spectator.api.DefaultRegistry; +import com.netflix.spectator.api.Registry; +import io.grpc.MethodDescriptor; +import org.junit.Assert; + +import java.util.concurrent.TimeUnit; + +public class InterceptorTestUtil { + + public static final MethodDescriptor METHOD_DESCRIPTOR = MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("service/method") + .setRequestMarshaller(StringMarshaller.INSTANCE) + .setResponseMarshaller(StringMarshaller.INSTANCE) + .build(); + + public static final MethodDescriptor BYPASS_METHOD_DESCRIPTOR = MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName("service/bypass") + .setRequestMarshaller(StringMarshaller.INSTANCE) + .setResponseMarshaller(StringMarshaller.INSTANCE) + .build(); + + public static final String TEST_METRIC_NAME = "unit.test.limiter"; + + public static void verifyCounts(int dropped, int ignored, int success, int rejected, int bypassed, Registry registry, String idTagValue) { + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + } + Assert.assertEquals(dropped, registry.counter(TEST_METRIC_NAME + ".call", "id", idTagValue, "status", "dropped").count()); + Assert.assertEquals(ignored, registry.counter(TEST_METRIC_NAME + ".call", "id", idTagValue, "status", "ignored").count()); + Assert.assertEquals(success, registry.counter(TEST_METRIC_NAME + ".call", "id", idTagValue, "status", "success").count()); + Assert.assertEquals(rejected, registry.counter(TEST_METRIC_NAME + ".call", "id", idTagValue, "status", "rejected").count()); + Assert.assertEquals(bypassed, registry.counter(TEST_METRIC_NAME + ".call", "id", idTagValue, "status", "bypassed").count()); + } + + +} diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/mockito/OptionalResultCaptor.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/OptionalResultCaptor.java similarity index 93% rename from concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/mockito/OptionalResultCaptor.java rename to concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/OptionalResultCaptor.java index 230217fd..b2f33d79 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/mockito/OptionalResultCaptor.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/OptionalResultCaptor.java @@ -1,4 +1,4 @@ -package com.netflix.concurrency.limits.grpc.mockito; +package com.netflix.concurrency.limits.grpc.util; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; diff --git a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/StringMarshaller.java b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/StringMarshaller.java similarity index 94% rename from concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/StringMarshaller.java rename to 
concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/StringMarshaller.java index 44e6376e..f9583423 100644 --- a/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/StringMarshaller.java +++ b/concurrency-limits-grpc/src/test/java/com/netflix/concurrency/limits/grpc/util/StringMarshaller.java @@ -1,4 +1,4 @@ -package com.netflix.concurrency.limits.grpc; +package com.netflix.concurrency.limits.grpc.util; import java.io.ByteArrayInputStream; import java.io.IOException; diff --git a/concurrency-limits-servlet-jakarta/build.gradle b/concurrency-limits-servlet-jakarta/build.gradle new file mode 100644 index 00000000..731adebb --- /dev/null +++ b/concurrency-limits-servlet-jakarta/build.gradle @@ -0,0 +1,43 @@ +plugins { + id 'java' +} + +java { + toolchain { + languageVersion = JavaLanguageVersion.of(11) + } +} + +tasks.named('compileTestJava', JavaCompile).configure { + javaCompiler = javaToolchains.compilerFor { + languageVersion = JavaLanguageVersion.of(17) + } +} + +tasks.withType(Test).configureEach { + javaLauncher = javaToolchains.launcherFor { + languageVersion = JavaLanguageVersion.of(17) + } +} + +// give test dependencies access to compileOnly dependencies to emulate providedCompile +configurations { + testImplementation.extendsFrom compileOnly +} + +dependencies { + api project(":concurrency-limits-core") + compileOnly "jakarta.servlet:jakarta.servlet-api:6.0.0" + implementation "org.slf4j:slf4j-api:${slf4jVersion}" + + testImplementation project(":concurrency-limits-spectator") + testImplementation "org.mockito:mockito-core:${mockitoVersion}" + testImplementation "org.mockito:mockito-junit-jupiter:${mockitoVersion}" + testImplementation "org.slf4j:slf4j-log4j12:${slf4jVersion}" + testImplementation "org.eclipse.jetty:jetty-server:11.+" + testImplementation "org.eclipse.jetty:jetty-servlet:11.+" + testCompileOnly "junit:junit:${jUnitLegacyVersion}" + testImplementation "org.springframework:spring-test:6.+" + testRuntimeOnly "org.junit.vintage:junit-vintage-engine:${jUnitVersion}" + testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine:${jUnitVersion}" +} diff --git a/concurrency-limits-servlet-jakarta/dependencies.lock b/concurrency-limits-servlet-jakarta/dependencies.lock new file mode 100644 index 00000000..873ec968 --- /dev/null +++ b/concurrency-limits-servlet-jakarta/dependencies.lock @@ -0,0 +1,116 @@ +{ + "compileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "jakarta.servlet:jakarta.servlet-api": { + "locked": "6.0.0" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "runtimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core" + ], + "locked": "1.7.36" + } + }, + "testCompileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.concurrency-limits:concurrency-limits-spectator": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.9" + }, + "jakarta.servlet:jakarta.servlet-api": { + "locked": "6.0.0" + }, + "junit:junit": { + "locked": "4.13.2" + }, + "org.eclipse.jetty:jetty-server": { + "locked": "11.0.20" + }, + "org.eclipse.jetty:jetty-servlet": { + "locked": "11.0.20" + }, + "org.mockito:mockito-core": { + 
"locked": "4.11.0" + }, + "org.mockito:mockito-junit-jupiter": { + "locked": "4.11.0" + }, + "org.slf4j:slf4j-api": { + "locked": "2.0.9" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + }, + "org.springframework:spring-test": { + "locked": "6.1.5" + } + }, + "testRuntimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "project": true + }, + "com.netflix.concurrency-limits:concurrency-limits-spectator": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.9" + }, + "jakarta.servlet:jakarta.servlet-api": { + "locked": "6.0.0" + }, + "org.eclipse.jetty:jetty-server": { + "locked": "11.0.20" + }, + "org.eclipse.jetty:jetty-servlet": { + "locked": "11.0.20" + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.10.2" + }, + "org.junit.vintage:junit-vintage-engine": { + "locked": "5.10.2" + }, + "org.mockito:mockito-core": { + "locked": "4.11.0" + }, + "org.mockito:mockito-junit-jupiter": { + "locked": "4.11.0" + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core", + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "2.0.9" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + }, + "org.springframework:spring-test": { + "locked": "6.1.5" + } + } +} \ No newline at end of file diff --git a/concurrency-limits-servlet-jakarta/src/main/java/com/netflix/concurrency/limits/servlet/jakarta/ConcurrencyLimitServletFilter.java b/concurrency-limits-servlet-jakarta/src/main/java/com/netflix/concurrency/limits/servlet/jakarta/ConcurrencyLimitServletFilter.java new file mode 100644 index 00000000..833ac205 --- /dev/null +++ b/concurrency-limits-servlet-jakarta/src/main/java/com/netflix/concurrency/limits/servlet/jakarta/ConcurrencyLimitServletFilter.java @@ -0,0 +1,82 @@ +/** + * Copyright 2023 Netflix, Inc. + *
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.concurrency.limits.servlet.jakarta; + +import com.netflix.concurrency.limits.Limiter; + +import jakarta.servlet.Filter; +import jakarta.servlet.FilterChain; +import jakarta.servlet.FilterConfig; +import jakarta.servlet.ServletException; +import jakarta.servlet.ServletRequest; +import jakarta.servlet.ServletResponse; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.util.Optional; + +/** + * Servlet {@link Filter} that enforces concurrency limits on all requests into the servlet. + * + * @see ServletLimiterBuilder + */ +public class ConcurrencyLimitServletFilter implements Filter { + private static final int STATUS_TOO_MANY_REQUESTS = 429; + private final Limiter<HttpServletRequest> limiter; + private final int throttleStatus; + + public ConcurrencyLimitServletFilter(Limiter<HttpServletRequest> limiter) { + this(limiter, STATUS_TOO_MANY_REQUESTS); + } + + public ConcurrencyLimitServletFilter(Limiter<HttpServletRequest> limiter, int throttleStatus) { + this.limiter = limiter; + this.throttleStatus = throttleStatus; + } + + @Override + public void init(FilterConfig filterConfig) throws ServletException { + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + Optional<Limiter.Listener> listener = limiter.acquire((HttpServletRequest) request); + if (listener.isPresent()) { + try { + chain.doFilter(request, response); + listener.get().onSuccess(); + } catch (Exception e) { + listener.get().onIgnore(); + throw e; + } + } else { + outputThrottleError((HttpServletResponse) response); + } + } + + protected void outputThrottleError(HttpServletResponse response) { + try { + response.setStatus(throttleStatus); + response.getWriter().print("Concurrency limit exceeded"); + } catch (IOException e) { + } + } + + @Override + public void destroy() { + } +} diff --git a/concurrency-limits-servlet-jakarta/src/main/java/com/netflix/concurrency/limits/servlet/jakarta/ServletLimiterBuilder.java b/concurrency-limits-servlet-jakarta/src/main/java/com/netflix/concurrency/limits/servlet/jakarta/ServletLimiterBuilder.java new file mode 100644 index 00000000..f400bb51 --- /dev/null +++ b/concurrency-limits-servlet-jakarta/src/main/java/com/netflix/concurrency/limits/servlet/jakarta/ServletLimiterBuilder.java @@ -0,0 +1,149 @@ +/** + * Copyright 2023 Netflix, Inc. + *
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.concurrency.limits.servlet.jakarta; + +import com.netflix.concurrency.limits.Limiter; +import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiter; + +import jakarta.servlet.http.HttpServletRequest; +import java.security.Principal; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Predicate; + +/** + * Builder to simplify creating a {@link Limiter} specific to a Servlet filter. By default, + * the same concurrency limit is shared by all requests. The limiter can be partitioned + * based on one of many request attributes. Only one type of partition may be specified. + */ +public final class ServletLimiterBuilder extends AbstractPartitionedLimiter.Builder { + /** + * Partition the limit by header + * @return Chainable builder + */ + public ServletLimiterBuilder partitionByHeader(String name) { + return partitionResolver(request -> Optional.ofNullable(request.getHeader(name)).orElse(null)); + } + + /** + * Partition the limit by {@link Principal}. Percentages of the limit are partitioned to named + * groups. Group membership is derived from the provided mapping function. + * @param principalToGroup Mapping function from {@link Principal} to a named group. + * @return Chainable builder + */ + public ServletLimiterBuilder partitionByUserPrincipal(Function principalToGroup) { + return partitionResolver(request -> Optional.ofNullable(request.getUserPrincipal()).map(principalToGroup).orElse(null)); + } + + /** + * Partition the limit by request attribute + * @return Chainable builder + */ + public ServletLimiterBuilder partitionByAttribute(String name) { + return partitionResolver(request -> Optional.ofNullable(request.getAttribute(name)).map(Object::toString).orElse(null)); + } + + /** + * Partition the limit by request parameter + * @return Chainable builder + */ + public ServletLimiterBuilder partitionByParameter(String name) { + return partitionResolver(request -> Optional.ofNullable(request.getParameter(name)).orElse(null)); + } + + /** + * Partition the limit by the full path. Percentages of the limit are partitioned to named + * groups. Group membership is derived from the provided mapping function. + * @param pathToGroup Mapping function from full path to a named group. + * @return Chainable builder + */ + public ServletLimiterBuilder partitionByPathInfo(Function pathToGroup) { + return partitionResolver(request -> Optional.ofNullable(request.getPathInfo()).map(pathToGroup).orElse(null)); + } + + /** + * Add a chainable bypass resolver predicate from context. Multiple resolvers may be added and if any of the + * predicate condition returns true the call is bypassed without increasing the limiter inflight count and + * affecting the algorithm. Will not bypass any calls by default if no resolvers are added. + * + * @param shouldBypass Predicate condition to bypass limit + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitResolver(Predicate shouldBypass) { + return bypassLimitResolverInternal(shouldBypass); + } + + /** + * Bypass limit if the value of the provided header name matches the specified value. + * @param name The name of the header to check. 
+ * This should match exactly with the header name in the {@link HttpServletRequest } context. + * @param value The value to compare against. + * If the value of the header in the context matches this value, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByHeader(String name, String value) { + return bypassLimitResolver((context) -> value.equals(context.getHeader(name))); + } + + /** + * Bypass limit if the value of the provided attribute name matches the specified value. + * @param name The name of the attribute to check. + * This should match exactly with the attribute name in the {@link HttpServletRequest } context. + * @param value The value to compare against. + * If the value of the attribute in the context matches this value, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByAttribute(String name, String value) { + return bypassLimitResolver((context) -> value.equals(context.getAttribute(name).toString())); + } + + /** + * Bypass limit if the value of the provided parameter name matches the specified value. + * @param name The name of the parameter to check. + * This should match exactly with the parameter name in the {@link HttpServletRequest } context. + * @param value The value to compare against. + * If the value of the parameter in the context matches this value, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByParameter(String name, String value) { + return bypassLimitResolver((context) -> value.equals(context.getParameter(name))); + } + + /** + * Bypass limit if the request path info matches the specified path. + * @param pathInfo The path info to check against the {@link HttpServletRequest } pathInfo. + * If the request's pathInfo matches this, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByPathInfo(String pathInfo) { + return bypassLimitResolver((context) -> pathInfo.equals(context.getPathInfo())); + } + + /** + * Bypass limit if the request method matches the specified method. + * @param method The HTTP method (e.g. GET, POST, or PUT) to check against the {@link HttpServletRequest } method. + * If the request's method matches this method, the limit will be bypassed. 
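[Editor's usage sketch, not part of the diff: the chainable partition and bypass methods above are combined on a single builder when constructing the limiter passed to the filter. The wrapper class name LimiterFactory is illustrative; the individual builder calls and values are taken from the tests added in this change.]

    import com.netflix.concurrency.limits.Limiter;
    import com.netflix.concurrency.limits.limit.VegasLimit;
    import com.netflix.concurrency.limits.servlet.jakarta.ServletLimiterBuilder;
    import jakarta.servlet.http.HttpServletRequest;
    import java.security.Principal;

    final class LimiterFactory {
        // One partition type (user principal) plus two bypass rules; GET requests and
        // /admin/health calls pass through without affecting the limiter's inflight count.
        static Limiter<HttpServletRequest> newLimiter() {
            return new ServletLimiterBuilder()
                    .limit(VegasLimit.newDefault())
                    .partitionByUserPrincipal(Principal::getName)
                    .partition("live", 0.8)
                    .partition("batch", 0.2)
                    .bypassLimitByMethod("GET")
                    .bypassLimitByPathInfo("/admin/health")
                    .build();
        }
    }
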
+ * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByMethod(String method) { + return bypassLimitResolver((context) -> method.equals(context.getMethod())); + } + + @Override + protected ServletLimiterBuilder self() { + return this; + } +} diff --git a/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java new file mode 100644 index 00000000..98fcc3b7 --- /dev/null +++ b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java @@ -0,0 +1,84 @@ +package com.netflix.concurrency.limits; + +import com.netflix.concurrency.limits.executors.BlockingAdaptiveExecutor; +import com.netflix.concurrency.limits.limit.FixedLimit; +import com.netflix.concurrency.limits.limit.VegasLimit; +import com.netflix.concurrency.limits.limiter.SimpleLimiter; +import com.netflix.concurrency.limits.servlet.jakarta.ConcurrencyLimitServletFilter; +import com.netflix.concurrency.limits.servlet.jakarta.ServletLimiterBuilder; +import org.eclipse.jetty.servlet.FilterHolder; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; + +import jakarta.servlet.DispatcherType; +import jakarta.servlet.http.HttpServlet; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.security.Principal; +import java.util.EnumSet; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +public class ConcurrencyLimitServletFilterSimulationTest { + @ClassRule + public static HttpServerRule server = new HttpServerRule(context -> { + context.addServlet(HelloServlet.class, "/"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(FixedLimit.of(10)) + .partitionByUserPrincipal(Principal::getName) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + FilterHolder holder = new FilterHolder(); + holder.setFilter(new ConcurrencyLimitServletFilter(limiter)); + + context.addFilter(holder, "/*", EnumSet.of(DispatcherType.REQUEST)); + }); + + @Test + @Ignore + public void simulation() throws Exception { + Limit limit = VegasLimit.newDefault(); + BlockingAdaptiveExecutor executor = new BlockingAdaptiveExecutor( + SimpleLimiter.newBuilder().limit(limit).build()); + AtomicInteger errors = new AtomicInteger(); + AtomicInteger success = new AtomicInteger(); + + Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { + System.out.println(String.format("errors=%d success=%d limit=%s", errors.getAndSet(0), success.getAndSet(0), limit)); + }, 1, 1, TimeUnit.SECONDS); + + while (true) { + executor.execute(() -> { + try { + server.get("/batch"); + success.incrementAndGet(); + } catch (Exception e) { + errors.incrementAndGet(); + throw new RejectedExecutionException(); + } + }); + } + } + + public static class HelloServlet extends HttpServlet { + private static final long serialVersionUID = 1L; + + @Override + protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + try { + TimeUnit.MILLISECONDS.sleep(100); + } catch (InterruptedException e) { + } + response.setContentType("text/html"); + response.setStatus(HttpServletResponse.SC_OK); + 
response.getWriter().println("Hello from HelloServlet"); + } + } +} diff --git a/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java new file mode 100644 index 00000000..701a9a9e --- /dev/null +++ b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java @@ -0,0 +1,162 @@ +package com.netflix.concurrency.limits; + +import com.netflix.concurrency.limits.servlet.jakarta.ConcurrencyLimitServletFilter; +import com.netflix.concurrency.limits.servlet.jakarta.ServletLimiterBuilder; +import com.netflix.concurrency.limits.spectator.SpectatorMetricRegistry; +import com.netflix.spectator.api.DefaultRegistry; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; +import org.springframework.mock.web.MockFilterChain; +import org.springframework.mock.web.MockHttpServletRequest; +import org.springframework.mock.web.MockHttpServletResponse; + +import jakarta.servlet.ServletException; +import jakarta.servlet.http.HttpServletRequest; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import java.util.Optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; + +public class ConcurrencyLimitServletFilterTest { + + @Rule + public TestName testName = new TestName(); + + Limiter limiter; + DefaultRegistry registry = new DefaultRegistry(); + SpectatorMetricRegistry spectatorMetricRegistry = new SpectatorMetricRegistry(registry, registry.createId("unit.test.limiter")); + + @Before + public void beforeEachTest() { + + // Will bypass GET calls or calls with /admin path or both + limiter = Mockito.spy(new ServletLimiterBuilder() + .bypassLimitByMethod("GET") + .bypassLimitByPathInfo("/admin/health") + .named(testName.getMethodName()) + .metricRegistry(spectatorMetricRegistry) + .build()); + } + + @Test + public void testDoFilterAllowed() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + + verifyCounts(0, 0, 1, 0, 0); + } + + @Test + public void testDoFilterThrottled() throws ServletException, IOException { + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + //Empty means to throttle this request + doReturn(Optional.empty()).when(limiter).acquire(any()); + + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(new MockHttpServletRequest(), response, filterChain); + + assertNull("doFilter should not be called on the filterchain", filterChain.getRequest()); + assertEquals("Status should be 429 - too many requests", 429, response.getStatus()); + } + + @Test + public void 
testDoFilterThrottledCustomStatus() throws ServletException, IOException { + final int customThrottleStatus = 503; + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter, customThrottleStatus); + + //Empty means to throttle this request + doReturn(Optional.empty()).when(limiter).acquire(any()); + + MockHttpServletResponse response = new MockHttpServletResponse(); + + filter.doFilter(new MockHttpServletRequest(), response, new MockFilterChain()); + + assertEquals("custom status should be respected", customThrottleStatus, response.getStatus()); + + } + + @Test + public void testDoFilterBypassCheckPassedForMethod() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + request.setMethod("GET"); + request.setPathInfo("/live/path"); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + verifyCounts(0, 0, 0, 0, 1); + + } + + @Test + public void testDoFilterBypassCheckPassedForPath() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + request.setMethod("POST"); + request.setPathInfo("/admin/health"); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + verifyCounts(0, 0, 0, 0, 1); + } + + @Test + public void testDoFilterBypassCheckFailed() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + request.setMethod("POST"); + request.setPathInfo("/live/path"); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + verifyCounts(0, 0, 1, 0, 0); + } + + public void verifyCounts(int dropped, int ignored, int success, int rejected, int bypassed) { + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + } + assertEquals(dropped, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "dropped").count()); + assertEquals(ignored, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "ignored").count()); + assertEquals(success, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "success").count()); + assertEquals(rejected, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "rejected").count()); + assertEquals(bypassed, 
registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "bypassed").count()); + } +} diff --git a/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java new file mode 100644 index 00000000..24ed04b2 --- /dev/null +++ b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java @@ -0,0 +1,172 @@ +package com.netflix.concurrency.limits; + +import com.netflix.concurrency.limits.Limiter.Listener; +import com.netflix.concurrency.limits.limit.VegasLimit; +import com.netflix.concurrency.limits.servlet.jakarta.ServletLimiterBuilder; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import jakarta.servlet.http.HttpServletRequest; +import java.security.Principal; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; + +@RunWith(MockitoJUnitRunner.class) +public class GroupServletLimiterTest { + + @Test + public void userPrincipalMatchesGroup() { + Map principalToGroup = Mockito.spy(new HashMap<>()); + principalToGroup.put("bob", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByUserPrincipal(p -> principalToGroup.get(p.getName())) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = createMockRequestWithPrincipal("bob"); + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(principalToGroup, Mockito.times(1)).get("bob"); + } + + @Test + public void userPrincipalDoesNotMatchGroup() { + Map principalToGroup = Mockito.spy(new HashMap<>()); + principalToGroup.put("bob", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByUserPrincipal(p -> principalToGroup.get(p.getName())) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = createMockRequestWithPrincipal("doesntexist"); + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(principalToGroup, Mockito.times(1)).get("doesntexist"); + } + + @Test + public void nullUserPrincipalDoesNotMatchGroup() { + Map principalToGroup = Mockito.spy(new HashMap<>()); + principalToGroup.put("bob", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByUserPrincipal(p -> principalToGroup.get(p.getName())) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getUserPrincipal()).thenReturn(null); + + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(principalToGroup, Mockito.times(0)).get(Mockito.any()); + } + + @Test + public void nullUserPrincipalNameDoesNotMatchGroup() { + Map principalToGroup = Mockito.spy(new HashMap<>()); + principalToGroup.put("bob", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByUserPrincipal(p -> principalToGroup.get(p.getName())) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = 
createMockRequestWithPrincipal(null); + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(principalToGroup, Mockito.times(1)).get(ArgumentMatchers.isNull()); + } + + @Test + public void pathMatchesGroup() { + Map pathToGroup = Mockito.spy(new HashMap<>()); + pathToGroup.put("/live/path", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByPathInfo(pathToGroup::get) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = createMockRequestWithPathInfo("/live/path"); + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(pathToGroup, Mockito.times(1)).get("/live/path"); + } + + @Test + public void pathDoesNotMatchesGroup() { + Map pathToGroup = Mockito.spy(new HashMap<>()); + pathToGroup.put("/live/path", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByPathInfo(pathToGroup::get) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = createMockRequestWithPathInfo("/other/path"); + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(pathToGroup, Mockito.times(1)).get("/other/path"); + } + + @Test + public void nullPathDoesNotMatchesGroup() { + Map pathToGroup = Mockito.spy(new HashMap<>()); + pathToGroup.put("/live/path", "live"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(VegasLimit.newDefault()) + .partitionByPathInfo(pathToGroup::get) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + HttpServletRequest request = createMockRequestWithPathInfo(null); + Optional listener = limiter.acquire(request); + + Assert.assertTrue(listener.isPresent()); + Mockito.verify(pathToGroup, Mockito.times(0)).get(Mockito.any()); + } + + private HttpServletRequest createMockRequestWithPrincipal(String name) { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Principal principal = Mockito.mock(Principal.class); + + Mockito.when(request.getUserPrincipal()).thenReturn(principal); + Mockito.when(principal.getName()).thenReturn(name); + return request; + } + + private HttpServletRequest createMockRequestWithPathInfo(String name) { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + + Mockito.when(request.getPathInfo()).thenReturn(name); + return request; + } +} diff --git a/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/HttpServerRule.java b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/HttpServerRule.java new file mode 100644 index 00000000..8e39675e --- /dev/null +++ b/concurrency-limits-servlet-jakarta/src/test/java/com/netflix/concurrency/limits/HttpServerRule.java @@ -0,0 +1,73 @@ +package com.netflix.concurrency.limits; + +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHandler; +import org.junit.rules.ExternalResource; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +public class HttpServerRule extends ExternalResource { + private Server server; + private final 
Consumer customizer; + + public HttpServerRule(Consumer customizer) { + this.customizer = customizer; + } + + protected void before() throws Throwable { + this.server = new Server(0); + + ServletHandler handler = new ServletHandler(); + server.setHandler(handler); + + ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS); + context.setContextPath("/"); + customizer.accept(context); + server.setHandler(context); + server.start(); + } + + /** + * Override to tear down your specific external resource. + */ + protected void after() { + if (server != null) { + try { + server.stop(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + public int getPort() { + return ((ServerConnector) server.getConnectors()[0]).getLocalPort(); + } + + public String get(String path) throws Exception { + URL url = new URL("http://localhost:" + getPort() + path); + HttpURLConnection con = (HttpURLConnection) url.openConnection(); + con.setRequestMethod("GET"); + int responseCode = con.getResponseCode(); + if (responseCode != 200) { + throw new Exception(readString(con.getInputStream())); + } else { + return readString(con.getInputStream()); + } + } + + public String readString(InputStream is) throws IOException { + try (BufferedReader buffer = new BufferedReader(new InputStreamReader(is))) { + return buffer.lines().collect(Collectors.joining("\n")); + } + } +} diff --git a/concurrency-limits-servlet/build.gradle b/concurrency-limits-servlet/build.gradle index 3e1bf140..370a30fc 100644 --- a/concurrency-limits-servlet/build.gradle +++ b/concurrency-limits-servlet/build.gradle @@ -5,13 +5,18 @@ plugins { sourceCompatibility = JavaVersion.VERSION_1_8 dependencies { - compile project(":concurrency-limits-core") - compile "javax.servlet:javax.servlet-api:3.1.0" - compile "org.slf4j:slf4j-api:1.7.+" - - testCompile "org.mockito:mockito-core:1.+" - testCompile "org.slf4j:slf4j-log4j12:1.7.+" - testCompile "org.eclipse.jetty:jetty-server:9.4.+" - testCompile "org.eclipse.jetty:jetty-servlet:9.4.+" - testCompile "junit:junit-dep:4.10" + api project(":concurrency-limits-core") + compileOnly "javax.servlet:javax.servlet-api:3.1.0" + implementation "org.slf4j:slf4j-api:${slf4jVersion}" + + testImplementation project(":concurrency-limits-spectator") + testImplementation "org.mockito:mockito-core:${mockitoVersion}" + testImplementation "org.mockito:mockito-junit-jupiter:${mockitoVersion}" + testImplementation "org.slf4j:slf4j-log4j12:${slf4jVersion}" + testImplementation "org.eclipse.jetty:jetty-server:9.4.+" + testImplementation "org.eclipse.jetty:jetty-servlet:9.4.+" + testCompileOnly "junit:junit:${jUnitLegacyVersion}" + testImplementation "org.springframework:spring-test:${springVersion}" + testRuntimeOnly "org.junit.vintage:junit-vintage-engine:${jUnitVersion}" + testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine:${jUnitVersion}" } diff --git a/concurrency-limits-servlet/dependencies.lock b/concurrency-limits-servlet/dependencies.lock new file mode 100644 index 00000000..a84fbe39 --- /dev/null +++ b/concurrency-limits-servlet/dependencies.lock @@ -0,0 +1,110 @@ +{ + "compileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "javax.servlet:javax.servlet-api": { + "locked": "3.1.0" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "runtimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + 
"com.netflix.concurrency-limits:concurrency-limits-core" + ], + "locked": "1.7.36" + } + }, + "testCompileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.concurrency-limits:concurrency-limits-spectator": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.9" + }, + "junit:junit": { + "locked": "4.13.2" + }, + "org.eclipse.jetty:jetty-server": { + "locked": "9.4.54.v20240208" + }, + "org.eclipse.jetty:jetty-servlet": { + "locked": "9.4.54.v20240208" + }, + "org.mockito:mockito-core": { + "locked": "4.11.0" + }, + "org.mockito:mockito-junit-jupiter": { + "locked": "4.11.0" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + }, + "org.springframework:spring-test": { + "locked": "5.3.33" + } + }, + "testRuntimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "project": true + }, + "com.netflix.concurrency-limits:concurrency-limits-spectator": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.9" + }, + "org.eclipse.jetty:jetty-server": { + "locked": "9.4.54.v20240208" + }, + "org.eclipse.jetty:jetty-servlet": { + "locked": "9.4.54.v20240208" + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.10.2" + }, + "org.junit.vintage:junit-vintage-engine": { + "locked": "5.10.2" + }, + "org.mockito:mockito-core": { + "locked": "4.11.0" + }, + "org.mockito:mockito-junit-jupiter": { + "locked": "4.11.0" + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core", + "com.netflix.concurrency-limits:concurrency-limits-spectator" + ], + "locked": "1.7.36" + }, + "org.slf4j:slf4j-log4j12": { + "locked": "1.7.36" + }, + "org.springframework:spring-test": { + "locked": "5.3.33" + } + } +} \ No newline at end of file diff --git a/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ConcurrencyLimitServletFilter.java b/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ConcurrencyLimitServletFilter.java index 18d80399..e7e640cd 100644 --- a/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ConcurrencyLimitServletFilter.java +++ b/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ConcurrencyLimitServletFilter.java @@ -38,9 +38,14 @@ public class ConcurrencyLimitServletFilter implements Filter { private static final int STATUS_TOO_MANY_REQUESTS = 429; private final Limiter limiter; + private final int throttleStatus; public ConcurrencyLimitServletFilter(Limiter limiter) { + this(limiter, STATUS_TOO_MANY_REQUESTS); + } + public ConcurrencyLimitServletFilter(Limiter limiter, int throttleStatus) { this.limiter = limiter; + this.throttleStatus = throttleStatus; } @Override @@ -67,7 +72,7 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha protected void outputThrottleError(HttpServletResponse response) { try { - response.setStatus(STATUS_TOO_MANY_REQUESTS); + response.setStatus(throttleStatus); response.getWriter().print("Concurrency limit exceeded"); } catch (IOException e) { } diff --git 
a/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ServletLimiterBuilder.java b/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ServletLimiterBuilder.java index dcc78b76..1496e785 100644 --- a/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ServletLimiterBuilder.java +++ b/concurrency-limits-servlet/src/main/java/com/netflix/concurrency/limits/servlet/ServletLimiterBuilder.java @@ -18,6 +18,7 @@ import com.netflix.concurrency.limits.Limiter; import com.netflix.concurrency.limits.limiter.AbstractPartitionedLimiter; +import java.util.function.Predicate; import javax.servlet.http.HttpServletRequest; import java.security.Principal; import java.util.Optional; @@ -72,7 +73,75 @@ public ServletLimiterBuilder partitionByParameter(String name) { public ServletLimiterBuilder partitionByPathInfo(Function pathToGroup) { return partitionResolver(request -> Optional.ofNullable(request.getPathInfo()).map(pathToGroup).orElse(null)); } - + + /** + * Add a chainable bypass resolver predicate from context. Multiple resolvers may be added and if any of the + * predicate condition returns true the call is bypassed without increasing the limiter inflight count and + * affecting the algorithm. Will not bypass any calls by default if no resolvers are added. + * + * @param shouldBypass Predicate condition to bypass limit + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitResolver(Predicate shouldBypass) { + return bypassLimitResolverInternal(shouldBypass); + } + + /** + * Bypass the limit if the value of the provided header name matches the specified value. + * @param name The name of the header to check. + * This should match exactly with the header name in the {@link HttpServletRequest } context. + * @param value The value to compare against. + * If the value of the header in the context matches this value, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByHeader(String name, String value) { + return bypassLimitResolver((context) -> value.equals(context.getHeader(name))); + } + + /** + * Bypass limit if the value of the provided attribute name matches the specified value. + * @param name The name of the attribute to check. + * This should match exactly with the attribute name in the {@link HttpServletRequest } context. + * @param value The value to compare against. + * If the value of the attribute in the context matches this value, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByAttribute(String name, String value) { + return bypassLimitResolver((context) -> value.equals(context.getAttribute(name).toString())); + } + + /** + * Bypass limit if the value of the provided parameter name matches the specified value. + * @param name The name of the parameter to check. + * This should match exactly with the parameter name in the {@link HttpServletRequest } context. + * @param value The value to compare against. + * If the value of the parameter in the context matches this value, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByParameter(String name, String value) { + return bypassLimitResolver((context) -> value.equals(context.getParameter(name))); + } + + /** + * Bypass limit if the request path info matches the specified path. + * @param pathInfo The path info to check against the {@link HttpServletRequest } pathInfo. 
+ * If the request's pathInfo matches this, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByPathInfo(String pathInfo) { + return bypassLimitResolver((context) -> pathInfo.equals(context.getPathInfo())); + } + + /** + * Bypass limit if the request method matches the specified method. + * @param method The HTTP method (e.g. GET, POST, or PUT) to check against the {@link HttpServletRequest } method. + * If the request's method matches this method, the limit will be bypassed. + * @return Chainable builder + */ + public ServletLimiterBuilder bypassLimitByMethod(String method) { + return bypassLimitResolver((context) -> method.equals(context.getMethod())); + } + @Override protected ServletLimiterBuilder self() { return this; diff --git a/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java b/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java new file mode 100644 index 00000000..0012a662 --- /dev/null +++ b/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterSimulationTest.java @@ -0,0 +1,86 @@ +package com.netflix.concurrency.limits; + +import com.netflix.concurrency.limits.executors.BlockingAdaptiveExecutor; +import com.netflix.concurrency.limits.limit.FixedLimit; +import com.netflix.concurrency.limits.limit.VegasLimit; +import com.netflix.concurrency.limits.limiter.SimpleLimiter; +import com.netflix.concurrency.limits.servlet.ConcurrencyLimitServletFilter; +import com.netflix.concurrency.limits.servlet.ServletLimiterBuilder; +import org.eclipse.jetty.servlet.FilterHolder; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; + +import javax.servlet.DispatcherType; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.security.Principal; +import java.util.EnumSet; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +public class ConcurrencyLimitServletFilterSimulationTest { + @ClassRule + public static HttpServerRule server = new HttpServerRule(context -> { + context.addServlet(HelloServlet.class, "/"); + + Limiter limiter = new ServletLimiterBuilder() + .limit(FixedLimit.of(10)) + .partitionByUserPrincipal(Principal::getName) + .partition("live", 0.8) + .partition("batch", 0.2) + .build(); + + FilterHolder holder = new FilterHolder(); + holder.setFilter(new ConcurrencyLimitServletFilter(limiter)); + + context.addFilter(holder, "/*", EnumSet.of(DispatcherType.REQUEST)); + }); + + @Test + @Ignore + public void simulation() throws Exception { + Limit limit = VegasLimit.newDefault(); + BlockingAdaptiveExecutor executor = new BlockingAdaptiveExecutor( + SimpleLimiter.newBuilder().limit(limit).build()); + AtomicInteger errors = new AtomicInteger(); + AtomicInteger success = new AtomicInteger(); + + Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { + System.out.println(String.format("errors=%d success=%d limit=%s", errors.getAndSet(0), success.getAndSet(0), limit)); + }, 1, 1, TimeUnit.SECONDS); + + + while (true) { + executor.execute(() -> { + try { + server.get("/batch"); + success.incrementAndGet(); + } catch 
(Exception e) { + errors.incrementAndGet(); + throw new RejectedExecutionException(); + } + }); + } + } + + public static class HelloServlet extends HttpServlet { + private static final long serialVersionUID = 1L; + + @Override + protected void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException { + try { + TimeUnit.MILLISECONDS.sleep(100); + } catch (InterruptedException e) { + } + response.setContentType("text/html"); + response.setStatus(HttpServletResponse.SC_OK); + response.getWriter().println("Hello from HelloServlet"); + } + } +} diff --git a/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java b/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java index fd1b4479..bbcd50ac 100644 --- a/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java +++ b/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/ConcurrencyLimitServletFilterTest.java @@ -1,88 +1,162 @@ package com.netflix.concurrency.limits; -import com.netflix.concurrency.limits.executors.BlockingAdaptiveExecutor; -import com.netflix.concurrency.limits.limit.FixedLimit; -import com.netflix.concurrency.limits.limit.VegasLimit; -import com.netflix.concurrency.limits.limiter.SimpleLimiter; import com.netflix.concurrency.limits.servlet.ConcurrencyLimitServletFilter; import com.netflix.concurrency.limits.servlet.ServletLimiterBuilder; +import com.netflix.concurrency.limits.spectator.SpectatorMetricRegistry; +import com.netflix.spectator.api.DefaultRegistry; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; +import org.springframework.mock.web.MockFilterChain; +import org.springframework.mock.web.MockHttpServletRequest; +import org.springframework.mock.web.MockHttpServletResponse; -import java.io.IOException; -import java.security.Principal; -import java.util.EnumSet; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import javax.servlet.DispatcherType; import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import java.util.Optional; -import org.eclipse.jetty.servlet.FilterHolder; -import org.junit.ClassRule; -import org.junit.Ignore; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; public class ConcurrencyLimitServletFilterTest { - @ClassRule - public static HttpServerRule server = new HttpServerRule(context -> { - context.addServlet(HelloServlet.class, "/"); - - Limiter limiter = new ServletLimiterBuilder() - .limit(FixedLimit.of(10)) - .partitionByUserPrincipal(Principal::getName) - .partition("live", 0.8) - .partition("batch", 0.2) - .build(); - - FilterHolder holder = new FilterHolder(); - holder.setFilter(new ConcurrencyLimitServletFilter(limiter)); - - context.addFilter(holder, "/*", EnumSet.of(DispatcherType.REQUEST)); - }); - + + @Rule + public TestName testName = new TestName(); + + Limiter limiter; + DefaultRegistry registry 
= new DefaultRegistry(); + SpectatorMetricRegistry spectatorMetricRegistry = new SpectatorMetricRegistry(registry, registry.createId("unit.test.limiter")); + + @Before + public void beforeEachTest() { + + // Will bypass GET calls or calls with /admin path or both + limiter = Mockito.spy(new ServletLimiterBuilder() + .bypassLimitByMethod("GET") + .bypassLimitByPathInfo("/admin/health") + .named(testName.getMethodName()) + .metricRegistry(spectatorMetricRegistry) + .build()); + } + @Test - @Ignore - public void simulation() throws Exception { - Limit limit = VegasLimit.newDefault(); - BlockingAdaptiveExecutor executor = new BlockingAdaptiveExecutor( - SimpleLimiter.newBuilder().limit(limit).build()); - AtomicInteger errors = new AtomicInteger(); - AtomicInteger success = new AtomicInteger(); - - Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { - System.out.println(String.format("errors=%d success=%d limit=%s", errors.getAndSet(0), success.getAndSet(0), limit)); - }, 1, 1, TimeUnit.SECONDS); - - - while (true) { - executor.execute(() -> { - try { - server.get("/batch"); - success.incrementAndGet(); - } catch (Exception e) { - errors.incrementAndGet(); - throw new RejectedExecutionException(); - } - }); - } + public void testDoFilterAllowed() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + + verifyCounts(0, 0, 1, 0, 0); + } + + @Test + public void testDoFilterThrottled() throws ServletException, IOException { + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + //Empty means to throttle this request + doReturn(Optional.empty()).when(limiter).acquire(any()); + + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(new MockHttpServletRequest(), response, filterChain); + + assertNull("doFilter should not be called on the filterchain", filterChain.getRequest()); + assertEquals("Status should be 429 - too many requests", 429, response.getStatus()); + } + + @Test + public void testDoFilterThrottledCustomStatus() throws ServletException, IOException { + final int customThrottleStatus = 503; + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter, customThrottleStatus); + + //Empty means to throttle this request + doReturn(Optional.empty()).when(limiter).acquire(any()); + + MockHttpServletResponse response = new MockHttpServletResponse(); + + filter.doFilter(new MockHttpServletRequest(), response, new MockFilterChain()); + + assertEquals("custom status should be respected", customThrottleStatus, response.getStatus()); + + } + + @Test + public void testDoFilterBypassCheckPassedForMethod() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + request.setMethod("GET"); + request.setPathInfo("/live/path"); + MockHttpServletResponse response = new MockHttpServletResponse(); 
+ MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + verifyCounts(0, 0, 0, 0, 1); + + } + + @Test + public void testDoFilterBypassCheckPassedForPath() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + request.setMethod("POST"); + request.setPathInfo("/admin/health"); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + verifyCounts(0, 0, 0, 0, 1); + } + + @Test + public void testDoFilterBypassCheckFailed() throws ServletException, IOException { + + ConcurrencyLimitServletFilter filter = new ConcurrencyLimitServletFilter(limiter); + + MockHttpServletRequest request = new MockHttpServletRequest(); + request.setMethod("POST"); + request.setPathInfo("/live/path"); + MockHttpServletResponse response = new MockHttpServletResponse(); + MockFilterChain filterChain = new MockFilterChain(); + + filter.doFilter(request, response, filterChain); + + assertEquals("Request should be passed to the downstream chain", request, filterChain.getRequest()); + assertEquals("Response should be passed to the downstream chain", response, filterChain.getResponse()); + verifyCounts(0, 0, 1, 0, 0); } - - public static class HelloServlet extends HttpServlet { - private static final long serialVersionUID = 1L; - - @Override - protected void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException { - try { - TimeUnit.MILLISECONDS.sleep(100); - } catch (InterruptedException e) { - } - response.setContentType("text/html"); - response.setStatus(HttpServletResponse.SC_OK); - response.getWriter().println("Hello from HelloServlet"); + + public void verifyCounts(int dropped, int ignored, int success, int rejected, int bypassed) { + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { } + assertEquals(dropped, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "dropped").count()); + assertEquals(ignored, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "ignored").count()); + assertEquals(success, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "success").count()); + assertEquals(rejected, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "rejected").count()); + assertEquals(bypassed, registry.counter("unit.test.limiter.call", "id", testName.getMethodName(), "status", "bypassed").count()); } } diff --git a/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java b/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java index a53f7c43..8eccf640 100644 --- a/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java +++ 
b/concurrency-limits-servlet/src/test/java/com/netflix/concurrency/limits/GroupServletLimiterTest.java @@ -14,9 +14,9 @@ import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Matchers; +import org.mockito.ArgumentMatchers; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class GroupServletLimiterTest { @@ -96,7 +96,7 @@ public void nullUserPrincipalNameDoesNotMatchGroup() { Optional listener = limiter.acquire(request); Assert.assertTrue(listener.isPresent()); - Mockito.verify(principalToGroup, Mockito.times(1)).get(Matchers.isNull(String.class)); + Mockito.verify(principalToGroup, Mockito.times(1)).get(ArgumentMatchers.isNull()); } @Test diff --git a/concurrency-limits-spectator/build.gradle b/concurrency-limits-spectator/build.gradle index 002880bf..59a17561 100644 --- a/concurrency-limits-spectator/build.gradle +++ b/concurrency-limits-spectator/build.gradle @@ -5,9 +5,12 @@ plugins { sourceCompatibility = JavaVersion.VERSION_1_8 dependencies { - compile project(":concurrency-limits-core") + implementation project(":concurrency-limits-core") - compile 'com.netflix.spectator:spectator-api:0.+' - - testCompile "junit:junit-dep:4.10" + api "com.netflix.spectator:spectator-api:${spectatorVersion}" + implementation "org.slf4j:slf4j-api:${slf4jVersion}" + + testCompileOnly "junit:junit:${jUnitLegacyVersion}" + testRuntimeOnly "org.junit.vintage:junit-vintage-engine:${jUnitVersion}" + testRuntimeOnly "org.junit.jupiter:junit-jupiter-engine:${jUnitVersion}" } diff --git a/concurrency-limits-spectator/dependencies.lock b/concurrency-limits-spectator/dependencies.lock new file mode 100644 index 00000000..2dfd05e2 --- /dev/null +++ b/concurrency-limits-spectator/dependencies.lock @@ -0,0 +1,61 @@ +{ + "compileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "locked": "1.7.9" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "runtimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "locked": "1.7.9" + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core" + ], + "locked": "1.7.36" + } + }, + "testCompileClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "locked": "1.7.9" + }, + "junit:junit": { + "locked": "4.13.2" + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.36" + } + }, + "testRuntimeClasspath": { + "com.netflix.concurrency-limits:concurrency-limits-core": { + "project": true + }, + "com.netflix.spectator:spectator-api": { + "locked": "1.7.9" + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.10.2" + }, + "org.junit.vintage:junit-vintage-engine": { + "locked": "5.10.2" + }, + "org.slf4j:slf4j-api": { + "firstLevelTransitive": [ + "com.netflix.concurrency-limits:concurrency-limits-core" + ], + "locked": "1.7.36" + } + } +} \ No newline at end of file diff --git a/concurrency-limits-spectator/src/main/java/com/netflix/concurrency/limits/spectator/SpectatorMetricRegistry.java b/concurrency-limits-spectator/src/main/java/com/netflix/concurrency/limits/spectator/SpectatorMetricRegistry.java index 4c837b5a..17a52097 100644 --- 
a/concurrency-limits-spectator/src/main/java/com/netflix/concurrency/limits/spectator/SpectatorMetricRegistry.java +++ b/concurrency-limits-spectator/src/main/java/com/netflix/concurrency/limits/spectator/SpectatorMetricRegistry.java @@ -18,7 +18,6 @@ import java.util.function.Supplier; import com.netflix.concurrency.limits.MetricRegistry; -import com.netflix.spectator.api.Counter; import com.netflix.spectator.api.DistributionSummary; import com.netflix.spectator.api.Id; import com.netflix.spectator.api.Registry; @@ -32,11 +31,24 @@ public SpectatorMetricRegistry(Registry registry, Id baseId) { this.registry = registry; this.baseId = baseId; } - + @Override public SampleListener distribution(String id, String... tagNameValuePairs) { DistributionSummary summary = registry.distributionSummary(suffixBaseId(id).withTags(tagNameValuePairs)); - return value -> summary.record(value.longValue()); + return new SampleListener() { + @Override + public void addSample(Number value) { + summary.record(value.longValue()); + } + @Override + public void addLongSample(long value) { + summary.record(value); + } + @Override + public void addDoubleSample(double value) { + summary.record((long) value); + } + }; } @Override @@ -53,7 +65,7 @@ public void gauge(String id, Supplier supplier, String... tagNameValuePa public Counter counter(String id, String... tagNameValuePairs) { Id metricId = suffixBaseId(id).withTags(tagNameValuePairs); com.netflix.spectator.api.Counter spectatorCounter = registry.counter(metricId); - return () -> spectatorCounter.increment(); + return spectatorCounter::increment; } private Id suffixBaseId(String suffix) { diff --git a/dependencies.lock b/dependencies.lock new file mode 100644 index 00000000..544b7b4d --- /dev/null +++ b/dependencies.lock @@ -0,0 +1,3 @@ +{ + +} \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 94336fca..d64cd491 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 290541c7..a80b22ce 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.3-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip +networkTimeout=10000 +validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index cccdd3d5..1aa94a42 100755 --- a/gradlew +++ b/gradlew @@ -1,78 +1,127 @@ -#!/usr/bin/env sh +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ############################################################################## -## -## Gradle start up script for UN*X -## +# +# Gradle start up script for POSIX generated by Gradle. 
+# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# ############################################################################## # Attempt to set APP_HOME + # Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). 
cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -81,92 +130,120 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." + fi fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=$((i+1)) + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=$(save "$@") - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then - cd "$(dirname "$0")" +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. 
+# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" fi +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat index e95643d6..6689b85b 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -1,4 +1,20 @@ -@if "%DEBUG%" == "" @echo off +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -9,19 +25,23 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init +if %ERRORLEVEL% equ 0 goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. @@ -35,7 +55,7 @@ goto fail set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe -if exist "%JAVA_EXE%" goto init +if exist "%JAVA_EXE%" goto execute echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% @@ -45,38 +65,26 @@ echo location of your Java installation. goto fail -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. 
-set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/installViaTravis.sh b/installViaTravis.sh deleted file mode 100755 index 4395f4f2..00000000 --- a/installViaTravis.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# This script will build dyno project. - -if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - echo -e "Assemble Pull Request #$TRAVIS_PULL_REQUEST => Branch [$TRAVIS_BRANCH]" - ./gradlew assemble -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" == "" ]; then - echo -e 'Assemble Branch with Snapshot => Branch ['$TRAVIS_BRANCH']' - ./gradlew -Prelease.travisci=true assemble -elif [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_TAG" != "" ]; then - echo -e 'Assemble Branch for Release => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG']' - ./gradlew -Prelease.travisci=true assemble -else - echo -e 'WARN: Should not be here => Branch ['$TRAVIS_BRANCH'] Tag ['$TRAVIS_TAG'] Pull Request ['$TRAVIS_PULL_REQUEST']' - ./gradlew assemble -fi diff --git a/settings.gradle b/settings.gradle index 522ace90..8ad0b907 100644 --- a/settings.gradle +++ b/settings.gradle @@ -2,4 +2,5 @@ rootProject.name='concurrency-limits' include 'concurrency-limits-core' include 'concurrency-limits-grpc' include 'concurrency-limits-servlet' +include 'concurrency-limits-servlet-jakarta' include 'concurrency-limits-spectator'
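For context on the bypass tests added to the ConcurrencyLimitServletFilter test above, here is a minimal, hedged sketch of how a limiter with bypass rules might be wired into that filter. ServletLimiterBuilder is the servlet module's limiter builder; the bypassLimitByMethod and bypassLimitByPathInfo calls are assumptions about the bypass API those tests exercise (the exact builder method names are not shown in this diff), so verify them against the module before copying.

import javax.servlet.Filter;
import javax.servlet.http.HttpServletRequest;

import com.netflix.concurrency.limits.Limiter;
import com.netflix.concurrency.limits.servlet.ConcurrencyLimitServletFilter;
import com.netflix.concurrency.limits.servlet.ServletLimiterBuilder;

public class LimitFilterWiringSketch {
    public static Filter concurrencyLimitFilter() {
        // Requests matching these rules skip the concurrency limit entirely and are
        // reported under the "bypassed" status that the new tests assert on.
        Limiter<HttpServletRequest> limiter = new ServletLimiterBuilder()
                .bypassLimitByMethod("OPTIONS")          // assumed builder method
                .bypassLimitByPathInfo("/admin/health")  // assumed builder method
                .build();
        return new ConcurrencyLimitServletFilter(limiter);
    }
}

How the returned Filter is registered (web.xml, ServletContext.addFilter, or a framework-specific mechanism) is unchanged by this diff.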
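Likewise, a small sketch of the SpectatorMetricRegistry behaviour changed above: the distribution listener now implements addLongSample and addDoubleSample alongside the Number-based addSample, and counter() hands back a method reference onto the underlying Spectator counter. The registry, metric names, and tags below are illustrative only, not values taken from the library.

import com.netflix.concurrency.limits.MetricRegistry;
import com.netflix.concurrency.limits.MetricRegistry.SampleListener;
import com.netflix.concurrency.limits.spectator.SpectatorMetricRegistry;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;

public class SpectatorRegistrySketch {
    public static void main(String[] args) {
        Registry spectator = new DefaultRegistry();
        MetricRegistry metrics =
                new SpectatorMetricRegistry(spectator, spectator.createId("example.limiter"));

        SampleListener inflight = metrics.distribution("inflight", "id", "example");
        inflight.addLongSample(12L);    // recorded directly, no boxing
        inflight.addDoubleSample(3.7);  // truncated to a long by the override shown above
        inflight.addSample(5);          // legacy Number-based path still works

        // counter(...) resolves to spectatorCounter::increment, so this bumps the
        // Spectator counter registered under the suffixed id and tags.
        metrics.counter("call", "id", "example", "status", "success").increment();
    }
}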