diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index 17c53cf..9fb518a 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -33,14 +33,14 @@ jobs: with: gradle-home-cache-cleanup: true - name: Extract implementation info - run: ./gradlew --quiet extractImplementations > implementations.json + run: ./gradlew --quiet extractImplementations - name: Add results to step summary - run: cat implementations.json >> $GITHUB_STEP_SUMMARY + run: cat docs/_includes/implementations.json >> $GITHUB_STEP_SUMMARY - name: Upload Implementations uses: actions/upload-artifact@v3 with: name: implementations - path: implementations.json + path: docs/_includes/* retention-days: 1 run_functional: @@ -61,14 +61,42 @@ jobs: - name: Add results to step summary run: | echo "# Overall comparison" >> $GITHUB_STEP_SUMMARY - cat build/reports/creek/functional-summary.md >> $GITHUB_STEP_SUMMARY + cat docs/_includes/functional-summary.md >> $GITHUB_STEP_SUMMARY echo "# Specific Draft & Implementation results" >> $GITHUB_STEP_SUMMARY - cat build/reports/creek/per-draft.md >> $GITHUB_STEP_SUMMARY + cat docs/_includes/per-draft.md >> $GITHUB_STEP_SUMMARY - name: Upload Implementations uses: actions/upload-artifact@v3 with: - name: functional-summary - path: build/reports/creek/* + name: functional + path: docs/_includes/* + retention-days: 1 + + run_performance: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Set up JDK + uses: actions/setup-java@0ab4596768b603586c0de567f2430c30f5b0d2b0 # v3.13.0 + with: + java-version: '17' + distribution: 'adopt' + - name: Setup Gradle + uses: gradle/gradle-build-action@842c587ad8aa4c68eeba24c396e15af4c2e9f30a # v2.9.0 + with: + gradle-home-cache-cleanup: true + - name: Run performance benchmarks + run: ./gradlew --quiet runBenchmarks + - name: Add results to step summary + run: | + echo "# Json Validator Benchmark Results" >> $GITHUB_STEP_SUMMARY + cat docs/_includes/JsonValidateBenchmark.md >> $GITHUB_STEP_SUMMARY + echo "# Json Serde Benchmark Results" >> $GITHUB_STEP_SUMMARY + cat docs/_includes/JsonSerdeBenchmark.md >> $GITHUB_STEP_SUMMARY + - name: Upload Implementations + uses: actions/upload-artifact@v3 + with: + name: performance + path: docs/_includes/* retention-days: 1 build_pages: @@ -90,22 +118,11 @@ jobs: - name: Setup Pages id: pages uses: actions/configure-pages@f156874f8191504dae5b037505266ed5dda6c382 # v3.0.6 - - name: Download Implementations JSON - uses: actions/download-artifact@v3 - with: - name: implementations - - name: Inject Implementations JSON into site - run: sed -i $'/IMPLEMENTATIONS_JSON/ { r implementations.json\nd }' docs/_docs/* - - name: Download Functional JSON + - name: Download all build artefacts uses: actions/download-artifact@v3 with: - name: functional-summary - - name: Inject Functional JSON into site - run: | - sed -i $'/FUNCTIONAL_SUMMARY_JSON/ { r functional-summary.json\nd }' docs/_docs/* - cat per-draft.md >> "docs/_docs/2. functional.md" + path: docs/_includes/ - name: Build with Jekyll - # Outputs to the './docs/_site' directory by default run: (cd docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}") env: JEKYLL_ENV: production diff --git a/README.md b/README.md index d987ec6..91e091b 100644 --- a/README.md +++ b/README.md @@ -312,7 +312,8 @@ Adding a new validator implementation is relatively straight forward and very we There should be one test per supported draft version. 
See the other methods in these classes for examples. 7. Run `./gradlew` to format your code, perform static analysis and run the tests. Ensure this passes! -8. Raise a PR with your changes. +8. Follow [these instructions](docs) to build and view the website, and ensure your new implementation data is included in tables and charts. +9. Raise a PR with your changes. [1]: https://github.com/eclipse-vertx/vertx-json-schema diff --git a/build.gradle.kts b/build.gradle.kts index cadd2f6..1f1dd6c 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -127,12 +127,6 @@ val runFunctionalTests = tasks.register("runFunctionalTests") { tasks.register("runBenchmarks") { classpath = sourceSets.main.get().runtimeClasspath mainClass.set("org.creekservice.kafka.test.perf.PerformanceMain") - args(listOf( - // Output results in csv format - "-rf", "csv", - // To a named file - "-rff", "benchmark_results.csv" - )) dependsOn(pullTask) } @@ -163,6 +157,11 @@ tasks.check { dependsOn(runFunctionalTests, runBenchmarkSmokeTest, extractImplementations) } +tasks.register("buildTestIncludes") { + description = "Build include files needed to generate the Jekyll website"; + dependsOn(runFunctionalTests, runBenchmarkSmokeTest, extractImplementations) +} + // Dummy / empty tasks required to allow the repo to use the same standard GitHub workflows as other Creek repos: tasks.register("coveralls") tasks.register("cV") diff --git a/docs/.gitignore b/docs/.gitignore index 34a2c9f..2a1d21e 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -12,4 +12,7 @@ Gemfile.lock .jekyll-metadata .sass-cache _asset_bundler_cache -_site \ No newline at end of file +_site + +# Includes generated by the GitHub pages build: +_includes/** \ No newline at end of file diff --git a/docs/README.md b/docs/README.md index 284754d..c44c225 100644 --- a/docs/README.md +++ b/docs/README.md @@ -32,11 +32,35 @@ git commit -m "updating gems" git push --set-upstream origin gems-update ``` -#### 3. Run the local server +#### 3. Generate includes + +For the site to render correctly, certain include files need to be generated. + +Includes are stored in the `docs/_includes` directory and will be ignored by git. + +These include: + +| Include details | Gradle task name | Filename | |---------------------------------------------------------------------------|----------------------------------------|-------------------------| | A JSON document containing the details of all implementations under test. | extractImplementations | implementations.json | | A JSON document containing the summary of the functional testing | runFunctionalTests | functional-summary.json | | A Markdown document containing the per-draft functional testing results | runFunctionalTests | per-draft.md | | A JSON document containing the results of the performance benchmarking | runBenchmarkSmokeTest or runBenchmarks | benchmark_results.json | + +Generate these locally by running: + +```shell +./gradlew buildTestIncludes +``` + +Note: this will not run the full performance benchmarking, as that takes many hours. +Instead, it will run the smoke benchmarks, which generate inaccurate data that is nonetheless good enough for testing the rendering of the website. + +#### 4. Run the local server ```shell (cd docs && bundle exec jekyll serve --livereload --baseurl /json-schema-validation-comparison) ``` This will launch a web server so that you can work on the site locally.
Check it out on [http://localhost:4000/json-schema-validation-comparison](http://localhost:4000/json-schema-validation-comparison). \ No newline at end of file diff --git a/docs/_docs/1. implementations.md b/docs/_docs/1. implementations.md index df5abbd..a074be0 100644 --- a/docs/_docs/1. implementations.md +++ b/docs/_docs/1. implementations.md @@ -25,9 +25,7 @@ against the underlying [ GitHub Repo](http @@ -59,13 +75,11 @@ whereas _optional_ features only account for a maximum 25% of the score. [JSON-Schema-Test-Suite]: https://github.com/json-schema-org/JSON-Schema-Test-Suite +[Bowtie]: https://github.com/bowtie-json-schema/bowtie -### Detailed results - -Below is a more details set of results for each specification draft an implementation supports. -Each table details the number of test cases that pass and fail for each test file in the JSON schema test suite. - -[//]: # (Do not add content below this line, or delete the line following this comment, as the build appends data to this file) diff --git a/docs/_docs/3. performance.md b/docs/_docs/3. performance.md index 22c864e..74daf13 100644 --- a/docs/_docs/3. performance.md +++ b/docs/_docs/3. performance.md @@ -8,4 +8,180 @@ toc: true classes: wide --- -WIP [https://github.com/creek-service/json-schema-validation-comparison/issues/53](https://github.com/creek-service/json-schema-validation-comparison/issues/53). \ No newline at end of file +## Introduction + +The purpose of this section is to determine how quickly each validator implementation can validate JSON documents. + +## Benchmarks + +Each validator implementation is run through the benchmarks below. +Each benchmark uses the [Java Microbenchmark Harness][jhm] to capture meaningful performance metrics. + +The first of these benchmarks covers a wide range of JSON schema functionality, while the second focuses on a more +real-world example, using a small common subset of functionality, in the context of using schema-validated JSON +as a serialization format. Combined, these should give a good comparison of performance. + +### JSON schema test suite benchmark + +This benchmark measures the average time taken to run through all _positive_ test cases in the standard +[JSON Schema Test Suite][JSON-Schema-Test-Suite]. +Results are broken down by implementation and schema draft specification. + +Each of the following graphs compares the average time it took each implementation to validate all the **positive** +test cases, with the following caveats: + +**Note:** +This benchmark excludes _negative_ tests, as most production use cases only see infrequent validation failures. +As the verbosity of error information and the cost of building this information varies greatly between implementations, +we did not want the benchmark to penalise implementations for providing rich error information. +{: .notice--warning} + +**Note:** +This benchmark excludes the time spent building the validator instances and parsing the JSON schema itself. +This decision was made as most production use cases allow the same validator instance to be used many times, +meaning the cost of validation is much more important than the cost of building the validator. +{: .notice--warning} + +**Note:** +The number of test cases in the standard test suite varies between draft specifications, e.g. `DRAFT 7` +has fewer tests than draft `2020-12`. As the benchmark measures the time taken to run through all tests for a draft specification, +comparing performance across different draft specifications can be misleading.
+{: .notice--warning} + +
+ [charts: average time per implementation to validate the positive test cases, one chart per draft specification (HTML chart markup not preserved in this extract)]
+ +### Serde benchmark + +The intent of this benchmark is to provide a more real-world benchmark. A common use of JSON is as a serialization format +for a Java object model: A Java object is serialized to JSON and this JSON is validated against the schema before being +stored or transmitted. At a later point, the JSON is read, validated and deserialized back to the Java object. +Many use cases use a very small subset of the JSON Schema features. + +This benchmark measures the average time taken to serialize a [simple Java object][TestModel], including polymorphism, +to JSON and back, validating the intermediate JSON data on both legs of the journey. + +JSON (de)serialization is generally handled by [Jackson][Jackson], except where this isn't compatible with the validation implementation. +The graphs below include the round-trip time it takes Jackson to serialise and deserialise the same instance, though with no validation, +for comparison. + +The serialized form is roughly 1KB of JSON, and the schema is roughly 2KB. + +The preferred draft specification for this benchmark is `DRAFT 7`. However, not all implementations support this. +Where an implementation does not support `DRAFT 7`, it is tested with `DRAFT 2020-12`. +[Task 59](https://github.com/creek-service/json-schema-validation-comparison/issues/59) will change this to output results for both. + +The schema file for `DRAFT 2020-12` can be found [here][2020-schema], and for `DRAFT 7` [here][7-schema]. + +Each of the following graphs compares the average time it took each implementation to serialize & validate, +then validate & deserialize the simple Java object, with the following caveats: + +**Note:** +As different implementations are tested using different versions of the schema specification, +which may be more or less rich than other versions, comparison across specification versions may be misleading. +{: .notice--warning} + +
+ [charts: average serde round-trip time per implementation (HTML chart markup not preserved in this extract)]
+ +[//]: # (Chart scripts: https://www.chartjs.org/docs/latest/) + + +[//]: # (Table scripts: https://github.com/fiduswriter/Simple-DataTables) + + + +[//]: # (IMPLEMENTATIONS_JSON, PERFORMANCE_JSON) + + + +[JSON-Schema-Test-Suite]: https://github.com/json-schema-org/JSON-Schema-Test-Suite +[jhm]: https://github.com/openjdk/jmh +[TestModel]: https://github.com/creek-service/json-schema-validation-comparison/blob/main/src/main/java/org/creekservice/kafka/test/perf/model/ModelState.java +[Jackson]: https://github.com/FasterXML/jackson-databind +[2020-schema]: https://github.com/creek-service/json-schema-validation-comparison/blob/main/src/main/resources/schema-draft-2020-12.json +[7-schema]: https://github.com/creek-service/json-schema-validation-comparison/blob/main/src/main/resources/schema-draft-7.json \ No newline at end of file diff --git a/src/main/java/org/creekservice/kafka/test/perf/FunctionalMain.java b/src/main/java/org/creekservice/kafka/test/perf/FunctionalMain.java index 8956ee3..389bc9b 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/FunctionalMain.java +++ b/src/main/java/org/creekservice/kafka/test/perf/FunctionalMain.java @@ -17,6 +17,7 @@ package org.creekservice.kafka.test.perf; import static java.util.stream.Collectors.toMap; +import static org.creekservice.kafka.test.perf.ProjectPaths.INCLUDES_ROOT; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.io.IOException; @@ -81,16 +82,14 @@ public static void main(final String... args) { } private static void outputResults(final Map results) { - final Path reportRoot = Paths.get("build/reports/creek/"); - final Summary summary = new Summary(results); - writeOutput(summary.toMarkdown(), reportRoot.resolve("functional-summary.md")); - writeOutput(summary.toJson(), reportRoot.resolve("functional-summary.json")); + writeOutput(summary.toMarkdown(), INCLUDES_ROOT.resolve("functional-summary.md")); + writeOutput(summary.toJson(), INCLUDES_ROOT.resolve("functional-summary.json")); final PerDraftSummary perDraftSummary = new PerDraftSummary(results); - writeOutput(perDraftSummary.toMarkdown(), reportRoot.resolve("per-draft.md")); + writeOutput(perDraftSummary.toMarkdown(), INCLUDES_ROOT.resolve("per-draft.md")); - System.out.println("Results written to " + reportRoot.toAbsolutePath()); + System.out.println("Results written to " + INCLUDES_ROOT.toAbsolutePath()); } private static void writeOutput(final String content, final Path path) { diff --git a/src/main/java/org/creekservice/kafka/test/perf/ImplementationsMain.java b/src/main/java/org/creekservice/kafka/test/perf/ImplementationsMain.java index 28e5084..0a6113d 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/ImplementationsMain.java +++ b/src/main/java/org/creekservice/kafka/test/perf/ImplementationsMain.java @@ -16,15 +16,25 @@ package org.creekservice.kafka.test.perf; +import static org.creekservice.kafka.test.perf.ProjectPaths.INCLUDES_ROOT; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; import org.creekservice.kafka.test.perf.implementations.Implementations; import org.creekservice.kafka.test.perf.util.ImplsJsonFormatter; /** Main entry point for getting information about the implementations under test */ public final class ImplementationsMain { + private static final Path JSON_IMPLS = INCLUDES_ROOT.resolve("implementations.json"); + private ImplementationsMain() {} - public static void main(final String[] args) { - 
System.out.println(ImplsJsonFormatter.implDetailsAsJson(Implementations.all())); + public static void main(final String[] args) throws IOException { + Files.createDirectories(INCLUDES_ROOT); + final String json = ImplsJsonFormatter.implDetailsAsJson(Implementations.all()); + Files.write(JSON_IMPLS, json.getBytes(StandardCharsets.UTF_8)); + } } diff --git a/src/main/java/org/creekservice/kafka/test/perf/PerformanceMain.java b/src/main/java/org/creekservice/kafka/test/perf/PerformanceMain.java index 138b27a..c324bde 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/PerformanceMain.java +++ b/src/main/java/org/creekservice/kafka/test/perf/PerformanceMain.java @@ -16,12 +16,51 @@ package org.creekservice.kafka.test.perf; +import static org.creekservice.kafka.test.perf.ProjectPaths.INCLUDES_ROOT; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.creekservice.kafka.test.perf.performance.util.JsonToMarkdownConvertor; +import org.creekservice.kafka.test.perf.performance.util.PerformanceDataValidator; + /** Entry point for running the performance benchmarks. */ public final class PerformanceMain { + private static final Path JSON_RESULTS = INCLUDES_ROOT.resolve("benchmark_results.json"); + private PerformanceMain() {} - public static void main(final String[] args) throws Exception { - org.openjdk.jmh.Main.main(args); + public static void main(final String[] suppliedArgs) throws Exception { + runBenchmarks(suppliedArgs); + validateJsonOutput(); + writeMarkdownOutput(); + } + + private static void runBenchmarks(final String[] suppliedArgs) throws IOException { + final String[] additionalArgs = { + // Output results in JSON format + "-rf", + "json", + // To a named file + "-rff", + JSON_RESULTS.toString() + }; + + final String[] allArgs = new String[suppliedArgs.length + additionalArgs.length]; + System.arraycopy(suppliedArgs, 0, allArgs, 0, suppliedArgs.length); + System.arraycopy(additionalArgs, 0, allArgs, suppliedArgs.length, additionalArgs.length); + + Files.createDirectories(INCLUDES_ROOT); + + org.openjdk.jmh.Main.main(allArgs); + } + + private static void validateJsonOutput() { + new PerformanceDataValidator().validate(JSON_RESULTS); + } + + private static void writeMarkdownOutput() { + new JsonToMarkdownConvertor().convert(JSON_RESULTS, INCLUDES_ROOT); + } } diff --git a/src/test/java/org/creekservice/kafka/test/perf/implementations/ConfluentImplementationTest.java b/src/main/java/org/creekservice/kafka/test/perf/ProjectPaths.java similarity index 69% rename from src/test/java/org/creekservice/kafka/test/perf/implementations/ConfluentImplementationTest.java rename to src/main/java/org/creekservice/kafka/test/perf/ProjectPaths.java index 563a05c..18ae8c7 100644 --- a/src/test/java/org/creekservice/kafka/test/perf/implementations/ConfluentImplementationTest.java +++ b/src/main/java/org/creekservice/kafka/test/perf/ProjectPaths.java @@ -14,6 +14,15 @@ * limitations under the License.
*/ -package org.creekservice.kafka.test.perf.implementations; +package org.creekservice.kafka.test.perf; -class ConfluentImplementationTest extends ImplementationTest {} +import java.nio.file.Path; +import java.nio.file.Paths; + +public final class ProjectPaths { + + /** Jekyll include directory */ + public static final Path INCLUDES_ROOT = Paths.get("docs/_includes/"); + + private ProjectPaths() {} +} diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/ConfluentImplementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/ConfluentImplementation.java deleted file mode 100644 index d760cfe..0000000 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/ConfluentImplementation.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2023 Creek Contributors (https://github.com/creek-service) - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.creekservice.kafka.test.perf.implementations; - -import static org.creekservice.kafka.test.perf.testsuite.SchemaSpec.DRAFT_04; -import static org.creekservice.kafka.test.perf.testsuite.SchemaSpec.DRAFT_06; -import static org.creekservice.kafka.test.perf.testsuite.SchemaSpec.DRAFT_07; - -import io.confluent.kafka.schemaregistry.ParsedSchema; -import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient; -import io.confluent.kafka.schemaregistry.json.JsonSchemaProvider; -import io.confluent.kafka.serializers.json.KafkaJsonSchemaDeserializer; -import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer; -import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializerConfig; -import java.awt.Color; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import org.apache.kafka.common.serialization.Deserializer; -import org.apache.kafka.common.serialization.Serializer; -import org.creekservice.kafka.test.perf.model.TestModel; -import org.creekservice.kafka.test.perf.testsuite.AdditionalSchemas; -import org.creekservice.kafka.test.perf.testsuite.SchemaSpec; - -@SuppressWarnings("resource") -public class ConfluentImplementation implements Implementation { - - private static final MetaData METADATA = - new MetaData( - "Confluent validating JSON serde", - "Confluent", - Language.Java, - Licence.Apache_v2_0, - Set.of(DRAFT_04, DRAFT_06, DRAFT_07), - "https://docs.confluent.io/platform/current/schema-registry/fundamentals/serdes-develop/serdes-json.html", - new Color(255, 255, 255)); - - private static final String TOPIC_NAME = "t"; - - @Override - public MetaData metadata() { - return METADATA; - } - - @Override - public JsonValidator prepare( - final String schema, final SchemaSpec spec, final AdditionalSchemas additionalSchemas) { - try { - final Optional parsedSchema = - new JsonSchemaProvider().parseSchema(schema, List.of()); - final MockSchemaRegistryClient srClient = new MockSchemaRegistryClient(); - final int schemaId = - srClient.register(TOPIC_NAME + "-value", 
parsedSchema.orElseThrow()); - - final Map validating = new HashMap<>(); - validating.put(KafkaJsonSchemaSerializerConfig.SCHEMA_REGISTRY_URL_CONFIG, "ignored"); - validating.put(KafkaJsonSchemaSerializerConfig.FAIL_INVALID_SCHEMA, true); - validating.put(KafkaJsonSchemaSerializerConfig.AUTO_REGISTER_SCHEMAS, false); - validating.put(KafkaJsonSchemaSerializerConfig.USE_SCHEMA_ID, schemaId); - validating.put(KafkaJsonSchemaSerializerConfig.ID_COMPATIBILITY_STRICT, false); - - final Serializer serializer = - new KafkaJsonSchemaSerializer<>(srClient, validating); - final Deserializer deserializer = - new KafkaJsonSchemaDeserializer<>(srClient, validating, TestModel.class); - - final Map nonValidating = new HashMap<>(validating); - nonValidating.put(KafkaJsonSchemaSerializerConfig.FAIL_INVALID_SCHEMA, false); - final Serializer nonValidatingSerializer = - new KafkaJsonSchemaSerializer<>(srClient, nonValidating); - - return new JsonValidator() { - @Override - public void validate(final String json) { - throw new UnsupportedOperationException("Not under test"); - } - - @Override - public byte[] serialize(final TestModel model, final boolean validate) { - return validate - ? serializer.serialize(TOPIC_NAME, model) - : nonValidatingSerializer.serialize(TOPIC_NAME, model); - } - - @Override - public TestModel deserialize(final byte[] data) { - return deserializer.deserialize(TOPIC_NAME, data); - } - }; - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - // Final, empty finalize method stops spotbugs CT_CONSTRUCTOR_THROW - // Can be moved to base type after https://github.com/spotbugs/spotbugs/issues/2665 - @Override - @SuppressWarnings({"deprecation", "Finalize"}) - protected final void finalize() {} -} diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/DevHarrelImplementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/DevHarrelImplementation.java index 7901d03..98eb620 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/DevHarrelImplementation.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/DevHarrelImplementation.java @@ -45,7 +45,7 @@ public class DevHarrelImplementation implements Implementation { private static final MetaData METADATA = new MetaData( "json-schema (dev.harrel)", - "dev.harrel", + "DevHarrel", Language.Java, Licence.MIT, Set.of(DRAFT_2020_12, DRAFT_2019_09), diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/EveritImplementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/EveritImplementation.java index 2540275..a74fe44 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/EveritImplementation.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/EveritImplementation.java @@ -25,6 +25,7 @@ import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.json.JsonMapper; +import java.awt.Color; import java.io.ByteArrayInputStream; import java.util.Collection; import java.util.Map; @@ -55,7 +56,8 @@ public class EveritImplementation implements Implementation { Language.Java, Licence.Apache_v2_0, Set.of(DRAFT_04, DRAFT_06, DRAFT_07), - "https://github.com/everit-org/json-schema"); + "https://github.com/everit-org/json-schema", + new Color(54, 162, 235)); private ObjectMapper mapper = JsonMapper.builder().build(); diff --git 
a/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementation.java index f79f673..9af27a8 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementation.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementation.java @@ -25,6 +25,7 @@ import java.net.URL; import java.util.Set; import java.util.TreeSet; +import java.util.regex.Pattern; import org.creekservice.kafka.test.perf.model.TestModel; import org.creekservice.kafka.test.perf.testsuite.AdditionalSchemas; import org.creekservice.kafka.test.perf.testsuite.SchemaSpec; @@ -85,6 +86,9 @@ public String toString() { } class MetaData { + + public static final Pattern SHORT_NAME_PATTERN = Pattern.compile("[A-Za-z0-9]+"); + private final String longName; private final String shortName; private final Language language; @@ -97,7 +101,8 @@ class MetaData { * Construct metadata about a specific validator implementation. * * @param longName a more expressive name. - * @param shortName the short name, as used in reports. + * @param shortName the short name, as used in reports. Can only contain alphanumeric + * characters. * @param language the programming language the validator library is written in. * @param licence the licence the validator library is released under. * @param supported the set of supported JSON schema draft specifications. @@ -133,21 +138,11 @@ public MetaData( if (shortName.isBlank()) { throw new IllegalArgumentException("Short name blank"); } - } - /** - * Temp constructor to avoid issues for anyone currently adding new implementation. - * - *

Will be removed soon. - */ - public MetaData( - final String longName, - final String shortName, - final Language language, - final Licence licence, - final Set supported, - final String url) { - this(longName, shortName, language, licence, supported, url, new Color(235, 54, 172)); + if (!SHORT_NAME_PATTERN.matcher(shortName).matches()) { + throw new IllegalArgumentException( + "Short name does not match required pattern: " + SHORT_NAME_PATTERN.pattern()); + } } @JsonProperty("longName") diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementations.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementations.java index 4c842ab..b607866 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementations.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/Implementations.java @@ -27,6 +27,7 @@ public final class Implementations { private static final List IMPLS = List.of( + new JacksonImplementation(), new EveritImplementation(), new JustifyImplementation(), new MedeiaImplementation(), diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/JacksonImplementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/JacksonImplementation.java index 0d8eeef..e741450 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/JacksonImplementation.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/JacksonImplementation.java @@ -37,7 +37,7 @@ public class JacksonImplementation implements Implementation { Licence.Apache_v2_0, Set.of(SchemaSpec.DRAFT_07), "https://github.com/FasterXML/jackson-core", - new Color(255, 255, 255)); + new Color(20, 84, 166)); private ObjectMapper mapper = JsonMapper.builder().build(); diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/SkemaImplementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/SkemaImplementation.java index 8fb5cd1..5a7df18 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/SkemaImplementation.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/SkemaImplementation.java @@ -48,7 +48,7 @@ public class SkemaImplementation implements Implementation { private static final MetaData METADATA = new MetaData( "erosb/json-sKema", - "sKema", + "Skema", Language.Kotlin, Licence.MIT, Set.of(DRAFT_2020_12), diff --git a/src/main/java/org/creekservice/kafka/test/perf/implementations/VertxImplementation.java b/src/main/java/org/creekservice/kafka/test/perf/implementations/VertxImplementation.java index 3fc996e..63c3dff 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/implementations/VertxImplementation.java +++ b/src/main/java/org/creekservice/kafka/test/perf/implementations/VertxImplementation.java @@ -48,7 +48,7 @@ public class VertxImplementation implements Implementation { private static final MetaData METADATA = new MetaData( "Vert.x Json Schema", - "Vert.x", + "Vertx", Language.Java, Licence.Apache_v2_0, SUPPORTED.keySet(), diff --git a/src/main/java/org/creekservice/kafka/test/perf/performance/JsonSerdeBenchmark.java b/src/main/java/org/creekservice/kafka/test/perf/performance/JsonSerdeBenchmark.java index 4d18d10..d37d265 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/performance/JsonSerdeBenchmark.java +++ b/src/main/java/org/creekservice/kafka/test/perf/performance/JsonSerdeBenchmark.java @@ -20,7 +20,6 @@ import java.nio.file.Path; import java.util.Map; -import
org.creekservice.kafka.test.perf.implementations.ConfluentImplementation; import org.creekservice.kafka.test.perf.implementations.DevHarrelImplementation; import org.creekservice.kafka.test.perf.implementations.EveritImplementation; import org.creekservice.kafka.test.perf.implementations.Implementation; @@ -57,23 +56,8 @@ * use the basic JSON schema features: primitives, enums, arrays, polymorphic types and length * assertions. This can be extended in the future it needed. * - *

Most recent results (On 2021 Macbook, M1 Max: 2.06 - 3.22 GHz, in High Power mode, JDK - * 17.0.6): - * - *

- * Benchmark                                               Mode  Cnt  Score   Error  Units
- * JsonSerdeBenchmark.measureJacksonIntermediateRoundTrip  avgt   20  3.852 ± 0.063  us/op
- * JsonSerdeBenchmark.measureRawJacksonRoundTrip           avgt   20  3.890 ± 0.047  us/op
- * JsonSerdeBenchmark.measureConfluentRoundTrip            avgt   20  131.029 ±  1.964  us/op
- * JsonSerdeBenchmark.measureEveritRoundTrip               avgt   20  116.423 ±  2.763  us/op
- * JsonSerdeBenchmark.measureJustifyRoundTrip              avgt   20   75.547 ±  0.819  us/op
- * JsonSerdeBenchmark.measureMedeiaRoundTrip               avgt   20   38.443 ±  1.010  us/op
- * JsonSerdeBenchmark.measureNetworkNtRoundTrip            avgt   20  898.339 ± 30.028  us/op
- * JsonSerdeBenchmark.measureSchemaFriendRoundTrip         avgt   20  127.588 ±  0.897  us/op
- * JsonSerdeBenchmark.measureSkemaRoundTrip                avgt   20  111.483 ±  2.036  us/op
- * JsonSerdeBenchmark.measureSnowRoundTrip                 avgt   20  611.803 ±  6.733  us/op
- * JsonSerdeBenchmark.measureVertxRoundTrip                avgt   20  738.511 ± 45.223  us/op
- * 
+ *

The preferred Schema draft is Draft_7. Draft_2020_12 will be used where implementations do not + * support 7. */ @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(MICROSECONDS) @@ -81,7 +65,7 @@ @Fork(4) // Note: to debug, set fork to 0. // @Warmup(iterations = 0, time = 10) // @Measurement(iterations = 1, time = 10) -@SuppressWarnings("FieldMayBeFinal") // not final to avoid folding. +@SuppressWarnings({"FieldMayBeFinal", "MethodName"}) // not final to avoid folding. public class JsonSerdeBenchmark { static { @@ -95,7 +79,7 @@ public JacksonState() { } @Benchmark - public TestModel measureJacksonRoundTrip(final JacksonState impl, final ModelState model) { + public TestModel measureDraft_07_Jackson(final JacksonState impl, final ModelState model) { return impl.roundTrip(model); } @@ -106,7 +90,7 @@ public MedeiaState() { } @Benchmark - public TestModel measureMedeiaRoundTrip(final MedeiaState impl, final ModelState model) { + public TestModel measureDraft_07_Medeia(final MedeiaState impl, final ModelState model) { return impl.roundTrip(model); } @@ -117,7 +101,7 @@ public EveritState() { } @Benchmark - public TestModel measureEveritRoundTrip(final EveritState impl, final ModelState model) { + public TestModel measureDraft_07_Everit(final EveritState impl, final ModelState model) { return impl.roundTrip(model); } @@ -128,18 +112,7 @@ public SkemaState() { } @Benchmark - public TestModel measureSkemaRoundTrip(final SkemaState impl, final ModelState model) { - return impl.roundTrip(model); - } - - public static class ConfluentState extends ImplementationState { - public ConfluentState() { - super(new ConfluentImplementation()); - } - } - - @Benchmark - public TestModel measureConfluentRoundTrip(final ConfluentState impl, final ModelState model) { + public TestModel measureDraft_2020_12_Skema(final SkemaState impl, final ModelState model) { return impl.roundTrip(model); } @@ -150,7 +123,7 @@ public VertxState() { } @Benchmark - public TestModel measureVertxRoundTrip(final VertxState impl, final ModelState model) { + public TestModel measureDraft_07_Vertx(final VertxState impl, final ModelState model) { return impl.roundTrip(model); } @@ -161,7 +134,7 @@ public SchemaFriendState() { } @Benchmark - public TestModel measureSchemaFriendRoundTrip( + public TestModel measureDraft_07_SchemaFriend( final SchemaFriendState impl, final ModelState model) { return impl.roundTrip(model); } @@ -173,7 +146,7 @@ public NetworkNtState() { } @Benchmark - public TestModel measureNetworkNtRoundTrip(final NetworkNtState impl, final ModelState model) { + public TestModel measureDraft_07_NetworkNt(final NetworkNtState impl, final ModelState model) { return impl.roundTrip(model); } @@ -184,7 +157,7 @@ public SnowState() { } @Benchmark - public TestModel measureSnowRoundTrip(final SnowState impl, final ModelState model) { + public TestModel measureDraft_07_Snow(final SnowState impl, final ModelState model) { return impl.roundTrip(model); } @@ -195,7 +168,7 @@ public JustifyState() { } @Benchmark - public TestModel measureJustifyRoundTrip(final JustifyState impl, final ModelState model) { + public TestModel measureDraft_07_Justify(final JustifyState impl, final ModelState model) { return impl.roundTrip(model); } @@ -206,7 +179,8 @@ public DevHarrelState() { } @Benchmark - public TestModel measureDevHarrelRoundTrip(final DevHarrelState impl, final ModelState model) { + public TestModel measureDraft_2020_12_DevHarrel( + final DevHarrelState impl, final ModelState model) { return impl.roundTrip(model); } diff --git 
a/src/main/java/org/creekservice/kafka/test/perf/performance/JsonValidateBenchmark.java b/src/main/java/org/creekservice/kafka/test/perf/performance/JsonValidateBenchmark.java index 1eaae8a..8faaf70 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/performance/JsonValidateBenchmark.java +++ b/src/main/java/org/creekservice/kafka/test/perf/performance/JsonValidateBenchmark.java @@ -50,42 +50,7 @@ *

The benchmark runs each validator through the standard set of tests * - *

The results show the average time it take each impl to run through the test suite, per draft. - * - *

Most recent results (On 2021 Macbook, M1 Max: 2.06 - 3.22 GHz, in High Power mode, JDK - * 17.0.6): - * - *

- * Benchmark                                                Mode  Cnt     Score    Error  Units
- * JsonValidateBenchmark.measureDraft_2019_09_NetworkNt     avgt   20     6.017 ±  0.216  ms/op
- * JsonValidateBenchmark.measureDraft_2019_09_SchemaFriend  avgt   20     1.482 ±  0.005  ms/op
- * JsonValidateBenchmark.measureDraft_2019_09_Snow          avgt   20   316.178 ± 28.242  ms/op
- * JsonValidateBenchmark.measureDraft_2019_09_Vertx         avgt   20     3.818 ±  0.028  ms/op
- * JsonValidateBenchmark.measureDraft_2020_12_NetworkNt     avgt   20     7.305 ±  0.073  ms/op
- * JsonValidateBenchmark.measureDraft_2020_12_SchemaFriend  avgt   20     1.654 ±  0.005  ms/op
- * JsonValidateBenchmark.measureDraft_2020_12_Skema         avgt   20     2.812 ±  0.015  ms/op
- * JsonValidateBenchmark.measureDraft_2020_12_Vertx         avgt   20     3.669 ±  0.019  ms/op
- * JsonValidateBenchmark.measureDraft_3_SchemaFriend        avgt   20     0.235 ±  0.005  ms/op
- * JsonValidateBenchmark.measureDraft_4_Everit              avgt   20     0.328 ±  0.006  ms/op
- * JsonValidateBenchmark.measureDraft_4_Justify             avgt   20     0.634 ±  0.009  ms/op
- * JsonValidateBenchmark.measureDraft_4_Medeia              avgt   20     0.346 ±  0.006  ms/op
- * JsonValidateBenchmark.measureDraft_4_NetworkNt           avgt   20     1.086 ±  0.004  ms/op
- * JsonValidateBenchmark.measureDraft_4_SchemaFriend        avgt   20     0.480 ±  0.017  ms/op
- * JsonValidateBenchmark.measureDraft_4_Vertx               avgt   20     1.362 ±  0.006  ms/op
- * JsonValidateBenchmark.measureDraft_6_Everit              avgt   20     0.400 ±  0.003  ms/op
- * JsonValidateBenchmark.measureDraft_6_Justify             avgt   20     0.816 ±  0.008  ms/op
- * JsonValidateBenchmark.measureDraft_6_Medeia              avgt   20     0.416 ±  0.007  ms/op
- * JsonValidateBenchmark.measureDraft_6_NetworkNt           avgt   20     1.771 ±  0.044  ms/op
- * JsonValidateBenchmark.measureDraft_6_SchemaFriend        avgt   20     0.700 ±  0.018  ms/op
- * JsonValidateBenchmark.measureDraft_6_Snow                avgt   20    78.241 ±  6.515  ms/op
- * JsonValidateBenchmark.measureDraft_7_Everit              avgt   20     0.508 ±  0.005  ms/op
- * JsonValidateBenchmark.measureDraft_7_Justify             avgt   20     1.044 ±  0.019  ms/op
- * JsonValidateBenchmark.measureDraft_7_Medeia              avgt   20     0.666 ±  0.007  ms/op
- * JsonValidateBenchmark.measureDraft_7_NetworkNt           avgt   20     2.573 ±  0.032  ms/op
- * JsonValidateBenchmark.measureDraft_7_SchemaFriend        avgt   20     0.918 ±  0.012  ms/op
- * JsonValidateBenchmark.measureDraft_7_Snow                avgt   20    76.627 ±  6.336  ms/op
- * JsonValidateBenchmark.measureDraft_7_Vertx               avgt   20     2.141 ±  0.072  ms/op
- * 
+ *

The results show the average time it takes each impl to run through the test suite, per draft. */ @BenchmarkMode(Mode.AverageTime) @OutputTimeUnit(MILLISECONDS) @@ -114,17 +79,17 @@ public MedeiaValidator() { } @Benchmark - public Result measureDraft_4_Medeia(final MedeiaValidator validator) { + public Result measureDraft_04_Medeia(final MedeiaValidator validator) { return validator.validate(SchemaSpec.DRAFT_04); } @Benchmark - public Result measureDraft_6_Medeia(final MedeiaValidator validator) { + public Result measureDraft_06_Medeia(final MedeiaValidator validator) { return validator.validate(SchemaSpec.DRAFT_06); } @Benchmark - public Result measureDraft_7_Medeia(final MedeiaValidator validator) { + public Result measureDraft_07_Medeia(final MedeiaValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } @@ -136,17 +101,17 @@ public EveritValidator() { } @Benchmark - public Result measureDraft_4_Everit(final EveritValidator validator) { + public Result measureDraft_04_Everit(final EveritValidator validator) { return validator.validate(SchemaSpec.DRAFT_04); } @Benchmark - public Result measureDraft_6_Everit(final EveritValidator validator) { + public Result measureDraft_06_Everit(final EveritValidator validator) { return validator.validate(SchemaSpec.DRAFT_06); } @Benchmark - public Result measureDraft_7_Everit(final EveritValidator validator) { + public Result measureDraft_07_Everit(final EveritValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } @@ -170,12 +135,12 @@ public VertxValidator() { } @Benchmark - public Result measureDraft_4_Vertx(final VertxValidator validator) { + public Result measureDraft_04_Vertx(final VertxValidator validator) { return validator.validate(SchemaSpec.DRAFT_04); } @Benchmark - public Result measureDraft_7_Vertx(final VertxValidator validator) { + public Result measureDraft_07_Vertx(final VertxValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } @@ -197,22 +162,22 @@ public SchemaFriendValidator() { } @Benchmark - public Result measureDraft_3_SchemaFriend(final SchemaFriendValidator validator) { + public Result measureDraft_03_SchemaFriend(final SchemaFriendValidator validator) { return validator.validate(SchemaSpec.DRAFT_03); } @Benchmark - public Result measureDraft_4_SchemaFriend(final SchemaFriendValidator validator) { + public Result measureDraft_04_SchemaFriend(final SchemaFriendValidator validator) { return validator.validate(SchemaSpec.DRAFT_04); } @Benchmark - public Result measureDraft_6_SchemaFriend(final SchemaFriendValidator validator) { + public Result measureDraft_06_SchemaFriend(final SchemaFriendValidator validator) { return validator.validate(SchemaSpec.DRAFT_06); } @Benchmark - public Result measureDraft_7_SchemaFriend(final SchemaFriendValidator validator) { + public Result measureDraft_07_SchemaFriend(final SchemaFriendValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } @@ -234,17 +199,17 @@ public NetworkNtValidator() { } @Benchmark - public Result measureDraft_4_NetworkNt(final NetworkNtValidator validator) { + public Result measureDraft_04_NetworkNt(final NetworkNtValidator validator) { return validator.validate(SchemaSpec.DRAFT_04); } @Benchmark - public Result measureDraft_6_NetworkNt(final NetworkNtValidator validator) { + public Result measureDraft_06_NetworkNt(final NetworkNtValidator validator) { return validator.validate(SchemaSpec.DRAFT_06); } @Benchmark - public Result measureDraft_7_NetworkNt(final NetworkNtValidator validator) { + public Result 
measureDraft_07_NetworkNt(final NetworkNtValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } @@ -266,12 +231,12 @@ public SnowValidator() { } @Benchmark - public Result measureDraft_6_Snow(final SnowValidator validator) { + public Result measureDraft_06_Snow(final SnowValidator validator) { return validator.validate(SchemaSpec.DRAFT_06); } @Benchmark - public Result measureDraft_7_Snow(final SnowValidator validator) { + public Result measureDraft_07_Snow(final SnowValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } @@ -288,17 +253,17 @@ public JustifyValidator() { } @Benchmark - public Result measureDraft_4_Justify(final JustifyValidator validator) { + public Result measureDraft_04_Justify(final JustifyValidator validator) { return validator.validate(SchemaSpec.DRAFT_04); } @Benchmark - public Result measureDraft_6_Justify(final JustifyValidator validator) { + public Result measureDraft_06_Justify(final JustifyValidator validator) { return validator.validate(SchemaSpec.DRAFT_06); } @Benchmark - public Result measureDraft_7_Justify(final JustifyValidator validator) { + public Result measureDraft_07_Justify(final JustifyValidator validator) { return validator.validate(SchemaSpec.DRAFT_07); } diff --git a/src/main/java/org/creekservice/kafka/test/perf/performance/util/JsonToMarkdownConvertor.java b/src/main/java/org/creekservice/kafka/test/perf/performance/util/JsonToMarkdownConvertor.java new file mode 100644 index 0000000..bf7f981 --- /dev/null +++ b/src/main/java/org/creekservice/kafka/test/perf/performance/util/JsonToMarkdownConvertor.java @@ -0,0 +1,123 @@ +/* + * Copyright 2023 Creek Contributors (https://github.com/creek-service) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.creekservice.kafka.test.perf.performance.util; + +import static java.lang.System.lineSeparator; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.requireNonNull; + +import java.io.IOException; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.creekservice.kafka.test.perf.performance.util.model.PerformanceResult; + +public final class JsonToMarkdownConvertor { + + private static final List HEADINGS = + List.of("Benchmark", "Mode", "Score", "Score Error (99.9%)", "Unit"); + + private final PerformanceJsonReader reader; + + public JsonToMarkdownConvertor() { + this(new PerformanceJsonReader()); + } + + JsonToMarkdownConvertor(final PerformanceJsonReader reader) { + this.reader = requireNonNull(reader, "reader"); + } + + /** + * Convert a JSON performance result file into markdown. + * + * @param jsonResult the JSON performance result data to read. Generated by JMH. + * @param reportRoot the directory markdown performance results should be written to. 
+ */ + public void convert(final Path jsonResult, final Path reportRoot) { + writeMarkdown(convert(jsonResult), reportRoot); + } + + Map convert(final Path jsonResult) { + final Map> splitResults = + splitResults(reader.read(jsonResult)); + return splitResults.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> convert(e.getValue()))); + } + + private static String convert(final List results) { + final StringBuilder builder = new StringBuilder(); + builder.append(headings()); + results.forEach(result -> builder.append(row(result))); + return builder.toString(); + } + + private static Map> splitResults( + final PerformanceResult[] results) { + return Arrays.stream(results).collect(Collectors.groupingBy(result -> result.testClass())); + } + + private static void writeMarkdown(final Map converted, final Path reportRoot) { + converted.forEach( + (testName, reportContent) -> + writeMarkdown(reportContent, reportRoot.resolve(testName + ".md"))); + } + + private static void writeMarkdown(final String markdown, final Path file) { + try { + Files.write(file, markdown.getBytes(UTF_8)); + } catch (IOException e) { + throw new RuntimeException("Failed to write to " + file, e); + } + } + + private static String headings() { + final String headings = HEADINGS.stream().collect(Collectors.joining(" | ", "| ", " |")); + final String nextLine = + HEADINGS.stream() + .map(heading -> "-".repeat(heading.length())) + .collect(Collectors.joining("-|-", "|-", "-|")); + return headings + lineSeparator() + nextLine + lineSeparator(); + } + + private static String row(final PerformanceResult result) { + final List values = + List.of( + result.testCase(), + result.mode(), + formatDecimal(result.metric().score()), + result.metric() + .scoreError() + .map(JsonToMarkdownConvertor::formatDecimal) + .orElse(""), + result.metric().scoreUnit()); + final String row = values.stream().collect(Collectors.joining(" | ", "| ", " |")); + return row + lineSeparator(); + } + + private static String formatDecimal(final BigDecimal decimal) { + final int sigFig = 5; + final int exponent = decimal.precision() - decimal.scale(); + final int scale = Math.max(0, sigFig - exponent); + final BigDecimal scaledNumber = decimal.setScale(scale, RoundingMode.HALF_EVEN); + return scaledNumber.toPlainString(); + } +} diff --git a/src/main/java/org/creekservice/kafka/test/perf/performance/util/PerformanceDataValidator.java b/src/main/java/org/creekservice/kafka/test/perf/performance/util/PerformanceDataValidator.java new file mode 100644 index 0000000..0b2fcb0 --- /dev/null +++ b/src/main/java/org/creekservice/kafka/test/perf/performance/util/PerformanceDataValidator.java @@ -0,0 +1,126 @@ +/* + * Copyright 2023 Creek Contributors (https://github.com/creek-service) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.creekservice.kafka.test.perf.performance.util; + +import static java.util.Objects.requireNonNull; + +import java.nio.file.Path; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import org.creekservice.kafka.test.perf.implementations.Implementation; +import org.creekservice.kafka.test.perf.implementations.Implementations; +import org.creekservice.kafka.test.perf.performance.util.model.PerformanceResult; +import org.creekservice.kafka.test.perf.testsuite.SchemaSpec; + +/** + * Validates that the benchmark results will be compatible with, and match the patterns expected by, + * performance.md. + */ +public final class PerformanceDataValidator { + + private final PerformanceJsonReader reader; + private final List<Implementation> implementations; + private final Set<String> specs; + + public PerformanceDataValidator() { + this(new PerformanceJsonReader()); + } + + PerformanceDataValidator(final PerformanceJsonReader reader) { + this.reader = requireNonNull(reader, "reader"); + this.implementations = List.copyOf(Implementations.all()); + this.specs = + Arrays.stream(SchemaSpec.values()) + .map(SchemaSpec::capitalisedName) + .collect(Collectors.toCollection(LinkedHashSet::new)); + } + + public void validate(final Path jsonResults) { + final PerformanceResult[] results = reader.read(jsonResults); + + for (final PerformanceResult result : results) { + validate(result); + } + } + + // Benchmark method names should be in the format: measure<Draft>_<ImplShortName> + // e.g. measureDraft_04_Medeia, or measureDraft_2020_12_Skema + private static final Pattern METHOD_PATTERN = + Pattern.compile( + "measure(?<draft>Draft[_0-9]+)_(?<impl>" + + Implementation.MetaData.SHORT_NAME_PATTERN.pattern() + + ")"); + + private void validate(final PerformanceResult result) { + final Matcher matcher = METHOD_PATTERN.matcher(result.testCase()); + if (!matcher.matches()) { + throw new ValidationException( + result, + "with a name that does not match the expected pattern." + + " Expected pattern: " + + METHOD_PATTERN.pattern()); + } + + validateDraft(matcher.group("draft"), result); + validateImplementationName(matcher.group("impl"), result); + } + + private void validateDraft(final String draft, final PerformanceResult result) { + if (!specs.contains(draft)) { + throw new ValidationException( + result, + "with a name that does not contain a valid schema specification draft." + + System.lineSeparator() + + "Available versions: " + + specs + + System.lineSeparator() + + "Detected version: " + + draft); + } + } + + private void validateImplementationName( + final String implShortName, final PerformanceResult result) { + if (implementations.stream() + .noneMatch(impl -> impl.metadata().shortName().equals(implShortName))) { + throw new ValidationException( + result, + "with a name that does not end with a known implementation's short name." + + System.lineSeparator() + + "Detected short name: " + + implShortName); + } + } + + private static final class ValidationException extends IllegalArgumentException { + ValidationException(final PerformanceResult result, final String msg) { + super( + "The JSON benchmark results contain a benchmark method " + + msg + + System.lineSeparator() + + "Method name: " + + result.testClass() + + "."
+ + result.testCase()); + } + } +} diff --git a/src/main/java/org/creekservice/kafka/test/perf/performance/util/PerformanceJsonReader.java b/src/main/java/org/creekservice/kafka/test/perf/performance/util/PerformanceJsonReader.java new file mode 100644 index 0000000..b989295 --- /dev/null +++ b/src/main/java/org/creekservice/kafka/test/perf/performance/util/PerformanceJsonReader.java @@ -0,0 +1,55 @@ +/* + * Copyright 2023 Creek Contributors (https://github.com/creek-service) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.creekservice.kafka.test.perf.performance.util; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.json.JsonMapper; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import org.creekservice.kafka.test.perf.performance.util.model.PerformanceResult; + +final class PerformanceJsonReader { + + PerformanceResult[] read(final Path jsonResult) { + return parseJson(readJson(jsonResult)); + } + + private static String readJson(final Path jsonResult) { + try { + return Files.readString(jsonResult, UTF_8); + } catch (IOException e) { + throw new RuntimeException("Failed to read from " + jsonResult, e); + } + } + + static PerformanceResult[] parseJson(final String jsonResult) { + try { + final ObjectMapper mapper = + JsonMapper.builder() + .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) + .enable(DeserializationFeature.USE_BIG_DECIMAL_FOR_FLOATS) + .build(); + return mapper.readValue(jsonResult, PerformanceResult[].class); + } catch (IOException e) { + throw new RuntimeException("Failed to read from " + jsonResult, e); + } + } +} diff --git a/src/main/java/org/creekservice/kafka/test/perf/performance/util/model/Metric.java b/src/main/java/org/creekservice/kafka/test/perf/performance/util/model/Metric.java new file mode 100644 index 0000000..68bc008 --- /dev/null +++ b/src/main/java/org/creekservice/kafka/test/perf/performance/util/model/Metric.java @@ -0,0 +1,80 @@ +/* + * Copyright 2023 Creek Contributors (https://github.com/creek-service) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.creekservice.kafka.test.perf.performance.util.model;
+
+import static java.util.Objects.requireNonNull;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.math.BigDecimal;
+import java.util.Objects;
+import java.util.Optional;
+
+@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
+public final class Metric {
+
+    private final BigDecimal score;
+    private final Optional<BigDecimal> scoreError;
+    private final String scoreUnit;
+
+    @JsonCreator
+    public Metric(
+            @JsonProperty(value = "score", required = true) final BigDecimal score,
+            @JsonProperty(value = "scoreError", required = true) final Object scoreError,
+            @JsonProperty(value = "scoreUnit", required = true) final String scoreUnit) {
+        this.score = requireNonNull(score, "score");
+        this.scoreError = optionalDecimal(requireNonNull(scoreError, "scoreError"));
+        this.scoreUnit = requireNonNull(scoreUnit, "scoreUnit");
+    }
+
+    public BigDecimal score() {
+        return score;
+    }
+
+    public Optional<BigDecimal> scoreError() {
+        return scoreError;
+    }
+
+    public String scoreUnit() {
+        return scoreUnit;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final Metric metric = (Metric) o;
+        return Objects.equals(score, metric.score)
+                && Objects.equals(scoreError, metric.scoreError)
+                && Objects.equals(scoreUnit, metric.scoreUnit);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(score, scoreError, scoreUnit);
+    }
+
+    private static Optional<BigDecimal> optionalDecimal(final Object decimal) {
+        return BigDecimal.class.isAssignableFrom(decimal.getClass())
+                ? Optional.of((BigDecimal) decimal)
+                : Optional.empty(); // Handles "NaN" case.
+    }
+}
diff --git a/src/main/java/org/creekservice/kafka/test/perf/performance/util/model/PerformanceResult.java b/src/main/java/org/creekservice/kafka/test/perf/performance/util/model/PerformanceResult.java
new file mode 100644
index 0000000..f07c104
--- /dev/null
+++ b/src/main/java/org/creekservice/kafka/test/perf/performance/util/model/PerformanceResult.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2023 Creek Contributors (https://github.com/creek-service)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.creekservice.kafka.test.perf.performance.util.model; + +import static java.util.Objects.requireNonNull; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +public final class PerformanceResult { + + private static final String PERFORMANCE_PACKAGE = + "org.creekservice.kafka.test.perf.performance."; + + private final String testClass; + private final String testCase; + + private final String mode; + private final Metric metric; + + @JsonCreator + public PerformanceResult( + @JsonProperty(value = "benchmark", required = true) final String benchmark, + @JsonProperty(value = "mode", required = true) final String mode, + @JsonProperty(value = "primaryMetric", required = true) final Metric primaryMetric) { + this.testClass = extractTestClass(requireNonNull(benchmark, "benchmark")); + this.testCase = extractTestCase(benchmark); + this.mode = requireNonNull(mode, "mode"); + this.metric = requireNonNull(primaryMetric, "primaryMetric"); + } + + public String testClass() { + return testClass; + } + + public String testCase() { + return testCase; + } + + public String mode() { + return mode; + } + + public Metric metric() { + return metric; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final PerformanceResult that = (PerformanceResult) o; + return Objects.equals(testClass, that.testClass) + && Objects.equals(testCase, that.testCase) + && Objects.equals(mode, that.mode) + && Objects.equals(metric, that.metric); + } + + @Override + public int hashCode() { + return Objects.hash(testClass, testCase, mode, metric); + } + + private static String extractTestClass(final String benchmark) { + final String test = benchmark.substring(PERFORMANCE_PACKAGE.length()); + return test.substring(0, test.indexOf(".")); + } + + private static String extractTestCase(final String benchmark) { + return benchmark.substring(benchmark.lastIndexOf(".") + 1); + } +} diff --git a/src/main/java/org/creekservice/kafka/test/perf/testsuite/SchemaSpec.java b/src/main/java/org/creekservice/kafka/test/perf/testsuite/SchemaSpec.java index 2324251..4218233 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/testsuite/SchemaSpec.java +++ b/src/main/java/org/creekservice/kafka/test/perf/testsuite/SchemaSpec.java @@ -84,6 +84,10 @@ public URI uri() { return uri; } + public String capitalisedName() { + return Character.toUpperCase(name().charAt(0)) + name().substring(1).toLowerCase(); + } + public static Optional fromDir(final String dirName) { return Arrays.stream(values()).filter(spec -> spec.dirName.equals(dirName)).findAny(); } diff --git a/src/main/java/org/creekservice/kafka/test/perf/testsuite/output/PerDraftSummary.java b/src/main/java/org/creekservice/kafka/test/perf/testsuite/output/PerDraftSummary.java index b2ce6aa..3b9ad6e 100644 --- a/src/main/java/org/creekservice/kafka/test/perf/testsuite/output/PerDraftSummary.java +++ b/src/main/java/org/creekservice/kafka/test/perf/testsuite/output/PerDraftSummary.java @@ -17,24 +17,19 @@ package org.creekservice.kafka.test.perf.testsuite.output; import static java.lang.System.lineSeparator; -import static java.util.Objects.requireNonNull; import static java.util.stream.Collectors.toMap; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.nio.file.Path; -import java.util.Comparator; import java.util.List; import java.util.Map; import 
java.util.TreeMap;
 import java.util.function.BinaryOperator;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 import org.creekservice.api.test.util.TestPaths;
 import org.creekservice.kafka.test.perf.implementations.Implementation;
 import org.creekservice.kafka.test.perf.testsuite.JsonSchemaTestSuite;
 import org.creekservice.kafka.test.perf.testsuite.SchemaSpec;
 import org.creekservice.kafka.test.perf.util.Table;
-import org.jetbrains.annotations.NotNull;
 
 public final class PerDraftSummary {
 
@@ -42,83 +37,31 @@ public final class PerDraftSummary {
             TestPaths.moduleRoot("json-schema-validation-comparison")
                     .resolve("build/json-schema-test-suite/tests");
 
-    private final Map<Key, Table> results;
+    private final Map<String, ImplTables> results;
 
     public PerDraftSummary(final Map<Implementation, JsonSchemaTestSuite.Result> results) {
         this.results =
                 results.entrySet().stream()
-                        .flatMap(e -> buildResults(e.getKey(), e.getValue()))
                         .collect(
                                 toMap(
-                                        Map.Entry::getKey,
-                                        e -> e.getValue().build(),
+                                        e -> e.getKey().metadata().shortName(),
+                                        e -> new ImplTables(e.getValue()),
                                         throwOnDuplicate(),
                                         TreeMap::new));
     }
 
     public String toMarkdown() {
         return results.entrySet().stream()
-                .map(
-                        e ->
-                                "#### "
-                                        + e.getKey()
-                                        + lineSeparator()
-                                        + lineSeparator()
-                                        + e.getValue().toMarkdown())
+                .map(e -> "#### " + e.getKey() + lineSeparator() + e.getValue().toMarkdown())
                 .collect(Collectors.joining(lineSeparator()));
     }
 
-    private Stream<Map.Entry<Key, Builder>> buildResults(
-            final Implementation impl, final JsonSchemaTestSuite.Result results) {
-        final Map<Key, Builder> output = new TreeMap<>();
-        results.visit(
-                (spec, result) -> {
-                    output.computeIfAbsent(
-                                    new Key(spec, impl.metadata().shortName()), k -> new Builder())
-                            .add(result, spec);
-                });
-        return output.entrySet().stream();
-    }
-
-    private static BinaryOperator<Table> throwOnDuplicate() {
+    private static <T> BinaryOperator<T> throwOnDuplicate() {
         return (m1, m2) -> {
             throw new IllegalStateException("Duplicate!");
         };
     }
 
-    @SuppressFBWarnings("EQ_COMPARETO_USE_OBJECT_EQUALS")
-    private static final class Key implements Comparable<Key> {
-
-        private static final Comparator<Key> COMPARATOR =
-                Comparator.comparing(Key::spec).thenComparing(Key::impl);
-
-        private final SchemaSpec spec;
-        private final String impl;
-
-        private Key(final SchemaSpec spec, final String impl) {
-            this.spec = requireNonNull(spec, "spec");
-            this.impl = requireNonNull(impl, "impl");
-        }
-
-        SchemaSpec spec() {
-            return spec;
-        }
-
-        String impl() {
-            return impl;
-        }
-
-        @Override
-        public int compareTo(@NotNull final Key o) {
-            return COMPARATOR.compare(this, o);
-        }
-
-        @Override
-        public String toString() {
-            return impl + ": " + spec;
-        }
-    }
-
     private static class Counts {
         private int pass;
         private int fail;
@@ -156,4 +99,37 @@ public Table build() {
             return table;
         }
     }
+
+    private static class ImplTables {
+
+        private final Map<SchemaSpec, Table> tables;
+
+        ImplTables(final JsonSchemaTestSuite.Result results) {
+            final Map<SchemaSpec, Builder> output = new TreeMap<>();
+            results.visit(
+                    (spec, result) ->
+                            output.computeIfAbsent(spec, k -> new Builder()).add(result, spec));
+
+            this.tables =
+                    output.entrySet().stream()
+                            .collect(
+                                    Collectors.toMap(
+                                            Map.Entry::getKey,
+                                            e -> e.getValue().build(),
+                                            throwOnDuplicate(),
+                                            TreeMap::new));
+        }
+
+        public String toMarkdown() {
+            return tables.entrySet().stream()
+                    .map(
+                            e ->
+                                    "##### "
+                                            + e.getKey().capitalisedName()
+                                            + lineSeparator()
+                                            + lineSeparator()
+                                            + e.getValue().toMarkdown())
+                    .collect(Collectors.joining(lineSeparator()));
+        }
+    }
 }
diff --git a/src/test/java/org/creekservice/kafka/test/perf/performance/util/JsonToMarkdownConvertorTest.java 
b/src/test/java/org/creekservice/kafka/test/perf/performance/util/JsonToMarkdownConvertorTest.java new file mode 100644 index 0000000..4f5be70 --- /dev/null +++ b/src/test/java/org/creekservice/kafka/test/perf/performance/util/JsonToMarkdownConvertorTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2023 Creek Contributors (https://github.com/creek-service) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.creekservice.kafka.test.perf.performance.util; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.when; + +import java.math.BigDecimal; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import org.creekservice.kafka.test.perf.performance.util.model.Metric; +import org.creekservice.kafka.test.perf.performance.util.model.PerformanceResult; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class JsonToMarkdownConvertorTest { + + private static final String EXPECTED_HEADINGS = + "| Benchmark | Mode | Score | Score Error (99.9%) | Unit |\n" + + "|-----------|------|-------|---------------------|------|\n"; + + private static final Path SOME_PATH = Paths.get("some/path"); + + @Mock private PerformanceJsonReader reader; + private JsonToMarkdownConvertor convertor; + + @BeforeEach + void setUp() { + convertor = new JsonToMarkdownConvertor(reader); + } + + @Test + void shouldConvertJsonToMarkdown() { + // Given: + when(reader.read(SOME_PATH)) + .thenReturn( + new PerformanceResult[] { + new PerformanceResult( + "org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_4_Medeia", + "avgt", + new Metric( + new BigDecimal("0.34276444437738995"), + new BigDecimal("0.0038394222791281593"), + "ms/op")), + new PerformanceResult( + "org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_7_Medeia", + "avgt", + new Metric( + new BigDecimal("0.893598359837538"), + new BigDecimal("0.0035983789573"), + "ms/op")), + new PerformanceResult( + "org.creekservice.kafka.test.perf.performance.JsonSerdeBenchmark.measureEveritRoundTrip", + "diff", + new Metric( + new BigDecimal("2135454.1245"), + new BigDecimal("0.003536745566"), + "us/op")) + }); + + // When: + final Map results = convertor.convert(SOME_PATH); + + // Then: + assertThat( + results, + is( + Map.of( + "JsonValidateBenchmark", + EXPECTED_HEADINGS + + "| measureDraft_4_Medeia | avgt | 0.34276 | 0.0038394 |" + + " ms/op |\n" + + "| measureDraft_7_Medeia | avgt | 0.89360 | 0.0035984 |" + + " ms/op |\n", + "JsonSerdeBenchmark", + EXPECTED_HEADINGS + + "| measureEveritRoundTrip | diff | 2135454 | 0.0035367 |" + + " us/op |\n"))); + } + + @Test + void shouldHandleNaN() { + // Given: + when(reader.read(SOME_PATH)) + .thenReturn( + new PerformanceResult[] { + new PerformanceResult( + 
"org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_7_Medeia", + "avgt", + new Metric(new BigDecimal("0.893602424"), "NaN", "ms/op")) + }); + + // When: + final Map results = convertor.convert(SOME_PATH); + + // Then: + assertThat( + results, + is( + Map.of( + "JsonValidateBenchmark", + EXPECTED_HEADINGS + + "| measureDraft_7_Medeia | avgt | 0.89360 | | ms/op" + + " |\n"))); + } +} diff --git a/src/test/java/org/creekservice/kafka/test/perf/performance/util/PerformanceDataValidatorTest.java b/src/test/java/org/creekservice/kafka/test/perf/performance/util/PerformanceDataValidatorTest.java new file mode 100644 index 0000000..35fa150 --- /dev/null +++ b/src/test/java/org/creekservice/kafka/test/perf/performance/util/PerformanceDataValidatorTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2023 Creek Contributors (https://github.com/creek-service) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.creekservice.kafka.test.perf.performance.util; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mock.Strictness.LENIENT; +import static org.mockito.Mockito.when; + +import java.nio.file.Path; +import org.creekservice.kafka.test.perf.performance.util.model.PerformanceResult; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class PerformanceDataValidatorTest { + + private static final Path SOME_PATH = Path.of("some/path"); + @Mock private PerformanceJsonReader reader; + + @Mock(strictness = LENIENT) + private PerformanceResult result1; + + private PerformanceDataValidator validator; + + @BeforeEach + void setUp() { + validator = new PerformanceDataValidator(reader); + + when(reader.read(SOME_PATH)).thenReturn(new PerformanceResult[] {result1}); + when(result1.testClass()).thenReturn("JsonTestBenchmark"); + } + + @Test + void shouldThrowIfBenchmarkMethodDoesNotMatchPattern() { + // Given: + when(result1.testCase()).thenReturn("invalidPattern"); + + // When: + final Exception e = + assertThrows(IllegalArgumentException.class, () -> validator.validate(SOME_PATH)); + + // Then: + assertThat( + e.getMessage(), + is( + "The JSON benchmark results contain a benchmark method with a name that" + + " does not match the expected pattern. 
Expected pattern:"
+                                + " measure(?<draft>Draft[_0-9]+)_(?<impl>[A-Za-z0-9]+)\n"
+                                + "Method name: JsonTestBenchmark.invalidPattern"));
+    }
+
+    @Test
+    void shouldThrowOnUnknownDraftVersion() {
+        // Given:
+        when(result1.testCase()).thenReturn("measureDraft_11_Snow");
+
+        // When:
+        final Exception e =
+                assertThrows(IllegalArgumentException.class, () -> validator.validate(SOME_PATH));
+
+        // Then:
+        assertThat(
+                e.getMessage(),
+                is(
+                        "The JSON benchmark results contain a benchmark method with a name that"
+                                + " does not contain a valid schema specification draft.\n"
+                                + "Available versions: [Draft_03, Draft_04, Draft_06, Draft_07,"
+                                + " Draft_2019_09, Draft_2020_12]\n"
+                                + "Detected version: Draft_11\n"
+                                + "Method name: JsonTestBenchmark.measureDraft_11_Snow"));
+    }
+
+    @Test
+    void shouldThrowOnUnknownImplementation() {
+        // Given:
+        when(result1.testCase()).thenReturn("measureDraft_07_InvalidImpl");
+
+        // When:
+        final Exception e =
+                assertThrows(IllegalArgumentException.class, () -> validator.validate(SOME_PATH));
+
+        // Then:
+        assertThat(
+                e.getMessage(),
+                is(
+                        "The JSON benchmark results contain a benchmark method with a name that does"
+                                + " not end with a known implementation's short name.\n"
+                                + "Detected short name: InvalidImpl\n"
+                                + "Method name: JsonTestBenchmark.measureDraft_07_InvalidImpl"));
+    }
+
+    @Test
+    void shouldParseJson() {
+        // Given:
+        when(result1.testCase()).thenReturn("measureDraft_2020_12_Vertx");
+
+        // When:
+        validator.validate(SOME_PATH);
+
+        // Then: did not throw.
+    }
+}
diff --git a/src/test/java/org/creekservice/kafka/test/perf/performance/util/PerformanceJsonReaderTest.java b/src/test/java/org/creekservice/kafka/test/perf/performance/util/PerformanceJsonReaderTest.java
new file mode 100644
index 0000000..5ecbd2d
--- /dev/null
+++ b/src/test/java/org/creekservice/kafka/test/perf/performance/util/PerformanceJsonReaderTest.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2023 Creek Contributors (https://github.com/creek-service)
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.creekservice.kafka.test.perf.performance.util; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.arrayContaining; + +import java.math.BigDecimal; +import org.creekservice.kafka.test.perf.performance.util.model.Metric; +import org.creekservice.kafka.test.perf.performance.util.model.PerformanceResult; +import org.junit.jupiter.api.Test; + +class PerformanceJsonReaderTest { + + private static final String JSON_RESULT = + "[\n" + + + // One complete one: + " {\n" + + " \"jmhVersion\" : \"1.36\",\n" + + " \"benchmark\" :" + + " \"org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_4_Medeia\",\n" + + " \"mode\" : \"avgt\",\n" + + " \"threads\" : 4,\n" + + " \"forks\" : 4,\n" + + " \"jvm\" :" + + " \"/opt/homebrew/Cellar/openjdk@17/17.0.9/libexec/openjdk.jdk/Contents/Home/bin/java\",\n" + + " \"jvmArgs\" : [\n" + + " \"-Dfile.encoding=UTF-8\",\n" + + " \"-Duser.country=GB\",\n" + + " \"-Duser.language=en\",\n" + + " \"-Duser.variant\"\n" + + " ],\n" + + " \"jdkVersion\" : \"17.0.9\",\n" + + " \"vmName\" : \"OpenJDK 64-Bit Server VM\",\n" + + " \"vmVersion\" : \"17.0.9+0\",\n" + + " \"warmupIterations\" : 5,\n" + + " \"warmupTime\" : \"10 s\",\n" + + " \"warmupBatchSize\" : 1,\n" + + " \"measurementIterations\" : 5,\n" + + " \"measurementTime\" : \"10 s\",\n" + + " \"measurementBatchSize\" : 1,\n" + + " \"primaryMetric\" : {\n" + + " \"score\" : 0.34276444437738995,\n" + + " \"scoreError\" : 0.0038394222791281593,\n" + + " \"scoreConfidence\" : [\n" + + " 0.3389250220982618,\n" + + " 0.3466038666565181\n" + + " ],\n" + + " \"scorePercentiles\" : {\n" + + " \"0.0\" : 0.3341750081828707,\n" + + " \"50.0\" : 0.3432545406943275,\n" + + " \"90.0\" : 0.3482507649509782,\n" + + " \"95.0\" : 0.34973757670327116,\n" + + " \"99.0\" : 0.34981339737699124,\n" + + " \"99.9\" : 0.34981339737699124,\n" + + " \"99.99\" : 0.34981339737699124,\n" + + " \"99.999\" : 0.34981339737699124,\n" + + " \"99.9999\" : 0.34981339737699124,\n" + + " \"100.0\" : 0.34981339737699124\n" + + " },\n" + + " \"scoreUnit\" : \"ms/op\",\n" + + " \"rawData\" : [\n" + + " [\n" + + " 0.3465202925610167,\n" + + " 0.34249890961701457,\n" + + " 0.34450357678071153,\n" + + " 0.34075528555055234,\n" + + " 0.3425288988276069\n" + + " ],\n" + + " [\n" + + " 0.34829698390259,\n" + + " 0.34396076146988674,\n" + + " 0.3433999185464469,\n" + + " 0.34549801455831286,\n" + + " 0.34390681847990284\n" + + " ],\n" + + " [\n" + + " 0.3430043540612691,\n" + + " 0.33511882592058606,\n" + + " 0.33611264096826354,\n" + + " 0.3341750081828707,\n" + + " 0.33599361359531954\n" + + " ],\n" + + " [\n" + + " 0.3478347943864718,\n" + + " 0.34221756760803823,\n" + + " 0.3460400623117395,\n" + + " 0.34981339737699124,\n" + + " 0.343109162842208\n" + + " ]\n" + + " ]\n" + + " }\n" + + " },\n" + // One minimal: + + " {\n" + + " \"benchmark\" :" + + " \"org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_7_Medeia\",\n" + + " \"mode\" : \"diff\",\n" + + " \"primaryMetric\" : {\n" + + " \"score\" : 0.893598359837538,\n" + + " \"scoreError\" : 0.0035983789573,\n" + + " \"scoreUnit\" : \"ms/op\"\n" + + " },\n" + + " \"secondaryMetrics\" : {\n" + + " }\n" + + " }\n" + + "]"; + + private static final String JSON_RESULT_WITH_NAN = + "[\n" + + " {\n" + + " \"benchmark\" :" + + " \"org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_7_Medeia\",\n" + + " \"mode\" : \"avgt\",\n" + + " \"primaryMetric\" : {\n" + + " \"score\" : 
0.893598359837538,\n" + + " \"scoreError\" : \"NaN\",\n" + + " \"scoreUnit\" : \"ms/op\"\n" + + " },\n" + + " \"secondaryMetrics\" : {\n" + + " }\n" + + " }\n" + + "]"; + + @Test + void shouldParseJson() { + // When: + final PerformanceResult[] results = PerformanceJsonReader.parseJson(JSON_RESULT); + + // Then: + assertThat( + results, + arrayContaining( + new PerformanceResult( + "org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_4_Medeia", + "avgt", + new Metric( + new BigDecimal("0.34276444437738995"), + new BigDecimal("0.0038394222791281593"), + "ms/op")), + new PerformanceResult( + "org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_7_Medeia", + "diff", + new Metric( + new BigDecimal("0.893598359837538"), + new BigDecimal("0.0035983789573"), + "ms/op")))); + } + + @Test + void shouldHandleNaN() { + // When: + final PerformanceResult[] results = PerformanceJsonReader.parseJson(JSON_RESULT_WITH_NAN); + + // Then: + assertThat( + results, + arrayContaining( + new PerformanceResult( + "org.creekservice.kafka.test.perf.performance.JsonValidateBenchmark.measureDraft_7_Medeia", + "avgt", + new Metric(new BigDecimal("0.893598359837538"), "NaN", "ms/op")))); + } +} diff --git a/src/test/java/org/creekservice/kafka/test/perf/util/ImplsJsonFormatterTest.java b/src/test/java/org/creekservice/kafka/test/perf/util/ImplsJsonFormatterTest.java index e6809e2..4721cbe 100644 --- a/src/test/java/org/creekservice/kafka/test/perf/util/ImplsJsonFormatterTest.java +++ b/src/test/java/org/creekservice/kafka/test/perf/util/ImplsJsonFormatterTest.java @@ -37,7 +37,7 @@ class ImplsJsonFormatterTest { private static final Implementation.MetaData MD_A = new Implementation.MetaData( "Implementation A", - "Impl_A", + "ImplA", Implementation.Language.Java, Implementation.Licence.Apache_v2_0, Set.of(SchemaSpec.DRAFT_2019_09, SchemaSpec.DRAFT_04), @@ -47,7 +47,7 @@ class ImplsJsonFormatterTest { private static final Implementation.MetaData MD_B = new Implementation.MetaData( "Implementation B", - "Impl_B", + "ImplB", Implementation.Language.Java, Implementation.Licence.Apache_v2_0, Set.of(SchemaSpec.DRAFT_07), @@ -76,14 +76,14 @@ void shouldFormatAsJson() { json, is( "[{\"longName\":\"Implementation A\"," - + "\"shortName\":\"Impl_A\"," + + "\"shortName\":\"ImplA\"," + "\"language\":\"Java\"," + "\"licence\":\"Apache Licence 2.0\"," + "\"supported\":[\"DRAFT_04\",\"DRAFT_2019_09\"]," + "\"url\":\"http://a\"," + "\"color\":\"rgb(0,0,0)\"}," + "{\"longName\":\"Implementation B\"," - + "\"shortName\":\"Impl_B\"," + + "\"shortName\":\"ImplB\"," + "\"language\":\"Java\"," + "\"licence\":\"Apache Licence 2.0\"," + "\"supported\":[\"DRAFT_07\"],"