diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 00000000..1f92b0f9
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,22 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/python
+{
+	"name": "Python 3",
+	// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+	"image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye"
+
+	// Features to add to the dev container. More info: https://containers.dev/features.
+	// "features": {},
+
+	// Use 'forwardPorts' to make a list of ports inside the container available locally.
+	// "forwardPorts": [],
+
+	// Use 'postCreateCommand' to run commands after the container is created.
+	// "postCreateCommand": "pip3 install --user -r requirements.txt",
+
+	// Configure tool-specific properties.
+	// "customizations": {},
+
+	// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+	// "remoteUser": "root"
+}
diff --git a/.pylintrc b/.pylintrc
index a71657a0..285a676e 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -360,7 +360,7 @@ indent-after-paren=4
 indent-string='    '
 
 # Maximum number of characters on a single line.
-max-line-length=140
+max-line-length=180
 
 # Maximum number of lines in a module.
 max-module-lines=1000
diff --git a/Pipfile b/Pipfile
new file mode 100644
index 00000000..2bd67e90
--- /dev/null
+++ b/Pipfile
@@ -0,0 +1,13 @@
+[[source]]
+url = "https://pypi.org/simple"
+verify_ssl = true
+name = "pypi"
+
+[packages]
+opensearch-benchmark = {file = ".", editable = true}
+
+[dev-packages]
+
+[requires]
+python_version = "3.11"
+python_full_version = "3.11.11"
diff --git a/Pipfile.lock b/Pipfile.lock
new file mode 100644
index 00000000..bb3bb78c
--- /dev/null
+++ b/Pipfile.lock
@@ -0,0 +1,1392 @@
+{ + "_meta": { + "hash": { + "sha256": "e1a1134afb3a24381a238ccf58622d1e6dde3ffe2614fe28fb158a2114c071d9" + }, + "pipfile-spec": 6, + "requires": { + "python_full_version": "3.11.11", + "python_version": "3.11" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "aiohappyeyeballs": { + "hashes": [ + "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745", + "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8" + ], + "markers": "python_version >= '3.8'", + "version": "==2.4.4" + }, + "aiohttp": { + "hashes": [ + "sha256:012f176945af138abc10c4a48743327a92b4ca9adc7a0e078077cdb5dbab7be0", + "sha256:02c13415b5732fb6ee7ff64583a5e6ed1c57aa68f17d2bda79c04888dfdc2769", + "sha256:03b6002e20938fc6ee0918c81d9e776bebccc84690e2b03ed132331cca065ee5", + "sha256:04814571cb72d65a6899db6099e377ed00710bf2e3eafd2985166f2918beaf59", + "sha256:0580f2e12de2138f34debcd5d88894786453a76e98febaf3e8fe5db62d01c9bf", + "sha256:06a8e2ee1cbac16fe61e51e0b0c269400e781b13bcfc33f5425912391a542985", + "sha256:076bc454a7e6fd646bc82ea7f98296be0b1219b5e3ef8a488afbdd8e81fbac50", + "sha256:0c9527819b29cd2b9f52033e7fb9ff08073df49b4799c89cb5754624ecd98299", + "sha256:0dc49f42422163efb7e6f1df2636fe3db72713f6cd94688e339dbe33fe06d61d", + "sha256:14cdb5a9570be5a04eec2ace174a48ae85833c2aadc86de68f55541f66ce42ab", + "sha256:15fccaf62a4889527539ecb86834084ecf6e9ea70588efde86e8bc775e0e7542", + "sha256:24213ba85a419103e641e55c27dc7ff03536c4873470c2478cce3311ba1eee7b", +
"sha256:31d5093d3acd02b31c649d3a69bb072d539d4c7659b87caa4f6d2bcf57c2fa2b", + "sha256:3691ed7726fef54e928fe26344d930c0c8575bc968c3e239c2e1a04bd8cf7838", + "sha256:386fbe79863eb564e9f3615b959e28b222259da0c48fd1be5929ac838bc65683", + "sha256:3bbbfff4c679c64e6e23cb213f57cc2c9165c9a65d63717108a644eb5a7398df", + "sha256:3de34936eb1a647aa919655ff8d38b618e9f6b7f250cc19a57a4bf7fd2062b6d", + "sha256:40d1c7a7f750b5648642586ba7206999650208dbe5afbcc5284bcec6579c9b91", + "sha256:44224d815853962f48fe124748227773acd9686eba6dc102578defd6fc99e8d9", + "sha256:47ad15a65fb41c570cd0ad9a9ff8012489e68176e7207ec7b82a0940dddfd8be", + "sha256:482cafb7dc886bebeb6c9ba7925e03591a62ab34298ee70d3dd47ba966370d2c", + "sha256:49c7dbbc1a559ae14fc48387a115b7d4bbc84b4a2c3b9299c31696953c2a5219", + "sha256:4b2c7ac59c5698a7a8207ba72d9e9c15b0fc484a560be0788b31312c2c5504e4", + "sha256:4cca22a61b7fe45da8fc73c3443150c3608750bbe27641fc7558ec5117b27fdf", + "sha256:4cfce37f31f20800a6a6620ce2cdd6737b82e42e06e6e9bd1b36f546feb3c44f", + "sha256:502a1464ccbc800b4b1995b302efaf426e8763fadf185e933c2931df7db9a199", + "sha256:53bf2097e05c2accc166c142a2090e4c6fd86581bde3fd9b2d3f9e93dda66ac1", + "sha256:593c114a2221444f30749cc5e5f4012488f56bd14de2af44fe23e1e9894a9c60", + "sha256:5d6958671b296febe7f5f859bea581a21c1d05430d1bbdcf2b393599b1cdce77", + "sha256:5ef359ebc6949e3a34c65ce20230fae70920714367c63afd80ea0c2702902ccf", + "sha256:613e5169f8ae77b1933e42e418a95931fb4867b2991fc311430b15901ed67079", + "sha256:61b9bae80ed1f338c42f57c16918853dc51775fb5cb61da70d590de14d8b5fb4", + "sha256:6362cc6c23c08d18ddbf0e8c4d5159b5df74fea1a5278ff4f2c79aed3f4e9f46", + "sha256:65a96e3e03300b41f261bbfd40dfdbf1c301e87eab7cd61c054b1f2e7c89b9e8", + "sha256:65e55ca7debae8faaffee0ebb4b47a51b4075f01e9b641c31e554fd376595c6c", + "sha256:68386d78743e6570f054fe7949d6cb37ef2b672b4d3405ce91fafa996f7d9b4d", + "sha256:68ff6f48b51bd78ea92b31079817aff539f6c8fc80b6b8d6ca347d7c02384e33", + "sha256:6ab29b8a0beb6f8eaf1e5049252cfe74adbaafd39ba91e10f18caeb0e99ffb34", + "sha256:77ae58586930ee6b2b6f696c82cf8e78c8016ec4795c53e36718365f6959dc82", + "sha256:77c4aa15a89847b9891abf97f3d4048f3c2d667e00f8a623c89ad2dccee6771b", + "sha256:78153314f26d5abef3239b4a9af20c229c6f3ecb97d4c1c01b22c4f87669820c", + "sha256:7852bbcb4d0d2f0c4d583f40c3bc750ee033265d80598d0f9cb6f372baa6b836", + "sha256:7e97d622cb083e86f18317282084bc9fbf261801b0192c34fe4b1febd9f7ae69", + "sha256:7f3dc0e330575f5b134918976a645e79adf333c0a1439dcf6899a80776c9ab39", + "sha256:80886dac673ceaef499de2f393fc80bb4481a129e6cb29e624a12e3296cc088f", + "sha256:811f23b3351ca532af598405db1093f018edf81368e689d1b508c57dcc6b6a32", + "sha256:86a5dfcc39309470bd7b68c591d84056d195428d5d2e0b5ccadfbaf25b026ebc", + "sha256:8b3cf2dc0f0690a33f2d2b2cb15db87a65f1c609f53c37e226f84edb08d10f52", + "sha256:8cc5203b817b748adccb07f36390feb730b1bc5f56683445bfe924fc270b8816", + "sha256:909af95a72cedbefe5596f0bdf3055740f96c1a4baa0dd11fd74ca4de0b4e3f1", + "sha256:974d3a2cce5fcfa32f06b13ccc8f20c6ad9c51802bb7f829eae8a1845c4019ec", + "sha256:98283b94cc0e11c73acaf1c9698dea80c830ca476492c0fe2622bd931f34b487", + "sha256:98f5635f7b74bcd4f6f72fcd85bea2154b323a9f05226a80bc7398d0c90763b0", + "sha256:99b7920e7165be5a9e9a3a7f1b680f06f68ff0d0328ff4079e5163990d046767", + "sha256:9bca390cb247dbfaec3c664326e034ef23882c3f3bfa5fbf0b56cad0320aaca5", + "sha256:9e2e576caec5c6a6b93f41626c9c02fc87cd91538b81a3670b2e04452a63def6", + "sha256:9ef405356ba989fb57f84cac66f7b0260772836191ccefbb987f414bcd2979d9", + "sha256:a55d2ad345684e7c3dd2c20d2f9572e9e1d5446d57200ff630e6ede7612e307f", + 
"sha256:ab7485222db0959a87fbe8125e233b5a6f01f4400785b36e8a7878170d8c3138", + "sha256:b1fc6b45010a8d0ff9e88f9f2418c6fd408c99c211257334aff41597ebece42e", + "sha256:b78f053a7ecfc35f0451d961dacdc671f4bcbc2f58241a7c820e9d82559844cf", + "sha256:b99acd4730ad1b196bfb03ee0803e4adac371ae8efa7e1cbc820200fc5ded109", + "sha256:be2b516f56ea883a3e14dda17059716593526e10fb6303189aaf5503937db408", + "sha256:beb39a6d60a709ae3fb3516a1581777e7e8b76933bb88c8f4420d875bb0267c6", + "sha256:bf3d1a519a324af764a46da4115bdbd566b3c73fb793ffb97f9111dbc684fc4d", + "sha256:c49a76c1038c2dd116fa443eba26bbb8e6c37e924e2513574856de3b6516be99", + "sha256:c5532f0441fc09c119e1dca18fbc0687e64fbeb45aa4d6a87211ceaee50a74c4", + "sha256:c6b9e6d7e41656d78e37ce754813fa44b455c3d0d0dced2a047def7dc5570b74", + "sha256:c87bf31b7fdab94ae3adbe4a48e711bfc5f89d21cf4c197e75561def39e223bc", + "sha256:cbad88a61fa743c5d283ad501b01c153820734118b65aee2bd7dbb735475ce0d", + "sha256:cf14627232dfa8730453752e9cdc210966490992234d77ff90bc8dc0dce361d5", + "sha256:db1d0b28fcb7f1d35600150c3e4b490775251dea70f894bf15c678fdd84eda6a", + "sha256:ddf5f7d877615f6a1e75971bfa5ac88609af3b74796ff3e06879e8422729fd01", + "sha256:e44a9a3c053b90c6f09b1bb4edd880959f5328cf63052503f892c41ea786d99f", + "sha256:efb15a17a12497685304b2d976cb4939e55137df7b09fa53f1b6a023f01fcb4e", + "sha256:fbbaea811a2bba171197b08eea288b9402faa2bab2ba0858eecdd0a4105753a3" + ], + "markers": "python_version >= '3.9'", + "version": "==3.11.10" + }, + "aiosignal": { + "hashes": [ + "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5", + "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54" + ], + "markers": "python_version >= '3.9'", + "version": "==1.3.2" + }, + "attrs": { + "hashes": [ + "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", + "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308" + ], + "markers": "python_version >= '3.8'", + "version": "==24.3.0" + }, + "boto3": { + "hashes": [ + "sha256:2bbaf1551b1ed55770cb437d7040f1abe6742601103695057b30ce6328eef286", + "sha256:c422b68ae76959b9e23b77eb79e41c3483332f7e1de918d2b083c456d8cf234c" + ], + "markers": "python_version >= '3.8'", + "version": "==1.35.82" + }, + "botocore": { + "hashes": [ + "sha256:78dd7bf8f49616d00073698d7bbaf5a115208fe730b7b7afae4456adddb3552e", + "sha256:e43b97d8cbf19d35ce3a177f144bd97cc370f0a67d0984c7d7cf105ac198748f" + ], + "markers": "python_version >= '3.8'", + "version": "==1.35.82" + }, + "cachetools": { + "hashes": [ + "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292", + "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a" + ], + "markers": "python_version >= '3.7'", + "version": "==5.5.0" + }, + "certifi": { + "hashes": [ + "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", + "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db" + ], + "markers": "python_version >= '3.6'", + "version": "==2024.12.14" + }, + "charset-normalizer": { + "hashes": [ + "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621", + "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", + "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", + "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", + "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", + "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", + 
"sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", + "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", + "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", + "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", + "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", + "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", + "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab", + "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", + "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", + "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", + "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", + "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", + "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62", + "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", + "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", + "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", + "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", + "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", + "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455", + "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858", + "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", + "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", + "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", + "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", + "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", + "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea", + "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", + "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", + "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", + "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", + "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", + "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", + "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", + "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee", + "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", + "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", + "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51", + "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", + "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8", + "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", + "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613", + "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", + "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", + "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", + "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", + "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", + 
"sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", + "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", + "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", + "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", + "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417", + "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", + "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", + "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", + "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", + "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", + "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149", + "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41", + "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574", + "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", + "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f", + "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", + "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654", + "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", + "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19", + "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", + "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578", + "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", + "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", + "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51", + "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", + "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", + "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", + "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", + "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade", + "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", + "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", + "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6", + "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", + "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", + "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6", + "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2", + "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12", + "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf", + "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", + "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7", + "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", + "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", + "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", + "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", + "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", + "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4", + 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", + "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", + "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", + "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748", + "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", + "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", + "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.4.0" + }, + "events": { + "hashes": [ + "sha256:a7286af378ba3e46640ac9825156c93bdba7502174dd696090fdfcd4d80a1abd" + ], + "version": "==0.5" + }, + "frozenlist": { + "hashes": [ + "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e", + "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf", + "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6", + "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a", + "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d", + "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f", + "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28", + "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b", + "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9", + "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2", + "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec", + "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2", + "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c", + "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336", + "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4", + "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d", + "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b", + "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c", + "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10", + "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08", + "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942", + "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", + "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f", + "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10", + "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5", + "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6", + "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21", + "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c", + "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d", + "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923", + "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608", + "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de", + "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17", + "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0", + "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", + "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641", 
+ "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c", + "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a", + "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0", + "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9", + "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab", + "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f", + "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3", + "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a", + "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784", + "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604", + "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d", + "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5", + "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03", + "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e", + "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953", + "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee", + "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d", + "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817", + "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3", + "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039", + "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", + "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9", + "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf", + "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76", + "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba", + "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171", + "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb", + "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439", + "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631", + "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972", + "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d", + "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869", + "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9", + "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411", + "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723", + "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2", + "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b", + "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99", + "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e", + "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840", + "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3", + "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb", + "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", + "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0", + "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca", + "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45", + 
"sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e", + "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f", + "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5", + "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307", + "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e", + "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2", + "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778", + "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a", + "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30", + "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a" + ], + "markers": "python_version >= '3.8'", + "version": "==1.5.0" + }, + "google-auth": { + "hashes": [ + "sha256:0054623abf1f9c83492c63d3f47e77f0a544caa3d40b2d98e099a611c2dd5d00", + "sha256:42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0" + ], + "markers": "python_version >= '3.7'", + "version": "==2.37.0" + }, + "google-crc32c": { + "hashes": [ + "sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24", + "sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d", + "sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e", + "sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57", + "sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2", + "sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8", + "sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc", + "sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42", + "sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f", + "sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa", + "sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b", + "sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc", + "sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760", + "sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d", + "sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7", + "sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d", + "sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0", + "sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3", + "sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3", + "sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00", + "sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871", + "sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c", + "sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9", + "sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205", + "sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc", + "sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d", + "sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4" + ], + "markers": "python_version >= '3.9'", + "version": "==1.6.0" + }, + "google-resumable-media": { + "hashes": [ + "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa", + "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0" + ], + "markers": "python_version >= '3.7'", + "version": "==2.7.2" + 
}, + "h5py": { + "hashes": [ + "sha256:018a4597f35092ae3fb28ee851fdc756d2b88c96336b8480e124ce1ac6fb9166", + "sha256:050a4f2c9126054515169c49cb900949814987f0c7ae74c341b0c9f9b5056834", + "sha256:06a903a4e4e9e3ebbc8b548959c3c2552ca2d70dac14fcfa650d9261c66939ed", + "sha256:1473348139b885393125126258ae2d70753ef7e9cec8e7848434f385ae72069e", + "sha256:2f0f1a382cbf494679c07b4371f90c70391dedb027d517ac94fa2c05299dacda", + "sha256:326d70b53d31baa61f00b8aa5f95c2fcb9621a3ee8365d770c551a13dbbcbfdf", + "sha256:3b15d8dbd912c97541312c0e07438864d27dbca857c5ad634de68110c6beb1c2", + "sha256:3fdf95092d60e8130ba6ae0ef7a9bd4ade8edbe3569c13ebbaf39baefffc5ba4", + "sha256:4532c7e97fbef3d029735db8b6f5bf01222d9ece41e309b20d63cfaae2fb5c4d", + "sha256:513171e90ed92236fc2ca363ce7a2fc6f2827375efcbb0cc7fbdd7fe11fecafc", + "sha256:52ab036c6c97055b85b2a242cb540ff9590bacfda0c03dd0cf0661b311f522f8", + "sha256:577d618d6b6dea3da07d13cc903ef9634cde5596b13e832476dd861aaf651f3e", + "sha256:59400f88343b79655a242068a9c900001a34b63e3afb040bd7cdf717e440f653", + "sha256:59685fe40d8c1fbbee088c88cd4da415a2f8bee5c270337dc5a1c4aa634e3307", + "sha256:5c4b41d1019322a5afc5082864dfd6359f8935ecd37c11ac0029be78c5d112c9", + "sha256:62be1fc0ef195891949b2c627ec06bc8e837ff62d5b911b6e42e38e0f20a897d", + "sha256:6fdf6d7936fa824acfa27305fe2d9f39968e539d831c5bae0e0d83ed521ad1ac", + "sha256:7b3b8f3b48717e46c6a790e3128d39c61ab595ae0a7237f06dfad6a3b51d5351", + "sha256:84342bffd1f82d4f036433e7039e241a243531a1d3acd7341b35ae58cdab05bf", + "sha256:ad8a76557880aed5234cfe7279805f4ab5ce16b17954606cca90d578d3e713ef", + "sha256:ba51c0c5e029bb5420a343586ff79d56e7455d496d18a30309616fdbeed1068f", + "sha256:cb65f619dfbdd15e662423e8d257780f9a66677eae5b4b3fc9dca70b5fd2d2a3", + "sha256:ccd9006d92232727d23f784795191bfd02294a4f2ba68708825cb1da39511a93", + "sha256:d2b8dd64f127d8b324f5d2cd1c0fd6f68af69084e9e47d27efeb9e28e685af3e", + "sha256:d3e465aee0ec353949f0f46bf6c6f9790a2006af896cee7c178a8c3e5090aa32", + "sha256:e4d51919110a030913201422fb07987db4338eba5ec8c5a15d6fab8e03d443fc" + ], + "markers": "python_version >= '3.9'", + "version": "==3.12.1" + }, + "idna": { + "hashes": [ + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" + ], + "markers": "python_version >= '3.6'", + "version": "==3.10" + }, + "ijson": { + "hashes": [ + "sha256:0015354011303175eae7e2ef5136414e91de2298e5a2e9580ed100b728c07e51", + "sha256:034642558afa57351a0ffe6de89e63907c4cf6849070cc10a3b2542dccda1afe", + "sha256:0420c24e50389bc251b43c8ed379ab3e3ba065ac8262d98beb6735ab14844460", + "sha256:04366e7e4a4078d410845e58a2987fd9c45e63df70773d7b6e87ceef771b51ee", + "sha256:0b003501ee0301dbf07d1597482009295e16d647bb177ce52076c2d5e64113e0", + "sha256:0ee57a28c6bf523d7cb0513096e4eb4dac16cd935695049de7608ec110c2b751", + "sha256:192e4b65495978b0bce0c78e859d14772e841724d3269fc1667dc6d2f53cc0ea", + "sha256:1efb521090dd6cefa7aafd120581947b29af1713c902ff54336b7c7130f04c47", + "sha256:25fd49031cdf5fd5f1fd21cb45259a64dad30b67e64f745cc8926af1c8c243d3", + "sha256:2636cb8c0f1023ef16173f4b9a233bcdb1df11c400c603d5f299fac143ca8d70", + "sha256:29ce02af5fbf9ba6abb70765e66930aedf73311c7d840478f1ccecac53fefbf3", + "sha256:2af323a8aec8a50fa9effa6d640691a30a9f8c4925bd5364a1ca97f1ac6b9b5c", + "sha256:30cfea40936afb33b57d24ceaf60d0a2e3d5c1f2335ba2623f21d560737cc730", + "sha256:33afc25057377a6a43c892de34d229a86f89ea6c4ca3dd3db0dcd17becae0dbb", + "sha256:36aa56d68ea8def26778eb21576ae13f27b4a47263a7a2581ab2ef58b8de4451", + 
"sha256:3917b2b3d0dbbe3296505da52b3cb0befbaf76119b2edaff30bd448af20b5400", + "sha256:3aba5c4f97f4e2ce854b5591a8b0711ca3b0c64d1b253b04ea7b004b0a197ef6", + "sha256:3c556f5553368dff690c11d0a1fb435d4ff1f84382d904ccc2dc53beb27ba62e", + "sha256:3dc1fb02c6ed0bae1b4bf96971258bf88aea72051b6e4cebae97cff7090c0607", + "sha256:3e8d8de44effe2dbd0d8f3eb9840344b2d5b4cc284a14eb8678aec31d1b6bea8", + "sha256:40ee3821ee90be0f0e95dcf9862d786a7439bd1113e370736bfdf197e9765bfb", + "sha256:44367090a5a876809eb24943f31e470ba372aaa0d7396b92b953dda953a95d14", + "sha256:45ff05de889f3dc3d37a59d02096948ce470699f2368b32113954818b21aa74a", + "sha256:4690e3af7b134298055993fcbea161598d23b6d3ede11b12dca6815d82d101d5", + "sha256:473f5d921fadc135d1ad698e2697025045cd8ed7e5e842258295012d8a3bc702", + "sha256:47c144117e5c0e2babb559bc8f3f76153863b8dd90b2d550c51dab5f4b84a87f", + "sha256:4ac6c3eeed25e3e2cb9b379b48196413e40ac4e2239d910bb33e4e7f6c137745", + "sha256:4b72178b1e565d06ab19319965022b36ef41bcea7ea153b32ec31194bec032a2", + "sha256:4e9ffe358d5fdd6b878a8a364e96e15ca7ca57b92a48f588378cef315a8b019e", + "sha256:501dce8eaa537e728aa35810656aa00460a2547dcb60937c8139f36ec344d7fc", + "sha256:5378d0baa59ae422905c5f182ea0fd74fe7e52a23e3821067a7d58c8306b2191", + "sha256:542c1e8fddf082159a5d759ee1412c73e944a9a2412077ed00b303ff796907dc", + "sha256:63afea5f2d50d931feb20dcc50954e23cef4127606cc0ecf7a27128ed9f9a9e6", + "sha256:658ba9cad0374d37b38c9893f4864f284cdcc7d32041f9808fba8c7bcaadf134", + "sha256:6b661a959226ad0d255e49b77dba1d13782f028589a42dc3172398dd3814c797", + "sha256:72e3488453754bdb45c878e31ce557ea87e1eb0f8b4fc610373da35e8074ce42", + "sha256:7914d0cf083471856e9bc2001102a20f08e82311dfc8cf1a91aa422f9414a0d6", + "sha256:7ab00721304af1ae1afa4313ecfa1bf16b07f55ef91e4a5b93aeaa3e2bd7917c", + "sha256:7d0b6b637d05dbdb29d0bfac2ed8425bb369e7af5271b0cc7cf8b801cb7360c2", + "sha256:7e2b3e9ca957153557d06c50a26abaf0d0d6c0ddf462271854c968277a6b5372", + "sha256:7f172e6ba1bee0d4c8f8ebd639577bfe429dee0f3f96775a067b8bae4492d8a0", + "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675", + "sha256:844c0d1c04c40fd1b60f148dc829d3f69b2de789d0ba239c35136efe9a386529", + "sha256:8643c255a25824ddd0895c59f2319c019e13e949dc37162f876c41a283361527", + "sha256:8795e88adff5aa3c248c1edce932db003d37a623b5787669ccf205c422b91e4a", + "sha256:87c727691858fd3a1c085d9980d12395517fcbbf02c69fbb22dede8ee03422da", + "sha256:8851584fb931cffc0caa395f6980525fd5116eab8f73ece9d95e6f9c2c326c4c", + "sha256:891f95c036df1bc95309951940f8eea8537f102fa65715cdc5aae20b8523813b", + "sha256:8c85447569041939111b8c7dbf6f8fa7a0eb5b2c4aebb3c3bec0fb50d7025121", + "sha256:8e0ff16c224d9bfe4e9e6bd0395826096cda4a3ef51e6c301e1b61007ee2bd24", + "sha256:8f83f553f4cde6d3d4eaf58ec11c939c94a0ec545c5b287461cafb184f4b3a14", + "sha256:8f890d04ad33262d0c77ead53c85f13abfb82f2c8f078dfbf24b78f59534dfdd", + "sha256:8fdf3721a2aa7d96577970f5604bd81f426969c1822d467f07b3d844fa2fecc7", + "sha256:907f3a8674e489abdcb0206723e5560a5cb1fa42470dcc637942d7b10f28b695", + "sha256:92355f95a0e4da96d4c404aa3cff2ff033f9180a9515f813255e1526551298c1", + "sha256:97a9aea46e2a8371c4cf5386d881de833ed782901ac9f67ebcb63bb3b7d115af", + "sha256:988e959f2f3d59ebd9c2962ae71b97c0df58323910d0b368cc190ad07429d1bb", + "sha256:99f5c8ab048ee4233cc4f2b461b205cbe01194f6201018174ac269bf09995749", + "sha256:9cd5c03c63ae06d4f876b9844c5898d0044c7940ff7460db9f4cd984ac7862b5", + "sha256:a3b730ef664b2ef0e99dec01b6573b9b085c766400af363833e08ebc1e38eb2f", + "sha256:a716e05547a39b788deaf22725490855337fc36613288aa8ae1601dc8c525553", + 
"sha256:a7ec759c4a0fc820ad5dc6a58e9c391e7b16edcb618056baedbedbb9ea3b1524", + "sha256:aaa6bfc2180c31a45fac35d40e3312a3d09954638ce0b2e9424a88e24d262a13", + "sha256:ad04cf38164d983e85f9cba2804566c0160b47086dcca4cf059f7e26c5ace8ca", + "sha256:b2f73f0d0fce5300f23a1383d19b44d103bb113b57a69c36fd95b7c03099b181", + "sha256:b325f42e26659df1a0de66fdb5cde8dd48613da9c99c07d04e9fb9e254b7ee1c", + "sha256:b51bab2c4e545dde93cb6d6bb34bf63300b7cd06716f195dd92d9255df728331", + "sha256:b5c3e285e0735fd8c5a26d177eca8b52512cdd8687ca86ec77a0c66e9c510182", + "sha256:b73b493af9e947caed75d329676b1b801d673b17481962823a3e55fe529c8b8b", + "sha256:b9d85a02e77ee8ea6d9e3fd5d515bcc3d798d9c1ea54817e5feb97a9bc5d52fe", + "sha256:bdcfc88347fd981e53c33d832ce4d3e981a0d696b712fbcb45dcc1a43fe65c65", + "sha256:c594c0abe69d9d6099f4ece17763d53072f65ba60b372d8ba6de8695ce6ee39e", + "sha256:c8a9befb0c0369f0cf5c1b94178d0d78f66d9cebb9265b36be6e4f66236076b8", + "sha256:cd174b90db68c3bcca273e9391934a25d76929d727dc75224bf244446b28b03b", + "sha256:d5576415f3d76290b160aa093ff968f8bf6de7d681e16e463a0134106b506f49", + "sha256:d654d045adafdcc6c100e8e911508a2eedbd2a1b5f93f930ba13ea67d7704ee9", + "sha256:d92e339c69b585e7b1d857308ad3ca1636b899e4557897ccd91bb9e4a56c965b", + "sha256:da3b6987a0bc3e6d0f721b42c7a0198ef897ae50579547b0345f7f02486898f5", + "sha256:dd26b396bc3a1e85f4acebeadbf627fa6117b97f4c10b177d5779577c6607744", + "sha256:de7c1ddb80fa7a3ab045266dca169004b93f284756ad198306533b792774f10a", + "sha256:df3ab5e078cab19f7eaeef1d5f063103e1ebf8c26d059767b26a6a0ad8b250a3", + "sha256:e0155a8f079c688c2ccaea05de1ad69877995c547ba3d3612c1c336edc12a3a5", + "sha256:e10c14535abc7ddf3fd024aa36563cd8ab5d2bb6234a5d22c77c30e30fa4fb2b", + "sha256:e4396b55a364a03ff7e71a34828c3ed0c506814dd1f50e16ebed3fc447d5188e", + "sha256:e5589225c2da4bb732c9c370c5961c39a6db72cf69fb2a28868a5413ed7f39e6", + "sha256:e6576cdc36d5a09b0c1a3d81e13a45d41a6763188f9eaae2da2839e8a4240bce", + "sha256:e6850ae33529d1e43791b30575070670070d5fe007c37f5d06aebc1dd152ab3f", + "sha256:e9afd97339fc5a20f0542c971f90f3ca97e73d3050cdc488d540b63fae45329a", + "sha256:ead50635fb56577c07eff3e557dac39533e0fe603000684eea2af3ed1ad8f941", + "sha256:ed1336a2a6e5c427f419da0154e775834abcbc8ddd703004108121c6dd9eba9d", + "sha256:f0c819f83e4f7b7f7463b2dc10d626a8be0c85fbc7b3db0edc098c2b16ac968e", + "sha256:f64f01795119880023ba3ce43072283a393f0b90f52b66cc0ea1a89aa64a9ccb", + "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34", + "sha256:ff835906f84451e143f31c4ce8ad73d83ef4476b944c2a2da91aec8b649570e1" + ], + "version": "==3.3.0" + }, + "jinja2": { + "hashes": [ + "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", + "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d" + ], + "markers": "python_version >= '3.7'", + "version": "==3.1.4" + }, + "jmespath": { + "hashes": [ + "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", + "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe" + ], + "markers": "python_version >= '3.7'", + "version": "==1.0.1" + }, + "jsonschema": { + "hashes": [ + "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", + "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566" + ], + "markers": "python_version >= '3.8'", + "version": "==4.23.0" + }, + "jsonschema-specifications": { + "hashes": [ + "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272", + "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf" + ], + 
"markers": "python_version >= '3.9'", + "version": "==2024.10.1" + }, + "markupsafe": { + "hashes": [ + "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", + "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", + "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", + "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", + "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", + "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", + "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", + "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", + "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", + "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", + "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", + "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", + "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", + "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", + "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", + "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", + "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", + "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", + "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", + "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", + "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", + "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", + "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", + "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", + "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", + "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", + "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", + "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", + "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", + "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", + "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", + "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", + "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", + "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", + "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", + "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", + "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", + "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", + "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", + "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", + "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", + "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", + "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", + "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", + 
"sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", + "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", + "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", + "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", + "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", + "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", + "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", + "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", + "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", + "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", + "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", + "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", + "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", + "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", + "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", + "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", + "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50" + ], + "markers": "python_version >= '3.9'", + "version": "==3.0.2" + }, + "multidict": { + "hashes": [ + "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f", + "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056", + "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761", + "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3", + "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b", + "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6", + "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748", + "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966", + "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f", + "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", + "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6", + "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada", + "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305", + "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2", + "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d", + "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a", + "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef", + "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c", + "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb", + "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60", + "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6", + "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4", + "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478", + "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81", + "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7", + "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56", + "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", + 
"sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6", + "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30", + "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb", + "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", + "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0", + "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925", + "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c", + "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6", + "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e", + "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95", + "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2", + "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", + "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2", + "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa", + "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3", + "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3", + "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436", + "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657", + "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581", + "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492", + "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43", + "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2", + "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2", + "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926", + "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057", + "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc", + "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80", + "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255", + "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1", + "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972", + "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53", + "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1", + "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423", + "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a", + "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160", + "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c", + "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd", + "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa", + "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5", + "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b", + "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa", + "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef", + "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44", + "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4", + "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156", + "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753", + 
"sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28", + "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d", + "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a", + "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304", + "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008", + "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429", + "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72", + "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399", + "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3", + "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392", + "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167", + "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c", + "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774", + "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351", + "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76", + "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875", + "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd", + "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28", + "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db" + ], + "markers": "python_version >= '3.8'", + "version": "==6.1.0" + }, + "numpy": { + "hashes": [ + "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b", + "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818", + "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20", + "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0", + "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010", + "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a", + "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea", + "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c", + "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71", + "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110", + "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be", + "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a", + "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a", + "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5", + "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed", + "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd", + "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c", + "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e", + "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0", + "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c", + "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a", + "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b", + "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0", + "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6", + "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2", + 
"sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a", + "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30", + "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218", + "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5", + "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07", + "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2", + "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4", + "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764", + "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef", + "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3", + "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f" + ], + "markers": "python_version >= '3.9'", + "version": "==1.26.4" + }, + "opensearch-benchmark": { + "editable": true, + "file": "." + }, + "opensearch-py": { + "extras": [ + "async" + ], + "hashes": [ + "sha256:52c60fdb5d4dcf6cce3ee746c13b194529b0161e0f41268b98ab8f1624abe2fa", + "sha256:6598df0bc7a003294edd0ba88a331e0793acbb8c910c43edf398791e3b2eccda" + ], + "markers": "python_version >= '3.8' and python_version < '4'", + "version": "==2.8.0" + }, + "propcache": { + "hashes": [ + "sha256:03ff9d3f665769b2a85e6157ac8b439644f2d7fd17615a82fa55739bc97863f4", + "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4", + "sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a", + "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f", + "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9", + "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d", + "sha256:160291c60081f23ee43d44b08a7e5fb76681221a8e10b3139618c5a9a291b84e", + "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6", + "sha256:19a0f89a7bb9d8048d9c4370c9c543c396e894c76be5525f5e1ad287f1750ddf", + "sha256:1ac2f5fe02fa75f56e1ad473f1175e11f475606ec9bd0be2e78e4734ad575034", + "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d", + "sha256:1ffc3cca89bb438fb9c95c13fc874012f7b9466b89328c3c8b1aa93cdcfadd16", + "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30", + "sha256:2d3af2e79991102678f53e0dbf4c35de99b6b8b58f29a27ca0325816364caaba", + "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95", + "sha256:3156628250f46a0895f1f36e1d4fbe062a1af8718ec3ebeb746f1d23f0c5dc4d", + "sha256:31f5af773530fd3c658b32b6bdc2d0838543de70eb9a2156c03e410f7b0d3aae", + "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348", + "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2", + "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64", + "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce", + "sha256:4a571d97dbe66ef38e472703067021b1467025ec85707d57e78711c085984e54", + "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629", + "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54", + "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1", + "sha256:574faa3b79e8ebac7cb1d7930f51184ba1ccf69adfdec53a12f319a06030a68b", + "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf", + "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b", + 
"sha256:5d97151bc92d2b2578ff7ce779cdb9174337390a535953cbb9452fb65164c587", + "sha256:5eee736daafa7af6d0a2dc15cc75e05c64f37fc37bafef2e00d77c14171c2097", + "sha256:6445804cf4ec763dc70de65a3b0d9954e868609e83850a47ca4f0cb64bd79fea", + "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24", + "sha256:66d4cfda1d8ed687daa4bc0274fcfd5267873db9a5bc0418c2da19273040eeb7", + "sha256:6a9a8c34fb7bb609419a211e59da8887eeca40d300b5ea8e56af98f6fbbb1541", + "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6", + "sha256:6b6fb63ae352e13748289f04f37868099e69dba4c2b3e271c46061e82c745634", + "sha256:70693319e0b8fd35dd863e3e29513875eb15c51945bf32519ef52927ca883bc3", + "sha256:781e65134efaf88feb447e8c97a51772aa75e48b794352f94cb7ea717dedda0d", + "sha256:819ce3b883b7576ca28da3861c7e1a88afd08cc8c96908e08a3f4dd64a228034", + "sha256:857112b22acd417c40fa4595db2fe28ab900c8c5fe4670c7989b1c0230955465", + "sha256:887d9b0a65404929641a9fabb6452b07fe4572b269d901d622d8a34a4e9043b2", + "sha256:8b3489ff1ed1e8315674d0775dc7d2195fb13ca17b3808721b54dbe9fd020faf", + "sha256:92fc4500fcb33899b05ba73276dfb684a20d31caa567b7cb5252d48f896a91b1", + "sha256:9403db39be1393618dd80c746cb22ccda168efce239c73af13c3763ef56ffc04", + "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5", + "sha256:999779addc413181912e984b942fbcc951be1f5b3663cd80b2687758f434c583", + "sha256:9caac6b54914bdf41bcc91e7eb9147d331d29235a7c967c150ef5df6464fd1bb", + "sha256:a7a078f5d37bee6690959c813977da5291b24286e7b962e62a94cec31aa5188b", + "sha256:a7e65eb5c003a303b94aa2c3852ef130230ec79e349632d030e9571b87c4698c", + "sha256:a96dc1fa45bd8c407a0af03b2d5218392729e1822b0c32e62c5bf7eeb5fb3958", + "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc", + "sha256:accb6150ce61c9c4b7738d45550806aa2b71c7668c6942f17b0ac182b6142fd4", + "sha256:ad1af54a62ffe39cf34db1aa6ed1a1873bd548f6401db39d8e7cd060b9211f82", + "sha256:ae1aa1cd222c6d205853b3013c69cd04515f9d6ab6de4b0603e2e1c33221303e", + "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce", + "sha256:b480c6a4e1138e1aa137c0079b9b6305ec6dcc1098a8ca5196283e8a49df95a9", + "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518", + "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536", + "sha256:bb6178c241278d5fe853b3de743087be7f5f4c6f7d6d22a3b524d323eecec505", + "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052", + "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff", + "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1", + "sha256:c2f992c07c0fca81655066705beae35fc95a2fa7366467366db627d9f2ee097f", + "sha256:cba4cfa1052819d16699e1d55d18c92b6e094d4517c41dd231a8b9f87b6fa681", + "sha256:cea7daf9fc7ae6687cf1e2c049752f19f146fdc37c2cc376e7d0032cf4f25347", + "sha256:cf6c4150f8c0e32d241436526f3c3f9cbd34429492abddbada2ffcff506c51af", + "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246", + "sha256:d27b84d5880f6d8aa9ae3edb253c59d9f6642ffbb2c889b78b60361eed449787", + "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0", + "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f", + "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439", + "sha256:d9631c5e8b5b3a0fda99cb0d29c18133bca1e18aea9effe55adb3da1adef80d3", + "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6", + "sha256:e7048abd75fe40712005bcfc06bb44b9dfcd8e101dda2ecf2f5aa46115ad07ca", + 
"sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec", + "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d", + "sha256:edc9fc7051e3350643ad929df55c451899bb9ae6d24998a949d2e4c87fb596d3", + "sha256:f089118d584e859c62b3da0892b88a83d611c2033ac410e929cb6754eec0ed16", + "sha256:f174bbd484294ed9fdf09437f889f95807e5f229d5d93588d34e92106fbf6717", + "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6", + "sha256:f7a31fc1e1bd362874863fdeed71aed92d348f5336fd84f2197ba40c59f061bd", + "sha256:f9479aa06a793c5aeba49ce5c5692ffb51fcd9a7016e017d555d5e2b0045d212" + ], + "markers": "python_version >= '3.9'", + "version": "==0.2.1" + }, + "psutil": { + "hashes": [ + "sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047", + "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc", + "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e", + "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747", + "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e", + "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a", + "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b", + "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76", + "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca", + "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688", + "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e", + "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38", + "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85", + "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be", + "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942", + "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a", + "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", + "version": "==6.1.0" + }, + "py-cpuinfo": { + "hashes": [ + "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690", + "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5" + ], + "version": "==9.0.0" + }, + "pyasn1": { + "hashes": [ + "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", + "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034" + ], + "markers": "python_version >= '3.8'", + "version": "==0.6.1" + }, + "pyasn1-modules": { + "hashes": [ + "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", + "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c" + ], + "markers": "python_version >= '3.8'", + "version": "==0.4.1" + }, + "python-dateutil": { + "hashes": [ + "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", + "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==2.9.0.post0" + }, + "referencing": { + "hashes": [ + "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c", + "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de" + ], + "markers": "python_version >= '3.8'", + "version": "==0.35.1" + }, + "requests": { + "hashes": [ + 
"sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" + ], + "markers": "python_version >= '3.8'", + "version": "==2.32.3" + }, + "rpds-py": { + "hashes": [ + "sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518", + "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059", + "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61", + "sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5", + "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9", + "sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543", + "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2", + "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a", + "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d", + "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56", + "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d", + "sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd", + "sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b", + "sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4", + "sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99", + "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d", + "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd", + "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe", + "sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1", + "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e", + "sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f", + "sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3", + "sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca", + "sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d", + "sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e", + "sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc", + "sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea", + "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38", + "sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b", + "sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c", + "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff", + "sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723", + "sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e", + "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493", + "sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6", + "sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83", + "sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091", + "sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1", + "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627", + "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1", + "sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728", + "sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16", + 
"sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c", + "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45", + "sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7", + "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a", + "sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730", + "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967", + "sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25", + "sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24", + "sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055", + "sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d", + "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0", + "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e", + "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7", + "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c", + "sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f", + "sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd", + "sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652", + "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8", + "sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11", + "sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333", + "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96", + "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64", + "sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b", + "sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e", + "sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c", + "sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9", + "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec", + "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb", + "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37", + "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad", + "sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9", + "sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c", + "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf", + "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4", + "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f", + "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d", + "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09", + "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d", + "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566", + "sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74", + "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338", + "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15", + "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c", + "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648", + "sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84", + "sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3", + 
"sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123", + "sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520", + "sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831", + "sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e", + "sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf", + "sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b", + "sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2", + "sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3", + "sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130", + "sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b", + "sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de", + "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5", + "sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d", + "sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00", + "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e" + ], + "markers": "python_version >= '3.9'", + "version": "==0.22.3" + }, + "rsa": { + "hashes": [ + "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7", + "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21" + ], + "markers": "python_version >= '3.6' and python_version < '4'", + "version": "==4.9" + }, + "s3transfer": { + "hashes": [ + "sha256:244a76a24355363a68164241438de1b72f8781664920260c48465896b712a41e", + "sha256:29edc09801743c21eb5ecbc617a152df41d3c287f67b615f73e5f750583666a7" + ], + "markers": "python_version >= '3.8'", + "version": "==0.10.4" + }, + "six": { + "hashes": [ + "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", + "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==1.17.0" + }, + "tabulate": { + "hashes": [ + "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", + "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f" + ], + "markers": "python_version >= '3.7'", + "version": "==0.9.0" + }, + "thespian": { + "hashes": [ + "sha256:c987a8042ba2303e22371f38a67354593dd81c4c11ba1eba7f6657409288d5ed" + ], + "version": "==3.10.6" + }, + "urllib3": { + "hashes": [ + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.3" + }, + "wheel": { + "hashes": [ + "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729", + "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248" + ], + "markers": "python_version >= '3.8'", + "version": "==0.45.1" + }, + "yappi": { + "hashes": [ + "sha256:01705971b728a4f95829b723d08883c7623ec275f4066f4048b28dc0151fe0af", + "sha256:0d62741c0ac883067e40481ab89ddd9e004292dbd22ac5992cf45745bf28ccc3", + "sha256:198831ccab42295ae2be265d422fdc0d9ccc8ae3e074e7c70fb58731e8181221", + "sha256:1cf46ebe43ac95f8736618a5f0ac763c7502a3aa964a1dda083d9e9c1bf07b12", + "sha256:1d82839835ae2c291b88fb56d82f80c88c00d76df29f3c1ed050db73b553bef0", + "sha256:1f03127742746ec4cf7e422b08212daf094505ab7f5d725d7b273ed3c475c3d9", + "sha256:20b8289e8cca781e948f72d86c03b308e077abeec53ec60080f77319041e0511", + 
"sha256:215964abb3818124bc638cf5456ca311e70188146afb30336cced0fc4ef42f5b", + "sha256:2246e57e1ab7d11a184042fe5726fbffca8c1a59c5eb01d1a043741403bf844d", + "sha256:228ab550d53b5e37d618b42f5085e504376963b48f867d45d0fdc8a1e0c811d2", + "sha256:2594ab790a9db37223e7861ec9cdf74d1edf05a78b31a8806ff24abcde668bea", + "sha256:2ba5c27a82cdd84e5102b789ab5061431944e3dee27e0970c3167b3bce78b262", + "sha256:307d681dd0cdaa7986e3b22115e41597f92778db03ba9be5096cfcb13929c5e9", + "sha256:32c6d928604d7a236090bc36d324f309fe8344c91123bb84e37c43f6677adddc", + "sha256:334b31dfefae02bc28b7cd50953aaaae3292e40c15efb613792e4a587281a161", + "sha256:354cf94d659302b421b13c03487f2f1bce969b97b85fba88afb11f2ef83c35f3", + "sha256:3736ea6458edbabd96918d88e2963594823e4ab4c58d62a52ef81f6b5839ec19", + "sha256:3752ab9480f28709427d6077d220d963ed7caa84e18fd0f404022f4076850b0e", + "sha256:3aa33acd51ba1b5d81e5d6ec305d144531d215635b9dfd8ee1d57688c77725af", + "sha256:3d95ce88d0b533a44a6d9521b983e3412e5c50d7fd152f2155764effad4ecf7f", + "sha256:402252d543e47464707ea5d7e4a63c7e77ce81cb58b8559c8883e67ae483911c", + "sha256:40aa421ea7078795ed2f0e6bae3f8f64f6cd5019c885a12c613b44dd1fc598b4", + "sha256:463b822727658937bd95a7d80ca9758605b8cd0014e004e9e520ec9cb4db0c92", + "sha256:49f1f8b16d6f42a79a06ae5268f39e71de3648d6797471dc71d80d91be4a6484", + "sha256:4bc9a30b162cb0e13d6400476fa05c272992bd359592e9bba1a570878d9e155c", + "sha256:4bd4f820e84d823724b8de4bf6857025e9e6c953978dd32485e054cf7de0eda7", + "sha256:4efb7ee80a1ac4511e900ebced03aea761ab129269b0d571586a25d3a71e7a35", + "sha256:6822f33ae4474eb9ffc8865e64cba70daef23832be36b4d63d1d8dfd890101cf", + "sha256:721a67aa9f110d509e2729cb145b79b87fe28d42e253476a524fa781aff41c3c", + "sha256:733a212014f2b44673ed62be53b3d4dd458844cd2008ba107f27a3293e42f43a", + "sha256:737e3cb6bb05f326eb63000663a4dc08dc08cc9827f7634445250c9610e5e717", + "sha256:7bbafb779c3f90edd09fd34733859226785618adee3179d5949dbba2e90f550a", + "sha256:7c01a2bd8abc3b6d33ae60dea26f97e2372e0087a747289bbab0fe67c8ac8925", + "sha256:7d80938e566ac6329daa3b036fdf7bd34488010efcf0a65169a44603878daa4e", + "sha256:8a4bd5dd1c50e81440c712e6f43ac682768690d2dd0307665910a52db2d69175", + "sha256:8dd13a430b046e2921ddf63d992da97968724b41a03e68292f06a2afa11c9d6e", + "sha256:944df9ebc6b283d6591a6b5f4c586d0eb9c6131c915f1b20fb36127ade83720d", + "sha256:9683c40de7e4ddff225068032cd97a6d928e4beddd9c7cf6515325be8ac28036", + "sha256:a50eb3aec893c40554f8f811d3341af266d844e7759f7f7abfcdba2744885ea3", + "sha256:b1795ea62ee9a39c1cff01a2c477b8bd5b1ca95c17d258efbf770b73eb62b2b8", + "sha256:ba1cd02fd914441d916db2972c3657711b2d7843cdd481e16244dee5870579af", + "sha256:c713b660a23f4f8a33ea08a168f9f94d92b0383683e8ae3e9467587b5a8a0eae", + "sha256:ce9b908e99368c14bcdc1e198fc2ffe0cf42191ebfcec5458d10c4335f2abaf6", + "sha256:cf117a9f733e0d8386bc8c454c11b275999c4bf559d742cbb8b60ace1d813f23", + "sha256:d229ab4f2711aeed440037d9007db79d776e79c552ecde23b0b68591fa7ecccf", + "sha256:de7aeaae96ce5d727d2d3f905dfbdbb512c4be1f7ef5178facac0835da63738a", + "sha256:dec8fb0125fe636f9218ec3ce022d8435299beadfee1def82ee75e11bce38ebd", + "sha256:e234dfd385fefaecc640235448d912e35f6a1400bc73be723744e901f2432527", + "sha256:e2e08a11f7e6b49ef09659506ac3bf0484881d6f634c6026c6bcbe3d345ee7c2", + "sha256:e9b3e1ce82b2bf30eeab19df7544d2caf5d7dc06bd7196ee2249a94e2081a5ae", + "sha256:f0b4bbdbaeda9ae84364a26cef6ccc512c44f3131a0b074f8892c5147f2e3bea", + "sha256:f1305d50e805358937b022d455a17127a7ea2eb8eaf7595e0d06b0760f4bcc58", + "sha256:f27bbc3311a3662231cff395d38061683fac5c538f3bab6796ff05511d2cce43", + 
"sha256:f326045442f7d63aa54dc4a18eda358b186af3316ae52619dd606058fb3b4182", + "sha256:f3f833bae26d1046610a08ddb0c968311056d07c8930ab11985e1e38c97cb91e", + "sha256:fc84074575afcc5a2a712e132c0b51541b7434b3099be99f573964ef3b6064a8", + "sha256:ff3688aa99b08ee10ced478b7255ac03865a8b5c0677482056acfe4d4f56e45f" + ], + "markers": "python_version >= '3.6'", + "version": "==1.6.10" + }, + "yarl": { + "hashes": [ + "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba", + "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193", + "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318", + "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee", + "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e", + "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1", + "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a", + "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186", + "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1", + "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50", + "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640", + "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb", + "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8", + "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc", + "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5", + "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58", + "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2", + "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393", + "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24", + "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b", + "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910", + "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c", + "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272", + "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed", + "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1", + "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04", + "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d", + "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5", + "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d", + "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889", + "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae", + "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b", + "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c", + "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576", + "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34", + "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477", + "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990", + "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2", + "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512", + "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069", + 
"sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a", + "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6", + "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0", + "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8", + "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb", + "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa", + "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8", + "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e", + "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e", + "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985", + "sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8", + "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1", + "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5", + "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690", + "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10", + "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789", + "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b", + "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca", + "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e", + "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5", + "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59", + "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9", + "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8", + "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db", + "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde", + "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7", + "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb", + "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3", + "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6", + "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285", + "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb", + "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8", + "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482", + "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd", + "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75", + "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760", + "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782", + "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53", + "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2", + "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1", + "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719", + "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62" + ], + "markers": "python_version >= '3.9'", + "version": "==1.18.3" + }, + "zstandard": { + "hashes": [ + "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", + "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", + 
"sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", + "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", + "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", + "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", + "sha256:1bfe8de1da6d104f15a60d4a8a768288f66aa953bbe00d027398b93fb9680b26", + "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", + "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", + "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", + "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", + "sha256:29a2bc7c1b09b0af938b7a8343174b987ae021705acabcbae560166567f5a8db", + "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", + "sha256:2ef3775758346d9ac6214123887d25c7061c92afe1f2b354f9388e9e4d48acfc", + "sha256:2f146f50723defec2975fb7e388ae3a024eb7151542d1599527ec2aa9cacb152", + "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", + "sha256:32ba3b5ccde2d581b1e6aa952c836a6291e8435d788f656fe5976445865ae045", + "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", + "sha256:379b378ae694ba78cef921581ebd420c938936a153ded602c4fea612b7eaa90d", + "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", + "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", + "sha256:4051e406288b8cdbb993798b9a45c59a4896b6ecee2f875424ec10276a895740", + "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", + "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", + "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", + "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", + "sha256:50a80baba0285386f97ea36239855f6020ce452456605f262b2d33ac35c7770b", + "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", + "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", + "sha256:53ea7cdc96c6eb56e76bb06894bcfb5dfa93b7adcf59d61c6b92674e24e2dd5e", + "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", + "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", + "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", + "sha256:61062387ad820c654b6a6b5f0b94484fa19515e0c5116faf29f41a6bc91ded6e", + "sha256:61f89436cbfede4bc4e91b4397eaa3e2108ebe96d05e93d6ccc95ab5714be512", + "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", + "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", + "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", + "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", + "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", + "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", + "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", + "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", + "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", + "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", + "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", + "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", + "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", + 
"sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", + "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", + "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", + "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", + "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", + "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", + "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", + "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", + "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", + "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", + "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", + "sha256:a05e6d6218461eb1b4771d973728f0133b2a4613a6779995df557f70794fd60f", + "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", + "sha256:a4ae99c57668ca1e78597d8b06d5af837f377f340f4cce993b551b2d7731778d", + "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", + "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", + "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", + "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", + "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", + "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", + "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", + "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", + "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", + "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", + "sha256:b8c0bd73aeac689beacd4e7667d48c299f61b959475cdbb91e7d3d88d27c56b9", + "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", + "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", + "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", + "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", + "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", + "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", + "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", + "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", + "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", + "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", + "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", + "sha256:e2d1a054f8f0a191004675755448d12be47fa9bebbcffa3cdf01db19f2d30a54", + "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", + "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", + "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", + "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", + "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", + "sha256:f83fa6cae3fff8e98691248c9320356971b59678a17f20656a9e59cd32cee6d8", + "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", + "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", + "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", + 
"sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", + "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", + "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69" + ], + "markers": "python_version >= '3.8'", + "version": "==0.23.0" + } + }, + "develop": {} +} diff --git a/osbenchmark/aggregator.py b/osbenchmark/aggregator.py index 016d90f4..b79f8c47 100644 --- a/osbenchmark/aggregator.py +++ b/osbenchmark/aggregator.py @@ -3,12 +3,12 @@ from typing import Any, Dict, List, Union import uuid -from osbenchmark.metrics import FileTestExecutionStore +from osbenchmark.metrics import FileTestExecutionStore, TestExecution from osbenchmark import metrics, workload, config from osbenchmark.utils import io as rio class Aggregator: - def __init__(self, cfg, test_executions_dict, args): + def __init__(self, cfg, test_executions_dict, args) -> None: self.config = cfg self.args = args self.test_executions = test_executions_dict @@ -21,69 +21,72 @@ def __init__(self, cfg, test_executions_dict, args): self.test_procedure_name = None self.loaded_workload = None - def count_iterations_for_each_op(self, test_execution) -> None: - matching_test_procedure = next((tp for tp in self.loaded_workload.test_procedures if tp.name == self.test_procedure_name), None) + def count_iterations_for_each_op(self, test_execution: TestExecution) -> None: + """Count iterations for each operation in the test execution""" workload_params = test_execution.workload_params if test_execution.workload_params else {} - test_execution_id = test_execution.test_execution_id self.accumulated_iterations[test_execution_id] = {} - if matching_test_procedure: - for task in matching_test_procedure.schedule: - task_name = task.name - task_name_iterations = f"{task_name}_iterations" - if task_name_iterations in workload_params: - iterations = int(workload_params[task_name_iterations]) - else: - iterations = task.iterations or 1 - self.accumulated_iterations[test_execution_id][task_name] = iterations - else: - raise ValueError(f"Test procedure '{self.test_procedure_name}' not found in the loaded workload.") + for task in self.loaded_workload.find_test_procedure_or_default(self.test_procedure_name).schedule: + task_name = task.name + task_name_iterations = f"{task_name}_iterations" + iterations = int(workload_params.get(task_name_iterations, task.iterations or 1)) + self.accumulated_iterations[test_execution_id][task_name] = iterations - def accumulate_results(self, test_execution: Any) -> None: - for item in test_execution.results.get("op_metrics", []): - task = item.get("task", "") + def accumulate_results(self, test_execution: TestExecution) -> None: + """Accumulate results from a single test execution""" + for operation_metric in test_execution.results.get("op_metrics", []): + task = operation_metric.get("task", "") self.accumulated_results.setdefault(task, {}) for metric in self.metrics: self.accumulated_results[task].setdefault(metric, []) - self.accumulated_results[task][metric].append(item.get(metric)) + self.accumulated_results[task][metric].append(operation_metric.get(metric)) def aggregate_json_by_key(self, key_path: Union[str, List[str]]) -> Any: - all_jsons = [self.test_store.find_by_test_execution_id(id).results for id in self.test_executions.keys()] - - # retrieve nested value from a dictionary given a key path - def get_nested_value(obj: Dict[str, Any], path: List[str]) -> Any: + """ + Aggregates JSON results across multiple test executions using a specified key path. 
+ Handles nested dictionary structures and calculates averages for numeric values + """ + all_json_results = [self.test_store.find_by_test_execution_id(id).results for id in self.test_executions.keys()] + + def get_nested_value(json_data: Dict[str, Any], path: List[str]) -> Any: + """ + Retrieves a value from a nested dictionary structure using a path of keys. + """ for key in path: - if isinstance(obj, dict): - obj = obj.get(key, {}) - elif isinstance(obj, list) and key.isdigit(): - obj = obj[int(key)] if int(key) < len(obj) else {} + if isinstance(json_data, dict): + json_data = json_data.get(key, {}) + elif isinstance(json_data, list) and key.isdigit(): + json_data = json_data[int(key)] if int(key) < len(json_data) else {} else: return None - return obj + return json_data - def aggregate_helper(objects: List[Any]) -> Any: - if not objects: + def aggregate_json_elements(json_elements: List[Any]) -> Any: + if not json_elements: return None - if all(isinstance(obj, (int, float)) for obj in objects): - avg = sum(objects) / len(objects) - return avg - if all(isinstance(obj, dict) for obj in objects): - keys = set().union(*objects) - return {key: aggregate_helper([obj.get(key) for obj in objects]) for key in keys} - if all(isinstance(obj, list) for obj in objects): - max_length = max(len(obj) for obj in objects) - return [aggregate_helper([obj[i] if i < len(obj) else None for obj in objects]) for i in range(max_length)] - return next((obj for obj in objects if obj is not None), None) + # If all elements are numbers, calculate the average + if all(isinstance(obj, (int, float)) for obj in json_elements): + return sum(json_elements) / len(json_elements) + # If all elements are dictionaries, recursively aggregate their values + if all(isinstance(obj, dict) for obj in json_elements): + keys = set().union(*json_elements) + return {key: aggregate_json_elements([obj.get(key) for obj in json_elements]) for key in keys} + # If all elements are lists, recursively aggregate corresponding elements + if all(isinstance(obj, list) for obj in json_elements): + max_length = max(len(obj) for obj in json_elements) + return [aggregate_json_elements([obj[i] if i < len(obj) else None for obj in json_elements]) for i in range(max_length)] + # If elements are of mixed types, return the first non-None value + return next((obj for obj in json_elements if obj is not None), None) if isinstance(key_path, str): key_path = key_path.split('.') - values = [get_nested_value(json, key_path) for json in all_jsons] - return aggregate_helper(values) + nested_values = [get_nested_value(json_result, key_path) for json_result in all_json_results] + return aggregate_json_elements(nested_values) - def build_aggregated_results(self): - test_exe = self.test_store.find_by_test_execution_id(list(self.test_executions.keys())[0]) + def build_aggregated_results_dict(self) -> Dict[str, Any]: + """Builds a dictionary of aggregated metrics from all test executions""" aggregated_results = { "op_metrics": [], "correctness_metrics": self.aggregate_json_by_key("correctness_metrics"), @@ -147,8 +150,30 @@ def build_aggregated_results(self): aggregated_results["op_metrics"].append(op_metric) - # extract the necessary data from the first test execution, since the configurations should be identical for all test executions + return aggregated_results + + def update_config_object(self, test_execution: TestExecution) -> None: + """ + Updates the configuration object with values from a test execution. 
+ Uses the first test execution as reference since configurations should be identical + """ current_timestamp = self.config.opts("system", "time.start") + self.config.add(config.Scope.applicationOverride, "builder", + "provision_config_instance.names", test_execution.provision_config_instance) + self.config.add(config.Scope.applicationOverride, "system", + "env.name", test_execution.environment_name) + self.config.add(config.Scope.applicationOverride, "system", "time.start", current_timestamp) + self.config.add(config.Scope.applicationOverride, "test_execution", "pipeline", test_execution.pipeline) + self.config.add(config.Scope.applicationOverride, "workload", "params", test_execution.workload_params) + self.config.add(config.Scope.applicationOverride, "builder", + "provision_config_instance.params", test_execution.provision_config_instance_params) + self.config.add(config.Scope.applicationOverride, "builder", "plugin.params", test_execution.plugin_params) + self.config.add(config.Scope.applicationOverride, "workload", "latency.percentiles", test_execution.latency_percentiles) + self.config.add(config.Scope.applicationOverride, "workload", "throughput.percentiles", test_execution.throughput_percentiles) + + def build_aggregated_results(self) -> TestExecution: + test_exe = self.test_store.find_by_test_execution_id(list(self.test_executions.keys())[0]) + aggregated_results = self.build_aggregated_results_dict() if hasattr(self.args, 'results_file') and self.args.results_file != "": normalized_results_file = rio.normalize_path(self.args.results_file, self.cwd) @@ -165,19 +190,7 @@ def build_aggregated_results(self): print("Aggregate test execution ID: ", test_execution_id) - # add values to the configuration object - self.config.add(config.Scope.applicationOverride, "builder", - "provision_config_instance.names", test_exe.provision_config_instance) - self.config.add(config.Scope.applicationOverride, "system", - "env.name", test_exe.environment_name) - self.config.add(config.Scope.applicationOverride, "system", "time.start", current_timestamp) - self.config.add(config.Scope.applicationOverride, "test_execution", "pipeline", test_exe.pipeline) - self.config.add(config.Scope.applicationOverride, "workload", "params", test_exe.workload_params) - self.config.add(config.Scope.applicationOverride, "builder", - "provision_config_instance.params", test_exe.provision_config_instance_params) - self.config.add(config.Scope.applicationOverride, "builder", "plugin.params", test_exe.plugin_params) - self.config.add(config.Scope.applicationOverride, "workload", "latency.percentiles", test_exe.latency_percentiles) - self.config.add(config.Scope.applicationOverride, "workload", "throughput.percentiles", test_exe.throughput_percentiles) + self.update_config_object(test_exe) loaded_workload = workload.load_workload(self.config) test_procedure_object = loaded_workload.find_test_procedure_or_default(self.test_procedure_name) @@ -223,7 +236,7 @@ def calculate_weighted_average(self, task_metrics: Dict[str, List[Any]], task_na return weighted_metrics - def calculate_rsd(self, values: List[Union[int, float]], metric_name: str): + def calculate_rsd(self, values: List[Union[int, float]], metric_name: str) -> Union[float, str]: if not values: raise ValueError(f"Cannot calculate RSD for metric '{metric_name}': empty list of values") if len(values) == 1: diff --git a/osbenchmark/worker_coordinator/worker_coordinator.py b/osbenchmark/worker_coordinator/worker_coordinator.py index d08242a9..73001731 100644 --- 
a/osbenchmark/worker_coordinator/worker_coordinator.py +++ b/osbenchmark/worker_coordinator/worker_coordinator.py @@ -52,6 +52,8 @@ # Messages sent between worker_coordinators # ################################## + + class PrepareBenchmark: """ Initiates preparation steps for a benchmark. The benchmark should only be started after StartBenchmark is sent. @@ -75,6 +77,7 @@ class PrepareWorkload: Initiates preparation of a workload. """ + def __init__(self, cfg, workload): """ :param cfg: Benchmark internal configuration object. @@ -221,22 +224,27 @@ def __init__(self): self.cluster_details = None def receiveMsg_PoisonMessage(self, poisonmsg, sender): - self.logger.error("Main worker_coordinator received a fatal indication from load generator (%s). Shutting down.", poisonmsg.details) + self.logger.error( + "Main worker_coordinator received a fatal indication from load generator (%s). Shutting down.", poisonmsg.details) self.coordinator.close() - self.send(self.start_sender, actor.BenchmarkFailure("Fatal workload or load generator indication", poisonmsg.details)) + self.send(self.start_sender, actor.BenchmarkFailure( + "Fatal workload or load generator indication", poisonmsg.details)) def receiveMsg_BenchmarkFailure(self, msg, sender): - self.logger.error("Main worker_coordinator received a fatal exception from load generator. Shutting down.") + self.logger.error( + "Main worker_coordinator received a fatal exception from load generator. Shutting down.") self.coordinator.close() self.send(self.start_sender, msg) def receiveMsg_BenchmarkCancelled(self, msg, sender): - self.logger.info("Main worker_coordinator received a notification that the benchmark has been cancelled.") + self.logger.info( + "Main worker_coordinator received a notification that the benchmark has been cancelled.") self.coordinator.close() self.send(self.start_sender, msg) def receiveMsg_ActorExitRequest(self, msg, sender): - self.logger.info("Main worker_coordinator received ActorExitRequest and will terminate all load generators.") + self.logger.info( + "Main worker_coordinator received ActorExitRequest and will terminate all load generators.") self.status = "exiting" def receiveMsg_ChildActorExited(self, msg, sender): @@ -246,13 +254,16 @@ def receiveMsg_ChildActorExited(self, msg, sender): if self.status == "exiting": self.logger.info("Worker [%d] has exited.", worker_index) else: - self.logger.error("Worker [%d] has exited prematurely. Aborting benchmark.", worker_index) - self.send(self.start_sender, actor.BenchmarkFailure("Worker [{}] has exited prematurely.".format(worker_index))) + self.logger.error( + "Worker [%d] has exited prematurely. 
Aborting benchmark.", worker_index) + self.send(self.start_sender, actor.BenchmarkFailure( + "Worker [{}] has exited prematurely.".format(worker_index))) else: self.logger.info("A workload preparator has exited.") def receiveUnrecognizedMessage(self, msg, sender): - self.logger.info("Main worker_coordinator received unknown message [%s] (ignoring).", str(msg)) + self.logger.info( + "Main worker_coordinator received unknown message [%s] (ignoring).", str(msg)) @actor.no_retry("worker_coordinator") # pylint: disable=no-value-for-parameter def receiveMsg_PrepareBenchmark(self, msg, sender): @@ -264,7 +275,8 @@ def receiveMsg_PrepareBenchmark(self, msg, sender): def receiveMsg_StartBenchmark(self, msg, sender): self.start_sender = sender self.coordinator.start_benchmark() - self.wakeupAfter(datetime.timedelta(seconds=WorkerCoordinatorActor.WAKEUP_INTERVAL_SECONDS)) + self.wakeupAfter(datetime.timedelta( + seconds=WorkerCoordinatorActor.WAKEUP_INTERVAL_SECONDS)) @actor.no_retry("worker_coordinator") # pylint: disable=no-value-for-parameter def receiveMsg_WorkloadPrepared(self, msg, sender): @@ -273,7 +285,8 @@ def receiveMsg_WorkloadPrepared(self, msg, sender): @actor.no_retry("worker_coordinator") # pylint: disable=no-value-for-parameter def receiveMsg_JoinPointReached(self, msg, sender): - self.coordinator.joinpoint_reached(msg.worker_id, msg.worker_timestamp, msg.task) + self.coordinator.joinpoint_reached( + msg.worker_id, msg.worker_timestamp, msg.task) @actor.no_retry("worker_coordinator") # pylint: disable=no-value-for-parameter def receiveMsg_UpdateSamples(self, msg, sender): @@ -289,13 +302,15 @@ def receiveMsg_WakeupMessage(self, msg, sender): self.post_process_timer = 0 self.coordinator.post_process_samples() self.coordinator.update_progress_message() - self.wakeupAfter(datetime.timedelta(seconds=WorkerCoordinatorActor.WAKEUP_INTERVAL_SECONDS)) + self.wakeupAfter(datetime.timedelta( + seconds=WorkerCoordinatorActor.WAKEUP_INTERVAL_SECONDS)) def create_client(self, host): return self.createActor(Worker, targetActorRequirements=self._requirements(host)) def start_worker(self, worker_coordinator, worker_id, cfg, workload, allocations): - self.send(worker_coordinator, StartWorker(worker_id, cfg, workload, allocations)) + self.send(worker_coordinator, StartWorker( + worker_id, cfg, workload, allocations)) def drive_at(self, worker_coordinator, client_start_timestamp): self.send(worker_coordinator, Drive(client_start_timestamp)) @@ -305,10 +320,12 @@ def complete_current_task(self, worker_coordinator): def on_task_finished(self, metrics, next_task_scheduled_in): if next_task_scheduled_in > 0: - self.wakeupAfter(datetime.timedelta(seconds=next_task_scheduled_in), payload=WorkerCoordinatorActor.RESET_RELATIVE_TIME_MARKER) + self.wakeupAfter(datetime.timedelta(seconds=next_task_scheduled_in), + payload=WorkerCoordinatorActor.RESET_RELATIVE_TIME_MARKER) else: self.coordinator.reset_relative_time() - self.send(self.start_sender, TaskFinished(metrics, next_task_scheduled_in)) + self.send(self.start_sender, TaskFinished( + metrics, next_task_scheduled_in)) def _requirements(self, host): if host == "localhost": @@ -320,7 +337,8 @@ def on_cluster_details_retrieved(self, cluster_details): self.cluster_details = cluster_details def prepare_workload(self, hosts, cfg, workload): - self.logger.info("Starting prepare workload process on hosts [%s]", hosts) + self.logger.info( + "Starting prepare workload process on hosts [%s]", hosts) self.children = [self._create_workload_preparator(h) for h in hosts] 
msg = PrepareWorkload(cfg, workload) for child in self.children: @@ -330,7 +348,8 @@ def _create_workload_preparator(self, host): return self.createActor(WorkloadPreparationActor, targetActorRequirements=self._requirements(host)) def _after_workload_prepared(self): - cluster_version = self.cluster_details["version"] if self.cluster_details else {} + cluster_version = self.cluster_details["version"] if self.cluster_details else { + } for child in self.children: self.send(child, thespian.actors.ActorExitRequest()) self.children = [] @@ -354,7 +373,8 @@ def load_local_config(coordinator_config): "telemetry" ]) # set root path (normally done by the main entry point) - cfg.add(config.Scope.application, "node", "benchmark.root", paths.benchmark_root()) + cfg.add(config.Scope.application, "node", + "benchmark.root", paths.benchmark_root()) return cfg @@ -362,6 +382,7 @@ class TaskExecutionActor(actor.BenchmarkActor): """ This class should be used for long-running tasks, as it ensures they do not block the actor's messaging system """ + def __init__(self): super().__init__() self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=1) @@ -406,10 +427,12 @@ def receiveMsg_WakeupMessage(self, msg, sender): if self.executor_future is not None and self.executor_future.done(): e = self.executor_future.exception(timeout=0) if e: - self.logger.exception("Worker failed. Notifying parent...", exc_info=e) + self.logger.exception( + "Worker failed. Notifying parent...", exc_info=e) # the exception might be user-defined and not be on the load path of the original sender. Hence, it # cannot be deserialized on the receiver so we convert it here to a plain string. - self.send(self.parent, actor.BenchmarkFailure("Error in task executor", str(e))) + self.send(self.parent, actor.BenchmarkFailure( + "Error in task executor", str(e))) else: self.executor_future = None self.send(self.parent, ReadyForWork()) @@ -420,6 +443,7 @@ def receiveMsg_BenchmarkFailure(self, msg, sender): # sent by our no_retry infrastructure; forward to master self.send(self.parent, msg) + class WorkloadPreparationActor(actor.BenchmarkActor): class Status(Enum): INITIALIZING = "initializing" @@ -439,8 +463,10 @@ def __init__(self): self.workload = None def receiveMsg_PoisonMessage(self, poisonmsg, sender): - self.logger.error("Workload Preparator received a fatal indication from a load generator (%s). Shutting down.", poisonmsg.details) - self.send(self.original_sender, actor.BenchmarkFailure("Fatal workload preparation indication", poisonmsg.details)) + self.logger.error( + "Workload Preparator received a fatal indication from a load generator (%s). Shutting down.", poisonmsg.details) + self.send(self.original_sender, actor.BenchmarkFailure( + "Fatal workload preparation indication", poisonmsg.details)) @actor.no_retry("workload preparator") # pylint: disable=no-value-for-parameter def receiveMsg_ActorExitRequest(self, msg, sender): @@ -462,14 +488,16 @@ def receiveMsg_PrepareWorkload(self, msg, sender): tpr = WorkloadProcessorRegistry(self.cfg) self.workload = msg.workload self.logger.info("Preparing workload [%s]", self.workload.name) - self.logger.info("Reloading workload [%s] to ensure plugins are up-to-date.", self.workload.name) + self.logger.info( + "Reloading workload [%s] to ensure plugins are up-to-date.", self.workload.name) # the workload might have been loaded on a different machine (the coordinator machine) so we force a workload # update to ensure we use the latest version of plugins. 
load_workload(self.cfg) load_workload_plugins(self.cfg, self.workload.name, register_workload_processor=tpr.register_workload_processor, - force_update=True) + force_update=True) # we expect on_prepare_workload can take a long time. seed a queue of tasks and delegate to child workers - self.children = [self._create_task_executor() for _ in range(num_cores(self.cfg))] + self.children = [self._create_task_executor() + for _ in range(num_cores(self.cfg))] for processor in tpr.processors: self.processors.put(processor) self._seed_tasks(self.processors.get()) @@ -498,7 +526,8 @@ def receiveMsg_ReadyForWork(self, msg, sender): else: next_task = None new_msg = DoTask(next_task, self.cfg) - self.logger.debug("Workload Preparator sending %s to %s", vars(new_msg), sender) + self.logger.debug( + "Workload Preparator sending %s to %s", vars(new_msg), sender) self.send(sender, new_msg) @actor.no_retry("workload preparator") # pylint: disable=no-value-for-parameter @@ -509,7 +538,7 @@ def receiveMsg_WorkerIdle(self, msg, sender): def num_cores(cfg): return int(cfg.opts("system", "available.cores", mandatory=False, - default_value=multiprocessing.cpu_count())) + default_value=multiprocessing.cpu_count())) class WorkerCoordinator: @@ -555,11 +584,13 @@ def create_os_clients(self): all_hosts = self.config.opts("client", "hosts").all_hosts opensearch = {} for cluster_name, cluster_hosts in all_hosts.items(): - all_client_options = self.config.opts("client", "options").all_client_options + all_client_options = self.config.opts( + "client", "options").all_client_options cluster_client_options = dict(all_client_options[cluster_name]) # Use retries to avoid aborts on long living connections for telemetry devices cluster_client_options["retry-on-timeout"] = True - opensearch[cluster_name] = self.os_client_factory(cluster_hosts, cluster_client_options).create() + opensearch[cluster_name] = self.os_client_factory( + cluster_hosts, cluster_client_options).create() return opensearch def prepare_telemetry(self, opensearch, enable): @@ -571,18 +602,27 @@ def prepare_telemetry(self, opensearch, enable): if enable: devices = [ - telemetry.NodeStats(telemetry_params, opensearch, self.metrics_store), - telemetry.ExternalEnvironmentInfo(os_default, self.metrics_store), - telemetry.ClusterEnvironmentInfo(os_default, self.metrics_store), + telemetry.NodeStats( + telemetry_params, opensearch, self.metrics_store), + telemetry.ExternalEnvironmentInfo( + os_default, self.metrics_store), + telemetry.ClusterEnvironmentInfo( + os_default, self.metrics_store), telemetry.JvmStatsSummary(os_default, self.metrics_store), telemetry.IndexStats(os_default, self.metrics_store), - telemetry.MlBucketProcessingTime(os_default, self.metrics_store), + telemetry.MlBucketProcessingTime( + os_default, self.metrics_store), telemetry.SegmentStats(log_root, os_default), - telemetry.CcrStats(telemetry_params, opensearch, self.metrics_store), - telemetry.RecoveryStats(telemetry_params, opensearch, self.metrics_store), - telemetry.TransformStats(telemetry_params, opensearch, self.metrics_store), - telemetry.SearchableSnapshotsStats(telemetry_params, opensearch, self.metrics_store), - telemetry.SegmentReplicationStats(telemetry_params, opensearch, self.metrics_store) + telemetry.CcrStats( + telemetry_params, opensearch, self.metrics_store), + telemetry.RecoveryStats( + telemetry_params, opensearch, self.metrics_store), + telemetry.TransformStats( + telemetry_params, opensearch, self.metrics_store), + telemetry.SearchableSnapshotsStats( + 
telemetry_params, opensearch, self.metrics_store), + telemetry.SegmentReplicationStats( + telemetry_params, opensearch, self.metrics_store) ] else: devices = [] @@ -594,20 +634,24 @@ def wait_for_rest_api(self, opensearch): if client.wait_for_rest_layer(os_default, max_attempts=40): self.logger.info("REST API is available.") else: - self.logger.error("REST API layer is not yet available. Stopping benchmark.") - raise exceptions.SystemSetupError("OpenSearch REST API layer is not available.") + self.logger.error( + "REST API layer is not yet available. Stopping benchmark.") + raise exceptions.SystemSetupError( + "OpenSearch REST API layer is not available.") def retrieve_cluster_info(self, opensearch): try: return opensearch["default"].info() except BaseException: - self.logger.exception("Could not retrieve cluster info on benchmark start") + self.logger.exception( + "Could not retrieve cluster info on benchmark start") return None def prepare_benchmark(self, t): self.workload = t self.test_procedure = select_test_procedure(self.config, self.workload) - self.quiet = self.config.opts("system", "quiet.mode", mandatory=False, default_value=False) + self.quiet = self.config.opts( + "system", "quiet.mode", mandatory=False, default_value=False) downsample_factor = int(self.config.opts( "results_publishing", "metrics.request.downsample.factor", mandatory=False, default_value=1)) @@ -623,15 +667,20 @@ def prepare_benchmark(self, t): os_clients = self.create_os_clients() - skip_rest_api_check = self.config.opts("builder", "skip.rest.api.check") - uses_static_responses = self.config.opts("client", "options").uses_static_responses + skip_rest_api_check = self.config.opts( + "builder", "skip.rest.api.check") + uses_static_responses = self.config.opts( + "client", "options").uses_static_responses if skip_rest_api_check: - self.logger.info("Skipping REST API check as requested explicitly.") + self.logger.info( + "Skipping REST API check as requested explicitly.") elif uses_static_responses: - self.logger.info("Skipping REST API check as static responses are used.") + self.logger.info( + "Skipping REST API check as static responses are used.") else: self.wait_for_rest_api(os_clients) - self.target.on_cluster_details_retrieved(self.retrieve_cluster_info(os_clients)) + self.target.on_cluster_details_retrieved( + self.retrieve_cluster_info(os_clients)) # Avoid issuing any requests to the target cluster when static responses are enabled. The results # are not useful and attempts to connect to a non-existing cluster just lead to exception traces in logs. 
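The prepare_benchmark changes above keep the existing readiness gate: the REST API check is skipped when it is disabled explicitly or when the client options use static responses, and otherwise the coordinator polls the REST layer (up to 40 attempts) before fetching cluster details. Below is a minimal sketch of that decision flow, assuming a generic polling callable; the names check_cluster_ready and poll_rest_layer are placeholders for illustration, not OSB APIs.

# Sketch only: mirrors the skip / static-responses / poll decision shown in prepare_benchmark.
# poll_rest_layer stands in for client.wait_for_rest_layer(...); it is an assumed callable, not an OSB helper.
import logging
import time

def check_cluster_ready(skip_rest_api_check: bool, uses_static_responses: bool,
                        poll_rest_layer, max_attempts: int = 40) -> bool:
    logger = logging.getLogger(__name__)
    if skip_rest_api_check:
        logger.info("Skipping REST API check as requested explicitly.")
        return True
    if uses_static_responses:
        logger.info("Skipping REST API check as static responses are used.")
        return True
    # Poll until the REST layer answers or the attempts are exhausted.
    for attempt in range(1, max_attempts + 1):
        if poll_rest_layer():
            logger.info("REST API is available after [%d] attempt(s).", attempt)
            return True
        time.sleep(1)
    raise RuntimeError("OpenSearch REST API layer is not available.")

With a stub such as poll_rest_layer=lambda: True the function returns on the first attempt; in the actual coordinator the failing branch raises exceptions.SystemSetupError instead, and a successful check is followed by retrieve_cluster_info to pass cluster details back to the actor.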
@@ -649,7 +698,8 @@ def prepare_benchmark(self, t): self.load_worker_coordinator_hosts.append(host_config) - self.target.prepare_workload([h["host"] for h in self.load_worker_coordinator_hosts], self.config, self.workload) + self.target.prepare_workload( + [h["host"] for h in self.load_worker_coordinator_hosts], self.config, self.workload) def start_benchmark(self): self.logger.info("Benchmark is about to start.") @@ -660,29 +710,34 @@ def start_benchmark(self): self.logger.info("Cluster-level telemetry devices are now attached.") allocator = Allocator(self.test_procedure.schedule) self.allocations = allocator.allocations - self.number_of_steps = len(allocator.join_points) - 1 ## 1 + self.number_of_steps = len(allocator.join_points) - 1 # 1 self.tasks_per_join_point = allocator.tasks_per_joinpoint self.logger.info("Benchmark consists of [%d] steps executed by [%d] clients.", self.number_of_steps, len(self.allocations)) # avoid flooding the log if there are too many clients if allocator.clients < 128: - self.logger.info("Allocation matrix:\n%s", "\n".join([str(a) for a in self.allocations])) + self.logger.info("Allocation matrix:\n%s", "\n".join( + [str(a) for a in self.allocations])) - worker_assignments = calculate_worker_assignments(self.load_worker_coordinator_hosts, allocator.clients) + worker_assignments = calculate_worker_assignments( + self.load_worker_coordinator_hosts, allocator.clients) worker_id = 0 for assignment in worker_assignments: - host = assignment["host"] #localhost - for clients in assignment["workers"]: # cpu cores [0] [1] [2] [3] + host = assignment["host"] # localhost + for clients in assignment["workers"]: # cpu cores [0] [1] [2] [3] # don't assign workers without any clients if len(clients) > 0: - self.logger.info("Allocating worker [%d] on [%s] with [%d] clients.", worker_id, host, len(clients)) + self.logger.info( + "Allocating worker [%d] on [%s] with [%d] clients.", worker_id, host, len(clients)) worker = self.target.create_client(host) client_allocations = ClientAllocations() for client_id in clients: - client_allocations.add(client_id, self.allocations[client_id]) + client_allocations.add( + client_id, self.allocations[client_id]) self.clients_per_worker[client_id] = worker_id - self.target.start_worker(worker, worker_id, self.config, self.workload, client_allocations) + self.target.start_worker( + worker, worker_id, self.config, self.workload, client_allocations) self.workers.append(worker) worker_id += 1 @@ -690,11 +745,13 @@ def start_benchmark(self): def joinpoint_reached(self, worker_id, worker_local_timestamp, task_allocations): self.currently_completed += 1 - self.workers_completed_current_step[worker_id] = (worker_local_timestamp, time.perf_counter()) + self.workers_completed_current_step[worker_id] = ( + worker_local_timestamp, time.perf_counter()) self.logger.info("[%d/%d] workers reached join point [%d/%d].", self.currently_completed, len(self.workers), self.current_step + 1, self.number_of_steps) if self.currently_completed == len(self.workers): - self.logger.info("All workers completed their tasks until join point [%d/%d].", self.current_step + 1, self.number_of_steps) + self.logger.info( + "All workers completed their tasks until join point [%d/%d].", self.current_step + 1, self.number_of_steps) # we can go on to the next step self.currently_completed = 0 self.complete_current_task_sent = False @@ -746,13 +803,15 @@ def move_to_next_task(self, workers_curr_step): start_next_task = time.perf_counter() + waiting_period for worker_id, worker in 
enumerate(self.workers): worker_ended_task_at, master_received_msg_at = workers_curr_step[worker_id] - worker_start_timestamp = worker_ended_task_at + (start_next_task - master_received_msg_at) + worker_start_timestamp = worker_ended_task_at + \ + (start_next_task - master_received_msg_at) self.logger.info("Scheduling next task for worker id [%d] at their timestamp [%f] (master timestamp [%f])", worker_id, worker_start_timestamp, start_next_task) self.target.drive_at(worker, worker_start_timestamp) def may_complete_current_task(self, task_allocations): - joinpoints_completing_parent = [a for a in task_allocations if a.task.preceding_task_completes_parent] + joinpoints_completing_parent = [ + a for a in task_allocations if a.task.preceding_task_completes_parent] # we need to actively send CompleteCurrentTask messages to all remaining workers. if len(joinpoints_completing_parent) > 0 and not self.complete_current_task_sent: # while this list could contain multiple items, it should always be the same task (but multiple @@ -774,14 +833,17 @@ def may_complete_current_task(self, task_allocations): # As we are waiting for other clients to finish, we would send this message over and over again. # Hence we need to memorize whether we have already sent it for the current step. self.complete_current_task_sent = True - self.logger.info("All affected clients have finished. Notifying all clients to complete their current tasks.") + self.logger.info( + "All affected clients have finished. Notifying all clients to complete their current tasks.") for worker in self.workers: self.target.complete_current_task(worker) else: if len(pending_client_ids) > 32: - self.logger.info("[%d] clients did not yet finish.", len(pending_client_ids)) + self.logger.info( + "[%d] clients did not yet finish.", len(pending_client_ids)) else: - self.logger.info("Client id(s) [%s] did not yet finish.", ",".join(map(str, pending_client_ids))) + self.logger.info("Client id(s) [%s] did not yet finish.", ",".join( + map(str, pending_client_ids))) def reset_relative_time(self): self.logger.debug("Resetting relative time of request metrics store.") @@ -804,7 +866,8 @@ def update_samples(self, samples): def update_progress_message(self, task_finished=False): if not self.quiet and self.current_step >= 0: - tasks = ",".join([t.name for t in self.tasks_per_join_point[self.current_step]]) + tasks = ",".join( + [t.name for t in self.tasks_per_join_point[self.current_step]]) if task_finished: total_progress = 1.0 @@ -817,7 +880,8 @@ def update_progress_message(self, task_finished=False): num_clients = max(len(progress_per_client), 1) total_progress = sum(progress_per_client) / num_clients - self.progress_results_publisher.print("Running %s" % tasks, "[%3d%% done]" % (round(total_progress * 100))) + self.progress_results_publisher.print( + "Running %s" % tasks, "[%3d%% done]" % (round(total_progress * 100))) if task_finished: self.progress_results_publisher.finish() @@ -904,7 +968,8 @@ def __call__(self, raw_samples): relative_time=sample.relative_time, meta_data=meta_data) self.metrics_store.put_value_cluster_level(name="client_processing_time", - value=convert.seconds_to_ms(sample.client_processing_time), + value=convert.seconds_to_ms( + sample.client_processing_time), unit="ms", task=sample.task.name, operation=sample.operation_name, operation_type=sample.operation_type, sample_type=sample.sample_type, absolute_time=sample.absolute_time, @@ -924,11 +989,13 @@ def __call__(self, raw_samples): relative_time=timing.relative_time, 
meta_data=meta_data) end = time.perf_counter() - self.logger.debug("Storing latency and service time took [%f] seconds.", (end - start)) + self.logger.debug( + "Storing latency and service time took [%f] seconds.", (end - start)) start = end aggregates = self.throughput_calculator.calculate(raw_samples) end = time.perf_counter() - self.logger.debug("Calculating throughput took [%f] seconds.", (end - start)) + self.logger.debug( + "Calculating throughput took [%f] seconds.", (end - start)) start = end for task, samples in aggregates.items(): meta_data = self.merge( @@ -943,7 +1010,8 @@ def __call__(self, raw_samples): sample_type=sample_type, absolute_time=absolute_time, relative_time=relative_time, meta_data=meta_data) end = time.perf_counter() - self.logger.debug("Storing throughput took [%f] seconds.", (end - start)) + self.logger.debug( + "Storing throughput took [%f] seconds.", (end - start)) start = end # this will be a noop for the in-memory metrics store. If we use an ES metrics store however, this will ensure that we already send # the data and also clear the in-memory buffer. This allows users to see data already while running the benchmark. In cases where @@ -953,7 +1021,8 @@ def __call__(self, raw_samples): # no need for frequent refreshes. self.metrics_store.flush(refresh=False) end = time.perf_counter() - self.logger.debug("Flushing the metrics store took [%f] seconds.", (end - start)) + self.logger.debug( + "Flushing the metrics store took [%f] seconds.", (end - start)) self.logger.debug("Postprocessing [%d] raw samples (downsampled to [%d] samples) took [%f] seconds in total.", len(raw_samples), final_sample_count, (end - total_start)) @@ -1011,7 +1080,8 @@ def calculate_worker_assignments(host_configs, client_count): return assignments -ClientAllocation = collections.namedtuple("ClientAllocation", ["client_id", "task"]) +ClientAllocation = collections.namedtuple( + "ClientAllocation", ["client_id", "task"]) class ClientAllocations: @@ -1032,7 +1102,8 @@ def tasks(self, task_index, remove_empty=True): for allocation in self.allocations: tasks_at_index = allocation["tasks"][task_index] if remove_empty and tasks_at_index is not None: - current_tasks.append(ClientAllocation(allocation["client_id"], tasks_at_index)) + current_tasks.append(ClientAllocation( + allocation["client_id"], tasks_at_index)) return current_tasks @@ -1074,7 +1145,8 @@ def receiveMsg_StartWorker(self, msg, sender): self.worker_id = msg.worker_id self.config = load_local_config(msg.config) self.on_error = self.config.opts("worker_coordinator", "on.error") - self.sample_queue_size = int(self.config.opts("results_publishing", "sample.queue.size", mandatory=False, default_value=1 << 20)) + self.sample_queue_size = int(self.config.opts( + "results_publishing", "sample.queue.size", mandatory=False, default_value=1 << 20)) self.workload = msg.workload workload.set_absolute_data_path(self.config, self.workload) self.client_allocations = msg.client_allocations @@ -1085,12 +1157,14 @@ def receiveMsg_StartWorker(self, msg, sender): self.wakeup_interval = 0.5 runner.register_default_runners() if self.workload.has_plugins: - workload.load_workload_plugins(self.config, self.workload.name, runner.register_runner, scheduler.register_scheduler) + workload.load_workload_plugins( + self.config, self.workload.name, runner.register_runner, scheduler.register_scheduler) self.drive() @actor.no_retry("worker") # pylint: disable=no-value-for-parameter def receiveMsg_Drive(self, msg, sender): - sleep_time = 
datetime.timedelta(seconds=msg.client_start_timestamp - time.perf_counter()) + sleep_time = datetime.timedelta( + seconds=msg.client_start_timestamp - time.perf_counter()) self.logger.info("Worker[%d] is continuing its work at task index [%d] on [%f], that is in [%s].", self.worker_id, self.current_task_index, msg.client_start_timestamp, sleep_time) self.start_driving = True @@ -1127,9 +1201,11 @@ def receiveMsg_WakeupMessage(self, msg, sender): str(self.worker_id), exc_info=e) # the exception might be user-defined and not be on the load path of the master worker_coordinator. Hence, it cannot be # deserialized on the receiver so we convert it here to a plain string. - self.send(self.master, actor.BenchmarkFailure("Error in load generator [{}]".format(self.worker_id), str(e))) + self.send(self.master, actor.BenchmarkFailure( + "Error in load generator [{}]".format(self.worker_id), str(e))) else: - self.logger.info("Worker[%s] is ready for the next task.", str(self.worker_id)) + self.logger.info( + "Worker[%s] is ready for the next task.", str(self.worker_id)) self.executor_future = None self.drive() else: @@ -1143,22 +1219,27 @@ def receiveMsg_WakeupMessage(self, msg, sender): self.logger.debug("Worker[%s] is executing [%s] (dependent eternal task).", str(self.worker_id), most_recent_sample.task) else: - self.logger.debug("Worker[%s] is executing (no samples).", str(self.worker_id)) - self.wakeupAfter(datetime.timedelta(seconds=self.wakeup_interval)) + self.logger.debug( + "Worker[%s] is executing (no samples).", str(self.worker_id)) + self.wakeupAfter(datetime.timedelta( + seconds=self.wakeup_interval)) def receiveMsg_ActorExitRequest(self, msg, sender): - self.logger.info("Worker[%s] has received ActorExitRequest.", str(self.worker_id)) + self.logger.info( + "Worker[%s] has received ActorExitRequest.", str(self.worker_id)) if self.executor_future is not None and self.executor_future.running(): self.cancel.set() self.pool.shutdown() - self.logger.info("Worker[%s] is exiting due to ActorExitRequest.", str(self.worker_id)) + self.logger.info( + "Worker[%s] is exiting due to ActorExitRequest.", str(self.worker_id)) def receiveMsg_BenchmarkFailure(self, msg, sender): # sent by our no_retry infrastructure; forward to master self.send(self.master, msg) def receiveUnrecognizedMessage(self, msg, sender): - self.logger.info("Worker[%d] received unknown message [%s] (ignoring).", self.worker_id, str(msg)) + self.logger.info( + "Worker[%d] received unknown message [%s] (ignoring).", self.worker_id, str(msg)) def drive(self): task_allocations = self.current_tasks_and_advance() @@ -1167,7 +1248,8 @@ def drive(self): task_allocations = self.current_tasks_and_advance() if self.at_joinpoint(): - self.logger.info("Worker[%d] reached join point at index [%d].", self.worker_id, self.current_task_index) + self.logger.info( + "Worker[%d] reached join point at index [%d].", self.worker_id, self.current_task_index) # clients that don't execute tasks don't need to care about waiting if self.executor_future is not None: self.executor_future.result() @@ -1176,7 +1258,8 @@ def drive(self): self.complete.clear() self.executor_future = None self.sampler = None - self.send(self.master, JoinPointReached(self.worker_id, task_allocations)) + self.send(self.master, JoinPointReached( + self.worker_id, task_allocations)) else: # There may be a situation where there are more (parallel) tasks than workers. 
If we were asked to complete all tasks, we not # only need to complete actively running tasks but actually all scheduled tasks until we reach the next join point. @@ -1184,13 +1267,16 @@ def drive(self): self.logger.info("Worker[%d] skips tasks at index [%d] because it has been asked to complete all " "tasks until next join point.", self.worker_id, self.current_task_index) else: - self.logger.info("Worker[%d] is executing tasks at index [%d].", self.worker_id, self.current_task_index) - self.sampler = Sampler(start_timestamp=time.perf_counter(), buffer_size=self.sample_queue_size) + self.logger.info( + "Worker[%d] is executing tasks at index [%d].", self.worker_id, self.current_task_index) + self.sampler = Sampler( + start_timestamp=time.perf_counter(), buffer_size=self.sample_queue_size) executor = AsyncIoAdapter(self.config, self.workload, task_allocations, self.sampler, self.cancel, self.complete, self.on_error) self.executor_future = self.pool.submit(executor) - self.wakeupAfter(datetime.timedelta(seconds=self.wakeup_interval)) + self.wakeupAfter(datetime.timedelta( + seconds=self.wakeup_interval)) def at_joinpoint(self): return self.client_allocations.is_joinpoint(self.current_task_index) @@ -1199,7 +1285,8 @@ def current_tasks_and_advance(self): self.current_task_index = self.next_task_index current = self.client_allocations.tasks(self.current_task_index) self.next_task_index += 1 - self.logger.info("Worker[%d] is at task index [%d].", self.worker_id, self.current_task_index) + self.logger.info("Worker[%d] is at task index [%d].", + self.worker_id, self.current_task_index) return current def send_samples(self): @@ -1230,7 +1317,8 @@ def add(self, task, client_id, sample_type, meta_data, absolute_time, request_st latency, service_time, client_processing_time, processing_time, throughput, ops, ops_unit, time_period, percent_completed, dependent_timing)) except queue.Full: - self.logger.warning("Dropping sample for [%s] due to a full sampling queue.", task.operation.name) + self.logger.warning( + "Dropping sample for [%s] due to a full sampling queue.", task.operation.name) @property def samples(self): @@ -1289,7 +1377,8 @@ def dependent_timings(self): if self._dependent_timing: for t in self._dependent_timing: yield Sample(self.client_id, t["absolute_time"], t["request_start"], self.task_start, self.task, - self.sample_type, self.request_meta_data, 0, t["service_time"], 0, 0, 0, self.total_ops, + self.sample_type, self.request_meta_data, 0, t[ + "service_time"], 0, 0, 0, self.total_ops, self.total_ops_unit, self.time_period, self.percent_completed, None, t["operation"], t["operation-type"]) @@ -1301,7 +1390,8 @@ def __repr__(self, *args, **kwargs): def select_test_procedure(config, t): test_procedure_name = config.opts("workload", "test_procedure.name") - selected_test_procedure = t.find_test_procedure_or_default(test_procedure_name) + selected_test_procedure = t.find_test_procedure_or_default( + test_procedure_name) if not selected_test_procedure: raise exceptions.SystemSetupError("Unknown test_procedure [%s] for workload [%s]. You can list the available workloads and their " @@ -1314,6 +1404,7 @@ class TaskStats: """ Stores per task numbers needed for throughput calculation in between multiple calculations. 
""" + def __init__(self, bucket_interval, sample_type, start_time): self.unprocessed = [] self.total_count = 0 @@ -1336,7 +1427,8 @@ def maybe_update_sample_type(self, current_sample_type): self.has_samples_in_sample_type = False def update_interval(self, absolute_sample_time): - self.interval = max(absolute_sample_time - self.start_time, self.interval) + self.interval = max(absolute_sample_time - + self.start_time, self.interval) def can_calculate_throughput(self): return self.interval > 0 and self.interval >= self.bucket @@ -1388,7 +1480,8 @@ def calculate(self, samples, bucket_interval_secs=1): # only transform the values into the expected structure. first_sample = current_samples[0] if first_sample.throughput is None: - task_throughput = self.calculate_task_throughput(task, current_samples, bucket_interval_secs) + task_throughput = self.calculate_task_throughput( + task, current_samples, bucket_interval_secs) else: task_throughput = self.map_task_throughput(current_samples) global_throughput[task].extend(task_throughput) @@ -1464,9 +1557,12 @@ def __init__(self, cfg, workload, task_allocations, sampler, cancel, complete, a self.cancel = cancel self.complete = complete self.abort_on_error = abort_on_error - self.profiling_enabled = self.cfg.opts("worker_coordinator", "profiling") - self.assertions_enabled = self.cfg.opts("worker_coordinator", "assertions") - self.debug_event_loop = self.cfg.opts("system", "async.debug", mandatory=False, default_value=False) + self.profiling_enabled = self.cfg.opts( + "worker_coordinator", "profiling") + self.assertions_enabled = self.cfg.opts( + "worker_coordinator", "assertions") + self.debug_event_loop = self.cfg.opts( + "system", "async.debug", mandatory=False, default_value=False) self.logger = logging.getLogger(__name__) def __call__(self, *args, **kwargs): @@ -1492,16 +1588,18 @@ async def run(self): def os_clients(all_hosts, all_client_options): opensearch = {} for cluster_name, cluster_hosts in all_hosts.items(): - opensearch[cluster_name] = client.OsClientFactory(cluster_hosts, all_client_options[cluster_name]).create_async() + opensearch[cluster_name] = client.OsClientFactory( + cluster_hosts, all_client_options[cluster_name]).create_async() return opensearch # Properly size the internal connection pool to match the number of expected clients but allow the user # to override it if needed. client_count = len(self.task_allocations) opensearch = os_clients(self.cfg.opts("client", "hosts").all_hosts, - self.cfg.opts("client", "options").with_max_connections(client_count)) + self.cfg.opts("client", "options").with_max_connections(client_count)) - self.logger.info("Task assertions enabled: %s", str(self.assertions_enabled)) + self.logger.info("Task assertions enabled: %s", + str(self.assertions_enabled)) runner.enable_assertions(self.assertions_enabled) aws = [] @@ -1510,7 +1608,8 @@ def os_clients(all_hosts, all_client_options): for client_id, task_allocation in self.task_allocations: task = task_allocation.task if task not in params_per_task: - param_source = workload.operation_parameters(self.workload, task) + param_source = workload.operation_parameters( + self.workload, task) params_per_task[task] = param_source # We cannot use the global client index here because we need to support parallel execution of tasks # with multiple clients. 
Consider the following scenario: @@ -1524,21 +1623,25 @@ def os_clients(all_hosts, all_client_options): async_executor = AsyncExecutor( client_id, task, schedule, opensearch, self.sampler, self.cancel, self.complete, task.error_behavior(self.abort_on_error), self.cfg) - final_executor = AsyncProfiler(async_executor) if self.profiling_enabled else async_executor + final_executor = AsyncProfiler( + async_executor) if self.profiling_enabled else async_executor aws.append(final_executor()) run_start = time.perf_counter() try: _ = await asyncio.gather(*aws) finally: run_end = time.perf_counter() - self.logger.info("Total run duration: %f seconds.", (run_end - run_start)) + self.logger.info("Total run duration: %f seconds.", + (run_end - run_start)) await asyncio.get_event_loop().shutdown_asyncgens() shutdown_asyncgens_end = time.perf_counter() - self.logger.info("Total time to shutdown asyncgens: %f seconds.", (shutdown_asyncgens_end - run_end)) + self.logger.info( + "Total time to shutdown asyncgens: %f seconds.", (shutdown_asyncgens_end - run_end)) for s in opensearch.values(): await s.transport.close() transport_close_end = time.perf_counter() - self.logger.info("Total time to close transports: %f seconds.", (shutdown_asyncgens_end - transport_close_end)) + self.logger.info("Total time to close transports: %f seconds.", + (shutdown_asyncgens_end - transport_close_end)) class AsyncProfiler: @@ -1603,18 +1706,21 @@ async def __call__(self, *args, **kwargs): task_completes_parent = self.task.completes_parent total_start = time.perf_counter() # lazily initialize the schedule - self.logger.debug("Initializing schedule for client id [%s].", self.client_id) + self.logger.debug( + "Initializing schedule for client id [%s].", self.client_id) schedule = self.schedule_handle() self.schedule_handle.start() rampup_wait_time = self.schedule_handle.ramp_up_wait_time if rampup_wait_time: - self.logger.info("client id [%s] waiting [%.2f]s for ramp-up.", self.client_id, rampup_wait_time) + self.logger.info( + "client id [%s] waiting [%.2f]s for ramp-up.", self.client_id, rampup_wait_time) await asyncio.sleep(rampup_wait_time) - + if rampup_wait_time: console.println(f" Client id {self.client_id} is running now.") - self.logger.debug("Entering main loop for client id [%s].", self.client_id) + self.logger.debug( + "Entering main loop for client id [%s].", self.client_id) # noinspection PyBroadException try: async for expected_scheduled_time, sample_type, percent_completed, runner, params in schedule: @@ -1637,8 +1743,9 @@ async def __call__(self, *args, **kwargs): if params: if params.get("operation-type") == "vector-search": available_cores = int(self.cfg.opts("system", "available.cores", mandatory=False, - default_value=multiprocessing.cpu_count())) - params.update({"num_clients": self.task.clients, "num_cores": available_cores}) + default_value=multiprocessing.cpu_count())) + params.update( + {"num_clients": self.task.clients, "num_cores": available_cores}) total_ops, total_ops_unit, request_meta_data = await execute_single(runner, self.opensearch, params, self.on_error) request_start = request_context.request_start @@ -1648,10 +1755,12 @@ async def __call__(self, *args, **kwargs): processing_end = time.perf_counter() service_time = request_end - request_start - client_processing_time = (client_request_end - client_request_start) - service_time + client_processing_time = ( + client_request_end - client_request_start) - service_time processing_time = processing_end - processing_start time_period = request_end 
- total_start - self.schedule_handle.after_request(processing_end, total_ops, total_ops_unit, request_meta_data) + self.schedule_handle.after_request( + processing_end, total_ops, total_ops_unit, request_meta_data) # Allow runners to override the throughput calculation in very specific circumstances. Usually, Benchmark # assumes that throughput is the "amount of work" (determined by the "weight") per unit of time # (determined by the elapsed time period). However, in certain cases (e.g. shard recovery or other @@ -1664,7 +1773,8 @@ async def __call__(self, *args, **kwargs): # throughput = request_meta_data.pop("throughput", None) # Do not calculate latency separately when we run unthrottled. This metric is just confusing then. - latency = request_end - absolute_expected_schedule_time if throughput_throttled else service_time + latency = request_end - \ + absolute_expected_schedule_time if throughput_throttled else service_time # If this task completes the parent task we should *not* check for completion by another client but # instead continue until our own runner has completed. We need to do this because the current # worker (process) could run multiple clients that execute the same task. We do not want all clients to @@ -1688,11 +1798,13 @@ async def __call__(self, *args, **kwargs): time_period, progress, request_meta_data.pop("dependent_timing", None)) if completed: - self.logger.info("Task [%s] is considered completed due to external event.", self.task) + self.logger.info( + "Task [%s] is considered completed due to external event.", self.task) break except BaseException as e: self.logger.exception("Could not execute schedule") - raise exceptions.BenchmarkError(f"Cannot run task [{self.task}]: {e}") from None + raise exceptions.BenchmarkError( + f"Cannot run task [{self.task}]: {e}") from None finally: # Actively set it if this task completes its parent if task_completes_parent: @@ -1758,14 +1870,17 @@ async def execute_single(runner, opensearch, params, on_error): request_meta_data["error-description"] = error_description except KeyError as e: request_context_holder.on_client_request_end() - logging.getLogger(__name__).exception("Cannot execute runner [%s]; most likely due to missing parameters.", str(runner)) - msg = "Cannot execute [%s]. Provided parameters are: %s. Error: [%s]." % (str(runner), list(params.keys()), str(e)) + logging.getLogger(__name__).exception( + "Cannot execute runner [%s]; most likely due to missing parameters.", str(runner)) + msg = "Cannot execute [%s]. Provided parameters are: %s. Error: [%s]." % ( + str(runner), list(params.keys()), str(e)) console.error(msg) raise exceptions.SystemSetupError(msg) if not request_meta_data["success"]: if on_error == "abort" or fatal_error: - msg = "Request returned an error. Error type: %s" % request_meta_data.get("error-type", "Unknown") + msg = "Request returned an error. 
Error type: %s" % request_meta_data.get( + "error-type", "Unknown") description = request_meta_data.get("error-description") if description: msg += ", Description: %s" % description @@ -1774,7 +1889,8 @@ async def execute_single(runner, opensearch, params, on_error): if 'error-description' in request_meta_data: try: - error_metadata = json.loads(request_meta_data["error-description"]) + error_metadata = json.loads( + request_meta_data["error-description"]) # parse error-description metadata opensearch_operation_error = parse_error(error_metadata) console.error(opensearch_operation_error.get_error_message()) @@ -1782,7 +1898,8 @@ async def execute_single(runner, opensearch, params, on_error): # error-description is not a valid json so we just print it console.error(request_meta_data["error-description"]) - logging.getLogger(__name__).error(request_meta_data["error-description"]) + logging.getLogger(__name__).error( + request_meta_data["error-description"]) return total_ops, total_ops_unit, request_meta_data @@ -1799,7 +1916,8 @@ def __init__(self, id, clients_executing_completing_task=None): clients_executing_completing_task = [] self.id = id self.clients_executing_completing_task = clients_executing_completing_task - self.num_clients_executing_completing_task = len(clients_executing_completing_task) + self.num_clients_executing_completing_task = len( + clients_executing_completing_task) self.preceding_task_completes_parent = self.num_clients_executing_completing_task > 0 def __hash__(self): @@ -1871,14 +1989,15 @@ def allocations(self): # more tasks than actually available clients) physical_client_index = client_index % max_clients if sub_task.completes_parent: - clients_executing_completing_task.append(physical_client_index) + clients_executing_completing_task.append( + physical_client_index) ta = TaskAllocation(task=sub_task, client_index_in_task=client_index - start_client_index, global_client_index=client_index, # if task represents a parallel structure this is the total number of clients # executing sub-tasks concurrently. 
total_clients=task.clients) - + allocations[physical_client_index].append(ta) start_client_index += sub_task.clients @@ -1891,7 +2010,8 @@ def allocations(self): allocations[client_index].append(None) # let all clients join after each task, then we go on - next_join_point = JoinPoint(join_point_id, clients_executing_completing_task) + next_join_point = JoinPoint( + join_point_id, clients_executing_completing_task) for client_index in range(max_clients): allocations[client_index].append(next_join_point) join_point_id += 1 @@ -1979,7 +2099,8 @@ def schedule_for(task_allocation, parameter_source): params_for_op = parameter_source.partition(client_index, task.clients) if hasattr(sched, "parameter_source"): if client_index == 0: - logger.debug("Setting parameter source [%s] for scheduler [%s]", params_for_op, sched) + logger.debug( + "Setting parameter source [%s] for scheduler [%s]", params_for_op, sched) sched.parameter_source = params_for_op if requires_time_period_schedule(task, runner_for_op, params_for_op): @@ -2005,9 +2126,11 @@ def schedule_for(task_allocation, parameter_source): if client_index == 0: if loop_control.infinite: - logger.info("Parameter source will determine when the schedule for [%s] terminates.", task.name) + logger.info( + "Parameter source will determine when the schedule for [%s] terminates.", task.name) else: - logger.info("%s schedule will determine when the schedule for [%s] terminates.", str(loop_control), task.name) + logger.info("%s schedule will determine when the schedule for [%s] terminates.", str( + loop_control), task.name) return ScheduleHandle(task_allocation, sched, loop_control, runner_for_op, params_for_op) @@ -2043,11 +2166,11 @@ def __init__(self, task_allocation, sched, task_progress_control, runner, params self.runner = runner self.params = params # TODO: Can we offload the parameter source execution to a different thread / process? Is this too heavy-weight? - #from concurrent.futures import ThreadPoolExecutor - #import asyncio - #self.io_pool_exc = ThreadPoolExecutor(max_workers=1) - #self.loop = asyncio.get_event_loop() - + # from concurrent.futures import ThreadPoolExecutor + # import asyncio + # self.io_pool_exc = ThreadPoolExecutor(max_workers=1) + # self.loop = asyncio.get_event_loop() + @property def ramp_up_wait_time(self): """ @@ -2058,7 +2181,7 @@ def ramp_up_wait_time(self): return ramp_up_time_period * (self.task_allocation.global_client_index / self.task_allocation.total_clients) else: return 0 - + def start(self): self.task_progress_control.start() @@ -2071,13 +2194,14 @@ def after_request(self, now, weight, unit, request_meta_data): async def __call__(self): next_scheduled = 0 if self.task_progress_control.infinite: - param_source_knows_progress = hasattr(self.params, "percent_completed") + param_source_knows_progress = hasattr( + self.params, "percent_completed") while True: try: next_scheduled = self.sched.next(next_scheduled) # does not contribute at all to completion. Hence, we cannot define completion. 
percent_completed = self.params.percent_completed if param_source_knows_progress else None - #current_params = await self.loop.run_in_executor(self.io_pool_exc, self.params.params) + # current_params = await self.loop.run_in_executor(self.io_pool_exc, self.params.params) yield (next_scheduled, self.task_progress_control.sample_type, percent_completed, self.runner, self.params.params()) self.task_progress_control.next() @@ -2087,7 +2211,7 @@ async def __call__(self): while not self.task_progress_control.completed: try: next_scheduled = self.sched.next(next_scheduled) - #current_params = await self.loop.run_in_executor(self.io_pool_exc, self.params.params) + # current_params = await self.loop.run_in_executor(self.io_pool_exc, self.params.params) yield (next_scheduled, self.task_progress_control.sample_type, self.task_progress_control.percent_completed, @@ -2147,7 +2271,8 @@ def __init__(self, warmup_iterations, iterations): if warmup_iterations is not None and iterations is not None: self._total_iterations = self._warmup_iterations + self._iterations if self._total_iterations == 0: - raise exceptions.BenchmarkAssertionError("Operation must run at least for one iteration.") + raise exceptions.BenchmarkAssertionError( + "Operation must run at least for one iteration.") else: self._total_iterations = None self._it = None diff --git a/osbenchmark/workload/loader.py b/osbenchmark/workload/loader.py index 8905c6fc..2bd7bf39 100644 --- a/osbenchmark/workload/loader.py +++ b/osbenchmark/workload/loader.py @@ -13,7 +13,7 @@ # not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -76,10 +76,12 @@ def on_prepare_workload(self, workload, data_root_dir): class WorkloadProcessorRegistry: def __init__(self, cfg): - self.required_processors = [TaskFilterWorkloadProcessor(cfg), TestModeWorkloadProcessor(cfg), QueryRandomizerWorkloadProcessor(cfg)] + self.required_processors = [TaskFilterWorkloadProcessor( + cfg), TestModeWorkloadProcessor(cfg), QueryRandomizerWorkloadProcessor(cfg)] self.workload_processors = [] self.offline = cfg.opts("system", "offline.mode") - self.test_mode = cfg.opts("workload", "test.mode.enabled", mandatory=False, default_value=False) + self.test_mode = cfg.opts( + "workload", "test.mode.enabled", mandatory=False, default_value=False) self.base_config = cfg self.custom_configuration = False @@ -99,7 +101,8 @@ def register_workload_processor(self, processor): @property def processors(self): if not self.custom_configuration: - self.register_workload_processor(DefaultWorkloadPreparator(self.base_config)) + self.register_workload_processor( + DefaultWorkloadPreparator(self.base_config)) return [*self.required_processors, *self.workload_processors] @@ -118,7 +121,8 @@ def workloads(cfg): def list_workloads(cfg): available_workloads = workloads(cfg) - only_auto_generated_test_procedures = all(t.default_test_procedure.auto_generated for t in available_workloads) + only_auto_generated_test_procedures = all( + t.default_test_procedure.auto_generated for t in available_workloads) data = [] for t in available_workloads: @@ -130,7 +134,8 @@ def list_workloads(cfg): line.append(",".join(map(str, t.test_procedures))) data.append(line) - headers = ["Name", "Description", "Documents", "Compressed Size", "Uncompressed Size"] + headers = 
["Name", "Description", "Documents", + "Compressed Size", "Uncompressed Size"] if not only_auto_generated_test_procedures: headers.append("Default TestProcedure") headers.append("All TestProcedures") @@ -160,9 +165,11 @@ def test_procedure_info(c): console.println("") for num, task in enumerate(c.schedule, start=1): if task.nested: - console.println(format_task(task, suffix=":", num="{}. ".format(num))) + console.println(format_task( + task, suffix=":", num="{}. ".format(num))) for leaf_num, leaf_task in enumerate(task, start=1): - console.println(format_task(leaf_task, indent="\t", num="{}.{} ".format(num, leaf_num))) + console.println(format_task( + leaf_task, indent="\t", num="{}.{} ".format(num, leaf_num))) else: console.println(format_task(task, num="{}. ".format(num))) @@ -170,9 +177,12 @@ def test_procedure_info(c): console.println("Showing details for workload [{}]:\n".format(t.name)) console.println("* Description: {}".format(t.description)) if t.number_of_documents: - console.println("* Documents: {}".format(convert.number_to_human_string(t.number_of_documents))) - console.println("* Compressed Size: {}".format(convert.bytes_to_human_string(t.compressed_size_in_bytes))) - console.println("* Uncompressed Size: {}".format(convert.bytes_to_human_string(t.uncompressed_size_in_bytes))) + console.println( + "* Documents: {}".format(convert.number_to_human_string(t.number_of_documents))) + console.println( + "* Compressed Size: {}".format(convert.bytes_to_human_string(t.compressed_size_in_bytes))) + console.println("* Uncompressed Size: {}".format( + convert.bytes_to_human_string(t.uncompressed_size_in_bytes))) console.println("") if t.selected_test_procedure: @@ -199,28 +209,32 @@ def _load_single_workload(cfg, workload_repository, workload_name): try: workload_dir = workload_repository.workload_dir(workload_name) reader = WorkloadFileReader(cfg) - current_workload = reader.read(workload_name, workload_repository.workload_file(workload_name), workload_dir) + current_workload = reader.read( + workload_name, workload_repository.workload_file(workload_name), workload_dir) tpr = WorkloadProcessorRegistry(cfg) - has_plugins = load_workload_plugins(cfg, workload_name, register_workload_processor=tpr.register_workload_processor) + has_plugins = load_workload_plugins( + cfg, workload_name, register_workload_processor=tpr.register_workload_processor) current_workload.has_plugins = has_plugins for processor in tpr.processors: processor.on_after_load_workload(current_workload) return current_workload except FileNotFoundError as e: - logging.getLogger(__name__).exception("Cannot load workload [%s]", workload_name) + logging.getLogger(__name__).exception( + "Cannot load workload [%s]", workload_name) raise exceptions.SystemSetupError(f"Cannot load workload [{workload_name}]. " f"List the available workloads with [{PROGRAM_NAME} list workloads].") from e except BaseException: - logging.getLogger(__name__).exception("Cannot load workload [%s]", workload_name) + logging.getLogger(__name__).exception( + "Cannot load workload [%s]", workload_name) raise def load_workload_plugins(cfg, - workload_name, - register_runner=None, - register_scheduler=None, - register_workload_processor=None, - force_update=False): + workload_name, + register_runner=None, + register_scheduler=None, + register_workload_processor=None, + force_update=False): """ Loads plugins that are defined for the current workload (as specified by the configuration). 
@@ -235,8 +249,10 @@ def load_workload_plugins(cfg, """ repo = workload_repo(cfg, fetch=force_update, update=force_update) workload_plugin_path = repo.workload_dir(workload_name) - logging.getLogger(__name__).debug("Invoking plugin_reader with name [%s] resolved to path [%s]", workload_name, workload_plugin_path) - plugin_reader = WorkloadPluginReader(workload_plugin_path, register_runner, register_scheduler, register_workload_processor) + logging.getLogger(__name__).debug( + "Invoking plugin_reader with name [%s] resolved to path [%s]", workload_name, workload_plugin_path) + plugin_reader = WorkloadPluginReader( + workload_plugin_path, register_runner, register_scheduler, register_workload_processor) if plugin_reader.can_load(): plugin_reader.load() @@ -268,9 +284,11 @@ def first_existing(root_dirs, f): for document_set in corpus.documents: # At this point we can assume that the file is available locally. Check which path exists and set it. if document_set.document_archive: - document_set.document_archive = first_existing(data_root, document_set.document_archive) + document_set.document_archive = first_existing( + data_root, document_set.document_archive) if document_set.document_file: - document_set.document_file = first_existing(data_root, document_set.document_file) + document_set.document_file = first_existing( + data_root, document_set.document_file) def is_simple_workload_mode(cfg): @@ -301,7 +319,8 @@ def data_dir(cfg, workload_name, corpus_name): :param corpus_name: Name of the current corpus. :return: A list containing either one or two elements. Each element contains a path to a directory which may contain document files. """ - corpus_dir = os.path.join(cfg.opts("benchmarks", "local.dataset.cache"), corpus_name) + corpus_dir = os.path.join( + cfg.opts("benchmarks", "local.dataset.cache"), corpus_name) if is_simple_workload_mode(cfg): workload_path = cfg.opts("workload", "workload.path") r = SimpleWorkloadRepository(workload_path) @@ -315,23 +334,30 @@ def data_dir(cfg, workload_name, corpus_name): class GitWorkloadRepository: def __init__(self, cfg, fetch, update, repo_class=repo.BenchmarkRepository): # current workload name (if any) - self.workload_name = cfg.opts("workload", "workload.name", mandatory=False) - distribution_version = cfg.opts("builder", "distribution.version", mandatory=False) + self.workload_name = cfg.opts( + "workload", "workload.name", mandatory=False) + distribution_version = cfg.opts( + "builder", "distribution.version", mandatory=False) repo_name = cfg.opts("workload", "repository.name") - repo_revision = cfg.opts("workload", "repository.revision", mandatory=False) + repo_revision = cfg.opts( + "workload", "repository.revision", mandatory=False) offline = cfg.opts("system", "offline.mode") - remote_url = cfg.opts("workloads", "%s.url" % repo_name, mandatory=False) + remote_url = cfg.opts("workloads", "%s.url" % + repo_name, mandatory=False) root = cfg.opts("node", "root.dir") - workload_repositories = cfg.opts("benchmarks", "workload.repository.dir") + workload_repositories = cfg.opts( + "benchmarks", "workload.repository.dir") workloads_dir = os.path.join(root, workload_repositories) - self.repo = repo_class(remote_url, workloads_dir, repo_name, "workloads", offline, fetch) + self.repo = repo_class(remote_url, workloads_dir, + repo_name, "workloads", offline, fetch) if update: if repo_revision: self.repo.checkout(repo_revision) else: self.repo.update(distribution_version) - cfg.add(config.Scope.applicationOverride, "workload", "repository.revision", 
self.repo.revision) + cfg.add(config.Scope.applicationOverride, "workload", + "repository.revision", self.repo.revision) @property def workload_names(self): @@ -347,30 +373,35 @@ def workload_file(self, workload_name): class SimpleWorkloadRepository: def __init__(self, workload_path): if not os.path.exists(workload_path): - raise exceptions.SystemSetupError("Workload path %s does not exist" % workload_path) + raise exceptions.SystemSetupError( + "Workload path %s does not exist" % workload_path) if os.path.isdir(workload_path): self.workload_name = io.basename(workload_path) self._workload_dir = workload_path self._workload_file = os.path.join(workload_path, "workload.json") if not os.path.exists(self._workload_file): - raise exceptions.SystemSetupError("Could not find workload.json in %s" % workload_path) + raise exceptions.SystemSetupError( + "Could not find workload.json in %s" % workload_path) elif os.path.isfile(workload_path): if io.has_extension(workload_path, ".json"): self._workload_dir = io.dirname(workload_path) self._workload_file = workload_path self.workload_name = io.splitext(io.basename(workload_path))[0] else: - raise exceptions.SystemSetupError("%s has to be a JSON file" % workload_path) + raise exceptions.SystemSetupError( + "%s has to be a JSON file" % workload_path) else: - raise exceptions.SystemSetupError("%s is neither a file nor a directory" % workload_path) + raise exceptions.SystemSetupError( + "%s is neither a file nor a directory" % workload_path) @property def workload_names(self): return [self.workload_name] def workload_dir(self, workload_name): - assert workload_name == self.workload_name, "Expect provided workload name [%s] to match [%s]" % (workload_name, self.workload_name) + assert workload_name == self.workload_name, "Expect provided workload name [%s] to match [%s]" % ( + workload_name, self.workload_name) return self._workload_dir def workload_file(self, workload_name): @@ -423,7 +454,8 @@ def prepare_docs(cfg, workload, corpus, preparator): preparator.prepare_document_set(document_set, data_root[1]) def on_prepare_workload(self, workload, data_root_dir): - prep = DocumentSetPreparator(workload.name, self.downloader, self.decompressor) + prep = DocumentSetPreparator( + workload.name, self.downloader, self.decompressor) for corpus in used_corpora(workload): params = { "cfg": self.cfg, @@ -469,9 +501,11 @@ def download(self, base_url, source_url, target_path, size_in_bytes): file_name = os.path.basename(target_path) if not base_url: - raise exceptions.DataError("Cannot download data because no base URL is provided.") + raise exceptions.DataError( + "Cannot download data because no base URL is provided.") if self.offline: - raise exceptions.SystemSetupError(f"Cannot find [{target_path}]. Please disable offline mode and retry.") + raise exceptions.SystemSetupError( + f"Cannot find [{target_path}]. 
Please disable offline mode and retry.") if source_url: data_url = source_url @@ -486,16 +520,20 @@ def download(self, base_url, source_url, target_path, size_in_bytes): io.ensure_dir(os.path.dirname(target_path)) if size_in_bytes: size_in_mb = round(convert.bytes_to_mb(size_in_bytes)) - self.logger.info("Downloading data from [%s] (%s MB) to [%s].", data_url, size_in_mb, target_path) + self.logger.info( + "Downloading data from [%s] (%s MB) to [%s].", data_url, size_in_mb, target_path) else: - self.logger.info("Downloading data from [%s] to [%s].", data_url, target_path) + self.logger.info( + "Downloading data from [%s] to [%s].", data_url, target_path) # we want to have a bit more accurate download progress as these files are typically very large progress = net.Progress("[INFO] Downloading workload data file: " + os.path.basename(target_path), accuracy=1) - net.download(data_url, target_path, size_in_bytes, progress_indicator=progress) + net.download(data_url, target_path, size_in_bytes, + progress_indicator=progress) progress.finish() - self.logger.info("Downloaded data from [%s] to [%s].", data_url, target_path) + self.logger.info( + "Downloaded data from [%s] to [%s].", data_url, target_path) except urllib.error.HTTPError as e: if e.code == 404 and self.test_mode: raise exceptions.DataError("This workload does not support test mode. Ask the workload author to add it or" @@ -508,7 +546,8 @@ def download(self, base_url, source_url, target_path, size_in_bytes): msg += f" (HTTP status: {e.code})" raise exceptions.DataError(msg, e) from None except urllib.error.URLError as e: - raise exceptions.DataError(f"Could not download [{data_url}] to [{target_path}].") from e + raise exceptions.DataError( + f"Could not download [{data_url}] to [{target_path}].") from e if not os.path.isfile(target_path): raise exceptions.SystemSetupError(f"Could not download [{data_url}] to [{target_path}]. Verify data " @@ -535,7 +574,8 @@ def has_expected_size(self, file_name, expected_size): def create_file_offset_table(self, document_file_path, base_url, source_url, expected_number_of_lines): # just rebuild the file every time for the time being. Later on, we might check the data file fingerprint to avoid it - lines_read = io.prepare_file_offset_table(document_file_path, base_url, source_url, self.downloader) + lines_read = io.prepare_file_offset_table( + document_file_path, base_url, source_url, self.downloader) if lines_read and lines_read != expected_number_of_lines: io.remove_file_offset_table(document_file_path) raise exceptions.DataError(f"Data in [{document_file_path}] for workload [{self.workload_name}] are invalid. " @@ -558,7 +598,8 @@ def prepare_document_set(self, document_set, data_root): :param data_root: The data root directory for this document set. 
""" doc_path = os.path.join(data_root, document_set.document_file) - archive_path = os.path.join(data_root, document_set.document_archive) if document_set.has_compressed_corpus() else None + archive_path = os.path.join( + data_root, document_set.document_archive) if document_set.has_compressed_corpus() else None while True: if self.is_locally_available(doc_path) and \ self.has_expected_size(doc_path, document_set.uncompressed_size_in_bytes): @@ -566,7 +607,8 @@ def prepare_document_set(self, document_set, data_root): if document_set.has_compressed_corpus() and \ self.is_locally_available(archive_path) and \ self.has_expected_size(archive_path, document_set.compressed_size_in_bytes): - self.decompressor.decompress(archive_path, doc_path, document_set.uncompressed_size_in_bytes) + self.decompressor.decompress( + archive_path, doc_path, document_set.uncompressed_size_in_bytes) else: if document_set.has_compressed_corpus(): target_path = archive_path @@ -576,25 +618,30 @@ def prepare_document_set(self, document_set, data_root): expected_size = document_set.uncompressed_size_in_bytes else: # this should not happen in practice as the JSON schema should take care of this - raise exceptions.BenchmarkAssertionError(f"Workload {self.workload_name} specifies documents but no corpus") + raise exceptions.BenchmarkAssertionError( + f"Workload {self.workload_name} specifies documents but no corpus") try: if document_set.document_file_parts: for part in document_set.document_file_parts: - self.downloader.download(document_set.base_url, None, os.path.join(data_root, part["name"]), part["size"]) + self.downloader.download(document_set.base_url, None, os.path.join( + data_root, part["name"]), part["size"]) try: with open(target_path, "wb") as outfile: console.info(f"Concatenating file parts {', '.join([p['name'] for p in document_set.document_file_parts])}" f" into {os.path.basename(target_path)}", flush=True, logger=self.logger) for part in document_set.document_file_parts: - part_name = os.path.join(data_root, part["name"]) + part_name = os.path.join( + data_root, part["name"]) with open(part_name, "rb") as infile: shutil.copyfileobj(infile, outfile) os.remove(part_name) except Exception as e: - raise exceptions.DataError(f"Encountered exception {repr(e)} when building corpus file from parts") + raise exceptions.DataError( + f"Encountered exception {repr(e)} when building corpus file from parts") else: - self.downloader.download(document_set.base_url, document_set.source_url, target_path, expected_size) + self.downloader.download( + document_set.base_url, document_set.source_url, target_path, expected_size) except exceptions.DataError as e: if e.message == "Cannot download data because no base URL is provided." and \ self.is_locally_available(target_path): @@ -604,7 +651,8 @@ def prepare_document_set(self, document_set, data_root): else: raise if document_set.support_file_offset_table: - self.create_file_offset_table(doc_path, document_set.base_url, document_set.source_url, document_set.number_of_lines) + self.create_file_offset_table( + doc_path, document_set.base_url, document_set.source_url, document_set.number_of_lines) def prepare_bundled_document_set(self, document_set, data_root): """ @@ -626,12 +674,14 @@ def prepare_bundled_document_set(self, document_set, data_root): :return: See postcondition. 
""" doc_path = os.path.join(data_root, document_set.document_file) - archive_path = os.path.join(data_root, document_set.document_archive) if document_set.has_compressed_corpus() else None + archive_path = os.path.join( + data_root, document_set.document_archive) if document_set.has_compressed_corpus() else None while True: if self.is_locally_available(doc_path): if self.has_expected_size(doc_path, document_set.uncompressed_size_in_bytes): - self.create_file_offset_table(doc_path, document_set.base_url, document_set.source_url, document_set.number_of_lines) + self.create_file_offset_table( + doc_path, document_set.base_url, document_set.source_url, document_set.number_of_lines) return True else: raise exceptions.DataError(f"[{doc_path}] is present but does not have the expected size " @@ -639,7 +689,8 @@ def prepare_bundled_document_set(self, document_set, data_root): if document_set.has_compressed_corpus() and self.is_locally_available(archive_path): if self.has_expected_size(archive_path, document_set.compressed_size_in_bytes): - self.decompressor.decompress(archive_path, doc_path, document_set.uncompressed_size_in_bytes) + self.decompressor.decompress( + archive_path, doc_path, document_set.uncompressed_size_in_bytes) else: # treat this is an error because if the file is present but the size does not match, something is # really fishy. It is likely that the user is currently creating a new workload and did not specify @@ -657,7 +708,8 @@ class TemplateSource: benchmark.collect(parts=... """ - collect_parts_re = re.compile(r"{{\ +?benchmark\.collect\(parts=\"(.+?(?=\"))\"\)\ +?}}") + collect_parts_re = re.compile( + r"{{\ +?benchmark\.collect\(parts=\"(.+?(?=\"))\"\)\ +?}}") def __init__(self, base_path, template_file_name, source=io.FileSource, fileglobber=glob.glob): self.base_path = base_path @@ -674,12 +726,16 @@ def load_template_from_file(self): autoescape=select_autoescape(['html', 'xml'])), self.template_file_name) except jinja2.TemplateNotFound: - self.logger.exception("Could not load workload from [%s].", self.template_file_name) - raise WorkloadSyntaxError("Could not load workload from '{}'".format(self.template_file_name)) - self.assembled_source = self.replace_includes(self.base_path, base_workload[0]) + self.logger.exception( + "Could not load workload from [%s].", self.template_file_name) + raise WorkloadSyntaxError( + "Could not load workload from '{}'".format(self.template_file_name)) + self.assembled_source = self.replace_includes( + self.base_path, base_workload[0]) def load_template_from_string(self, template_source): - self.assembled_source = self.replace_includes(self.base_path, template_source) + self.assembled_source = self.replace_includes( + self.base_path, template_source) def replace_includes(self, base_path, workload_fragment): match = TemplateSource.collect_parts_re.findall(workload_fragment) @@ -689,7 +745,8 @@ def replace_includes(self, base_path, workload_fragment): for glob_pattern in match: full_glob_path = os.path.join(base_path, glob_pattern) sub_source = self.read_glob_files(full_glob_path) - repl[glob_pattern] = self.replace_includes(base_path=io.dirname(full_glob_path), workload_fragment=sub_source) + repl[glob_pattern] = self.replace_includes( + base_path=io.dirname(full_glob_path), workload_fragment=sub_source) def replstring(matchobj): # matchobj.groups() is a tuple and first element contains the matched group id @@ -803,9 +860,11 @@ def relative_glob(start, f): return [] base_path = io.dirname(template_file_name) - template_source = 
TemplateSource(base_path, io.basename(template_file_name)) + template_source = TemplateSource( + base_path, io.basename(template_file_name)) template_source.load_template_from_file() - register_all_params_in_workload(template_source.assembled_source, complete_workload_params) + register_all_params_in_workload( + template_source.assembled_source, complete_workload_params) return render_template(loader=jinja2.FileSystemLoader(base_path), template_source=template_source.assembled_source, @@ -843,7 +902,8 @@ def _filters_from_filtered_tasks(self, filtered_tasks): raise exceptions.SystemSetupError(f"Invalid format for filtered tasks: [{t}]. " f"Expected [type] but got [{spec[0]}].") else: - raise exceptions.SystemSetupError(f"Invalid format for filtered tasks: [{t}]") + raise exceptions.SystemSetupError( + f"Invalid format for filtered tasks: [{t}]") return filters def _filter_out_match(self, task): @@ -874,7 +934,8 @@ def on_after_load_workload(self, input_workload, **kwargs): leaf_task, test_procedure) task.remove_task(leaf_task) for task in tasks_to_remove: - self.logger.info("Removing task [%s] from test_procedure [%s] due to task filter.", task, test_procedure) + self.logger.info( + "Removing task [%s] from test_procedure [%s] due to task filter.", task, test_procedure) test_procedure.remove_task(task) return input_workload @@ -882,16 +943,19 @@ def on_after_load_workload(self, input_workload, **kwargs): class TestModeWorkloadProcessor(WorkloadProcessor): def __init__(self, cfg): - self.test_mode_enabled = cfg.opts("workload", "test.mode.enabled", mandatory=False, default_value=False) + self.test_mode_enabled = cfg.opts( + "workload", "test.mode.enabled", mandatory=False, default_value=False) self.logger = logging.getLogger(__name__) def on_after_load_workload(self, input_workload, **kwargs): if not self.test_mode_enabled: return input_workload - self.logger.info("Preparing workload [%s] for test mode.", str(input_workload)) + self.logger.info( + "Preparing workload [%s] for test mode.", str(input_workload)) for corpus in input_workload.corpora: if self.logger.isEnabledFor(logging.DEBUG): - self.logger.debug("Reducing corpus size to 1000 documents for [%s]", corpus.name) + self.logger.debug( + "Reducing corpus size to 1000 documents for [%s]", corpus.name) for document_set in corpus.documents: # TODO #341: Should we allow this for snapshots too? 
if document_set.is_bulk: @@ -908,7 +972,7 @@ def on_after_load_workload(self, input_workload, **kwargs): document_set.document_file = f"{path}-1k{ext}" else: raise exceptions.BenchmarkAssertionError(f"Document corpus [{corpus.name}] has neither compressed " - f"nor uncompressed corpus.") + f"nor uncompressed corpus.") # we don't want to check sizes document_set.compressed_size_in_bytes = None @@ -923,12 +987,14 @@ def on_after_load_workload(self, input_workload, **kwargs): if leaf_task.warmup_iterations is not None and leaf_task.warmup_iterations > leaf_task.clients: count = leaf_task.clients if self.logger.isEnabledFor(logging.DEBUG): - self.logger.debug("Resetting warmup iterations to %d for [%s]", count, str(leaf_task)) + self.logger.debug( + "Resetting warmup iterations to %d for [%s]", count, str(leaf_task)) leaf_task.warmup_iterations = count if leaf_task.iterations is not None and leaf_task.iterations > leaf_task.clients: count = leaf_task.clients if self.logger.isEnabledFor(logging.DEBUG): - self.logger.debug("Resetting measurement iterations to %d for [%s]", count, str(leaf_task)) + self.logger.debug( + "Resetting measurement iterations to %d for [%s]", count, str(leaf_task)) leaf_task.iterations = count if leaf_task.warmup_time_period is not None and leaf_task.warmup_time_period > 0: leaf_task.warmup_time_period = 0 @@ -950,16 +1016,22 @@ def on_after_load_workload(self, input_workload, **kwargs): return input_workload + class QueryRandomizerWorkloadProcessor(WorkloadProcessor): DEFAULT_RF = 0.3 DEFAULT_N = 5000 DEFAULT_ALPHA = 1 + def __init__(self, cfg): - self.randomization_enabled = cfg.opts("workload", "randomization.enabled", mandatory=False, default_value=False) - self.rf = float(cfg.opts("workload", "randomization.repeat_frequency", mandatory=False, default_value=self.DEFAULT_RF)) + self.randomization_enabled = cfg.opts( + "workload", "randomization.enabled", mandatory=False, default_value=False) + self.rf = float(cfg.opts("workload", "randomization.repeat_frequency", + mandatory=False, default_value=self.DEFAULT_RF)) self.logger = logging.getLogger(__name__) - self.N = int(cfg.opts("workload", "randomization.n", mandatory=False, default_value=self.DEFAULT_N)) - self.zipf_alpha = float(cfg.opts("workload", "randomization.alpha", mandatory=False, default_value=self.DEFAULT_ALPHA)) + self.N = int(cfg.opts("workload", "randomization.n", + mandatory=False, default_value=self.DEFAULT_N)) + self.zipf_alpha = float(cfg.opts( + "workload", "randomization.alpha", mandatory=False, default_value=self.DEFAULT_ALPHA)) self.H_list = self.precompute_H(self.N, self.zipf_alpha) # Helper functions for computing Zipf distribution @@ -1001,7 +1073,7 @@ def get_dict_from_previous_path(self, root, current_path): def extract_fields_helper(self, root, current_path): # Recursively called to find the location of ranges in an OpenSearch range query. # Return the field and the current path if we're currently scanning the field name in a range query, otherwise return an empty list. 
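# ---------------------------------------------------------------------------
# Illustrative sketch (not taken from this diff; precompute_H's body is not
# shown here) of the kind of Zipf machinery QueryRandomizerWorkloadProcessor
# precomputes: H_list holds partial generalized harmonic sums
# H(k) = sum_{i=1..k} 1 / i**alpha, and a rank is drawn by inverting that
# discrete CDF, so low indices (frequently repeated values) dominate.
# Function names below are hypothetical.
import bisect
import random

def precompute_harmonics(n, alpha):
    total, sums = 0.0, []
    for i in range(1, n + 1):
        total += 1.0 / (i ** alpha)
        sums.append(total)          # sums[k - 1] == H(k)
    return sums

def draw_zipf_index(h_list):
    # P(X <= k) = H(k) / H(N); invert via binary search over the partial sums.
    u = random.random() * h_list[-1]
    return bisect.bisect_left(h_list, u)   # 0-based; index 0 is the most likely

# With alpha=1 and N=5000 (the defaults above), index 0 is drawn about
# 10 times more often than index 9 and about 5,000 times more often than index 4999.
# ---------------------------------------------------------------------------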
- fields = [] # pairs of (field, path_to_field) + fields = [] # pairs of (field, path_to_field) curr = self.get_dict_from_previous_path(root, current_path) if isinstance(curr, dict) and curr != {}: if len(current_path) > 0 and current_path[-1] == "range": @@ -1012,7 +1084,8 @@ def extract_fields_helper(self, root, current_path): return fields else: for key in curr.keys(): - fields += self.extract_fields_helper(root, current_path + [key]) + fields += self.extract_fields_helper( + root, current_path + [key]) return fields elif isinstance(curr, list) and curr != []: for i in range(len(curr)): @@ -1041,7 +1114,8 @@ def set_range(self, params, fields_and_paths, new_values): for field_and_path, new_value in zip(fields_and_paths, new_values): field = field_and_path[0] path = field_and_path[1] - range_section = self.get_dict_from_previous_path(params["body"]["query"], path)[field] + range_section = self.get_dict_from_previous_path( + params["body"]["query"], path)[field] # get the section of the query corresponding to the field name for greater_than in ["gte", "gt"]: if greater_than in range_section: @@ -1059,26 +1133,32 @@ def get_repeated_value_index(self): def get_randomized_values(self, input_workload, input_params, get_standard_value=params.get_standard_value, - get_standard_value_source=params.get_standard_value_source, # Made these configurable for simpler unit tests + # Made these configurable for simpler unit tests + get_standard_value_source=params.get_standard_value_source, **kwargs): # The queries as listed in operations/default.json don't have the index param, # unlike the custom ones you would specify in workload.py, so we have to add them ourselves if not "index" in input_params: - input_params["index"] = params.get_target(input_workload, input_params) + input_params["index"] = params.get_target( + input_workload, input_params) fields_and_paths = self.extract_fields_and_paths(input_params) if random.random() < self.rf: # Draw a potentially repeated value from the saved standard values index = self.get_repeated_value_index() - new_values = [get_standard_value(kwargs["op_name"], field_and_path[0], index) for field_and_path in fields_and_paths] + new_values = [get_standard_value( + kwargs["op_name"], field_and_path[0], index) for field_and_path in fields_and_paths] # Use the same index for all fields in one query, otherwise the probability of repeats in a multi-field query would be very low - input_params = self.set_range(input_params, fields_and_paths, new_values) + input_params = self.set_range( + input_params, fields_and_paths, new_values) else: # Generate a new random value, from the standard value source function. 
This will be new (a cache miss) - new_values = [get_standard_value_source(kwargs["op_name"], field_and_path[0])() for field_and_path in fields_and_paths] - input_params = self.set_range(input_params, fields_and_paths, new_values) + new_values = [get_standard_value_source( + kwargs["op_name"], field_and_path[0])() for field_and_path in fields_and_paths] + input_params = self.set_range( + input_params, fields_and_paths, new_values) return input_params def create_param_source_lambda(self, op_name, get_standard_value, get_standard_value_source): @@ -1091,7 +1171,8 @@ def on_after_load_workload(self, input_workload, **kwargs): if not self.randomization_enabled: self.logger.info("Query randomization is disabled.") return input_workload - self.logger.info("Query randomization is enabled, with repeat frequency = %d, n = %d",self.rf, self.N) + self.logger.info( + "Query randomization is enabled, with repeat frequency = %d, n = %d", self.rf, self.N) # By default, use params for standard values and generate new standard values the first time an op/field is seen. # In unit tests, we should be able to supply our own sources independent of params. @@ -1113,7 +1194,8 @@ def on_after_load_workload(self, input_workload, **kwargs): for task in default_test_procedure.schedule: for leaf_task in task: try: - op_type = workload.OperationType.from_hyphenated_string(leaf_task.operation.type) + op_type = workload.OperationType.from_hyphenated_string( + leaf_task.operation.type) except KeyError: op_type = None self.logger.info( @@ -1130,9 +1212,11 @@ def on_after_load_workload(self, input_workload, **kwargs): # Generate the right number of standard values for this field, if not already present for field_and_path in self.extract_fields_and_paths(leaf_task.operation.params): if generate_new_standard_values: - params.generate_standard_values_if_absent(op_name, field_and_path[0], self.N) + params.generate_standard_values_if_absent( + op_name, field_and_path[0], self.N) return input_workload + class CompleteWorkloadParams: def __init__(self, user_specified_workload_params=None): self.workload_defined_params = set() @@ -1169,15 +1253,18 @@ class WorkloadFileReader: """ def __init__(self, cfg): - workload_schema_file = os.path.join(cfg.opts("node", "benchmark.root"), "resources", "workload-schema.json") + workload_schema_file = os.path.join( + cfg.opts("node", "benchmark.root"), "resources", "workload-schema.json") with open(workload_schema_file, mode="rt", encoding="utf-8") as f: self.workload_schema = json.loads(f.read()) self.workload_params = cfg.opts("workload", "params", mandatory=False) - self.complete_workload_params = CompleteWorkloadParams(user_specified_workload_params=self.workload_params) + self.complete_workload_params = CompleteWorkloadParams( + user_specified_workload_params=self.workload_params) self.read_workload = WorkloadSpecificationReader( workload_params=self.workload_params, complete_workload_params=self.complete_workload_params, - selected_test_procedure=cfg.opts("workload", "test_procedure.name", mandatory=False) + selected_test_procedure=cfg.opts( + "workload", "test_procedure.name", mandatory=False) ) self.logger = logging.getLogger(__name__) @@ -1191,7 +1278,8 @@ def read(self, workload_name, workload_spec_file, mapping_dir): :return: A corresponding workload instance if the workload file is valid. 
""" - self.logger.info("Reading workload specification file [%s].", workload_spec_file) + self.logger.info( + "Reading workload specification file [%s].", workload_spec_file) # render the workload to a temporary file instead of dumping it into the logs. It is easier to check for error messages # involving lines numbers and it also does not bloat Benchmark's log file so much. tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".json") @@ -1201,12 +1289,14 @@ def read(self, workload_name, workload_spec_file, mapping_dir): complete_workload_params=self.complete_workload_params) with open(tmp.name, "wt", encoding="utf-8") as f: f.write(rendered) - self.logger.info("Final rendered workload for '%s' has been written to '%s'.", workload_spec_file, tmp.name) + self.logger.info( + "Final rendered workload for '%s' has been written to '%s'.", workload_spec_file, tmp.name) workload_spec = json.loads(rendered) except jinja2.exceptions.TemplateNotFound: self.logger.exception("Could not load [%s]", workload_spec_file) - raise exceptions.SystemSetupError("Workload {} does not exist".format(workload_name)) + raise exceptions.SystemSetupError( + "Workload {} does not exist".format(workload_name)) except jinja2.exceptions.TemplateSyntaxError as e: exception_message = f"Jinja2 Exception TemplateSyntaxError: {e}\n" @@ -1220,7 +1310,6 @@ def read(self, workload_name, workload_spec_file, mapping_dir): raise exceptions.SystemSetupError(exception_message) - except json.JSONDecodeError as e: self.logger.exception("Could not load [%s].", workload_spec_file) msg = "Could not load '{}': {}.".format(workload_spec_file, str(e)) @@ -1231,9 +1320,12 @@ def read(self, workload_name, workload_spec_file, mapping_dir): ctx_start = max(0, line_idx - ctx_line_count) ctx_end = min(line_idx + ctx_line_count, len(lines)) erroneous_lines = lines[ctx_start:ctx_end] - erroneous_lines.insert(line_idx - ctx_start + 1, "-" * (e.colno - 1) + "^ Error is here") - msg += " Lines containing the error:\n\n{}\n\n".format("\n".join(erroneous_lines)) - msg += "The complete workload has been written to '{}' for diagnosis. \n\n".format(tmp.name) + erroneous_lines.insert( + line_idx - ctx_start + 1, "-" * (e.colno - 1) + "^ Error is here") + msg += " Lines containing the error:\n\n{}\n\n".format( + "\n".join(erroneous_lines)) + msg += "The complete workload has been written to '{}' for diagnosis. \n\n".format( + tmp.name) console_message = f"Suggestion: Verify that [{workload_name}] workload has correctly formatted JSON files and " + \ "Jinja Templates. For Jinja2 errors, consider using a live Jinja2 parser. " + \ f"See common workload formatting errors:{WorkloadFileReader.COMMON_WORKLOAD_FORMAT_ERRORS}" @@ -1252,7 +1344,8 @@ def read(self, workload_name, workload_spec_file, mapping_dir): # Convert to string early on to avoid serialization errors with Jinja exceptions. raise WorkloadSyntaxError(msg, str(e)) # check the workload version before even attempting to validate the JSON format to avoid bogus errors. 
- raw_version = workload_spec.get("version", WorkloadFileReader.MAXIMUM_SUPPORTED_TRACK_VERSION) + raw_version = workload_spec.get( + "version", WorkloadFileReader.MAXIMUM_SUPPORTED_TRACK_VERSION) try: workload_version = int(raw_version) except ValueError: @@ -1260,15 +1353,15 @@ def read(self, workload_name, workload_spec_file, mapping_dir): workload_name, str(raw_version))) if WorkloadFileReader.MINIMUM_SUPPORTED_TRACK_VERSION > workload_version: raise exceptions.BenchmarkError("Workload {} is on version {} but needs to be updated at least to version {} to work with the " - "current version of Benchmark.".format(workload_name, workload_version, - WorkloadFileReader.MINIMUM_SUPPORTED_TRACK_VERSION)) + "current version of Benchmark.".format(workload_name, workload_version, + WorkloadFileReader.MINIMUM_SUPPORTED_TRACK_VERSION)) if WorkloadFileReader.MAXIMUM_SUPPORTED_TRACK_VERSION < workload_version: raise exceptions.BenchmarkError("Workload {} requires a newer version of Benchmark. " - "Please upgrade Benchmark (supported workload version: {}, " - "required workload version: {}).".format( - workload_name, - WorkloadFileReader.MAXIMUM_SUPPORTED_TRACK_VERSION, - workload_version)) + "Please upgrade Benchmark (supported workload version: {}, " + "required workload version: {}).".format( + workload_name, + WorkloadFileReader.MAXIMUM_SUPPORTED_TRACK_VERSION, + workload_version)) try: jsonschema.validate(workload_spec, self.workload_schema) except jsonschema.exceptions.ValidationError as ve: @@ -1276,10 +1369,11 @@ def read(self, workload_name, workload_spec_file, mapping_dir): "Workload '{}' is invalid.\n\nError details: {}\nInstance: {}\nPath: {}\nSchema path: {}".format( workload_name, ve.message, json.dumps( ve.instance, indent=4, sort_keys=True), - ve.absolute_path, ve.absolute_schema_path)) + ve.absolute_path, ve.absolute_schema_path)) try: - current_workload = self.read_workload(workload_name, workload_spec, mapping_dir) + current_workload = self.read_workload( + workload_name, workload_spec, mapping_dir) except Exception as e: console.error(e) raise @@ -1292,19 +1386,22 @@ def read(self, workload_name, workload_spec_file, mapping_dir): "{}\n\n" "All parameters exposed by this workload:\n" "{}".format( - ",".join(opts.double_quoted_list_of(sorted(unused_user_defined_workload_params))), + ",".join(opts.double_quoted_list_of( + sorted(unused_user_defined_workload_params))), ",".join(opts.double_quoted_list_of(sorted(opts.make_list_of_close_matches( unused_user_defined_workload_params, self.complete_workload_params.workload_defined_params )))), - "\n".join(opts.bulleted_list_of(sorted(list(self.workload_params.keys())))), + "\n".join(opts.bulleted_list_of( + sorted(list(self.workload_params.keys())))), "\n".join(opts.bulleted_list_of(self.complete_workload_params.sorted_workload_defined_params)))) self.logger.critical(err_msg) # also dump the message on the console console.println(err_msg) raise exceptions.WorkloadConfigError( - "Unused workload parameters {}.".format(sorted(unused_user_defined_workload_params)) + "Unused workload parameters {}.".format( + sorted(unused_user_defined_workload_params)) ) return current_workload @@ -1318,7 +1415,8 @@ def __init__(self, workload_plugin_path, runner_registry=None, scheduler_registr self.runner_registry = runner_registry self.scheduler_registry = scheduler_registry self.workload_processor_registry = workload_processor_registry - self.loader = modules.ComponentLoader(root_path=workload_plugin_path, component_entry_point="workload") + self.loader = 
modules.ComponentLoader( + root_path=workload_plugin_path, component_entry_point="workload") def can_load(self): return self.loader.can_load() @@ -1350,7 +1448,8 @@ def register_workload_processor(self, workload_processor): def register_standard_value_source(self, op_name, field_name, standard_value_source): # Define a value source for parameters for a given operation name and field name, for use in randomization - params.register_standard_value_source(op_name, field_name, standard_value_source) + params.register_standard_value_source( + op_name, field_name, standard_value_source) @property def meta_data(self): @@ -1375,7 +1474,8 @@ def __init__(self, workload_params=None, complete_workload_params=None, selected def __call__(self, workload_name, workload_specification, mapping_dir): self.name = workload_name - description = self._r(workload_specification, "description", mandatory=False, default_value="") + description = self._r(workload_specification, + "description", mandatory=False, default_value="") meta_data = self._r(workload_specification, "meta", mandatory=False) indices = [self._create_index(idx, mapping_dir) @@ -1384,25 +1484,27 @@ def __call__(self, workload_name, workload_specification, mapping_dir): for idx in self._r(workload_specification, "data-streams", mandatory=False, default_value=[])] if len(indices) > 0 and len(data_streams) > 0: # we guard against this early and support either or - raise WorkloadSyntaxError("indices and data-streams cannot both be specified") + raise WorkloadSyntaxError( + "indices and data-streams cannot both be specified") templates = [self._create_index_template(tpl, mapping_dir) for tpl in self._r(workload_specification, "templates", mandatory=False, default_value=[])] composable_templates = [self._create_index_template(tpl, mapping_dir) - for tpl in self._r(workload_specification, "composable-templates", mandatory=False, default_value=[])] + for tpl in self._r(workload_specification, "composable-templates", mandatory=False, default_value=[])] component_templates = [self._create_component_template(tpl, mapping_dir) - for tpl in self._r(workload_specification, "component-templates", mandatory=False, default_value=[])] + for tpl in self._r(workload_specification, "component-templates", mandatory=False, default_value=[])] corpora = self._create_corpora(self._r(workload_specification, "corpora", mandatory=False, default_value=[]), indices, data_streams) test_procedures = self._create_test_procedures(workload_specification) # at this point, *all* workload params must have been referenced in the templates return workload.Workload(name=self.name, meta_data=meta_data, - description=description, test_procedures=test_procedures, - indices=indices, - data_streams=data_streams, templates=templates, composable_templates=composable_templates, - component_templates=component_templates, corpora=corpora) + description=description, test_procedures=test_procedures, + indices=indices, + data_streams=data_streams, templates=templates, composable_templates=composable_templates, + component_templates=component_templates, corpora=corpora) def _error(self, msg): - raise WorkloadSyntaxError("Workload '%s' is invalid. %s" % (self.name, msg)) + raise WorkloadSyntaxError( + "Workload '%s' is invalid. 
%s" % (self.name, msg)) def _r(self, root, path, error_ctx=None, mandatory=True, default_value=None): if isinstance(path, str): @@ -1416,9 +1518,11 @@ def _r(self, root, path, error_ctx=None, mandatory=True, default_value=None): except KeyError: if mandatory: if error_ctx: - self._error("Mandatory element '%s' is missing in '%s'." % (".".join(path), error_ctx)) + self._error("Mandatory element '%s' is missing in '%s'." % ( + ".".join(path), error_ctx)) else: - self._error("Mandatory element '%s' is missing." % ".".join(path)) + self._error("Mandatory element '%s' is missing." % + ".".join(path)) else: return default_value @@ -1426,7 +1530,8 @@ def _create_index(self, index_spec, mapping_dir): index_name = self._r(index_spec, "name") body_file = self._r(index_spec, "body", mandatory=False) if body_file: - idx_body_tmpl_src = TemplateSource(mapping_dir, body_file, self.source) + idx_body_tmpl_src = TemplateSource( + mapping_dir, body_file, self.source) with self.source(os.path.join(mapping_dir, body_file), "rt") as f: idx_body_tmpl_src.load_template_from_string(f.read()) body = self._load_template( @@ -1456,7 +1561,8 @@ def _create_index_template(self, tpl_spec, mapping_dir): name = self._r(tpl_spec, "name") template_file = self._r(tpl_spec, "template") index_pattern = self._r(tpl_spec, "index-pattern") - delete_matching_indices = self._r(tpl_spec, "delete-matching-indices", mandatory=False, default_value=True) + delete_matching_indices = self._r( + tpl_spec, "delete-matching-indices", mandatory=False, default_value=True) template_file = os.path.join(mapping_dir, template_file) idx_tmpl_src = TemplateSource(mapping_dir, template_file, self.source) with self.source(template_file, "rt") as f: @@ -1468,18 +1574,22 @@ def _create_index_template(self, tpl_spec, mapping_dir): def _load_template(self, contents, description): self.logger.info("Loading template [%s].", description) - register_all_params_in_workload(contents, self.complete_workload_params) + register_all_params_in_workload( + contents, self.complete_workload_params) try: rendered = render_template(template_source=contents, template_vars=self.workload_params) return json.loads(rendered) except Exception as e: - self.logger.exception("Could not load file template for %s.", description) - raise WorkloadSyntaxError("Could not load file template for '%s'" % description, str(e)) + self.logger.exception( + "Could not load file template for %s.", description) + raise WorkloadSyntaxError( + "Could not load file template for '%s'" % description, str(e)) def _create_corpora(self, corpora_specs, indices, data_streams): if len(indices) > 0 and len(data_streams) > 0: - raise WorkloadSyntaxError("indices and data-streams cannot both be specified") + raise WorkloadSyntaxError( + "indices and data-streams cannot both be specified") document_corpora = [] known_corpora_names = set() for corpus_spec in corpora_specs: @@ -1489,10 +1599,12 @@ def _create_corpora(self, corpora_specs, indices, data_streams): self._error("Duplicate document corpus name [%s]." 
% name) known_corpora_names.add(name) - meta_data = self._r(corpus_spec, "meta", error_ctx=name, mandatory=False) + meta_data = self._r(corpus_spec, "meta", + error_ctx=name, mandatory=False) corpus = workload.DocumentCorpus(name=name, meta_data=meta_data) # defaults on corpus level - default_base_url = self._r(corpus_spec, "base-url", mandatory=False, default_value=None) + default_base_url = self._r( + corpus_spec, "base-url", mandatory=False, default_value=None) default_source_format = self._r(corpus_spec, "source-format", mandatory=False, default_value=workload.Documents.SOURCE_FORMAT_BULK) default_action_and_meta_data = self._r(corpus_spec, "includes-action-and-meta-data", mandatory=False, @@ -1502,32 +1614,40 @@ def _create_corpora(self, corpora_specs, indices, data_streams): corpus_target_type = None if len(indices) == 1: - corpus_target_idx = self._r(corpus_spec, "target-index", mandatory=False, default_value=indices[0].name) + corpus_target_idx = self._r( + corpus_spec, "target-index", mandatory=False, default_value=indices[0].name) elif len(indices) > 0: - corpus_target_idx = self._r(corpus_spec, "target-index", mandatory=False) + corpus_target_idx = self._r( + corpus_spec, "target-index", mandatory=False) if len(data_streams) == 1: corpus_target_ds = self._r(corpus_spec, "target-data-stream", mandatory=False, default_value=data_streams[0].name) elif len(data_streams) > 0: - corpus_target_ds = self._r(corpus_spec, "target-data-stream", mandatory=False) + corpus_target_ds = self._r( + corpus_spec, "target-data-stream", mandatory=False) if len(indices) == 1 and len(indices[0].types) == 1: corpus_target_type = self._r(corpus_spec, "target-type", mandatory=False, default_value=indices[0].types[0]) elif len(indices) > 0: - corpus_target_type = self._r(corpus_spec, "target-type", mandatory=False) + corpus_target_type = self._r( + corpus_spec, "target-type", mandatory=False) for doc_spec in self._r(corpus_spec, "documents"): - base_url = self._r(doc_spec, "base-url", mandatory=False, default_value=default_base_url) - source_format = self._r(doc_spec, "source-format", mandatory=False, default_value=default_source_format) + base_url = self._r( + doc_spec, "base-url", mandatory=False, default_value=default_base_url) + source_format = self._r( + doc_spec, "source-format", mandatory=False, default_value=default_source_format) if source_format in workload.Documents.SUPPORTED_SOURCE_FORMAT: - source_url = self._r(doc_spec, "source-url", mandatory=False) + source_url = self._r( + doc_spec, "source-url", mandatory=False) docs = self._r(doc_spec, "source-file") document_file_parts = list() for parts in self._r(doc_spec, "source-file-parts", mandatory=False, default_value=[]): - document_file_parts.append( { "name": self._r(parts, "name"), "size": self._r(parts, "size") } ) + document_file_parts.append( + {"name": self._r(parts, "name"), "size": self._r(parts, "size")}) if io.is_archive(docs): document_archive = docs document_file = io.splitext(docs)[0] @@ -1535,9 +1655,12 @@ def _create_corpora(self, corpora_specs, indices, data_streams): document_archive = None document_file = docs num_docs = self._r(doc_spec, "document-count") - compressed_bytes = self._r(doc_spec, "compressed-bytes", mandatory=False) - uncompressed_bytes = self._r(doc_spec, "uncompressed-bytes", mandatory=False) - doc_meta_data = self._r(doc_spec, "meta", error_ctx=name, mandatory=False) + compressed_bytes = self._r( + doc_spec, "compressed-bytes", mandatory=False) + uncompressed_bytes = self._r( + doc_spec, 
"uncompressed-bytes", mandatory=False) + doc_meta_data = self._r( + doc_spec, "meta", error_ctx=name, mandatory=False) includes_action_and_meta_data = self._r(doc_spec, "includes-action-and-meta-data", mandatory=False, default_value=default_action_and_meta_data) @@ -1551,64 +1674,79 @@ def _create_corpora(self, corpora_specs, indices, data_streams): # require to be specified if we're using data streams and we have no default target_ds = self._r(doc_spec, "target-data-stream", - mandatory=len(data_streams) > 0 and corpus_target_ds is None, + mandatory=len( + data_streams) > 0 and corpus_target_ds is None, default_value=corpus_target_ds, error_ctx=docs) if target_ds and len(indices) > 0: # if indices are in use we error - raise WorkloadSyntaxError("target-data-stream cannot be used when using indices") + raise WorkloadSyntaxError( + "target-data-stream cannot be used when using indices") elif target_ds and target_type: - raise WorkloadSyntaxError("target-type cannot be used when using data-streams") + raise WorkloadSyntaxError( + "target-type cannot be used when using data-streams") # need an index if we're using indices and no meta-data are present and we don't have a default target_idx = self._r(doc_spec, "target-index", - mandatory=len(indices) > 0 and corpus_target_idx is None, + mandatory=len( + indices) > 0 and corpus_target_idx is None, default_value=corpus_target_idx, error_ctx=docs) # either target_idx or target_ds if target_idx and len(data_streams) > 0: # if data streams are in use we error - raise WorkloadSyntaxError("target-index cannot be used when using data-streams") + raise WorkloadSyntaxError( + "target-index cannot be used when using data-streams") # we need one or the other if target_idx is None and target_ds is None: raise WorkloadSyntaxError(f"a {'target-index' if len(indices) > 0 else 'target-data-stream'} " - f"is required for {docs}" ) + f"is required for {docs}") docs = workload.Documents(source_format=source_format, - document_file=document_file, - document_file_parts=document_file_parts, - document_archive=document_archive, - base_url=base_url, - source_url=source_url, - includes_action_and_meta_data=includes_action_and_meta_data, - number_of_documents=num_docs, - compressed_size_in_bytes=compressed_bytes, - uncompressed_size_in_bytes=uncompressed_bytes, - target_index=target_idx, target_type=target_type, - target_data_stream=target_ds, meta_data=doc_meta_data) + document_file=document_file, + document_file_parts=document_file_parts, + document_archive=document_archive, + base_url=base_url, + source_url=source_url, + includes_action_and_meta_data=includes_action_and_meta_data, + number_of_documents=num_docs, + compressed_size_in_bytes=compressed_bytes, + uncompressed_size_in_bytes=uncompressed_bytes, + target_index=target_idx, target_type=target_type, + target_data_stream=target_ds, meta_data=doc_meta_data) corpus.documents.append(docs) else: - self._error("Unknown source-format [%s] in document corpus [%s]." % (source_format, name)) + self._error( + "Unknown source-format [%s] in document corpus [%s]." 
% (source_format, name)) document_corpora.append(corpus) return document_corpora def _create_test_procedures(self, workload_spec): - ops = self.parse_operations(self._r(workload_spec, "operations", mandatory=False, default_value=[])) - workload_params = self._r(workload_spec, "parameters", mandatory=False, default_value={}) + ops = self.parse_operations( + self._r(workload_spec, "operations", mandatory=False, default_value=[])) + workload_params = self._r( + workload_spec, "parameters", mandatory=False, default_value={}) test_procedures = [] known_test_procedure_names = set() default_test_procedure = None - test_procedure_specs, auto_generated = self._get_test_procedure_specs(workload_spec) + test_procedure_specs, auto_generated = self._get_test_procedure_specs( + workload_spec) number_of_test_procedures = len(test_procedure_specs) for test_procedure_spec in test_procedure_specs: - name = self._r(test_procedure_spec, "name", error_ctx="test_procedures") - description = self._r(test_procedure_spec, "description", error_ctx=name, mandatory=False) - user_info = self._r(test_procedure_spec, "user-info", error_ctx=name, mandatory=False) - test_procedure_params = self._r(test_procedure_spec, "parameters", error_ctx=name, mandatory=False, default_value={}) - meta_data = self._r(test_procedure_spec, "meta", error_ctx=name, mandatory=False) + name = self._r(test_procedure_spec, "name", + error_ctx="test_procedures") + description = self._r( + test_procedure_spec, "description", error_ctx=name, mandatory=False) + user_info = self._r(test_procedure_spec, + "user-info", error_ctx=name, mandatory=False) + test_procedure_params = self._r( + test_procedure_spec, "parameters", error_ctx=name, mandatory=False, default_value={}) + meta_data = self._r(test_procedure_spec, "meta", + error_ctx=name, mandatory=False) # if we only have one test_procedure it is treated as default test_procedure, no matter what the user has specified - default = number_of_test_procedures == 1 or self._r(test_procedure_spec, "default", error_ctx=name, mandatory=False) + default = number_of_test_procedures == 1 or self._r( + test_procedure_spec, "default", error_ctx=name, mandatory=False) selected = number_of_test_procedures == 1 or self.selected_test_procedure == name if default and default_test_procedure is not None: self._error("Both '%s' and '%s' are defined as default test_procedures. Please define only one of them as default." @@ -1621,12 +1759,13 @@ def _create_test_procedures(self, workload_spec): for op in self._r(test_procedure_spec, "schedule", error_ctx=name): if "clients_list" in op: - self.logger.info("Clients list specified: %s. Running multiple search tasks, "\ + self.logger.info("Clients list specified: %s. 
Running multiple search tasks, " "each scheduled with the corresponding number of clients from the list.", op["clients_list"]) for num_clients in op["clients_list"]: op["clients"] = num_clients - new_name = self._rename_task_based_on_num_clients(name, num_clients) + new_name = self._rename_task_based_on_num_clients( + name, num_clients) new_name = name + "_" + str(num_clients) + "_clients" new_task = self.parse_task(op, ops, new_name) @@ -1651,17 +1790,18 @@ def _create_test_procedures(self, workload_spec): known_task_names.add(sub_task.name) # merge params - final_test_procedure_params = dict(collections.merge_dicts(workload_params, test_procedure_params)) + final_test_procedure_params = dict( + collections.merge_dicts(workload_params, test_procedure_params)) test_procedure = workload.TestProcedure(name=name, - parameters=final_test_procedure_params, - meta_data=meta_data, - description=description, - user_info=user_info, - default=default, - selected=selected, - auto_generated=auto_generated, - schedule=schedule) + parameters=final_test_procedure_params, + meta_data=meta_data, + description=description, + user_info=user_info, + default=default, + selected=selected, + auto_generated=auto_generated, + schedule=schedule) if default: default_test_procedure = test_procedure @@ -1670,14 +1810,14 @@ def _create_test_procedures(self, workload_spec): if test_procedures and default_test_procedure is None: self._error( "No default test_procedure specified. Please edit the workload and add \"default\": true to one of the test_procedures %s." - % ", ".join([c.name for c in test_procedures])) + % ", ".join([c.name for c in test_procedures])) return test_procedures def _rename_task_based_on_num_clients(self, name: str, num_clients: int) -> str: has_underscore = "_" in name has_hyphen = "-" in name if has_underscore and has_hyphen: - self.logger.warning("The test procedure name %s contains a mix of _ and -. "\ + self.logger.warning("The test procedure name %s contains a mix of _ and -. 
" "Consider changing the name to avoid frustrating bugs in the future.", name) return name + "_" + str(num_clients) + "_clients" elif has_hyphen: @@ -1687,15 +1827,20 @@ def _rename_task_based_on_num_clients(self, name: str, num_clients: int) -> str: def _get_test_procedure_specs(self, workload_spec): schedule = self._r(workload_spec, "schedule", mandatory=False) - test_procedure = self._r(workload_spec, "test_procedure", mandatory=False) - test_procedures = self._r(workload_spec, "test_procedures", mandatory=False) + test_procedure = self._r( + workload_spec, "test_procedure", mandatory=False) + test_procedures = self._r( + workload_spec, "test_procedures", mandatory=False) - count_defined = len(list(filter(lambda e: e is not None, [schedule, test_procedure, test_procedures]))) + count_defined = len(list(filter(lambda e: e is not None, [ + schedule, test_procedure, test_procedures]))) if count_defined == 0: - self._error("You must define 'test_procedure', 'test_procedures' or 'schedule' but none is specified.") + self._error( + "You must define 'test_procedure', 'test_procedures' or 'schedule' but none is specified.") elif count_defined > 1: - self._error("Multiple out of 'test_procedure', 'test_procedures' or 'schedule' are defined but only one of them is allowed.") + self._error( + "Multiple out of 'test_procedure', 'test_procedures' or 'schedule' are defined but only one of them is allowed.") elif test_procedure is not None: return [test_procedure], False elif test_procedures is not None: @@ -1712,30 +1857,37 @@ def _get_test_procedure_specs(self, workload_spec): def parse_parallel(self, ops_spec, ops, test_procedure_name): # use same default values as #parseTask() in case the 'parallel' element did not specify anything - default_warmup_iterations = self._r(ops_spec, "warmup-iterations", error_ctx="parallel", mandatory=False) - default_iterations = self._r(ops_spec, "iterations", error_ctx="parallel", mandatory=False) - default_warmup_time_period = self._r(ops_spec, "warmup-time-period", error_ctx="parallel", mandatory=False) - default_time_period = self._r(ops_spec, "time-period", error_ctx="parallel", mandatory=False) - default_ramp_up_time_period = self._r(ops_spec, "ramp-up-time-period", error_ctx="parallel", mandatory=False) - clients = self._r(ops_spec, "clients", error_ctx="parallel", mandatory=False) - completed_by = self._r(ops_spec, "completed-by", error_ctx="parallel", mandatory=False) + default_warmup_iterations = self._r( + ops_spec, "warmup-iterations", error_ctx="parallel", mandatory=False) + default_iterations = self._r( + ops_spec, "iterations", error_ctx="parallel", mandatory=False) + default_warmup_time_period = self._r( + ops_spec, "warmup-time-period", error_ctx="parallel", mandatory=False) + default_time_period = self._r( + ops_spec, "time-period", error_ctx="parallel", mandatory=False) + default_ramp_up_time_period = self._r( + ops_spec, "ramp-up-time-period", error_ctx="parallel", mandatory=False) + clients = self._r(ops_spec, "clients", + error_ctx="parallel", mandatory=False) + completed_by = self._r(ops_spec, "completed-by", + error_ctx="parallel", mandatory=False) # now descent to each operation tasks = [] for task in self._r(ops_spec, "tasks", error_ctx="parallel"): tasks.append(self.parse_task(task, ops, test_procedure_name, default_warmup_iterations, default_iterations, - default_warmup_time_period, default_time_period, default_ramp_up_time_period, + default_warmup_time_period, default_time_period, default_ramp_up_time_period, completed_by)) - + for task 
in tasks: if task.ramp_up_time_period != default_ramp_up_time_period: - if default_ramp_up_time_period is None: - self._error(f"task '{task.name}' in 'parallel' element of test-procedure '{test_procedure_name}' specifies " + if default_ramp_up_time_period is None: + self._error(f"task '{task.name}' in 'parallel' element of test-procedure '{test_procedure_name}' specifies " f"a ramp-up-time-period but it is only allowed on the 'parallel' element.") - else: - self._error(f"task '{task.name}' specifies a different ramp-up-time-period than its enclosing " + else: + self._error(f"task '{task.name}' specifies a different ramp-up-time-period than its enclosing " f"'parallel' element in test-procedure '{test_procedure_name}'.") - + if completed_by: completion_task = None for task in tasks: @@ -1744,7 +1896,7 @@ def parse_parallel(self, ops_spec, ops, test_procedure_name): elif task.completes_parent: self._error( "'parallel' element for test_procedure '%s' contains multiple tasks with the name '%s' which are marked with " - "'completed-by' but only task is allowed to match." % (test_procedure_name, completed_by)) + "'completed-by' but only task is allowed to match." % (test_procedure_name, completed_by)) if not completion_task: self._error("'parallel' element for test_procedure '%s' is marked with 'completed-by' with task name '%s' but no task with " "this name exists." % (test_procedure_name, completed_by)) @@ -1758,43 +1910,50 @@ def parse_task(self, task_spec, ops, test_procedure_name, default_warmup_iterati op = ops[op_spec] else: # may as well an inline operation - op = self.parse_operation(op_spec, error_ctx="inline operation in test_procedure %s" % test_procedure_name) + op = self.parse_operation( + op_spec, error_ctx="inline operation in test_procedure %s" % test_procedure_name) - schedule = self._r(task_spec, "schedule", error_ctx=op.name, mandatory=False) - task_name = self._r(task_spec, "name", error_ctx=op.name, mandatory=False, default_value=op.name) + schedule = self._r(task_spec, "schedule", + error_ctx=op.name, mandatory=False) + task_name = self._r(task_spec, "name", error_ctx=op.name, + mandatory=False, default_value=op.name) task = workload.Task(name=task_name, - operation=op, - tags=self._r(task_spec, "tags", error_ctx=op.name, mandatory=False), - meta_data=self._r(task_spec, "meta", error_ctx=op.name, mandatory=False), - warmup_iterations=self._r(task_spec, "warmup-iterations", error_ctx=op.name, mandatory=False, - default_value=default_warmup_iterations), - iterations=self._r(task_spec, "iterations", error_ctx=op.name, mandatory=False, default_value=default_iterations), - warmup_time_period=self._r(task_spec, "warmup-time-period", error_ctx=op.name, - mandatory=False, - default_value=default_warmup_time_period), - time_period=self._r(task_spec, "time-period", error_ctx=op.name, mandatory=False, - default_value=default_time_period), - ramp_up_time_period=self._r(task_spec, "ramp-up-time-period", error_ctx=op.name, - mandatory=False, default_value=default_ramp_up_time_period), - clients=self._r(task_spec, "clients", error_ctx=op.name, mandatory=False, default_value=1), - completes_parent=(task_name == completed_by_name), - schedule=schedule, - # this is to provide scheduler-specific parameters for custom schedulers. 
- params=task_spec) + operation=op, + tags=self._r(task_spec, "tags", + error_ctx=op.name, mandatory=False), + meta_data=self._r( + task_spec, "meta", error_ctx=op.name, mandatory=False), + warmup_iterations=self._r(task_spec, "warmup-iterations", error_ctx=op.name, mandatory=False, + default_value=default_warmup_iterations), + iterations=self._r( + task_spec, "iterations", error_ctx=op.name, mandatory=False, default_value=default_iterations), + warmup_time_period=self._r(task_spec, "warmup-time-period", error_ctx=op.name, + mandatory=False, + default_value=default_warmup_time_period), + time_period=self._r(task_spec, "time-period", error_ctx=op.name, mandatory=False, + default_value=default_time_period), + ramp_up_time_period=self._r(task_spec, "ramp-up-time-period", error_ctx=op.name, + mandatory=False, default_value=default_ramp_up_time_period), + clients=self._r( + task_spec, "clients", error_ctx=op.name, mandatory=False, default_value=1), + completes_parent=(task_name == completed_by_name), + schedule=schedule, + # this is to provide scheduler-specific parameters for custom schedulers. + params=task_spec) if task.warmup_iterations is not None and task.time_period is not None: self._error( "Operation '%s' in test_procedure '%s' defines '%d' warmup iterations and a time period of '%d' seconds. Please do not " - "mix time periods and iterations." % (op.name, test_procedure_name, task.warmup_iterations, task.time_period)) + "mix time periods and iterations." % (op.name, test_procedure_name, task.warmup_iterations, task.time_period)) elif task.warmup_time_period is not None and task.iterations is not None: self._error( "Operation '%s' in test_procedure '%s' defines a warmup time period of '%d' seconds and '%d' iterations. Please do not " - "mix time periods and iterations." % (op.name, test_procedure_name, task.warmup_time_period, task.iterations)) - + "mix time periods and iterations." 
% (op.name, test_procedure_name, task.warmup_time_period, task.iterations)) + if (task.warmup_iterations is not None or task.iterations is not None) and task.ramp_up_time_period is not None: self._error(f"Operation '{op.name}' in test_procedure '{test_procedure_name}' defines a ramp-up time period of " f"{task.ramp_up_time_period} seconds as well as {task.warmup_iterations} warmup iterations and " f"{task.iterations} iterations but mixing time periods and iterations is not allowed.") - + if task.ramp_up_time_period is not None: if task.warmup_time_period is None: self._error(f"Operation '{op.name}' in test_procedure '{test_procedure_name}' defines a ramp-up time period of " @@ -1827,12 +1986,16 @@ def parse_operation(self, op_spec, error_ctx="operations"): # Cannot have parameters here params = {} else: - meta_data = self._r(op_spec, "meta", error_ctx=error_ctx, mandatory=False) + meta_data = self._r( + op_spec, "meta", error_ctx=error_ctx, mandatory=False) # Benchmark's core operations will still use enums then but we'll allow users to define arbitrary operations - op_type_name = self._r(op_spec, "operation-type", error_ctx=error_ctx) + op_type_name = self._r( + op_spec, "operation-type", error_ctx=error_ctx) # fallback to use the operation type as the operation name - op_name = self._r(op_spec, "name", error_ctx=error_ctx, mandatory=False, default_value=op_type_name) - param_source = self._r(op_spec, "param-source", error_ctx=error_ctx, mandatory=False) + op_name = self._r(op_spec, "name", error_ctx=error_ctx, + mandatory=False, default_value=op_type_name) + param_source = self._r( + op_spec, "param-source", error_ctx=error_ctx, mandatory=False) # just pass-through all parameters by default params = op_spec @@ -1840,13 +2003,16 @@ def parse_operation(self, op_spec, error_ctx="operations"): op = workload.OperationType.from_hyphenated_string(op_type_name) if "include-in-results_publishing" not in params: params["include-in-results_publishing"] = not op.admin_op - self.logger.debug("Using built-in operation type [%s] for operation [%s].", op_type_name, op_name) + self.logger.debug( + "Using built-in operation type [%s] for operation [%s].", op_type_name, op_name) except KeyError: - self.logger.info("Using user-provided operation type [%s] for operation [%s].", op_type_name, op_name) + self.logger.info( + "Using user-provided operation type [%s] for operation [%s].", op_type_name, op_name) try: return workload.Operation(name=op_name, meta_data=meta_data, - operation_type=op_type_name, params=params, - param_source=param_source) + operation_type=op_type_name, params=params, + param_source=param_source) except exceptions.InvalidSyntax as e: - raise WorkloadSyntaxError("Invalid operation [%s]: %s" % (op_name, str(e))) + raise WorkloadSyntaxError( + "Invalid operation [%s]: %s" % (op_name, str(e))) diff --git a/tests/aggregator_test.py b/tests/aggregator_test.py index 32be06cb..d963de60 100644 --- a/tests/aggregator_test.py +++ b/tests/aggregator_test.py @@ -50,6 +50,7 @@ def test_count_iterations_for_each_op(aggregator): mock_test_procedure.schedule = mock_schedule mock_workload.test_procedures = [mock_test_procedure] + mock_workload.find_test_procedure_or_default = Mock(return_value=mock_test_procedure) mock_test_execution = Mock(test_execution_id="test1", workload_params={}) aggregator.loaded_workload = mock_workload diff --git a/tests/worker_coordinator/worker_coordinator_test.py b/tests/worker_coordinator/worker_coordinator_test.py index ab7eba63..02626238 100644 --- 
a/tests/worker_coordinator/worker_coordinator_test.py +++ b/tests/worker_coordinator/worker_coordinator_test.py @@ -13,7 +13,7 @@ # not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -82,7 +82,8 @@ class StaticClientFactory: PATCHER = None def __init__(self, *args, **kwargs): - WorkerCoordinatorTests.StaticClientFactory.PATCHER = mock.patch("opensearchpy.OpenSearch") + WorkerCoordinatorTests.StaticClientFactory.PATCHER = mock.patch( + "opensearchpy.OpenSearch") self.opensearch = WorkerCoordinatorTests.StaticClientFactory.PATCHER.start() self.opensearch.indices.stats.return_value = {"mocked": True} @@ -95,26 +96,38 @@ def close(cls): def setUp(self): self.cfg = config.Config() - self.cfg.add(config.Scope.application, "system", "env.name", "unittest") - self.cfg.add(config.Scope.application, "system", "time.start", datetime(year=2017, month=8, day=20, hour=1, minute=0, second=0)) - self.cfg.add(config.Scope.application, "system", "test_execution.id", "6ebc6e53-ee20-4b0c-99b4-09697987e9f4") + self.cfg.add(config.Scope.application, + "system", "env.name", "unittest") + self.cfg.add(config.Scope.application, "system", "time.start", datetime( + year=2017, month=8, day=20, hour=1, minute=0, second=0)) + self.cfg.add(config.Scope.application, "system", + "test_execution.id", "6ebc6e53-ee20-4b0c-99b4-09697987e9f4") self.cfg.add(config.Scope.application, "system", "available.cores", 8) self.cfg.add(config.Scope.application, "node", "root.dir", "/tmp") - self.cfg.add(config.Scope.application, "workload", "test_procedure.name", "default") + self.cfg.add(config.Scope.application, "workload", + "test_procedure.name", "default") self.cfg.add(config.Scope.application, "workload", "params", {}) - self.cfg.add(config.Scope.application, "workload", "test.mode.enabled", True) + self.cfg.add(config.Scope.application, "workload", + "test.mode.enabled", True) self.cfg.add(config.Scope.application, "telemetry", "devices", []) - self.cfg.add(config.Scope.application, "telemetry", "params", {"ccr-stats-indices": {"default": ["leader_index"]}}) - self.cfg.add(config.Scope.application, "builder", "provision_config_instance.names", ["default"]) - self.cfg.add(config.Scope.application, "builder", "skip.rest.api.check", True) + self.cfg.add(config.Scope.application, "telemetry", "params", { + "ccr-stats-indices": {"default": ["leader_index"]}}) + self.cfg.add(config.Scope.application, "builder", + "provision_config_instance.names", ["default"]) + self.cfg.add(config.Scope.application, "builder", + "skip.rest.api.check", True) self.cfg.add(config.Scope.application, "client", "hosts", WorkerCoordinatorTests.Holder(all_hosts={"default": ["localhost:9200"]})) - self.cfg.add(config.Scope.application, "client", "options", WorkerCoordinatorTests.Holder(all_client_options={"default": {}})) - self.cfg.add(config.Scope.application, "worker_coordinator", "load_worker_coordinator_hosts", ["localhost"]) - self.cfg.add(config.Scope.application, "results_publishing", "datastore.type", "in-memory") + self.cfg.add(config.Scope.application, "client", "options", + WorkerCoordinatorTests.Holder(all_client_options={"default": {}})) + self.cfg.add(config.Scope.application, "worker_coordinator", + "load_worker_coordinator_hosts", ["localhost"]) + 
self.cfg.add(config.Scope.application, "results_publishing", + "datastore.type", "in-memory") default_test_procedure = workload.TestProcedure("default", default=True, schedule=[ - workload.Task(name="index", operation=workload.Operation("index", operation_type=workload.OperationType.Bulk), clients=4) + workload.Task(name="index", operation=workload.Operation( + "index", operation_type=workload.OperationType.Bulk), clients=4) ]) another_test_procedure = workload.TestProcedure("other", default=False) self.workload = workload.Workload( @@ -135,14 +148,17 @@ def create_test_worker_coordinator_target(self): @mock.patch("osbenchmark.utils.net.resolve") def test_start_benchmark_and_prepare_workload(self, resolve): # override load worker_coordinator host - self.cfg.add(config.Scope.applicationOverride, "worker_coordinator", "load_worker_coordinator_hosts", ["10.5.5.1", "10.5.5.2"]) + self.cfg.add(config.Scope.applicationOverride, "worker_coordinator", + "load_worker_coordinator_hosts", ["10.5.5.1", "10.5.5.2"]) resolve.side_effect = ["10.5.5.1", "10.5.5.2"] target = self.create_test_worker_coordinator_target() - d = worker_coordinator.WorkerCoordinator(target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) + d = worker_coordinator.WorkerCoordinator( + target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) d.prepare_benchmark(t=self.workload) - target.prepare_workload.assert_called_once_with(["10.5.5.1", "10.5.5.2"], self.cfg, self.workload) + target.prepare_workload.assert_called_once_with( + ["10.5.5.1", "10.5.5.2"], self.cfg, self.workload) d.start_benchmark() target.create_client.assert_has_calls(calls=[ @@ -157,11 +173,13 @@ def test_start_benchmark_and_prepare_workload(self, resolve): def test_assign_worker_coordinators_round_robin(self): target = self.create_test_worker_coordinator_target() - d = worker_coordinator.WorkerCoordinator(target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) + d = worker_coordinator.WorkerCoordinator( + target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) d.prepare_benchmark(t=self.workload) - target.prepare_workload.assert_called_once_with(["localhost"], self.cfg, self.workload) + target.prepare_workload.assert_called_once_with( + ["localhost"], self.cfg, self.workload) d.start_benchmark() @@ -177,7 +195,8 @@ def test_assign_worker_coordinators_round_robin(self): def test_client_reaches_join_point_others_still_executing(self): target = self.create_test_worker_coordinator_target() - d = worker_coordinator.WorkerCoordinator(target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) + d = worker_coordinator.WorkerCoordinator( + target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) d.prepare_benchmark(t=self.workload) d.start_benchmark() @@ -195,7 +214,8 @@ def test_client_reaches_join_point_others_still_executing(self): def test_client_reaches_join_point_which_completes_parent(self): target = self.create_test_worker_coordinator_target() - d = worker_coordinator.WorkerCoordinator(target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) + d = worker_coordinator.WorkerCoordinator( + target, self.cfg, os_client_factory_class=WorkerCoordinatorTests.StaticClientFactory) d.prepare_benchmark(t=self.workload) d.start_benchmark() @@ -206,8 +226,8 @@ def test_client_reaches_join_point_which_completes_parent(self): worker_local_timestamp=10, task_allocations=[ 
worker_coordinator.ClientAllocation(client_id=0, - task=worker_coordinator.JoinPoint(id=0, - clients_executing_completing_task=[0]))]) + task=worker_coordinator.JoinPoint(id=0, + clients_executing_completing_task=[0]))]) self.assertEqual(-1, d.current_step) self.assertEqual(1, len(d.workers_completed_current_step)) @@ -219,8 +239,8 @@ def test_client_reaches_join_point_which_completes_parent(self): worker_local_timestamp=11, task_allocations=[ worker_coordinator.ClientAllocation(client_id=1, - task=worker_coordinator.JoinPoint(id=0, - clients_executing_completing_task=[0]))]) + task=worker_coordinator.JoinPoint(id=0, + clients_executing_completing_task=[0]))]) self.assertEqual(-1, d.current_step) self.assertEqual(2, len(d.workers_completed_current_step)) @@ -229,8 +249,8 @@ def test_client_reaches_join_point_which_completes_parent(self): worker_local_timestamp=12, task_allocations=[ worker_coordinator.ClientAllocation(client_id=2, - task=worker_coordinator.JoinPoint(id=0, - clients_executing_completing_task=[0]))]) + task=worker_coordinator.JoinPoint(id=0, + clients_executing_completing_task=[0]))]) self.assertEqual(-1, d.current_step) self.assertEqual(3, len(d.workers_completed_current_step)) @@ -238,8 +258,8 @@ def test_client_reaches_join_point_which_completes_parent(self): worker_local_timestamp=13, task_allocations=[ worker_coordinator.ClientAllocation(client_id=3, - task=worker_coordinator.JoinPoint(id=0, - clients_executing_completing_task=[0]))]) + task=worker_coordinator.JoinPoint(id=0, + clients_executing_completing_task=[0]))]) # by now the previous step should be considered completed and we are at the next one self.assertEqual(0, d.current_step) @@ -295,11 +315,12 @@ def request_metric(self, absolute_time, relative_time, name, value): @mock.patch("osbenchmark.metrics.MetricsStore") def test_all_samples(self, metrics_store): post_process = worker_coordinator.SamplePostprocessor(metrics_store, - downsample_factor=1, - workload_meta_data={}, - test_procedure_meta_data={}) + downsample_factor=1, + workload_meta_data={}, + test_procedure_meta_data={}) - task = workload.Task("index", workload.Operation("index-op", "bulk", param_source="worker-coordinator-test-param-source")) + task = workload.Task("index", workload.Operation( + "index-op", "bulk", param_source="worker-coordinator-test-param-source")) samples = [ worker_coordinator.Sample( 0, 38598, 24, 0, task, metrics.SampleType.Normal, @@ -313,9 +334,11 @@ def test_all_samples(self, metrics_store): calls = [ self.latency(38598, 24, 10.0), self.service_time(38598, 24, 7.0), - self.client_processing_time(38598, 24, 0.7), self.processing_time(38598, 24, 9.0), + self.client_processing_time( + 38598, 24, 0.7), self.processing_time(38598, 24, 9.0), self.latency(38599, 25, 10.0), self.service_time(38599, 25, 7.0), - self.client_processing_time(38599, 25, 0.7), self.processing_time(38599, 25, 9.0), + self.client_processing_time( + 38599, 25, 0.7), self.processing_time(38599, 25, 9.0), self.throughput(38598, 24, 5000), self.throughput(38599, 25, 5000), ] @@ -324,11 +347,12 @@ def test_all_samples(self, metrics_store): @mock.patch("osbenchmark.metrics.MetricsStore") def test_downsamples(self, metrics_store): post_process = worker_coordinator.SamplePostprocessor(metrics_store, - downsample_factor=2, - workload_meta_data={}, - test_procedure_meta_data={}) + downsample_factor=2, + workload_meta_data={}, + test_procedure_meta_data={}) - task = workload.Task("index", workload.Operation("index-op", "bulk", 
param_source="worker-coordinator-test-param-source")) + task = workload.Task("index", workload.Operation( + "index-op", "bulk", param_source="worker-coordinator-test-param-source")) samples = [ worker_coordinator.Sample( @@ -344,7 +368,8 @@ def test_downsamples(self, metrics_store): calls = [ # only the first out of two request samples is included, throughput metrics are still complete self.latency(38598, 24, 10.0), self.service_time(38598, 24, 7.0), - self.client_processing_time(38598, 24, 0.7), self.processing_time(38598, 24, 9.0), + self.client_processing_time( + 38598, 24, 0.7), self.processing_time(38598, 24, 9.0), self.throughput(38598, 24, 5000), self.throughput(38599, 25, 5000), ] @@ -353,38 +378,40 @@ def test_downsamples(self, metrics_store): @mock.patch("osbenchmark.metrics.MetricsStore") def test_dependent_samples(self, metrics_store): post_process = worker_coordinator.SamplePostprocessor(metrics_store, - downsample_factor=1, - workload_meta_data={}, - test_procedure_meta_data={}) + downsample_factor=1, + workload_meta_data={}, + test_procedure_meta_data={}) - task = workload.Task("index", workload.Operation("index-op", "bulk", param_source="worker-coordinator-test-param-source")) + task = workload.Task("index", workload.Operation( + "index-op", "bulk", param_source="worker-coordinator-test-param-source")) samples = [ worker_coordinator.Sample( 0, 38598, 24, 0, task, metrics.SampleType.Normal, None, 0.01, 0.007, 0.0007, 0.009, None, 5000, "docs", 1, 1 / 2, - dependent_timing=[ - { - "absolute_time": 38601, - "request_start": 25, - "service_time": 0.05, - "operation": "index-op", - "operation-type": "bulk" - }, - { - "absolute_time": 38602, - "request_start": 26, - "service_time": 0.08, - "operation": "index-op", - "operation-type": "bulk" - } - ]), + dependent_timing=[ + { + "absolute_time": 38601, + "request_start": 25, + "service_time": 0.05, + "operation": "index-op", + "operation-type": "bulk" + }, + { + "absolute_time": 38602, + "request_start": 26, + "service_time": 0.08, + "operation": "index-op", + "operation-type": "bulk" + } + ]), ] post_process(samples) calls = [ self.latency(38598, 24, 10.0), self.service_time(38598, 24, 7.0), - self.client_processing_time(38598, 24, 0.7), self.processing_time(38598, 24, 9.0), + self.client_processing_time( + 38598, 24, 0.7), self.processing_time(38598, 24, 9.0), # dependent timings self.service_time(38601, 25, 50.0), self.service_time(38602, 26, 80.0), @@ -400,7 +427,8 @@ def test_single_host_assignment_clients_matches_cores(self): "cores": 4 }] - assignments = worker_coordinator.calculate_worker_assignments(host_configs, client_count=4) + assignments = worker_coordinator.calculate_worker_assignments( + host_configs, client_count=4) self.assertEqual([ { @@ -420,7 +448,8 @@ def test_single_host_assignment_more_clients_than_cores(self): "cores": 4 }] - assignments = worker_coordinator.calculate_worker_assignments(host_configs, client_count=6) + assignments = worker_coordinator.calculate_worker_assignments( + host_configs, client_count=6) self.assertEqual([ { @@ -440,7 +469,8 @@ def test_single_host_assignment_less_clients_than_cores(self): "cores": 4 }] - assignments = worker_coordinator.calculate_worker_assignments(host_configs, client_count=2) + assignments = worker_coordinator.calculate_worker_assignments( + host_configs, client_count=2) self.assertEqual([ { @@ -466,7 +496,8 @@ def test_multiple_host_assignment_more_clients_than_cores(self): } ] - assignments = worker_coordinator.calculate_worker_assignments(host_configs, 
client_count=16) + assignments = worker_coordinator.calculate_worker_assignments( + host_configs, client_count=16) self.assertEqual([ { @@ -501,7 +532,8 @@ def test_multiple_host_assignment_less_clients_than_cores(self): } ] - assignments = worker_coordinator.calculate_worker_assignments(host_configs, client_count=4) + assignments = worker_coordinator.calculate_worker_assignments( + host_configs, client_count=4) self.assertEqual([ { @@ -540,7 +572,8 @@ def test_uneven_assignment_across_hosts(self): } ] - assignments = worker_coordinator.calculate_worker_assignments(host_configs, client_count=17) + assignments = worker_coordinator.calculate_worker_assignments( + host_configs, client_count=17) self.assertEqual([ { @@ -575,10 +608,11 @@ def test_uneven_assignment_across_hosts(self): class AllocatorTests(TestCase): def setUp(self): - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) def ta(self, task, client_index_in_task, global_client_index=None, total_clients=None): - return worker_coordinator.TaskAllocation(task, client_index_in_task, + return worker_coordinator.TaskAllocation(task, client_index_in_task, client_index_in_task if global_client_index is None else global_client_index, task.clients if total_clients is None else total_clients) @@ -606,7 +640,8 @@ def test_allocates_two_serial_tasks(self): def test_allocates_two_parallel_tasks(self): task = workload.Task("index", op("index", workload.OperationType.Bulk)) - allocator = worker_coordinator.Allocator([workload.Parallel([task, task])]) + allocator = worker_coordinator.Allocator( + [workload.Parallel([task, task])]) self.assertEqual(2, allocator.clients) self.assertEqual(3, len(allocator.allocations[0])) @@ -615,13 +650,17 @@ def test_allocates_two_parallel_tasks(self): self.assertEqual([{task}], allocator.tasks_per_joinpoint) for join_point in allocator.join_points: self.assertFalse(join_point.preceding_task_completes_parent) - self.assertEqual(0, join_point.num_clients_executing_completing_task) + self.assertEqual( + 0, join_point.num_clients_executing_completing_task) def test_a_task_completes_the_parallel_structure(self): - taskA = workload.Task("index-completing", op("index", workload.OperationType.Bulk), completes_parent=True) - taskB = workload.Task("index-non-completing", op("index", workload.OperationType.Bulk)) + taskA = workload.Task("index-completing", op("index", + workload.OperationType.Bulk), completes_parent=True) + taskB = workload.Task("index-non-completing", + op("index", workload.OperationType.Bulk)) - allocator = worker_coordinator.Allocator([workload.Parallel([taskA, taskB])]) + allocator = worker_coordinator.Allocator( + [workload.Parallel([taskA, taskB])]) self.assertEqual(2, allocator.clients) self.assertEqual(3, len(allocator.allocations[0])) @@ -630,19 +669,25 @@ def test_a_task_completes_the_parallel_structure(self): self.assertEqual([{taskA, taskB}], allocator.tasks_per_joinpoint) final_join_point = allocator.join_points[1] self.assertTrue(final_join_point.preceding_task_completes_parent) - self.assertEqual(1, final_join_point.num_clients_executing_completing_task) - self.assertEqual([0], final_join_point.clients_executing_completing_task) + self.assertEqual( + 1, final_join_point.num_clients_executing_completing_task) + self.assertEqual( + [0], final_join_point.clients_executing_completing_task) def 
test_allocates_mixed_tasks(self): - index = workload.Task("index", op("index", workload.OperationType.Bulk)) - stats = workload.Task("stats", op("stats", workload.OperationType.IndexStats)) - search = workload.Task("search", op("search", workload.OperationType.Search)) + index = workload.Task("index", op( + "index", workload.OperationType.Bulk)) + stats = workload.Task("stats", op( + "stats", workload.OperationType.IndexStats)) + search = workload.Task("search", op( + "search", workload.OperationType.Search)) allocator = worker_coordinator.Allocator([index, - workload.Parallel([index, stats, stats]), - index, - index, - workload.Parallel([search, search, search])]) + workload.Parallel( + [index, stats, stats]), + index, + index, + workload.Parallel([search, search, search])]) self.assertEqual(3, allocator.clients) @@ -651,19 +696,27 @@ def test_allocates_mixed_tasks(self): self.assertEqual(11, len(allocator.allocations[1])) self.assertEqual(11, len(allocator.allocations[2])) self.assertEqual(6, len(allocator.join_points)) - self.assertEqual([{index}, {index, stats}, {index}, {index}, {search}], allocator.tasks_per_joinpoint) + self.assertEqual([{index}, {index, stats}, {index}, {index}, { + search}], allocator.tasks_per_joinpoint) for join_point in allocator.join_points: self.assertFalse(join_point.preceding_task_completes_parent) - self.assertEqual(0, join_point.num_clients_executing_completing_task) + self.assertEqual( + 0, join_point.num_clients_executing_completing_task) def test_allocates_more_tasks_than_clients(self): - index_a = workload.Task("index-a", op("index-a", workload.OperationType.Bulk)) - index_b = workload.Task("index-b", op("index-b", workload.OperationType.Bulk), completes_parent=True) - index_c = workload.Task("index-c", op("index-c", workload.OperationType.Bulk)) - index_d = workload.Task("index-d", op("index-d", workload.OperationType.Bulk)) - index_e = workload.Task("index-e", op("index-e", workload.OperationType.Bulk)) - - allocator = worker_coordinator.Allocator([workload.Parallel(tasks=[index_a, index_b, index_c, index_d, index_e], clients=2)]) + index_a = workload.Task( + "index-a", op("index-a", workload.OperationType.Bulk)) + index_b = workload.Task( + "index-b", op("index-b", workload.OperationType.Bulk), completes_parent=True) + index_c = workload.Task( + "index-c", op("index-c", workload.OperationType.Bulk)) + index_d = workload.Task( + "index-d", op("index-d", workload.OperationType.Bulk)) + index_e = workload.Task( + "index-e", op("index-e", workload.OperationType.Bulk)) + + allocator = worker_coordinator.Allocator([workload.Parallel( + tasks=[index_a, index_b, index_c, index_d, index_e], clients=2)]) self.assertEqual(2, allocator.clients) @@ -673,33 +726,45 @@ def test_allocates_more_tasks_than_clients(self): # join_point, index_a, index_c, index_e, join_point self.assertEqual(5, len(allocations[0])) # we really have no chance to extract the join point so we just take what is there... 
- self.assertEqual([allocations[0][0], - self.ta(index_a, client_index_in_task=0, global_client_index=0, total_clients=2), - self.ta(index_c, client_index_in_task=0, global_client_index=2, total_clients=2), - self.ta(index_e, client_index_in_task=0, global_client_index=4, total_clients=2), + self.assertEqual([allocations[0][0], + self.ta(index_a, client_index_in_task=0, + global_client_index=0, total_clients=2), + self.ta(index_c, client_index_in_task=0, + global_client_index=2, total_clients=2), + self.ta(index_e, client_index_in_task=0, + global_client_index=4, total_clients=2), allocations[0][4]], allocations[0]) # join_point, index_a, index_c, None, join_point self.assertEqual(5, len(allocator.allocations[1])) - self.assertEqual([allocations[1][0], - self.ta(index_b, client_index_in_task=0, global_client_index=1, total_clients=2), - self.ta(index_d, client_index_in_task=0, global_client_index=3, total_clients=2), - None, allocations[1][4]], - allocations[1]) + self.assertEqual([allocations[1][0], + self.ta(index_b, client_index_in_task=0, + global_client_index=1, total_clients=2), + self.ta(index_d, client_index_in_task=0, + global_client_index=3, total_clients=2), + None, allocations[1][4]], + allocations[1]) - self.assertEqual([{index_a, index_b, index_c, index_d, index_e}], allocator.tasks_per_joinpoint) + self.assertEqual( + [{index_a, index_b, index_c, index_d, index_e}], allocator.tasks_per_joinpoint) self.assertEqual(2, len(allocator.join_points)) final_join_point = allocator.join_points[1] self.assertTrue(final_join_point.preceding_task_completes_parent) - self.assertEqual(1, final_join_point.num_clients_executing_completing_task) - self.assertEqual([1], final_join_point.clients_executing_completing_task) + self.assertEqual( + 1, final_join_point.num_clients_executing_completing_task) + self.assertEqual( + [1], final_join_point.clients_executing_completing_task) def test_considers_number_of_clients_per_subtask(self): - index_a = workload.Task("index-a", op("index-a", workload.OperationType.Bulk)) - index_b = workload.Task("index-b", op("index-b", workload.OperationType.Bulk)) - index_c = workload.Task("index-c", op("index-c", workload.OperationType.Bulk), clients=2, completes_parent=True) + index_a = workload.Task( + "index-a", op("index-a", workload.OperationType.Bulk)) + index_b = workload.Task( + "index-b", op("index-b", workload.OperationType.Bulk)) + index_c = workload.Task( + "index-c", op("index-c", workload.OperationType.Bulk), clients=2, completes_parent=True) - allocator = worker_coordinator.Allocator([workload.Parallel(tasks=[index_a, index_b, index_c], clients=3)]) + allocator = worker_coordinator.Allocator( + [workload.Parallel(tasks=[index_a, index_b, index_c], clients=3)]) self.assertEqual(3, allocator.clients) @@ -712,45 +777,54 @@ def test_considers_number_of_clients_per_subtask(self): # join_point, index_a, index_c, join_point self.assertEqual(4, len(allocations[0])) # we really have no chance to extract the join point so we just take what is there... 
- self.assertEqual([allocations[0][0], - self.ta(index_a, client_index_in_task=0, global_client_index=0, total_clients=3), - self.ta(index_c, client_index_in_task=1, global_client_index=3, total_clients=3), - allocations[0][3]], - allocations[0]) + self.assertEqual([allocations[0][0], + self.ta(index_a, client_index_in_task=0, + global_client_index=0, total_clients=3), + self.ta(index_c, client_index_in_task=1, + global_client_index=3, total_clients=3), + allocations[0][3]], + allocations[0]) # task that client 1 will execute: # join_point, index_b, None, join_point self.assertEqual(4, len(allocator.allocations[1])) - self.assertEqual([allocations[1][0], - self.ta(index_b, client_index_in_task=0, global_client_index=1, total_clients=3), - None, - allocations[1][3]], - allocations[1]) + self.assertEqual([allocations[1][0], + self.ta(index_b, client_index_in_task=0, + global_client_index=1, total_clients=3), + None, + allocations[1][3]], + allocations[1]) # tasks that client 2 will execute: self.assertEqual(4, len(allocator.allocations[2])) - self.assertEqual([allocations[2][0], - self.ta(index_c, client_index_in_task=0, global_client_index=2, total_clients=3), - None, - allocations[2][3]], - allocations[2]) + self.assertEqual([allocations[2][0], + self.ta(index_c, client_index_in_task=0, + global_client_index=2, total_clients=3), + None, + allocations[2][3]], + allocations[2]) - self.assertEqual([{index_a, index_b, index_c}], allocator.tasks_per_joinpoint) + self.assertEqual([{index_a, index_b, index_c}], + allocator.tasks_per_joinpoint) self.assertEqual(2, len(allocator.join_points)) final_join_point = allocator.join_points[1] self.assertTrue(final_join_point.preceding_task_completes_parent) # task index_c has two clients, hence we have to wait for two clients to finish - self.assertEqual(2, final_join_point.num_clients_executing_completing_task) - self.assertEqual([2, 0], final_join_point.clients_executing_completing_task) + self.assertEqual( + 2, final_join_point.num_clients_executing_completing_task) + self.assertEqual( + [2, 0], final_join_point.clients_executing_completing_task) class MetricsAggregationTests(TestCase): def setUp(self): - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) def test_different_sample_types(self): - op = workload.Operation("index", workload.OperationType.Bulk, param_source="worker-coordinator-test-param-source") + op = workload.Operation("index", workload.OperationType.Bulk, + param_source="worker-coordinator-test-param-source") samples = [ worker_coordinator.Sample(0, 1470838595, 21, 0, op, metrics.SampleType.Warmup, @@ -766,19 +840,28 @@ def test_different_sample_types(self): throughput = aggregated[op] self.assertEqual(2, len(throughput)) - self.assertEqual((1470838595, 21, metrics.SampleType.Warmup, 3000, "docs/s"), throughput[0]) - self.assertEqual((1470838595.5, 21.5, metrics.SampleType.Normal, 3666.6666666666665, "docs/s"), throughput[1]) + self.assertEqual( + (1470838595, 21, metrics.SampleType.Warmup, 3000, "docs/s"), throughput[0]) + self.assertEqual((1470838595.5, 21.5, metrics.SampleType.Normal, + 3666.6666666666665, "docs/s"), throughput[1]) def test_single_metrics_aggregation(self): - op = workload.Operation("index", workload.OperationType.Bulk, param_source="worker-coordinator-test-param-source") + op = workload.Operation("index", workload.OperationType.Bulk, + 
param_source="worker-coordinator-test-param-source") samples = [ - worker_coordinator.Sample(0, 38595, 21, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 1, 1 / 9), - worker_coordinator.Sample(0, 38596, 22, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 2, 2 / 9), - worker_coordinator.Sample(0, 38597, 23, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 3, 3 / 9), - worker_coordinator.Sample(0, 38598, 24, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 4, 4 / 9), - worker_coordinator.Sample(0, 38599, 25, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 5, 5 / 9), - worker_coordinator.Sample(0, 38600, 26, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 6, 6 / 9), + worker_coordinator.Sample(0, 38595, 21, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, None, 5000, "docs", 1, 1 / 9), + worker_coordinator.Sample(0, 38596, 22, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, None, 5000, "docs", 2, 2 / 9), + worker_coordinator.Sample(0, 38597, 23, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, None, 5000, "docs", 3, 3 / 9), + worker_coordinator.Sample(0, 38598, 24, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, None, 5000, "docs", 4, 4 / 9), + worker_coordinator.Sample(0, 38599, 25, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, None, 5000, "docs", 5, 5 / 9), + worker_coordinator.Sample(0, 38600, 26, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, None, 5000, "docs", 6, 6 / 9), worker_coordinator.Sample(1, 38598.5, 24.5, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, None, 5000, "docs", 4.5, 7 / 9), worker_coordinator.Sample(1, 38599.5, 25.5, 0, op, metrics.SampleType.Normal, @@ -794,22 +877,31 @@ def test_single_metrics_aggregation(self): throughput = aggregated[op] self.assertEqual(6, len(throughput)) - self.assertEqual((38595, 21, metrics.SampleType.Normal, 5000, "docs/s"), throughput[0]) - self.assertEqual((38596, 22, metrics.SampleType.Normal, 5000, "docs/s"), throughput[1]) - self.assertEqual((38597, 23, metrics.SampleType.Normal, 5000, "docs/s"), throughput[2]) - self.assertEqual((38598, 24, metrics.SampleType.Normal, 5000, "docs/s"), throughput[3]) - self.assertEqual((38599, 25, metrics.SampleType.Normal, 6000, "docs/s"), throughput[4]) - self.assertEqual((38600, 26, metrics.SampleType.Normal, 6666.666666666667, "docs/s"), throughput[5]) + self.assertEqual((38595, 21, metrics.SampleType.Normal, + 5000, "docs/s"), throughput[0]) + self.assertEqual((38596, 22, metrics.SampleType.Normal, + 5000, "docs/s"), throughput[1]) + self.assertEqual((38597, 23, metrics.SampleType.Normal, + 5000, "docs/s"), throughput[2]) + self.assertEqual((38598, 24, metrics.SampleType.Normal, + 5000, "docs/s"), throughput[3]) + self.assertEqual((38599, 25, metrics.SampleType.Normal, + 6000, "docs/s"), throughput[4]) + self.assertEqual((38600, 26, metrics.SampleType.Normal, + 6666.666666666667, "docs/s"), throughput[5]) # self.assertEqual((1470838600.5, 26.5, metrics.SampleType.Normal, 10000), throughput[6]) def test_use_provided_throughput(self): op = workload.Operation("index-recovery", workload.OperationType.WaitForRecovery, - param_source="worker-coordinator-test-param-source") + param_source="worker-coordinator-test-param-source") samples = [ - worker_coordinator.Sample(0, 38595, 21, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, 8000, 5000, "byte", 1, 1 / 3), - 
worker_coordinator.Sample(0, 38596, 22, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, 8000, 5000, "byte", 2, 2 / 3), - worker_coordinator.Sample(0, 38597, 23, 0, op, metrics.SampleType.Normal, None, -1, -1, -1, -1, 8000, 5000, "byte", 3, 3 / 3), + worker_coordinator.Sample(0, 38595, 21, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, 8000, 5000, "byte", 1, 1 / 3), + worker_coordinator.Sample(0, 38596, 22, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, 8000, 5000, "byte", 2, 2 / 3), + worker_coordinator.Sample(0, 38597, 23, 0, op, metrics.SampleType.Normal, + None, -1, -1, -1, -1, 8000, 5000, "byte", 3, 3 / 3), ] aggregated = self.calculate_global_throughput(samples) @@ -819,9 +911,12 @@ def test_use_provided_throughput(self): throughput = aggregated[op] self.assertEqual(3, len(throughput)) - self.assertEqual((38595, 21, metrics.SampleType.Normal, 8000, "byte/s"), throughput[0]) - self.assertEqual((38596, 22, metrics.SampleType.Normal, 8000, "byte/s"), throughput[1]) - self.assertEqual((38597, 23, metrics.SampleType.Normal, 8000, "byte/s"), throughput[2]) + self.assertEqual((38595, 21, metrics.SampleType.Normal, + 8000, "byte/s"), throughput[0]) + self.assertEqual((38596, 22, metrics.SampleType.Normal, + 8000, "byte/s"), throughput[1]) + self.assertEqual((38597, 23, metrics.SampleType.Normal, + 8000, "byte/s"), throughput[2]) def calculate_global_throughput(self, samples): return worker_coordinator.ThroughputCalculator().calculate(samples) @@ -863,10 +958,14 @@ async def assert_schedule(self, expected_schedule, schedule_handle, infinite_sch schedule_handle.start() async for invocation_time, sample_type, progress_percent, runner, params in schedule_handle(): schedule_handle.before_request(now=idx) - exp_invocation_time, exp_sample_type, exp_progress_percent, exp_params = expected_schedule[idx] - self.assertAlmostEqual(exp_invocation_time, invocation_time, msg="Invocation time for sample at index %d does not match" % idx) - self.assertEqual(exp_sample_type, sample_type, "Sample type for sample at index %d does not match" % idx) - self.assertEqual(exp_progress_percent, progress_percent, "Current progress for sample at index %d does not match" % idx) + exp_invocation_time, exp_sample_type, exp_progress_percent, exp_params = expected_schedule[ + idx] + self.assertAlmostEqual(exp_invocation_time, invocation_time, + msg="Invocation time for sample at index %d does not match" % idx) + self.assertEqual(exp_sample_type, sample_type, + "Sample type for sample at index %d does not match" % idx) + self.assertEqual(exp_progress_percent, progress_percent, + "Current progress for sample at index %d does not match" % idx) self.assertIsNotNone(runner, "runner must be defined") self.assertEqual(exp_params, params, "Parameters do not match") idx += 1 @@ -874,17 +973,22 @@ async def assert_schedule(self, expected_schedule, schedule_handle, infinite_sch if infinite_schedule and idx == len(expected_schedule): break # simulate that the request is done - we only support throttling based on request count (ops). 
- schedule_handle.after_request(now=idx, weight=1, unit="ops", request_meta_data=None) + schedule_handle.after_request( + now=idx, weight=1, unit="ops", request_meta_data=None) if not infinite_schedule: - self.assertEqual(len(expected_schedule), idx, msg="Number of elements in the schedules do not match") + self.assertEqual(len(expected_schedule), idx, + msg="Number of elements in the schedules do not match") def setUp(self): self.test_workload = workload.Workload(name="unittest") self.runner_with_progress = SchedulerTests.RunnerWithProgress() - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) runner.register_default_runners() - runner.register_runner("worker-coordinator-test-runner-with-completion", self.runner_with_progress, async_runner=True) - scheduler.register_scheduler("custom-complex-scheduler", SchedulerTests.CustomComplexScheduler) + runner.register_runner("worker-coordinator-test-runner-with-completion", + self.runner_with_progress, async_runner=True) + scheduler.register_scheduler( + "custom-complex-scheduler", SchedulerTests.CustomComplexScheduler) def tearDown(self): runner.remove_runner("worker-coordinator-test-runner-with-completion") @@ -892,16 +996,16 @@ def tearDown(self): def test_injects_parameter_source_into_scheduler(self): task = workload.Task(name="search", - schedule="custom-complex-scheduler", - operation=workload.Operation( - name="search", - operation_type=workload.OperationType.Search.to_hyphenated_string(), - param_source="worker-coordinator-test-param-source" - ), - clients=4, - params={ - "target-throughput": "5000 ops/s" - }) + schedule="custom-complex-scheduler", + operation=workload.Operation( + name="search", + operation_type=workload.OperationType.Search.to_hyphenated_string(), + param_source="worker-coordinator-test-param-source" + ), + clients=4, + params={ + "target-throughput": "5000 ops/s" + }) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -910,16 +1014,18 @@ def test_injects_parameter_source_into_scheduler(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) - self.assertIsNotNone(schedule.sched.parameter_source, "Parameter source has not been injected into scheduler") + self.assertIsNotNone(schedule.sched.parameter_source, + "Parameter source has not been injected into scheduler") self.assertEqual(param_source, schedule.sched.parameter_source) @run_async async def test_search_task_one_client(self): task = workload.Task("search", workload.Operation("search", workload.OperationType.Search.to_hyphenated_string(), - param_source="worker-coordinator-test-param-source"), - warmup_iterations=3, iterations=5, clients=1, params={"target-throughput": 10, "clients": 1}) + param_source="worker-coordinator-test-param-source"), + warmup_iterations=3, iterations=5, clients=1, params={"target-throughput": 10, "clients": 1}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( task=task, @@ -927,7 +1033,8 @@ async def test_search_task_one_client(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) 
+ schedule = worker_coordinator.schedule_for( + task_allocation, param_source) expected_schedule = [ (0, metrics.SampleType.Warmup, 1 / 8, {}), @@ -944,8 +1051,8 @@ async def test_search_task_one_client(self): @run_async async def test_search_task_two_clients(self): task = workload.Task("search", workload.Operation("search", workload.OperationType.Search.to_hyphenated_string(), - param_source="worker-coordinator-test-param-source"), - warmup_iterations=1, iterations=5, clients=2, params={"target-throughput": 10, "clients": 2}) + param_source="worker-coordinator-test-param-source"), + warmup_iterations=1, iterations=5, clients=2, params={"target-throughput": 10, "clients": 2}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( task=task, @@ -953,7 +1060,8 @@ async def test_search_task_two_clients(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) expected_schedule = [ (0, metrics.SampleType.Warmup, 1 / 6, {}), @@ -969,9 +1077,10 @@ async def test_search_task_two_clients(self): async def test_schedule_param_source_determines_iterations_no_warmup(self): # we neither define any time-period nor any iteration count on the task. task = workload.Task("bulk-index", workload.Operation("bulk-index", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"], "size": 3}, - param_source="worker-coordinator-test-param-source"), - clients=4, params={"target-throughput": 4}) + params={ + "body": ["a"], "size": 3}, + param_source="worker-coordinator-test-param-source"), + clients=4, params={"target-throughput": 4}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -980,20 +1089,25 @@ async def test_schedule_param_source_determines_iterations_no_warmup(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ - (0.0, metrics.SampleType.Normal, 1 / 3, {"body": ["a"], "size": 3}), - (1.0, metrics.SampleType.Normal, 2 / 3, {"body": ["a"], "size": 3}), - (2.0, metrics.SampleType.Normal, 3 / 3, {"body": ["a"], "size": 3}), + (0.0, metrics.SampleType.Normal, + 1 / 3, {"body": ["a"], "size": 3}), + (1.0, metrics.SampleType.Normal, + 2 / 3, {"body": ["a"], "size": 3}), + (2.0, metrics.SampleType.Normal, + 3 / 3, {"body": ["a"], "size": 3}), ], schedule) @run_async async def test_schedule_param_source_determines_iterations_including_warmup(self): task = workload.Task("bulk-index", workload.Operation("bulk-index", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"], "size": 5}, - param_source="worker-coordinator-test-param-source"), - warmup_iterations=2, clients=4, params={"target-throughput": 4}) + params={ + "body": ["a"], "size": 5}, + param_source="worker-coordinator-test-param-source"), + warmup_iterations=2, clients=4, params={"target-throughput": 4}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1002,23 +1116,30 @@ async def test_schedule_param_source_determines_iterations_including_warmup(self global_client_index=0, total_clients=task.clients ) - schedule = 
worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ - (0.0, metrics.SampleType.Warmup, 1 / 5, {"body": ["a"], "size": 5}), - (1.0, metrics.SampleType.Warmup, 2 / 5, {"body": ["a"], "size": 5}), - (2.0, metrics.SampleType.Normal, 3 / 5, {"body": ["a"], "size": 5}), - (3.0, metrics.SampleType.Normal, 4 / 5, {"body": ["a"], "size": 5}), - (4.0, metrics.SampleType.Normal, 5 / 5, {"body": ["a"], "size": 5}), + (0.0, metrics.SampleType.Warmup, + 1 / 5, {"body": ["a"], "size": 5}), + (1.0, metrics.SampleType.Warmup, + 2 / 5, {"body": ["a"], "size": 5}), + (2.0, metrics.SampleType.Normal, + 3 / 5, {"body": ["a"], "size": 5}), + (3.0, metrics.SampleType.Normal, + 4 / 5, {"body": ["a"], "size": 5}), + (4.0, metrics.SampleType.Normal, + 5 / 5, {"body": ["a"], "size": 5}), ], schedule) @run_async async def test_schedule_defaults_to_iteration_based(self): # no time-period and no iterations specified on the task. Also, the parameter source does not define a size. task = workload.Task("bulk-index", workload.Operation("bulk-index", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"]}, - param_source="worker-coordinator-test-param-source"), - clients=1, params={"target-throughput": 4, "clients": 4}) + params={ + "body": ["a"]}, + param_source="worker-coordinator-test-param-source"), + clients=1, params={"target-throughput": 4, "clients": 4}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1027,7 +1148,8 @@ async def test_schedule_defaults_to_iteration_based(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ (0.0, metrics.SampleType.Normal, 1 / 1, {"body": ["a"]}), @@ -1036,9 +1158,10 @@ async def test_schedule_defaults_to_iteration_based(self): @run_async async def test_schedule_for_warmup_time_based(self): task = workload.Task("time-based", workload.Operation("time-based", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"], "size": 11}, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4}) + params={ + "body": ["a"], "size": 11}, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1047,28 +1170,41 @@ async def test_schedule_for_warmup_time_based(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ - (0.0, metrics.SampleType.Normal, 1 / 11, {"body": ["a"], "size": 11}), - (1.0, metrics.SampleType.Normal, 2 / 11, {"body": ["a"], "size": 11}), - (2.0, metrics.SampleType.Normal, 3 / 11, {"body": ["a"], "size": 11}), - (3.0, metrics.SampleType.Normal, 4 / 11, {"body": ["a"], "size": 11}), - (4.0, metrics.SampleType.Normal, 5 / 11, {"body": ["a"], "size": 11}), - (5.0, metrics.SampleType.Normal, 6 / 11, {"body": ["a"], "size": 11}), - (6.0, metrics.SampleType.Normal, 7 / 11, {"body": ["a"], "size": 
11}), - (7.0, metrics.SampleType.Normal, 8 / 11, {"body": ["a"], "size": 11}), - (8.0, metrics.SampleType.Normal, 9 / 11, {"body": ["a"], "size": 11}), - (9.0, metrics.SampleType.Normal, 10 / 11, {"body": ["a"], "size": 11}), - (10.0, metrics.SampleType.Normal, 11 / 11, {"body": ["a"], "size": 11}), + (0.0, metrics.SampleType.Normal, 1 / + 11, {"body": ["a"], "size": 11}), + (1.0, metrics.SampleType.Normal, 2 / + 11, {"body": ["a"], "size": 11}), + (2.0, metrics.SampleType.Normal, 3 / + 11, {"body": ["a"], "size": 11}), + (3.0, metrics.SampleType.Normal, 4 / + 11, {"body": ["a"], "size": 11}), + (4.0, metrics.SampleType.Normal, 5 / + 11, {"body": ["a"], "size": 11}), + (5.0, metrics.SampleType.Normal, 6 / + 11, {"body": ["a"], "size": 11}), + (6.0, metrics.SampleType.Normal, 7 / + 11, {"body": ["a"], "size": 11}), + (7.0, metrics.SampleType.Normal, 8 / + 11, {"body": ["a"], "size": 11}), + (8.0, metrics.SampleType.Normal, 9 / + 11, {"body": ["a"], "size": 11}), + (9.0, metrics.SampleType.Normal, 10 / + 11, {"body": ["a"], "size": 11}), + (10.0, metrics.SampleType.Normal, + 11 / 11, {"body": ["a"], "size": 11}), ], schedule) @run_async async def test_infinite_schedule_without_progress_indication(self): task = workload.Task("time-based", workload.Operation("time-based", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"]}, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4}) + params={ + "body": ["a"]}, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1077,7 +1213,8 @@ async def test_infinite_schedule_without_progress_indication(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ (0.0, metrics.SampleType.Normal, None, {"body": ["a"]}), @@ -1090,9 +1227,10 @@ async def test_infinite_schedule_without_progress_indication(self): @run_async async def test_finite_schedule_with_progress_indication(self): task = workload.Task("time-based", workload.Operation("time-based", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"], "size": 5}, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4}) + params={ + "body": ["a"], "size": 5}, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0, clients=4, params={"target-throughput": 4, "clients": 4}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1101,23 +1239,30 @@ async def test_finite_schedule_with_progress_indication(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation,param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ - (0.0, metrics.SampleType.Normal, 1 / 5, {"body": ["a"], "size": 5}), - (1.0, metrics.SampleType.Normal, 2 / 5, {"body": ["a"], "size": 5}), - (2.0, metrics.SampleType.Normal, 3 / 5, {"body": ["a"], "size": 5}), - (3.0, metrics.SampleType.Normal, 4 / 5, {"body": ["a"], "size": 5}), - (4.0, 
metrics.SampleType.Normal, 5 / 5, {"body": ["a"], "size": 5}), + (0.0, metrics.SampleType.Normal, + 1 / 5, {"body": ["a"], "size": 5}), + (1.0, metrics.SampleType.Normal, + 2 / 5, {"body": ["a"], "size": 5}), + (2.0, metrics.SampleType.Normal, + 3 / 5, {"body": ["a"], "size": 5}), + (3.0, metrics.SampleType.Normal, + 4 / 5, {"body": ["a"], "size": 5}), + (4.0, metrics.SampleType.Normal, + 5 / 5, {"body": ["a"], "size": 5}), ], schedule, infinite_schedule=False) @run_async async def test_schedule_with_progress_determined_by_runner(self): task = workload.Task("time-based", workload.Operation("time-based", "worker-coordinator-test-runner-with-completion", - params={"body": ["a"]}, - param_source="worker-coordinator-test-param-source"), - clients=1, - params={"target-throughput": 1, "clients": 1}) + params={ + "body": ["a"]}, + param_source="worker-coordinator-test-param-source"), + clients=1, + params={"target-throughput": 1, "clients": 1}) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1126,7 +1271,8 @@ async def test_schedule_with_progress_determined_by_runner(self): global_client_index=0, total_clients=task.clients ) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) await self.assert_schedule([ (0.0, metrics.SampleType.Normal, None, {"body": ["a"]}), @@ -1139,11 +1285,12 @@ async def test_schedule_with_progress_determined_by_runner(self): @run_async async def test_schedule_for_time_based(self): task = workload.Task("time-based", workload.Operation("time-based", workload.OperationType.Bulk.to_hyphenated_string(), - params={"body": ["a"], "size": 11}, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0.1, - time_period=0.1, - clients=1) + params={ + "body": ["a"], "size": 11}, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0.1, + time_period=0.1, + clients=1) param_source = workload.operation_parameters(self.test_workload, task) task_allocation = worker_coordinator.TaskAllocation( @@ -1152,7 +1299,8 @@ async def test_schedule_for_time_based(self): global_client_index=0, total_clients=task.clients ) - schedule_handle = worker_coordinator.schedule_for(task_allocation, param_source) + schedule_handle = worker_coordinator.schedule_for( + task_allocation, param_source) schedule_handle.start() self.assertEqual(0.0, schedule_handle.ramp_up_wait_time) schedule = schedule_handle() @@ -1168,8 +1316,10 @@ async def test_schedule_for_time_based(self): self.assertEqual(metrics.SampleType.Normal, sample_type) self.assertTrue(last_progress < progress_percent) last_progress = progress_percent - self.assertTrue(round(progress_percent, 2) >= 0.0, "progress should be >= 0.0 but was [%f]" % progress_percent) - self.assertTrue(round(progress_percent, 2) <= 1.0, "progress should be <= 1.0 but was [%f]" % progress_percent) + self.assertTrue(round(progress_percent, 2) >= 0.0, + "progress should be >= 0.0 but was [%f]" % progress_percent) + self.assertTrue(round(progress_percent, 2) <= 1.0, + "progress should be <= 1.0 but was [%f]" % progress_percent) self.assertIsNotNone(runner, "runner must be defined") self.assertEqual({"body": ["a"], "size": 11}, params) @@ -1256,8 +1406,10 @@ def setUp(self): runner.register_default_runners() self.runner_with_progress = AsyncExecutorTests.RunnerWithProgress() self.runner_overriding_throughput = 
AsyncExecutorTests.RunnerOverridingThroughput() - runner.register_runner("unit-test-recovery", self.runner_with_progress, async_runner=True) - runner.register_runner("override-throughput", self.runner_overriding_throughput, async_runner=True) + runner.register_runner("unit-test-recovery", + self.runner_with_progress, async_runner=True) + runner.register_runner( + "override-throughput", self.runner_overriding_throughput, async_runner=True) @mock.patch('osbenchmark.client.RequestContextHolder.on_client_request_end') @mock.patch('osbenchmark.client.RequestContextHolder.on_client_request_start') @@ -1265,55 +1417,60 @@ def setUp(self): @run_async async def test_execute_schedule_in_throughput_mode(self, opensearch, on_client_request_start, on_client_request_end): task_start = time.perf_counter() - opensearch.new_request_context.return_value = AsyncExecutorTests.StaticRequestTiming(task_start=task_start) + opensearch.new_request_context.return_value = AsyncExecutorTests.StaticRequestTiming( + task_start=task_start) - opensearch.bulk.return_value = as_future(io.StringIO('{"errors": false, "took": 8}')) + opensearch.bulk.return_value = as_future( + io.StringIO('{"errors": false, "took": 8}')) - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) test_workload = workload.Workload(name="unittest", description="unittest workload", - indices=None, - test_procedures=None) + indices=None, + test_procedures=None) task = workload.Task("time-based", workload.Operation("time-based", workload.OperationType.Bulk.to_hyphenated_string(), - params={ - "body": ["action_metadata_line", "index_line"], - "action-metadata-present": True, - "bulk-size": 1, - "unit": "docs", - # we need this because WorkerCoordinatorTestParamSource does not know - # that we only have one bulk and hence size() returns - # incorrect results - "size": 1 - }, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0, clients=4) + params={ + "body": ["action_metadata_line", "index_line"], + "action-metadata-present": True, + "bulk-size": 1, + "unit": "docs", + # we need this because WorkerCoordinatorTestParamSource does not know + # that we only have one bulk and hence size() returns + # incorrect results + "size": 1 + }, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0, clients=4) param_source = workload.operation_parameters(test_workload, task) task_allocation = worker_coordinator.TaskAllocation(task=task, - client_index_in_task=0, - global_client_index=0, - total_clients=task.clients) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + client_index_in_task=0, + global_client_index=0, + total_clients=task.clients) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) sampler = worker_coordinator.Sampler(start_timestamp=task_start) cancel = threading.Event() complete = threading.Event() execute_schedule = worker_coordinator.AsyncExecutor(client_id=2, - task=task, - schedule=schedule, - opensearch={ - "default": opensearch - }, - sampler=sampler, - cancel=cancel, - complete=complete, - on_error="continue") + task=task, + schedule=schedule, + opensearch={ + "default": opensearch + }, + sampler=sampler, + cancel=cancel, + complete=complete, + on_error="continue") await execute_schedule() samples = sampler.samples self.assertTrue(len(samples) > 0) - 
self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task") + self.assertFalse(complete.is_set(), + "Executor should not auto-complete a normal task") previous_absolute_time = -1.0 previous_relative_time = -1.0 for sample in samples: @@ -1334,12 +1491,14 @@ async def test_execute_schedule_in_throughput_mode(self, opensearch, on_client_r @run_async async def test_execute_schedule_with_progress_determined_by_runner(self, opensearch): task_start = time.perf_counter() - opensearch.new_request_context.return_value = AsyncExecutorTests.StaticRequestTiming(task_start=task_start) + opensearch.new_request_context.return_value = AsyncExecutorTests.StaticRequestTiming( + task_start=task_start) - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) test_workload = workload.Workload(name="unittest", description="unittest workload", - indices=None, - test_procedures=None) + indices=None, + test_procedures=None) task = workload.Task("time-based", workload.Operation("time-based", operation_type="unit-test-recovery", params={ "indices-to-restore": "*", @@ -1348,25 +1507,26 @@ async def test_execute_schedule_with_progress_determined_by_runner(self, opensea }, param_source="worker-coordinator-test-param-source"), warmup_time_period=0, clients=4) param_source = workload.operation_parameters(test_workload, task) task_allocation = worker_coordinator.TaskAllocation(task=task, - client_index_in_task=0, - global_client_index=0, - total_clients=task.clients) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + client_index_in_task=0, + global_client_index=0, + total_clients=task.clients) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) sampler = worker_coordinator.Sampler(start_timestamp=task_start) cancel = threading.Event() complete = threading.Event() execute_schedule = worker_coordinator.AsyncExecutor(client_id=2, - task=task, - schedule=schedule, - opensearch={ - "default": opensearch - }, - sampler=sampler, - cancel=cancel, - complete=complete, - on_error="continue") + task=task, + schedule=schedule, + opensearch={ + "default": opensearch + }, + sampler=sampler, + cancel=cancel, + complete=complete, + on_error="continue") await execute_schedule() samples = sampler.samples @@ -1374,7 +1534,8 @@ async def test_execute_schedule_with_progress_determined_by_runner(self, opensea self.assertEqual(5, len(samples)) self.assertTrue(self.runner_with_progress.completed) self.assertEqual(1.0, self.runner_with_progress.percent_completed) - self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task") + self.assertFalse(complete.is_set(), + "Executor should not auto-complete a normal task") previous_absolute_time = -1.0 previous_relative_time = -1.0 for sample in samples: @@ -1397,47 +1558,51 @@ async def test_execute_schedule_with_progress_determined_by_runner(self, opensea @run_async async def test_execute_schedule_runner_overrides_times(self, opensearch): task_start = time.perf_counter() - opensearch.new_request_context.return_value = AsyncExecutorTests.StaticRequestTiming(task_start=task_start) + opensearch.new_request_context.return_value = AsyncExecutorTests.StaticRequestTiming( + task_start=task_start) - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + 
params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) test_workload = workload.Workload(name="unittest", description="unittest workload", - indices=None, - test_procedures=None) + indices=None, + test_procedures=None) task = workload.Task("override-throughput", workload.Operation("override-throughput", - operation_type="override-throughput", params={ - # we need this because WorkerCoordinatorTestParamSource does not know that we only have one iteration and hence - # size() returns incorrect results - "size": 1 - }, - param_source="worker-coordinator-test-param-source"), - warmup_iterations=0, iterations=1, clients=1) + operation_type="override-throughput", params={ + # we need this because WorkerCoordinatorTestParamSource does not know that we only have one iteration and hence + # size() returns incorrect results + "size": 1 + }, + param_source="worker-coordinator-test-param-source"), + warmup_iterations=0, iterations=1, clients=1) param_source = workload.operation_parameters(test_workload, task) task_allocation = worker_coordinator.TaskAllocation(task=task, - client_index_in_task=0, - global_client_index=0, - total_clients=task.clients) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + client_index_in_task=0, + global_client_index=0, + total_clients=task.clients) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) sampler = worker_coordinator.Sampler(start_timestamp=task_start) cancel = threading.Event() complete = threading.Event() execute_schedule = worker_coordinator.AsyncExecutor(client_id=0, - task=task, - schedule=schedule, - opensearch={ - "default": opensearch - }, - sampler=sampler, - cancel=cancel, - complete=complete, - on_error="continue") + task=task, + schedule=schedule, + opensearch={ + "default": opensearch + }, + sampler=sampler, + cancel=cancel, + complete=complete, + on_error="continue") await execute_schedule() samples = sampler.samples - self.assertFalse(complete.is_set(), "Executor should not auto-complete a normal task") + self.assertFalse(complete.is_set(), + "Executor should not auto-complete a normal task") self.assertEqual(1, len(samples)) sample = samples[0] self.assertEqual(0, sample.client_id) @@ -1469,27 +1634,29 @@ def perform_request(*args, **kwargs): # one has been "consumed". 
opensearch.transport.perform_request.side_effect = perform_request - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) test_workload = workload.Workload(name="unittest", description="unittest workload", - indices=None, - test_procedures=None) + indices=None, + test_procedures=None) # in one second (0.5 warmup + 0.5 measurement) we should get 1000 [ops/s] / 4 [clients] = 250 samples for target_throughput, bounds in {10: [2, 4], 100: [24, 26], 1000: [235, 255]}.items(): task = workload.Task("time-based", workload.Operation("time-based", - workload.OperationType.Search.to_hyphenated_string(), - params={ - "index": "_all", - "type": None, - "body": {"query": {"match_all": {}}}, - "request-params": {}, - "cache": False, - "response-compression-enabled": True - }, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0.5, time_period=0.5, clients=4, - params={"target-throughput": target_throughput, "clients": 4}, - completes_parent=True) + workload.OperationType.Search.to_hyphenated_string(), + params={ + "index": "_all", + "type": None, + "body": {"query": {"match_all": {}}}, + "request-params": {}, + "cache": False, + "response-compression-enabled": True + }, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0.5, time_period=0.5, clients=4, + params={ + "target-throughput": target_throughput, "clients": 4}, + completes_parent=True) sampler = worker_coordinator.Sampler(start_timestamp=0) cancel = threading.Event() @@ -1497,20 +1664,21 @@ def perform_request(*args, **kwargs): param_source = workload.operation_parameters(test_workload, task) task_allocation = worker_coordinator.TaskAllocation(task=task, - client_index_in_task=0, - global_client_index=0, - total_clients=task.clients) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + client_index_in_task=0, + global_client_index=0, + total_clients=task.clients) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) execute_schedule = worker_coordinator.AsyncExecutor(client_id=0, - task=task, - schedule=schedule, - opensearch={ - "default": opensearch - }, - sampler=sampler, - cancel=cancel, - complete=complete, - on_error="continue") + task=task, + schedule=schedule, + opensearch={ + "default": opensearch + }, + sampler=sampler, + cancel=cancel, + complete=complete, + on_error="continue") await execute_schedule() samples = sampler.samples @@ -1520,7 +1688,8 @@ def perform_request(*args, **kwargs): upper_bound = bounds[1] self.assertTrue(lower_bound <= sample_size <= upper_bound, msg="Expected sample size to be between %d and %d but was %d" % (lower_bound, upper_bound, sample_size)) - self.assertTrue(complete.is_set(), "Executor should auto-complete a task that terminates its parent") + self.assertTrue(complete.is_set( + ), "Executor should auto-complete a task that terminates its parent") @mock.patch("opensearchpy.OpenSearch") @run_async @@ -1531,46 +1700,49 @@ async def test_cancel_execute_schedule(self, opensearch): "request_end": 11, "client_request_end": 12 } - opensearch.bulk.return_value = as_future(io.StringIO('{"errors": false, "took": 8}')) + opensearch.bulk.return_value = as_future( + io.StringIO('{"errors": false, "took": 8}')) - params.register_param_source_for_name("worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) + 
params.register_param_source_for_name( + "worker-coordinator-test-param-source", WorkerCoordinatorTestParamSource) test_workload = workload.Workload(name="unittest", description="unittest workload", - indices=None, - test_procedures=None) + indices=None, + test_procedures=None) # in one second (0.5 warmup + 0.5 measurement) we should get 1000 [ops/s] / 4 [clients] = 250 samples for target_throughput in [10, 100, 1000]: task = workload.Task("time-based", workload.Operation("time-based", - workload.OperationType.Bulk.to_hyphenated_string(), - params={ - "body": ["action_metadata_line", "index_line"], - "action-metadata-present": True, - "bulk-size": 1 - }, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0.5, time_period=0.5, clients=4, - params={"target-throughput": target_throughput, "clients": 4}) + workload.OperationType.Bulk.to_hyphenated_string(), + params={ + "body": ["action_metadata_line", "index_line"], + "action-metadata-present": True, + "bulk-size": 1 + }, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0.5, time_period=0.5, clients=4, + params={"target-throughput": target_throughput, "clients": 4}) param_source = workload.operation_parameters(test_workload, task) task_allocation = worker_coordinator.TaskAllocation(task=task, - client_index_in_task=0, - global_client_index=0, - total_clients=task.clients) - schedule = worker_coordinator.schedule_for(task_allocation, param_source) + client_index_in_task=0, + global_client_index=0, + total_clients=task.clients) + schedule = worker_coordinator.schedule_for( + task_allocation, param_source) sampler = worker_coordinator.Sampler(start_timestamp=0) cancel = threading.Event() complete = threading.Event() execute_schedule = worker_coordinator.AsyncExecutor(client_id=0, - task=task, - schedule=schedule, - opensearch={ - "default": opensearch - }, - sampler=sampler, - cancel=cancel, - complete=complete, - on_error="continue") + task=task, + schedule=schedule, + opensearch={ + "default": opensearch + }, + sampler=sampler, + cancel=cancel, + complete=complete, + on_error="continue") cancel.set() await execute_schedule() @@ -1605,29 +1777,30 @@ def start(self): pass async def __call__(self): - invocations = [(0, metrics.SampleType.Warmup, 0, AsyncExecutorTests.context_managed(run), None)] + invocations = [(0, metrics.SampleType.Warmup, 0, + AsyncExecutorTests.context_managed(run), None)] for invocation in invocations: yield invocation task = workload.Task("no-op", workload.Operation("no-op", workload.OperationType.Bulk.to_hyphenated_string(), - params={}, - param_source="worker-coordinator-test-param-source"), - warmup_time_period=0.5, time_period=0.5, clients=4, - params={"clients": 4}) + params={}, + param_source="worker-coordinator-test-param-source"), + warmup_time_period=0.5, time_period=0.5, clients=4, + params={"clients": 4}) sampler = worker_coordinator.Sampler(start_timestamp=0) cancel = threading.Event() complete = threading.Event() execute_schedule = worker_coordinator.AsyncExecutor(client_id=2, - task=task, - schedule=ScheduleHandle(), - opensearch={ - "default": opensearch - }, - sampler=sampler, - cancel=cancel, - complete=complete, - on_error="continue") + task=task, + schedule=ScheduleHandle(), + opensearch={ + "default": opensearch + }, + sampler=sampler, + cancel=cancel, + complete=complete, + on_error="continue") with self.assertRaisesRegex(exceptions.BenchmarkError, r"Cannot run task \[no-op\]: expected unit test exception"): await execute_schedule() @@ -1702,7 
+1875,8 @@ async def test_execute_single_with_connection_error_always_aborts(self, on_clien opensearch = None params = None # ES client uses pseudo-status "N/A" in this case... - runner = mock.Mock(side_effect=as_future(exception=opensearchpy.ConnectionError("N/A", "no route to host", None))) + runner = mock.Mock(side_effect=as_future( + exception=opensearchpy.ConnectionError("N/A", "no route to host", None))) with self.assertRaises(exceptions.BenchmarkAssertionError) as ctx: await worker_coordinator.execute_single(self.context_managed(runner), opensearch, params, on_error=on_error) @@ -1715,8 +1889,8 @@ async def test_execute_single_with_connection_error_always_aborts(self, on_clien async def test_execute_single_with_http_400_aborts_when_specified(self, on_client_request_end): opensearch = None params = None - runner = mock.Mock(side_effect= - as_future(exception=opensearchpy.NotFoundError(404, "not found", "the requested document could not be found"))) + runner = mock.Mock(side_effect=as_future(exception=opensearchpy.NotFoundError( + 404, "not found", "the requested document could not be found"))) with self.assertRaises(exceptions.BenchmarkAssertionError) as ctx: await worker_coordinator.execute_single(self.context_managed(runner), opensearch, params, on_error="abort") @@ -1729,8 +1903,8 @@ async def test_execute_single_with_http_400_aborts_when_specified(self, on_clien async def test_execute_single_with_http_400(self, on_client_request_end): opensearch = None params = None - runner = mock.Mock(side_effect= - as_future(exception=opensearchpy.NotFoundError(404, "not found", "the requested document could not be found"))) + runner = mock.Mock(side_effect=as_future(exception=opensearchpy.NotFoundError( + 404, "not found", "the requested document could not be found"))) ops, unit, request_meta_data = await worker_coordinator.execute_single( self.context_managed(runner), opensearch, params, on_error="continue") @@ -1749,8 +1923,8 @@ async def test_execute_single_with_http_400(self, on_client_request_end): async def test_execute_single_with_http_413(self, on_client_request_end): opensearch = None params = None - runner = mock.Mock(side_effect= - as_future(exception=opensearchpy.NotFoundError(413, b"", b""))) + runner = mock.Mock(side_effect=as_future( + exception=opensearchpy.NotFoundError(413, b"", b""))) ops, unit, request_meta_data = await worker_coordinator.execute_single( self.context_managed(runner), opensearch, params, on_error="continue") @@ -1803,4 +1977,5 @@ async def f(x): end = time.perf_counter() self.assertEqual(2, return_value) duration = end - start - self.assertTrue(0.9 <= duration <= 1.2, "Should sleep for roughly 1 second but took [%.2f] seconds." % duration) + self.assertTrue(0.9 <= duration <= 1.2, + "Should sleep for roughly 1 second but took [%.2f] seconds." % duration)