diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index 2bf3aa3e723..12ebea45e5a 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -259,8 +259,8 @@ jobs: docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "\ cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/build -DVCPKG_FILES_DIR=/hpcc-dev -DMAKE_DOCS_ONLY=ON -DUSE_NATIVE_LIBRARIES=ON -DDOCS_AUTO=ON -DDOC_LANGS=ALL && \ cmake --build /hpcc-dev/build --parallel $(nproc) --target all" - docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/EN_US && zip ALL_HPCC_DOCS_EN_US-${{ needs.preamble.outputs.community_tag }}.zip *.pdf" - docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/PT_BR && zip ALL_HPCC_DOCS_PT_BR-${{ needs.preamble.outputs.community_tag }}.zip *.pdf" + docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/EN_US && zip ALL_HPCC_DOCS_EN_US-$(echo '${{ needs.preamble.outputs.community_tag }}' | sed 's/community_//' ).zip *.pdf" + docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/PT_BR && zip ALL_HPCC_DOCS_PT_BR-$(echo '${{ needs.preamble.outputs.community_tag }}' | sed 's/community_//' ).zip *.pdf" - name: Upload Artifacts for ECLIDE build if: ${{ !matrix.ln && !matrix.container && matrix.documentation }} diff --git a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp index 3d3f16ddb04..4e84d075bf2 100644 --- a/common/workunit/workunit.cpp +++ b/common/workunit/workunit.cpp @@ -13911,7 +13911,7 @@ extern WORKUNIT_API void associateLocalFile(IWUQuery * query, WUFileType type, c OwnedIFile target = createIFile(destPathName); if (!target->exists()) { - source->copyTo(target, 0, NULL, true); + source->copyTo(target, 0, NULL, false); } query->addAssociatedFile(type, destPathName, "localhost", description, crc, minActivity, maxActivity); // Should we delete the local files? 
No - they may not be finished with diff --git a/dali/base/dadfs.cpp b/dali/base/dadfs.cpp index 2b6fa5281f3..f64f40c9ca2 100644 --- a/dali/base/dadfs.cpp +++ b/dali/base/dadfs.cpp @@ -3566,23 +3566,33 @@ class CDistributedFile: public CDistributedFileBase offset_t maxPartSz = 0, minPartSz = (offset_t)-1, totalPartSz = 0; - maxSkewPart = 0; - minSkewPart = 0; - for (unsigned p=0; p maxPartSz) - { - maxPartSz = size; - maxSkewPart = p; - } - if (size < minPartSz) + maxSkewPart = 0; + minSkewPart = 0; + for (unsigned p=0; p maxPartSz) + { + maxPartSz = size; + maxSkewPart = p; + } + if (size < minPartSz) + { + minPartSz = size; + minSkewPart = p; + } + totalPartSz += size; } - totalPartSz += size; + } + catch (IException *e) + { + // guard against getFileSize throwing an exception (if parts missing) + EXCLOG(e); + e->Release(); + return false; } offset_t avgPartSz = totalPartSz / np; if (0 == avgPartSz) diff --git a/dali/daliadmin/daadmin.cpp b/dali/daliadmin/daadmin.cpp index 26954d26776..f3f25fad57b 100644 --- a/dali/daliadmin/daadmin.cpp +++ b/dali/daliadmin/daadmin.cpp @@ -2264,6 +2264,101 @@ void getxref(const char *dst) conn->close(); } +void checkFileSizeOne(IUserDescriptor *user, const char *lfn, bool fix) +{ + try + { + CDfsLogicalFileName dlfn; + dlfn.set(lfn); + Owned dFile = queryDistributedFileDirectory().lookup(dlfn, user, AccessMode::tbdRead, false, false, nullptr, defaultPrivilegedUser, 30000); // 30 sec timeout + if (dFile) + { + if (dFile->querySuperFile()) + WARNLOG("Skipping: file '%s' is a superfile", lfn); + else + { + bool fileLocked = false; + COnScopeExit ensureFileUnlock([&]() { if (fileLocked) dFile->unlockProperties(); }); + unsigned numParts = dFile->numParts(); + for (unsigned p=0; pqueryPart(p); + IPropertyTree &attrs = part.queryAttributes(); + if (!attrs.hasProp("@size")) + { + if (fix) + { + offset_t partSize; + try + { + partSize = part.getFileSize(true, true); + if (!fileLocked) + { + // we lock the file once, so that the individual part lock/unlocks are effectively a NOP + dFile->lockProperties(30000); + fileLocked = true; + PROGLOG("File '%s' has missing @size attributes", lfn); + } + part.lockProperties(30000); + } + catch (IException *e) + { + EXCLOG(e); + e->Release(); + continue; + } + COnScopeExit ensurePartUnlock([&]() { part.unlockProperties(); }); + PROGLOG("Part %u: Setting @size to %" I64F "u", p+1, partSize); + attrs.setPropInt64("@size", partSize); + } + else + PROGLOG("File '%s' missing @size on part %u", lfn, p+1); + } + } + } + } + else + WARNLOG("File '%s' not found", lfn); + } + catch (IException *e) + { + EXCLOG(e); + e->Release(); + } +} + +void checkFileSize(IUserDescriptor *user, const char *lfnPattern, bool fix) +{ + if (containsWildcard(lfnPattern)) + { + unsigned count = 0; + Owned iter = queryDistributedFileDirectory().getDFAttributesIterator(lfnPattern, user, true, false); // no supers + CCycleTimer timer; + if (iter->first()) + { + while (true) + { + IPropertyTree &attr = iter->query(); + const char *lfn = attr.queryProp("@name"); + checkFileSizeOne(user, lfn, fix); + ++count; + + if (!iter->next()) + break; + else if (timer.elapsedCycles() >= queryOneSecCycles()*10) // log every 10 secs + { + PROGLOG("Processed %u files", count); + timer.reset(); + } + } + } + PROGLOG("Total files processed %u files", count); + } + else + checkFileSizeOne(user, lfnPattern, fix); +} + + struct CTreeItem : public CInterface { String *tail; diff --git a/dali/daliadmin/daadmin.hpp b/dali/daliadmin/daadmin.hpp index 7a86e86ab30..687d0d882b2 100644 
--- a/dali/daliadmin/daadmin.hpp +++ b/dali/daliadmin/daadmin.hpp @@ -73,6 +73,7 @@ extern DALIADMIN_API void listmatches(const char *path, const char *match, const extern DALIADMIN_API void dfsreplication(const char *clusterMask, const char *lfnMask, unsigned redundancy, bool dryRun); extern DALIADMIN_API void migrateFiles(const char *srcGroup, const char *tgtGroup, const char *filemask, const char *_options); extern DALIADMIN_API void getxref(const char *dst); +extern DALIADMIN_API void checkFileSize(IUserDescriptor *user, const char *lfnPattern, bool fix); extern DALIADMIN_API void listworkunits(const char *test, const char *min, const char *max); extern DALIADMIN_API void workunittimings(const char *wuid); diff --git a/dali/daliadmin/daliadmin.cpp b/dali/daliadmin/daliadmin.cpp index 26c002fbb0f..0491b7d42f1 100644 --- a/dali/daliadmin/daliadmin.cpp +++ b/dali/daliadmin/daliadmin.cpp @@ -55,34 +55,35 @@ void usage(const char *exe) printf(" count -- counts xpath matches\n"); printf("\n"); printf("Logical File meta information commands:\n"); - printf(" dfsfile -- get meta information for file\n"); - printf(" dfsmeta -- get new meta information for file\n"); - printf(" setdfspartattr [] -- set attribute of a file part to value, or delete the attribute if not provided\n"); - printf(" dfspart -- get meta information for part num\n"); + printf(" checksuperfile [fix=true|false] -- check superfile links consistent and optionally fix\n"); + printf(" checksubfile -- check subfile links to parent consistent\n"); + printf(" checkfilesize [fix=true|false] -- check file size attributes and optionally fix"); + printf(" cleanscopes -- remove empty scopes\n"); + printf(" clusternodes [filename] -- get IPs for cluster group. Written to optional filename if provided\n"); printf(" dfscheck -- verify dfs file information is valid\n"); + printf(" dfscompratio -- returns compression ratio of file\n"); printf(" dfscsv -- get csv info. for files matching mask\n"); + printf(" dfsexists -- sets return value to 0 if file exists\n"); + printf(" dfsfile -- get meta information for file\n"); printf(" dfsgroup [filename] -- get IPs for logical group (aka cluster). Written to optional filename if provided\n"); - printf(" clusternodes [filename] -- get IPs for cluster group. 
Written to optional filename if provided\n"); printf(" dfsls [] [options]-- get list of files within a scope (options=lrs)\n"); printf(" dfsmap -- get part files (primary and replicates)\n"); - printf(" dfsexists -- sets return value to 0 if file exists\n"); + printf(" dfsmeta -- get new meta information for file\n"); printf(" dfsparents -- list superfiles containing file\n"); + printf(" dfspart -- get meta information for part num\n"); + printf(" dfsperm -- returns LDAP permission for file\n"); + printf(" dfsreplication [dryrun] -- set redundancy for files matching mask, on specified clusters only\n"); + printf(" dfsscopes -- lists logical scopes (mask = * for all)\n"); printf(" dfsunlink -- unlinks file from all super parents\n"); printf(" dfsverify -- verifies parts exist, returns 0 if ok\n"); - printf(" setprotect -- overwrite protects logical file\n"); - printf(" unprotect -- unprotect (if id=* then clear all)\n"); - printf(" listprotect -- list protected files\n"); - printf(" checksuperfile [fix=true|false] -- check superfile links consistent and optionally fix\n"); - printf(" checksubfile -- check subfile links to parent consistent\n"); + printf(" holdlock -- hold a lock to the logical-file until a key is pressed"); printf(" listexpires -- lists logical files with expiry value\n"); + printf(" listprotect -- list protected files\n"); printf(" listrelationships \n"); - printf(" dfsperm -- returns LDAP permission for file\n"); - printf(" dfscompratio -- returns compression ratio of file\n"); - printf(" dfsscopes -- lists logical scopes (mask = * for all)\n"); - printf(" cleanscopes -- remove empty scopes\n"); printf(" normalizefilenames [] -- normalize existing logical filenames that match, e.g. .::.::scope::.::name -> scope::name\n"); - printf(" dfsreplication [dryrun] -- set redundancy for files matching mask, on specified clusters only\n"); - printf(" holdlock -- hold a lock to the logical-file until a key is pressed"); + printf(" setdfspartattr [] -- set attribute of a file part to value, or delete the attribute if not provided\n"); + printf(" setprotect -- overwrite protects logical file\n"); + printf(" unprotect -- unprotect (if id=* then clear all)\n"); printf("\n"); printf("Workunit commands:\n"); printf(" listworkunits [= [ []]] -- list workunits that match prop=val in workunit name range lower to upper\n"); @@ -90,14 +91,17 @@ void usage(const char *exe) printf(" workunittimings \n"); printf("\n"); printf("Other dali server and misc commands:\n"); - printf(" serverlist -- list server IPs (mask optional)\n"); - printf(" clusterlist -- list clusters (mask optional)\n"); printf(" auditlog \n"); + printf(" cleanglobalwuid [dryrun] [noreconstruct]\n"); + printf(" clusterlist -- list clusters (mask optional)\n"); printf(" coalesce -- force transaction coalesce\n"); - printf(" mpping -- time MP connect\n"); + printf(" dalilocks [ ] [ files ] -- get all locked files/xpaths\n"); printf(" daliping [ ] -- time dali server connect\n"); printf(" getxref -- get all XREF information\n"); - printf(" dalilocks [ ] [ files ] -- get all locked files/xpaths\n"); + printf(" migratefiles [] [dryrun] [createmaps] [listonly] [verbose]\n"); + printf(" mpping -- time MP connect\n"); + printf(" serverlist -- list server IPs (mask optional)\n"); + printf(" translatetoxpath logicalfile [File|SuperFile|Scope]\n"); printf(" unlock <[path|file]> -- unlocks either matching xpath(s) or matching logical file(s), can contain wildcards\n"); printf(" validatestore [fix=]\n" " [verbose=]\n" @@ -106,9 +110,6 @@ 
void usage(const char *exe) printf(" wuidcompress -- scan workunits that match and compress resources of \n"); printf(" wuiddecompress -- scan workunits that match and decompress resources of \n"); printf(" xmlsize [] -- analyse size usage in xml file, display individual items above 'percentage' \n"); - printf(" migratefiles [] [dryrun] [createmaps] [listonly] [verbose]\n"); - printf(" translatetoxpath logicalfile [File|SuperFile|Scope]\n"); - printf(" cleanglobalwuid [dryrun] [noreconstruct]\n"); printf("\n"); printf("Common options\n"); printf(" server= -- server ip\n"); @@ -148,6 +149,8 @@ int main(int argc, const char* argv[]) StringBuffer tmps; for (int i=1;igetPropBool("fix"); + checkFileSize(userDesc, params.item(1), fix); + } else if (strieq(cmd,"dalilocks")) { CHECKPARAMS(0,2); bool filesonly = false; diff --git a/devdoc/UserBuildAssets.md b/devdoc/UserBuildAssets.md new file mode 100644 index 00000000000..e38375ac754 --- /dev/null +++ b/devdoc/UserBuildAssets.md @@ -0,0 +1,146 @@ +# Build Assets for the Individual Developer + +## Build Assets +The modern tool used for generating all of our official assets is the GitHub Actions build-assets workflow on the hpcc-systems/HPCC-Platform repository, located [here](https://github.com/hpcc-systems/HPCC-Platform/actions/workflows/build-assets.yml). Developers and contributors can use this same workflow on their own forked repository, which allows them to quickly create assets for testing changes and to catch errors before the peer-review process. + +The build-assets workflow generates every available project under the HPCC-Platform namespace. There is currently no option to control which packages in the build matrix are generated, but most packages are built in parallel and __released__ as soon as their individual matrix job completes, so there is no waiting on packages you don't need. The exceptions are packages that require other builds to complete first, such as the __ECLIDE__. + +Upon completion of each step and matrix job in the workflow, the assets are published to the repository's Tags tab. An example for the `hpcc-systems` user repository is [hpcc-systems/HPCC-Platform/tags](https://github.com/hpcc-systems/HPCC-Platform/tags). + +![Tag tab screenshot](/devdoc/resources/images/repository-tag-tab.png) + +## Dependent variables +The build-assets workflow requires several __repository secrets__ to be defined in your forked repository in order to run properly. You can access these secrets and variables by going to the `Settings` tab of your forked repository and clicking the `Secrets and variables - Actions` drop-down under `Security` on the left-hand side of the settings screen. + +![Actions secrets and variables](/devdoc/resources/images/actions-secrets-and-variables.png) + +Create a secret by clicking the green `New Repository Secret` button.
The following secrets are needed: + +* LNB_ACTOR - Your GitHub username +* LNB_TOKEN - Classic GitHub token for your user with LN repo access +* DOCKER_USERNAME - Your docker.io username +* DOCKER_PASSWORD - Your docker.io password +* SIGNING_CERTIFICATE - PKCS#12 self-signed certificate, base64 encoded, for windows signing +* SIGNING_CERTIFICATE_PASSPHRASE - passphrase for the PKCS#12 certificate +* SIGNING_SECRET - GPG private key for signing linux builds (generated below) +* SIGN_MODULES_KEYID - email address used to generate the key +* SIGN_MODULES_PASSPHRASE - passphrase for the private key + +### Generating the windows signing certificate +To generate the self-signed certificate for windows packages, perform the following steps. + +1. Generate a root certificate authority + +```openssl req -x509 -sha256 -days 365 -nodes -newkey rsa:2048 -subj "/CN=example.com/C=US/L=Boca Raton" -keyout rootCA.key -out rootCA.crt``` + +2. Create the server private key + +`openssl genrsa -out server.key 2048` + +3. Generate a csr.conf file +``` +cat > csr.conf < cert.conf < hpcc_sign_cert.base64` + +On MacOS: +`base64 -i hpcc_sign_cert.pfx -o hpcc_sign_cert.base64` + +From here you can `cat` the output of hpcc_sign_cert.base64 and copy it into the SIGNING_CERTIFICATE secret in GitHub Actions. + +### Generating a signing key for linux builds +For linux builds we generate a private key using GnuPG (gpg). + +Start the process by opening a terminal and running the command: `gpg --full-generate-key` + +You will be given several options in this process. + +For the type of key, select `RSA and RSA default`. + +For the key size, enter `4096`. + +For the expiration date, select `0 = key does not expire`. + +Input your real name. + +Input your company email address. + +For the comment, input something like `Github actions key for signing linux builds`. + +You will then be asked to enter a passphrase for the key and to confirm it. Do not leave this blank. + +The key is generated and added to your gpg keyring. Now we need to export the key for use in the GitHub Actions secret. + +To extract your key run `gpg --output private.pgp --armor --export-secret-key `. + +Now open private.pgp, copy its entire contents, and go to the GitHub Actions secrets. Paste the output into the secret "SIGNING_SECRET". + +## Starting a build +The build-assets workflow is kicked off by a tag being pushed to the developer's HPCC-Platform repository. Before pushing the tag to your HPCC-Platform repository, you will want to have the other tags in place if you want the LN and ECLIDE builds to function correctly. Suggested tag patterns are `community_HPCC-12345-rc1` or `HPCC-12345-rc1`. + +If you choose not to tag the LN and ECLIDE builds, the community builds will still be generated, but errors will be thrown for any build that uses the LN repository. ECLIDE will not even attempt a build unless you are also successfully building LN, due to the dependency scheme we use. The 'Baremetal' builds are designed to generate our clienttools targets for the windows-2022 and macos-12 distributions. These jobs contain both the COMMUNITY and LN builds. If the LN build is not tagged, the COMMUNITY section of the job will run, and the assets will be uploaded, but the job will fail when it tries to build LN. + +If you choose to precede your Jira number with `community_` then you must tag LN with `internal_` and ECLIDE with `eclide_`. Otherwise just use the Jira tag in all three repositories.
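As a rough illustration, the tagging sequence might look like the sketch below. The repository layout and the Jira number (HPCC-12345) are hypothetical placeholders rather than values from a real build; substitute your own ticket number and release-candidate suffix.

```bash
# Hypothetical example: tag all three repositories for HPCC-12345, first release candidate.
# Assumes local clones of LN, ECLIDE and HPCC-Platform, each checked out at the same base branch.

# LN repository (proprietary plugins)
cd LN
git tag internal_HPCC-12345-rc1
git push origin internal_HPCC-12345-rc1

# ECLIDE repository
cd ../ECLIDE
git tag eclide_HPCC-12345-rc1
git push origin eclide_HPCC-12345-rc1

# HPCC-Platform repository - pushing this tag is what kicks off the build-assets workflow
cd ../HPCC-Platform
git tag community_HPCC-12345-rc1
git push origin community_HPCC-12345-rc1
```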
+ +Once the LN and ECLIDE repository tags have been created and pushed with the same base branch that your work is based on for the HPCC-Platform, then you are free to push the HPCC-Platform tag which will initiate the build process. + +The summary of the build-asset workflow can then be viewed for progress, and individual jobs can be selected to check build outputs. +![Build Summary HPCC-12345](/devdoc/resources/images/HPCC-12345-build-in-progress.png) + +## Asset output + +Assets from the workflow will be released into the corresponding tag location, either in the HPCC-Platform repository for all community based builds, or the LN repository for any builds containing proprietary plugins. Simply browse to the releases or tag tab of your repository and select the tag name you just built. The assets will show up there as the build completes. An example of this on the hpcc-systems repository is [hpcc-systems/HPCC-Platform/releases](https://github.com/hpcc-systems/HPCC-Platform/releases). \ No newline at end of file diff --git a/devdoc/resources/images/HPCC-12345-build-in-progress.png b/devdoc/resources/images/HPCC-12345-build-in-progress.png new file mode 100644 index 00000000000..70185a9e6ac Binary files /dev/null and b/devdoc/resources/images/HPCC-12345-build-in-progress.png differ diff --git a/devdoc/resources/images/actions-secrets-and-variables.png b/devdoc/resources/images/actions-secrets-and-variables.png new file mode 100644 index 00000000000..1c267910fc5 Binary files /dev/null and b/devdoc/resources/images/actions-secrets-and-variables.png differ diff --git a/devdoc/resources/images/repository-tag-tab.png b/devdoc/resources/images/repository-tag-tab.png new file mode 100644 index 00000000000..16bdb37acbc Binary files /dev/null and b/devdoc/resources/images/repository-tag-tab.png differ diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index 650354ec8f3..83e594ec3dc 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -20,7 +20,7 @@ "@hpcc-js/common": "2.71.16", "@hpcc-js/comms": "2.92.0", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.0", + "@hpcc-js/eclwatch": "2.74.2", "@hpcc-js/graph": "2.85.14", "@hpcc-js/html": "2.42.19", "@hpcc-js/layout": "2.49.21", @@ -1906,20 +1906,20 @@ } }, "node_modules/@hpcc-js/dgrid": { - "version": "2.32.17", - "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid/-/dgrid-2.32.17.tgz", - "integrity": "sha512-M0QP4vvylMlAMl5iAWKe94zx6xK7SjeQt+iAsN7izwJrZ4PlAPym/bn05VLGfI7iQLT72d/6TRrku/Lh2PyDSg==", + "version": "2.32.19", + "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid/-/dgrid-2.32.19.tgz", + "integrity": "sha512-nFKWjepBJIceN2sTMk8N283OFvU5zwfFAeGqBnT3iQRO2vQRaJzZt4G+9xtgVPbnyWuGiqHhIxYoGJLUOMpLbQ==", "dependencies": { "@hpcc-js/common": "^2.71.16", "@hpcc-js/ddl-shim": "^2.20.6", - "@hpcc-js/dgrid-shim": "^2.24.8", + "@hpcc-js/dgrid-shim": "^2.24.10", "@hpcc-js/util": "^2.51.0" } }, "node_modules/@hpcc-js/dgrid-shim": { - "version": "2.24.8", - "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid-shim/-/dgrid-shim-2.24.8.tgz", - "integrity": "sha512-04+r+7Qa2LSc/aWx+d/QzdRoerPCIpiCXcrXPBf7tBHxOzU8gAIW0WU7wiilUmL2ZdHyLXQrzcT0gKVHkKlJaQ==" + "version": "2.24.10", + "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid-shim/-/dgrid-shim-2.24.10.tgz", + "integrity": "sha512-4PD4GvKn2/HQvgzeP+Gd0Halj4KySk0QW1C7dqfyNWV8AUaseT9SSUvyu2ftGPUrzq65sJ0fSaq4zh3Js9dbaQ==" }, "node_modules/@hpcc-js/dgrid2": { "version": "2.3.18", @@ -1928,18 +1928,18 @@ "dependencies": { "@hpcc-js/common": "^2.71.16", 
"@hpcc-js/preact-shim": "^2.16.10", - "@hpcc-js/util": "^2.51.0" + "@hpcc-js/util": "^2.50.6" } }, "node_modules/@hpcc-js/eclwatch": { - "version": "2.74.0", - "resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.0.tgz", - "integrity": "sha512-l33wC724CKZ/XCeErt6fGNbXrUHFJAY8TInl7KhpYRbimYW/rdLEQ8DuqzPFqHGm1ev2ym8HGn8tIk84M/3g8g==", + "version": "2.74.2", + "resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.2.tgz", + "integrity": "sha512-FY5CQ/Pezq5enRZtVXzmxV2utv+Fiq7Gn7guMz2IhYWmenNDgclIrfKHeXL8nISJsPNl/VJOHCwyxBWuhuGBdw==", "dependencies": { "@hpcc-js/codemirror": "^2.61.3", "@hpcc-js/common": "^2.71.16", "@hpcc-js/comms": "^2.92.0", - "@hpcc-js/dgrid": "^2.32.17", + "@hpcc-js/dgrid": "^2.32.19", "@hpcc-js/graph": "^2.85.14", "@hpcc-js/layout": "^2.49.21", "@hpcc-js/phosphor": "^2.18.7", @@ -2058,12 +2058,12 @@ "resolved": "https://registry.npmjs.org/@hpcc-js/timeline/-/timeline-2.51.24.tgz", "integrity": "sha512-QNgXhJ6/hQHfP2Lge2zL1X5ERI813KKpFN+DNFqufhWoZIT/7x3kr1If8r1mC74hYt4xqkFAdoveEepFT+lYhQ==", "dependencies": { - "@hpcc-js/api": "^2.12.16", - "@hpcc-js/chart": "^2.83.2", - "@hpcc-js/common": "^2.71.16", - "@hpcc-js/html": "^2.42.19", - "@hpcc-js/layout": "^2.49.21", - "@hpcc-js/react": "^2.53.15" + "@hpcc-js/api": "^2.12.15", + "@hpcc-js/chart": "^2.83.1", + "@hpcc-js/common": "^2.71.15", + "@hpcc-js/html": "^2.42.18", + "@hpcc-js/layout": "^2.49.20", + "@hpcc-js/react": "^2.53.14" } }, "node_modules/@hpcc-js/tree": { diff --git a/esp/src/package.json b/esp/src/package.json index 745acde3fe6..a7685259e97 100644 --- a/esp/src/package.json +++ b/esp/src/package.json @@ -46,7 +46,7 @@ "@hpcc-js/common": "2.71.16", "@hpcc-js/comms": "2.92.0", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.0", + "@hpcc-js/eclwatch": "2.74.2", "@hpcc-js/graph": "2.85.14", "@hpcc-js/html": "2.42.19", "@hpcc-js/layout": "2.49.21", diff --git a/esp/src/src-react/components/Activities.tsx b/esp/src/src-react/components/Activities.tsx index 0f1103c7a1f..3339d5c1a82 100644 --- a/esp/src/src-react/components/Activities.tsx +++ b/esp/src/src-react/components/Activities.tsx @@ -178,10 +178,18 @@ export const Activities: React.FunctionComponent = ({ key: "open", text: nlsHPCC.Open, disabled: !uiState.wuSelected && !uiState.thorClusterSelected, iconProps: { iconName: "WindowEdit" }, onClick: () => { if (selection.length === 1) { - window.location.href = `#/operations/clusters/${selection[0].ClusterName}`; + let url = `#/operations/clusters/${selection[0].ClusterName}`; + if (selection[0].Wuid) { + url = `#/workunits/${selection[0].Wuid}`; + } + window.location.href = url; } else { for (let i = selection.length - 1; i >= 0; --i) { - window.open(`#/operations/clusters/${selection[i].ClusterName}`, "_blank"); + let url = `#/operations/clusters/${selection[i].ClusterName}`; + if (selection[i].Wuid) { + url = `#/workunits/${selection[i].Wuid}`; + } + window.open(url, "_blank"); } } } diff --git a/esp/src/src-react/components/ECLArchive.tsx b/esp/src/src-react/components/ECLArchive.tsx index e48267c3114..7f43a368726 100644 --- a/esp/src/src-react/components/ECLArchive.tsx +++ b/esp/src/src-react/components/ECLArchive.tsx @@ -53,8 +53,9 @@ export const ECLArchive: React.FunctionComponent = ({ }, [archive, metrics]); React.useEffect(() => { - if (metrics.length) { - setSelectionText(archive?.content(selection) ?? ""); + const text = archive?.content(selection) ?? ""; + if (text) { + setSelectionText(text); setMarkers(archive?.markers(selection) ?? 
[]); setSelectedMetrics(archive?.metrics(selection) ?? []); } else { diff --git a/esp/src/src-react/components/Helpers.tsx b/esp/src/src-react/components/Helpers.tsx index 7e391bc9223..458308debd8 100644 --- a/esp/src/src-react/components/Helpers.tsx +++ b/esp/src/src-react/components/Helpers.tsx @@ -230,7 +230,7 @@ export const Helpers: React.FunctionComponent = ({ = ({ const columns = React.useMemo((): FluentColumns => { return { Severity: { - label: nlsHPCC.Severity, field: "", width: 72, sortable: false, + label: nlsHPCC.Severity, width: 72, sortable: false, className: (value, row) => { switch (value) { case "Error": @@ -80,8 +80,8 @@ export const InfoGrid: React.FunctionComponent = ({ return ""; } }, - Source: { - label: `${nlsHPCC.Source} / ${nlsHPCC.Cost}`, field: "", width: 144, sortable: false, + Priority: { + label: `${nlsHPCC.Source} / ${nlsHPCC.Cost}`, width: 144, formatter: (Source, row) => { if (Source === "Cost Optimizer") { return formatCost(+row.Cost); @@ -90,7 +90,7 @@ export const InfoGrid: React.FunctionComponent = ({ } }, Priority: { - label: `${nlsHPCC.Priority} / ${nlsHPCC.TimePenalty}`, field: "", width: 144, sortable: false, + label: `${nlsHPCC.Priority} / ${nlsHPCC.TimePenalty}`, width: 144, sortable: false, formatter: (Priority, row) => { if (row.Source === "Cost Optimizer") { return `${formatTwoDigits(+row.Priority / 1000)} (${nlsHPCC.Seconds})`; @@ -98,10 +98,10 @@ export const InfoGrid: React.FunctionComponent = ({ return Priority; } }, - Code: { label: nlsHPCC.Code, field: "", width: 45, sortable: false }, + Code: { label: nlsHPCC.Code, width: 45 }, Message: { - label: nlsHPCC.Message, field: "", - sortable: false, + label: nlsHPCC.Message, + sortable: true, formatter: (Message, idx) => { const info = extractGraphInfo(Message); if (info.graphID && info.subgraphID) { @@ -114,15 +114,15 @@ export const InfoGrid: React.FunctionComponent = ({ return Message; } }, - Column: { label: nlsHPCC.Col, field: "", width: 36, sortable: false }, - LineNo: { label: nlsHPCC.Line, field: "", width: 36, sortable: false }, + Column: { label: nlsHPCC.Col, width: 36 }, + LineNo: { label: nlsHPCC.Line, width: 36 }, Activity: { - label: nlsHPCC.Activity, field: "", width: 56, sortable: false, + label: nlsHPCC.Activity, width: 56, formatter: (activityId, row) => { return activityId ? a{activityId} : ""; } }, - FileName: { label: nlsHPCC.FileName, field: "", width: 360, sortable: false } + FileName: { label: nlsHPCC.FileName, width: 360 } }; }, [wuid]); @@ -220,7 +220,6 @@ export const InfoGrid: React.FunctionComponent = ({ = ({ }) .on("click", (row, col, sel) => { setTimelineFilter(sel ? 
row[7].ScopeName : ""); + if (sel) { + setSelectedMetricsSource("scopesTable"); + pushUrl(`${parentUrl}/${row[7].Id}`); + } }) ); @@ -516,6 +520,10 @@ export const Metrics: React.FunctionComponent = ({ key: "refresh", text: nlsHPCC.Refresh, iconProps: { iconName: "Refresh" }, onClick: () => { refresh(); + timeline + .clear() + .lazyRender() + ; } }, { @@ -536,7 +544,7 @@ export const Metrics: React.FunctionComponent = ({ setShowMetricOptions(true); } } - ], [dockpanel, hotspots, onHotspot, options, refresh, setOptions, showTimeline]); + ], [dockpanel, hotspots, onHotspot, options, refresh, setOptions, showTimeline, timeline]); const formatColumns = React.useMemo((): Utility.ColumnMap => { const copyColumns: Utility.ColumnMap = {}; @@ -584,6 +592,17 @@ export const Metrics: React.FunctionComponent = ({ } ], [dot, formatColumns, fullscreen, metrics, wuid]); + const setShowMetricOptionsHook = React.useCallback((show: boolean) => { + setShowMetricOptions(show); + scopesTable + .metrics(metrics, options, timelineFilter, scopeFilter) + .render(() => { + updateScopesTable(selectedMetrics); + }) + ; + + }, [metrics, options, scopeFilter, scopesTable, selectedMetrics, timelineFilter, updateScopesTable]); + return @@ -618,13 +637,13 @@ export const Metrics: React.FunctionComponent = ({ /> - + - + } />; diff --git a/esp/src/src-react/components/MetricsPropertiesTables.tsx b/esp/src/src-react/components/MetricsPropertiesTables.tsx index 031b5daf8ad..470516b55f7 100644 --- a/esp/src/src-react/components/MetricsPropertiesTables.tsx +++ b/esp/src/src-react/components/MetricsPropertiesTables.tsx @@ -6,13 +6,19 @@ import nlsHPCC from "src/nlsHPCC"; import { AutosizeHpccJSComponent } from "../layouts/HpccJSAdapter"; interface MetricsPropertiesTablesProps { + scopesTableColumns?: string[]; scopes?: IScope[]; } export const MetricsPropertiesTables: React.FunctionComponent = ({ + scopesTableColumns = [], scopes = [] }) => { + const sortByColumns = React.useMemo(() => { + return ["id", "type", "name", ...scopesTableColumns]; + }, [scopesTableColumns]); + // Props Table --- const propsTable = useConst(() => new Table() .columns([nlsHPCC.Property, nlsHPCC.Value, "Avg", "Min", "Max", "Delta", "StdDev", "SkewMin", "SkewMax", "NodeMin", "NodeMax"]) @@ -22,19 +28,34 @@ export const MetricsPropertiesTables: React.FunctionComponent { const props = []; scopes.forEach((item, idx) => { + const scopeProps = []; for (const key in item.__groupedProps) { const row = item.__groupedProps[key]; - props.push([row.Key, row.Value, row.Avg, row.Min, row.Max, row.Delta, row.StdDev, row.SkewMin, row.SkewMax, row.NodeMin, row.NodeMax]); + scopeProps.push([row.Key, row.Value, row.Avg, row.Min, row.Max, row.Delta, row.StdDev, row.SkewMin, row.SkewMax, row.NodeMin, row.NodeMax]); } + scopeProps.sort((l, r) => { + const lIdx = sortByColumns.indexOf(l[0]); + const rIdx = sortByColumns.indexOf(r[0]); + if (lIdx >= 0 && rIdx >= 0) { + return lIdx <= rIdx ? 
-1 : 1; + } else if (lIdx >= 0) { + return -1; + } else if (rIdx >= 0) { + return 1; + } + return 0; + }); if (idx < scopes.length - 1) { - props.push(["------------------------------", "------------------------------"]); + scopeProps.push(["------------------------------", "------------------------------"]); } + props.push(...scopeProps); }); + propsTable ?.data(props) ?.lazyRender() ; - }, [propsTable, scopes]); + }, [propsTable, scopes, sortByColumns]); return ; }; diff --git a/esp/src/src-react/components/Resources.tsx b/esp/src/src-react/components/Resources.tsx index 08797dc3315..d3cb0a73a53 100644 --- a/esp/src/src-react/components/Resources.tsx +++ b/esp/src/src-react/components/Resources.tsx @@ -117,7 +117,7 @@ export const Resources: React.FunctionComponent = ({ = ({ = ({ = ({ const [showZapForm, setShowZapForm] = React.useState(false); const [showThorSlaveLogs, setShowThorSlaveLogs] = React.useState(false); - const [showMessageBar, setShowMessageBar] = React.useState(false); - const dismissMessageBar = React.useCallback(() => setShowMessageBar(false), []); + const [messageBarContent, setMessageBarContent] = React.useState(); + const dismissMessageBar = React.useCallback(() => setMessageBarContent(undefined), []); + const showMessageBar = React.useCallback((content: MessageBarContent) => { + setMessageBarContent(content); + const t = window.setTimeout(function () { + dismissMessageBar(); + window.clearTimeout(t); + }, 2400); + }, [dismissMessageBar]); React.useEffect(() => { setJobname(workunit?.Jobname); @@ -69,7 +84,40 @@ export const WorkunitSummary: React.FunctionComponent = ({ }, [workunit]) }); + const nextWuid = React.useCallback((wuids: WUQuery.ECLWorkunit[]) => { + let found = false; + for (const wu of wuids) { + if (wu.Wuid !== wuid) { + pushUrl(`/workunits/${wu.Wuid}`); + found = true; + break; + } + } + if (!found) { + showMessageBar({ type: MessageBarType.warning, message: nlsHPCC.WorkunitNotFound }); + } + }, [showMessageBar, wuid]); + const buttons = React.useMemo((): ICommandBarItemProps[] => [ + { + key: "next", iconOnly: true, tooltipHostProps: { content: nlsHPCC.NextWorkunit }, iconProps: { iconName: "Previous" }, + onClick: () => { + const now = new Date(Date.now()); + const tomorrow = new Date(now.getTime() + (24 * 60 * 60 * 1000)); + workunitService.WUQuery({ StartDate: `${wuidToDate(wuid)}T${wuidToTime(wuid)}Z`, EndDate: tomorrow.toISOString(), Sortby: "Wuid", Descending: false, Count: 2 } as WUQuery.Request).then(response => { + nextWuid(response?.Workunits?.ECLWorkunit || []); + }).catch(err => logger.error(err)); + } + }, + { + key: "previous", iconOnly: true, tooltipHostProps: { content: nlsHPCC.PreviousWorkunit }, iconProps: { iconName: "Next" }, + onClick: () => { + workunitService.WUQuery({ EndDate: `${wuidToDate(wuid)}T${wuidToTime(wuid)}Z`, Count: 2 } as WUQuery.Request).then(response => { + nextWuid(response?.Workunits?.ECLWorkunit || []); + }).catch(err => logger.error(err)); + } + }, + { key: "divider_0", itemType: ContextualMenuItemType.Divider, onRender: () => }, { key: "refresh", text: nlsHPCC.Refresh, iconProps: { iconName: "Refresh" }, onClick: () => { @@ -91,15 +139,9 @@ export const WorkunitSummary: React.FunctionComponent = ({ Jobname: jobname, Description: description, Protected: _protected - }) - .then(_ => { - setShowMessageBar(true); - const t = window.setTimeout(function () { - setShowMessageBar(false); - window.clearTimeout(t); - }, 2400); - }) - .catch(err => logger.error(err)); + }).then(_ => { + showMessageBar({ type: 
MessageBarType.success, message: nlsHPCC.SuccessfullySaved }); + }).catch(err => logger.error(err)); } }, { @@ -163,7 +205,7 @@ export const WorkunitSummary: React.FunctionComponent = ({ key: "slaveLogs", text: nlsHPCC.SlaveLogs, disabled: !workunit?.ThorLogList, onClick: () => setShowThorSlaveLogs(true) }, - ], [_protected, canDelete, canDeschedule, canReschedule, canSave, description, jobname, refresh, refreshSavings, setShowDeleteConfirm, workunit, wuid]); + ], [_protected, canDelete, canDeschedule, canReschedule, canSave, description, jobname, nextWuid, refresh, refreshSavings, setShowDeleteConfirm, showMessageBar, workunit, wuid]); const serviceNames = React.useMemo(() => { return workunit?.ServiceNames?.Item?.join("\n") || ""; @@ -191,13 +233,9 @@ export const WorkunitSummary: React.FunctionComponent = ({ - {showMessageBar && - - {nlsHPCC.SuccessfullySaved} + {messageBarContent && + + {messageBarContent.message} } diff --git a/esp/src/src-react/components/Workunits.tsx b/esp/src/src-react/components/Workunits.tsx index 88b3236c38a..504f1f698c4 100644 --- a/esp/src/src-react/components/Workunits.tsx +++ b/esp/src/src-react/components/Workunits.tsx @@ -55,9 +55,6 @@ function formatQuery(_filter): { [id: string]: any } { if (filter.Type === true) { filter.Type = "archived workunits"; } - if (filter.Type === true) { - filter.Type = "archived workunits"; - } if (filter.Protected === true) { filter.Protected = "Protected"; } diff --git a/esp/src/src-react/components/forms/AddPackageMap.tsx b/esp/src/src-react/components/forms/AddPackageMap.tsx index 068c51aa3c5..c5e667a4815 100644 --- a/esp/src/src-react/components/forms/AddPackageMap.tsx +++ b/esp/src/src-react/components/forms/AddPackageMap.tsx @@ -1,6 +1,7 @@ import * as React from "react"; -import { Checkbox, DefaultButton, Dropdown, PrimaryButton, Stack, TextField, } from "@fluentui/react"; +import { Checkbox, DefaultButton, Dropdown, IDropdownOption, PrimaryButton, Stack, TextField, } from "@fluentui/react"; import { useForm, Controller } from "react-hook-form"; +import { FileSprayService } from "@hpcc-js/comms"; import { scopedLogger } from "@hpcc-js/util"; import * as WsPackageMaps from "src/WsPackageMaps"; import { TypedDropdownOption } from "../PackageMaps"; @@ -15,6 +16,7 @@ interface AddPackageMapValues { Target: string; Process: string; DaliIp: string; + RemoteStorage: string; Activate: boolean OverWrite: boolean; } @@ -25,10 +27,13 @@ const defaultValues: AddPackageMapValues = { Target: "", Process: "", DaliIp: "", + RemoteStorage: "", Activate: true, OverWrite: false }; +const fileSprayService = new FileSprayService({ baseUrl: "" }); + interface AddPackageMapProps { showForm: boolean; setShowForm: (_: boolean) => void; @@ -47,6 +52,14 @@ export const AddPackageMap: React.FunctionComponent = ({ const { handleSubmit, control, reset } = useForm({ defaultValues }); + const [remoteTargets, setRemoteTargets] = React.useState([]); + + React.useEffect(() => { + fileSprayService.GetRemoteTargets({}).then(response => { + setRemoteTargets(response?.TargetNames?.Item?.map(item => { return { key: item, text: item }; })); + }).catch(err => logger.error(err)); + }, []); + const closeForm = React.useCallback(() => { setShowForm(false); }, [setShowForm]); @@ -168,6 +181,21 @@ export const AddPackageMap: React.FunctionComponent = ({ value={value} />} /> + { + onChange(option.key); + }} + />} + />
= 48 && i <= 57)); - if (m !== n) { - tz[++y] = ""; - n = m; +function isSign(code: number) { + return code === minus || code === plus; +} + +function compare(a, b, opts: { sign: boolean }) { + const checkSign = opts.sign; + let ia = 0; + let ib = 0; + const ma = a.length; + const mb = b.length; + let ca, cb; // character code + let za, zb; // leading zero count + let na, nb; // number length + let sa, sb; // number sign + let ta, tb; // temporary + let bias; + + while (ia < ma && ib < mb) { + ca = a.charCodeAt(ia); + cb = b.charCodeAt(ib); + za = zb = 0; + na = nb = 0; + sa = sb = true; + bias = 0; + + // skip over leading spaces + while (isWhitespace(ca)) { + ia += 1; + ca = a.charCodeAt(ia); + } + while (isWhitespace(cb)) { + ib += 1; + cb = b.charCodeAt(ib); + } + + // skip and save sign + if (checkSign) { + ta = a.charCodeAt(ia + 1); + if (isSign(ca) && isDigit(ta)) { + if (ca === minus) { + sa = false; + } + ia += 1; + ca = ta; + } + tb = b.charCodeAt(ib + 1); + if (isSign(cb) && isDigit(tb)) { + if (cb === minus) { + sb = false; + } + ib += 1; + cb = tb; } - tz[y] += j; } - return tz; - } - const aa = chunkify(a); - const bb = chunkify(b); - - for (let x = 0; aa[x] && bb[x]; x++) { - if (aa[x] !== bb[x]) { - const c = Number(aa[x]); - const d = Number(bb[x]); - // tslint:disable-next-line: triple-equals - if (c == aa[x] && d == bb[x]) { - return c - d; - } else return (aa[x] > bb[x]) ? 1 : -1; + // compare digits with other symbols + if (isDigit(ca) && !isDigit(cb)) { + return -1; + } + if (!isDigit(ca) && isDigit(cb)) { + return 1; + } + + // compare negative and positive + if (!sa && sb) { + return -1; + } + if (sa && !sb) { + return 1; } - } - return aa.length - bb.length; -} -/* ******************************************************************** - * Alphanum sort() function version - case insensitive - * - Slower, but easier to modify for arrays of objects which contain - * string properties - * - */ -export function alphanumCase(a, b) { - function chunkify(t) { - const tz = []; - let x = 0; - let y = -1; - let n = false; - let i; - let j; - - // eslint-disable-next-line no-cond-assign - while (i = (j = t.charAt(x++)).charCodeAt(0)) { - // tslint:disable-next-line: triple-equals - const m = (i == 46 || (i >= 48 && i <= 57)); // jshint ignore:line - if (m !== n) { - tz[++y] = ""; - n = m; + // count leading zeros + while (ca === zero) { + za += 1; + ia += 1; + ca = a.charCodeAt(ia); + } + while (cb === zero) { + zb += 1; + ib += 1; + cb = b.charCodeAt(ib); + } + + // count numbers + while (isDigit(ca) || isDigit(cb)) { + if (isDigit(ca) && isDigit(cb) && bias === 0) { + if (sa) { + if (ca < cb) { + bias = -1; + } else if (ca > cb) { + bias = 1; + } + } else { + if (ca > cb) { + bias = -1; + } else if (ca < cb) { + bias = 1; + } + } + } + if (isDigit(ca)) { + ia += 1; + na += 1; + ca = a.charCodeAt(ia); + } + if (isDigit(cb)) { + ib += 1; + nb += 1; + cb = b.charCodeAt(ib); } - tz[y] += j; } - return tz; - } - const aa = chunkify(a.toLowerCase()); - const bb = chunkify(b.toLowerCase()); - - for (let x = 0; aa[x] && bb[x]; x++) { - if (aa[x] !== bb[x]) { - const c = Number(aa[x]); - const d = Number(bb[x]); - // tslint:disable-next-line: triple-equals - if (c == aa[x] && d == bb[x]) { // jshint ignore:line - return c - d; - } else return (aa[x] > bb[x]) ? 
1 : -1; + // compare number length + if (sa) { + if (na < nb) { + return -1; + } + if (na > nb) { + return 1; + } + } else { + if (na > nb) { + return -1; + } + if (na < nb) { + return 1; + } + } + + // compare numbers + if (bias) { + return bias; + } + + // compare leading zeros + if (sa) { + if (za > zb) { + return -1; + } + if (za < zb) { + return 1; + } + } else { + if (za < zb) { + return -1; + } + if (za > zb) { + return 1; + } + } + + // compare ascii codes + if (ca < cb) { + return -1; + } + if (ca > cb) { + return 1; } + + ia += 1; + ib += 1; + } + + // compare length + if (ma < mb) { + return -1; } - return aa.length - bb.length; + if (ma > mb) { + return 1; + } + return 0; } +// ----------------------------------------------------------------------------------------------- export function onDomMutate(domNode, callback, observerOpts) { observerOpts = observerOpts || { attributes: true, attributeFilter: ["style"] }; @@ -458,8 +537,10 @@ export function onDomMutate(domNode, callback, observerOpts) { observer.observe(domNode, observerOpts); } -export function alphanumCompare(l, r, caseInsensitive: boolean = true, reverse: boolean = true): number { - const cmp = caseInsensitive ? alphanumCase(l, r) : alphanum(l, r); +export function alphanumCompare(_l, _r, caseInsensitive: boolean = true, reverse: boolean = true): number { + const l = caseInsensitive && typeof _l === "string" ? _l.toLocaleLowerCase() : _l; + const r = caseInsensitive && typeof _r === "string" ? _r.toLocaleLowerCase() : _r; + const cmp = compare(l, r, { sign: false }); if (cmp !== 0) { return cmp * (reverse ? -1 : 1); } diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index b7c1ea32962..9d413f84644 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -586,6 +586,7 @@ export = { Newest: "Newest", NewPassword: "New Password", NextSelection: "Next Selection", + NextWorkunit: "Next Workunit", NoContent: "(No content)", NoContentPleaseSelectItem: "No content - please select an item", NoCommon: "No Common", @@ -716,6 +717,7 @@ export = { PressCtrlCToCopy: "Press ctrl+c to copy.", Preview: "Preview", PreviousSelection: "Previous Selection", + PreviousWorkunit: "Previous Workunit", PrimaryLost: "Primary Lost", PrimaryMonitoring: "Primary Monitoring", Priority: "Priority", @@ -1128,6 +1130,7 @@ export = { WildcardFilter: "Wildcard Filter", Workflows: "Workflows", Workunit: "Workunit", + WorkunitNotFound: "Workunit not found", Workunits: "Workunits", WorkUnitScopeDefaultPermissions: "Workunit Scope Default Permissions", Wrap: "Wrap", diff --git a/esp/src/src/store/util/SimpleQueryEngine.ts b/esp/src/src/store/util/SimpleQueryEngine.ts index e7b8a9e9bc4..e9f74c8f863 100644 --- a/esp/src/src/store/util/SimpleQueryEngine.ts +++ b/esp/src/src/store/util/SimpleQueryEngine.ts @@ -2,7 +2,7 @@ import { alphanumCompare } from "../../Utility"; import { BaseRow, QueryOptions, QueryRequest, QuerySort } from "../Store"; function createSortFunc(sortSet: QuerySort, alphanumColumns: { [id: string]: boolean }) { - return typeof sortSet == "function" ? sortSet : function (a, b) { + return typeof sortSet == "function" ? sortSet : function (a: any, b: any) { for (let i = 0; sortSet[i]; i++) { const sort = sortSet[i]; if (alphanumColumns[sort.attribute as string]) { @@ -16,6 +16,9 @@ function createSortFunc(sortSet: QuerySort, alphanumColumn // valueOf enables proper comparison of dates aValue = aValue != null ? aValue.valueOf() : aValue; bValue = bValue != null ? 
bValue.valueOf() : bValue; + if (typeof aValue === "string" && typeof bValue === "string") { + return aValue.localeCompare(bValue, undefined, { sensitivity: "base" }) * (sort.descending ? -1 : 1); + } if (aValue != bValue) { return !!sort.descending == (aValue == null || aValue > bValue) ? -1 : 1; } diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 19fd450f10f..7950c9fd306 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1315,9 +1315,10 @@ Add resource object Pass in a dictionary with me defined */}} {{- define "hpcc.addResources" }} -{{- if .me }} - {{- $limits := omit .me "cpu" }} - {{- $requests := pick .me "cpu" }} +{{- $resources := .me | default .defaults }} +{{- if $resources }} + {{- $limits := omit $resources "cpu" }} + {{- $requests := pick $resources "cpu" }} resources: {{- if $limits }} limits: @@ -1335,17 +1336,16 @@ Add resources object for stub pods Pass in dict with root, me and instances defined */}} {{- define "hpcc.addStubResources" -}} -{{- $stubInstanceResources := .root.Values.global.stubInstanceResources | default dict -}} -{{- $milliCPUPerInstance := $stubInstanceResources.cpu | default "50m" -}} -{{- $memPerInstance := $stubInstanceResources.memory | default "200Mi" -}} -{{- $milliCPUs := int (include "hpcc.k8sCPUStringToMilliCPU" $milliCPUPerInstance) -}} -{{- $bytes := int64 (include "hpcc.k8sMemoryStringToBytes" $memPerInstance) -}} -{{- $totalBytes := mul .instances $bytes }} +{{- $stubInstanceResources := .stubResources | default .root.Values.global.stubInstanceResources | default dict }} +{{- $milliCPUText := $stubInstanceResources.cpu | default "200m" }} +{{- $milliCPUs := int (include "hpcc.k8sCPUStringToMilliCPU" $milliCPUText) }} +{{- $memoryText := $stubInstanceResources.memory | default "50Mi" }} +{{- $memory := int64 (include "hpcc.k8sMemoryStringToBytes" $memoryText) }} resources: limits: - memory: {{ include "hpcc.bytesToK8sMemoryString" $totalBytes | quote }} + memory: {{ include "hpcc.bytesToK8sMemoryString" $memory | quote }} requests: - cpu: {{ printf "%dm" (mul .instances $milliCPUs) | quote }} + cpu: {{ printf "%dm" $milliCPUs | quote }} {{- end -}} {{/* diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index ecb9a3fb0e6..e8789fe3d30 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -172,7 +172,7 @@ spec: {{- if .useChildProcesses }} {{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} {{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxActive)) | indent 8 }} +{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} {{- end }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 171885bde33..79f53b4975f 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -184,7 +184,8 @@ spec: {{- if .useChildProcesses }} {{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} {{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxActive)) | indent 8 }} +{{- $defaultResources := dict "cpu" "1" "memory" "1Gi" }} +{{- include "hpcc.addResources" (dict "me" .timedChildResources "defaults" $defaultResources) | indent 8 }} {{- end }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} diff --git 
a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index b84035e1c92..5007bbf47cd 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -95,7 +95,8 @@ spec: {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} {{- $omitResources := hasKey $.Values.global "omitResources" | ternary $.Values.global.omitResources $.Values.global.privileged }} {{- if not $omitResources }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" 1)) | indent 8 }} +{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} +{{- include "hpcc.addResources" (dict "me" .resources "defaults" $defaultResources) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index ae0d2d2d4f8..a5a702b944a 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -148,7 +148,8 @@ spec: {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} {{- $omitResources := hasKey $.Values.global "omitResources" | ternary $.Values.global.omitResources $.Values.global.privileged }} {{- if not $omitResources }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" 1)) | indent 8 }} +{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} +{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} workingDir: /var/lib/HPCCSystems diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index b05b3c2bdfa..1386b94dc09 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -395,7 +395,7 @@ spec: {{- if $commonCtx.eclAgentUseChildProcesses }} {{- include "hpcc.addResources" (dict "me" .eclAgentResources) | indent 8 }} {{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxJobs)) | indent 8 }} +{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} {{- end }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} @@ -458,7 +458,7 @@ spec: {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} {{- $omitResources := hasKey $.Values.global "omitResources" | ternary $.Values.global.omitResources $.Values.global.privileged }} {{- if not $omitResources }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxGraphs)) | indent 8 }} +{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: diff --git a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json index c4cc710b8c9..17826c12af7 100644 --- a/helm/hpcc/values.schema.json +++ b/helm/hpcc/values.schema.json @@ -1431,6 +1431,9 @@ "resources": { "$ref": "#/definitions/resources" }, + "timedChildResources": { + "$ref": "#/definitions/resources" + }, "cost": { "$ref" : "#/definitions/componentCost" }, @@ -1617,6 +1620,9 @@ "resources": { "$ref": "#/definitions/resources" }, + "stubResources": { + "$ref": "#/definitions/resources" + }, "jobMemory": { "$ref": "#/definitions/memory" }, @@ -1725,6 +1731,9 @@ "channelResources": { "$ref": "#/definitions/resources" }, + "topoResources": { + "$ref": "#/definitions/resources" + }, "annotations": { "type": "object", "additionalProperties": { "type": "string" } @@ 
-2597,6 +2606,9 @@ "eclAgentResources": { "$ref": "#/definitions/resources" }, + "stubResources": { + "$ref": "#/definitions/resources" + }, "cost": { "$ref" : "#/definitions/componentCost" }, diff --git a/testing/helm/tests/resourced.yaml b/testing/helm/tests/resourced.yaml new file mode 100644 index 00000000000..a8d3a0053e6 --- /dev/null +++ b/testing/helm/tests/resourced.yaml @@ -0,0 +1,867 @@ +# Default values for hpcc. + +global: + # Settings in the global section apply to all HPCC components in all subcharts + + image: + ## It is recommended to name a specific version rather than latest, for any non-trivial deployment + ## For best results, the helm chart version and platform version should match, which is the default if version is + ## not specified. Do not override without good reason as undefined behavior may result. + ## version: x.y.z + root: "hpccsystems" # change this if you want to pull your images from somewhere other than DockerHub hpccsystems + pullPolicy: IfNotPresent + ## If you need to provide credentials to pull your image, they should be added as a k8s secret, and the secret name provided here + # imagePullSecrets: xxx + + ## busybox image is used for some initialization/termination tasks - you can override the location here + #busybox: "myrepo/busybox:stable" + + ## It is possible (but not recommended) to change the uid/gid that the HPCC containers run under + ## user: + ## uid: 10000 + ## gid: 10001 + + # logging sets the default logging information for all components. Can be overridden locally + logging: + detail: 80 + + # tracing sets the default tracing information for all components. Can be overridden locally + tracing: + disabled: false + alwaysCreateTraceIds: true + + ## resource settings for stub components + #stubInstanceResources: + # memory: "200Mi" + # cpu: "20m" + + ## env adds default environment variables for all components. Environment settings can also be added or overridden locally + #env: + #- name: SMTPserver + # value: mysmtpserver + + # Specify a defaultEsp to control which eclservices service is returned from Std.File.GetEspURL, and other uses + # If not specified, the first esp component that exposes eclservices application is assumed. 
+ # Can also be overridden locally in individual components + ## defaultEsp: eclservices + + egress: + ## If restricted is set, NetworkPolicies will include egress restrictions to allow connections from pods only to the minimum required by the system + ## Set to false to disable all egress policy restrictions (not recommended) + restricted: true + + ## The kube-system namespace is not generally labelled by default - to enable more restrictive egress control for dns lookups we need to be told the label + ## If not provided, DNS lookups on port 53 will be allowed to connect anywhere + ## The namespace may be labelled using a command such as "kubectl label namespace kube-system name=kube-system" + # kubeSystemLabel: "kube-system" + + ## To properly allow access to the kubectl API from pods that need it, the cidr of the kubectl endpoint needs to be supplied + ## This may be obtained via "kubectl get endpoints --namespace default kubernetes" + ## If these are not supplied, egress controls will allow access to any IPs/ports from any pod where API access is needed + # kubeApiCidr: 172.17.0.3/32 + # kubeApiPort: 7443 + + ## named egress sections defined here, can be referenced by components, or they can define their own egress section explicitly + #engineEgress: + #- to: + # - ipBlock: + # cidr: 10.9.8.7/32 + # ports: + # - protocol: TCP + # port: 443 + + + cost: + currencyCode: USD + # The following are example pricing based on standard Azure pricing and should be updated to reflect actual rates + perCpu: 0.0565000000001 # D64ds_v4 compute node ($2,639.68/month for 64 vCPU) + storageAtRest: 0.0208000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + storageReads: 0.00400000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + storageWrites: 0.0500000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + + # postJobCommand will execute at the end of a dynamically launched K8s job, + # when the main entrypoint process finishes, or if the readiness probes trigger a preStop event. + # This can be useful if injected sidecars are installed that need to be told to stop. + # If they are not stopped, the pod continues running with the side car container only, in a "NotReady" state. + # An example of this is the Istio envoy side car. It can be stopped with the command below. + # Set postJobCommandViaSidecar to true, if the command needs to run with privilege, this will enable the command + # to run as root in a sidecar in same process space as other containers, allowing it to for example send signals + # to processes in sidecars + # misc: + # postJobCommand: "curl -sf -XPOST http://127.0.0.1:15020/quitquitquit" + # Or example for linkerd + # postJobCommand: "kill $(pgrep linkerd2-proxy)" + # postJobCommandViaSidecar: true + + ## visibilities section can be used to set labels, annotations and service type for any service with the specified visibility + visibilities: + cluster: + type: ClusterIP + local: + annotations: + # This annotation will make azure load balancer use an internal rather than an internet-visible address + # May want different values on different cloud providers or use-cases. 
For example on AWS you may want to use + #service.beta.kubernetes.io/aws-load-balancer-internal: "true" + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + type: LoadBalancer + # If ingress is specified, an ingress Network Policy will be created for any pod implementing a service with this visibility + # Default allows ingress from anywhere, but more restrictive rules can be used if preferred. + # Ingress rules can also be overridden by individual services + ingress: + - {} + global: + #labels: + # mylabel: "4" + type: LoadBalancer + ingress: + - {} + ## CIDRS allowed to access this service. + #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32] + + # example expert section. The sysctl list will be applied to each pod in a privileged init container + # expert: + # sysctl: + # - kernel.dmesg_restrict=0 + +# For pod placement instruction and examples please reference docs/placements.md +# The following is for tolerations of Spot Node Pool on Azure. Other cloud providers +# may have different taints for Spot Node Pool. The tolerations are harmless when +# there is no taint on the node pool. +#placements: +# - pods: ["all"] +# placement: +# tolerations: +# - key: "kubernetes.azure.com/scalesetpriority" +# operator: "Equal" +# value: "spot" +# effect: "NoSchedule" + +security: + eclSecurity: + # Possible values: + # allow - functionality is permitted + # deny - functionality is not permitted + # allowSigned - functionality permitted only if code signed + embedded: "allow" + pipe: "allow" + extern: "allow" + datafile: "allow" + +## storage: +## +## 1. If an engine component has the dataPlane property set, then that plane will be the default data location for that component. +## 2. If there is a plane definition with a category of "data" then the first matching plane will be the default data location +## +## If a data plane contains the storageClass property then an implicit pvc will be created for that data plane. +## +## If plane.pvc is defined, a Persistent Volume Claim must exist with that name, storageClass and storageSize are not used. +## +## If plane.storageClass is defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If set to "", choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack) +## +## plane.forcePermissions=true is required by some types of provisioned +## storage, where the mounted filing system has insufficient permissions to be +## read by the hpcc pods. Examples include using hostpath storage (e.g. on +## minikube and docker for desktop), or using NFS mounted storage. + +storage: + planes: + # name: + # prefix: # Root directory for accessing the plane (if pvc defined), or url to access plane. + # category: data|dali|lz|dll|spill|temp # What category of data is stored on this plane? + # + # For dynamic pvc creation: + # storageClass: '' + # storageSize: 1Gi + # + # For persistent storage: + # pvc: # The name of the persistant volume claim + # forcePermissions: false + # hosts: [ ] # Inline list of hosts + # hostGroup: # Name of the host group for bare metal - must match the name of the storage plane.. + # + # Other options: + # subPath: # Optional sub directory within to use as the root directory + # numDevices: 1 # number of devices that are part of the plane + # secret: # what secret is required to access the files. This could optionally become a list if required (or add secrets:). 
+ # defaultSprayParts: 4 # The number of partitions created when spraying (default: 1) + # eclwatchVisible: true # Can the lz plane be visible from ECLWatch (default: true) + # cost: # The storage cost + # storageAtRest: 0.0135 # Storage at rest cost: cost per GiB/month + # storageapi: # Optional information to allow access to storage api + # type: azurefile | azureblob + # account: # azure storage account name + # secret: # secret name (under secrets/storage) for accessing SAS token + # containers: [ ] # a list of containers + + - name: dali + storageClass: "" + storageSize: 1Gi + prefix: "/var/lib/HPCCSystems/dalistorage" + category: dali + - name: sasha + storageClass: "" + storageSize: 1Gi + prefix: "/var/lib/HPCCSystems/sashastorage" + category: sasha + - name: dll + storageClass: "" + storageSize: 1Gi + prefix: "/var/lib/HPCCSystems/queries" + category: dll + - name: data + storageClass: "" + storageSize: 1Gi + prefix: "/var/lib/HPCCSystems/hpcc-data" + category: data + - name: mydropzone + storageClass: "" + storageSize: 1Gi + prefix: "/var/lib/HPCCSystems/mydropzone" + category: lz + - name: debug + disabled: False + storageClass: "" + storageSize: 1Gi + prefix: "/var/lib/HPCCSystems/debug" + category: debug + +## The certificates section can be used to enable cert-manager to generate TLS certificates for each component in the hpcc. +## You must first install cert-manager to use this feature. +## https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm +## +## The Certificate issuers are divided into "local" (those which will be used for local mutual TLS) and "public" those +## which will be publicly accessible and therefore need to be recognized by browsers and/or other entities. +## +## Both public and local issuers have a spec section. The contents of the "spec" are documented in the cert-manager +## "Issuer configuration" documentation. https://cert-manager.io/docs/configuration/#supported-issuer-types +## +## The default configuration is meant to provide reasonable functionality without additional dependencies. +## +## Public issuers can be tricky if you want browsers to recognize the certificates. This is a complex topic outside the scope +## of this comment. The default for the public issuer generates self signed certificates. The expectation is that this will be +## overridden by the configuration of an external certificate authority or vault in QA and production environments. +## +## The default for the local (mTLS) issuer is designed to act as our own local certificate authority. We only need to recognize +## what a component is, and that it belongs to this cluster. +## But a kubernetes secret must be provided for the certificate authority key-pair. The default name for the secret +## is "hpcc-local-issuer-key-pair". The secret is a standard kubernetes.io/tls secret and should provide data values for +## "tls.crt" and "tls.key". +## +## The local issuer can also be configured to use an external certificate authority or vault. +## +certificates: + enabled: false + issuers: + local: + name: hpcc-local-issuer + ## kind can be changed to ClusterIssue to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer + kind: Issuer + ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster + ## change spec if you'd like to change how certificates get issued... 
see ## https://cert-manager.io/docs/configuration/#supported-issuer-types + ## for information on what spec should contain. + spec: + ca: + secretName: hpcc-local-issuer-key-pair + public: + name: hpcc-public-issuer + ## kind can be changed to ClusterIssue to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer + kind: Issuer + ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster + ## change spec if you'd like to change how certificates get issued... see ## https://cert-manager.io/docs/configuration/#supported-issuer-types + ## for information on what spec should contain. + spec: + selfSigned: {} + vaultclient: + name: hpcc-vaultclient-issuer + enabled: false + ## domain: hpcc.example.com + rolePrefix: "hpcc-" + kind: Issuer + ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster + ## change spec if you'd like to change how certificates get issued... see ## https://cert-manager.io/docs/configuration/#supported-issuer-types + ## for information on what spec should contain. + spec: + ca: + secretName: hpcc-vaultclient-issuer-key-pair + remote: + name: hpcc-remote-issuer + ## set enabled to true if adding remoteClients for any components + enabled: false + ## kind can be changed to ClusterIssue to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer + kind: Issuer + ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster + ## change spec if you'd like to change how certificates get issued... see ## https://cert-manager.io/docs/configuration/#supported-issuer-types + ## for information on what spec should contain. + spec: + ca: + secretName: hpcc-remote-issuer-key-pair + signing: # intended to be used for signing/verification purposes only, e.g. by dafilesrv + name: hpcc-signing-issuer + ## kind can be changed to ClusterIssue to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer + kind: Issuer + ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster + ## change spec if you'd like to change how certificates get issued... see ## https://cert-manager.io/docs/configuration/#supported-issuer-types + ## for information on what spec should contain. + spec: + ca: + secretName: hpcc-signing-issuer-key-pair + +## The secrets section contains a set of categories, each of which contain a list of secrets. The categories determine which +## components have access to the secrets. +## For each secret: +## name is the name that it is accessed by within the platform +## secret is the name of the secret that should be published +secrets: + #timeout: 300 # timeout period for cached secrets. Should be similar to the k8s refresh period. + + #Secret categories follow, remove the {} if a secret is defined in a section + storage: {} + ## Secrets that are required for accessing storage. 
Currently exposed in the engines, but in the future will + ## likely be restricted to esp (when it becomes the meta-data provider) + ## For example, to set the secret associated with the azure storage account "mystorageaccount" use + ##azure-mystorageaccount: storage-myazuresecret + + authn: {} + ## Category to deploy authentication secrets to container, and to create a key name alias to reference those secrets + #ldapadmincredskey: "admincredssecretname" ## Default k/v for LDAP authentication secrets + #testauthusercreds1: "testauthusercreds1" ## Default k/v for test authentication secrets + #testauthusercreds2: "testauthusercreds2" ## Default k/v for test authentication secrets + + ecl: {} + ## Category for secrets published to all components that run ecl. These secrets are for use by internal + ## ECL processing. For example HTTPCALL and SOAPCALL have built in support for secrets that are not directly + ## accessible to users, that is, not accessed directly via ECL code. + + eclUser: {} + ## Category for secrets accessible via ecl code. These are secrets that users can access directly. Be cautious about + ## what secrets you add to this category as they are easily accessed by ECL code. + + codeSign: {} + #gpg-private-key-1: codesign-gpg-key-1 + #gpg-private-key-2: codesign-gpg-key-2 + + codeVerify: {} + #gpg-public-key-1: codesign-gpg-public-key-1 + #gpg-public-key-2: codesign-gpg-public-key-2 + + system: {} + ## Category for secrets published to all components for system level usage + + git: {} + ## Category to provide passwords for eclccserver to access private git repos + +## The vaults section mirrors the secret section but leverages vault for the storage of secrets. +## There is an additional category for vaults named "eclUser". "eclUser" vault +## secrets are readable directly from ECL code. Other secret categories are read internally +## by system components and not exposed directly to ECL code. +## +## For each vault: +## name is the name that it is accessed by within the platform +## url is the url used to read a secret from the vault. +## kind is the type of vault being accessed, or the protocol to use to access the secrets +## client_secret is a kubernetes level secret that contains the client_token used to retrieve secrets. +## if a client_secret is not provided "vault kubernetes auth" will be attempted. + +vaults: + storage: + git: + authn: + ecl: + # vault using vault client certs or kubernetes auth depending on whether certificates.issuers.vaultclient.enabled is true + # to use approle authentication specify appRoleId and appRoleSecret + # - name: my-ecl-vault + #Note the data node in the URL is there for the REST APIs use. The path inside the vault starts after /data + # url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/ecl/${secret} + # kind: kv-v2 + # namespace: + eclUser: + # vault using vault client certs or kubernetes auth depending on whether certificates.issuers.vaultclient.enabled is true + # to use approle authentication specify appRoleId and appRoleSecret + # - name: my-eclUser-vault + #Note the data node in the URL is there for the REST APIs use. The path inside the vault starts after /data + # url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/eclUser/${secret} + # kind: kv-v2 + esp: + + ## The keys for code signing may be imported from the vault. Multiple keys may be imported.
+ ## gpg keys may be imported as follows: + ## vault kv put secret/codeSign/gpg-private-key-1 passphrase= private=@ + ## vault kv put secret/codeSign/gpg-private-key-2 passphrase= private=@ + codeSign: + # - name: codesign-private-keys + # url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/codeSign/${secret} + # kind: kv-v2 + # namespace: mynamespace # for use with enterprise vaults segmented by namespaces + ## The keys for verifying signed code may be imported from the vault. + ## vault kv put secret/codeVerify/gpg-public-key-1 public=@ + ## vault kv put secret/codeVerify/gpg-public-key-2 public=@ + codeVerify: + # - name: codesign-public-keys + # url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/codeVerify/${secret} + # kind: kv-v2 + # namespace: mynamespace # for use with enterprise vaults segmented by namespaces + +bundles: [] +## Specifying bundles here will cause the indicated bundles to be downloaded and installed automatically +## whenever an eclccserver pod is started +# for example +# - name: DataPatterns + +# A dafilesrv 'stream' service is required to expose HPCC file access to 3rd parties (e.g. Spark / Java) +# Access will only be granted to requests that have been signed by the DFUFileAccess service +dafilesrv: +- name: rowservice + disabled: true # disabled by default because requires cert-manager etc. (see certificates section) + application: stream + service: + servicePort: 7600 + visibility: local + +# Enable if bare-metal systems require read access to this systems' data planes via ~remote:: +# If legacy ~foreign:: access is required, Dali will also need to be exposed via a service definition in the dali configuration +# NB: ingress rules should be added to limit access. +- name: direct-access + disabled: true + application: directio + service: + servicePort: 7200 + visibility: local + +- name: spray-service + application: spray + service: + servicePort: 7300 + visibility: cluster + + +dali: +- name: mydali + auth: none + services: # internal house keeping services + coalescer: + service: + servicePort: 8877 + #interval: 2 # (hours) + #at: "* * * * *" # cron type schedule, i.e. Min(0-59) Hour(0-23) DayOfMonth(1-31) Month(1-12) DayOfWeek(0-6) + #minDeltaSize: 50 # (Kb) will not start coalescing until delta log is above this threshold + resources: + cpu: "1" + memory: "10G" + + resources: + cpu: "2" + memory: "20G" + +sasha: + #disabled: true # disable all services. 
Alternatively set sasha to null (sasha: null) + wu-archiver: + #disabled: true + service: + servicePort: 8877 + plane: sasha + #interval: 6 # (hours) + #limit: 1000 # threshold number of workunits before archiving starts (0 disables) + #cutoff: 8 # minimum workunit age to archive (days) + #backup: 0 # minimum workunit age to backup (days, 0 disables) + #at: "* * * * *" + #duration: 0 # (maxDuration) - Maximum duration to run WorkUnit archiving session (hours, 0 unlimited) + #throttle: 0 # throttle ratio (0-99, 0 no throttling, 50 is half speed) + #retryinterval: 7 # minimal time before retrying archive of failed WorkUnits (days) + #keepResultFiles: false # option to keep result files owned by workunits after workunit is archived + resources: + cpu: "1" + memory: "4Gi" + + dfuwu-archiver: + #disabled: true + service: + servicePort: 8877 + plane: sasha + #forcePermissions: false + #limit: 1000 # threshold number of DFU workunits before archiving starts (0 disables) + #cutoff: 14 # minimum DFU workunit age to archive (days) + #interval: 24 # minimum interval between running DFU recovery archiver (in hours, 0 disables) + #at: "* * * * *" # schedule to run DFU workunit archiver (cron format) + #duration: 0 # (maxDuration) maximum duration to run DFU WorkUnit archiving session (hours, 0 unlimited) + #throttle: 0 # throttle ratio (0-99, 0 no throttling, 50 is half speed) + resources: + cpu: "1" + memory: "4Gi" + + dfurecovery-archiver: + #disabled: true + #limit: 20 # threshold number of DFU recovery items before archiving starts (0 disables) + #cutoff: 4 # minimum DFU recovery item age to archive (days) + #interval: 12 # minimum interval between running DFU recovery archiver(in hours, 0 disables) + #at: "* * * * *" # schedule to run DFU recovery archiver (cron format) + resources: + cpu: "1" + memory: "4Gi" + + file-expiry: + #disabled: true + #interval: 1 + #at: "* 3 * * *" + #persistExpiryDefault: 7 + #expiryDefault: 4 + #user: sasha + resources: + cpu: "1" + memory: "4Gi" + +dfuserver: +- name: dfuserver + maxJobs: 1 + resources: + cpu: "1" + memory: "1800Mi" + +eclagent: +- name: hthor + ## replicas indicates how many eclagent pods should be started + replicas: 1 + ## maxActive controls how many workunits may be active at once (per replica) + maxActive: 4 + ## prefix may be used to set a filename prefix applied to any relative filenames used by jobs submitted to this queue + prefix: hthor + ## Set to false if you want to launch each workunit in its own container, true to run as child processes in eclagent pod + useChildProcesses: false + ## type may be 'hthor' (the default) or 'roxie', to specify that the roxie engine rather than the hthor engine should be used for eclagent workunit processing + type: hthor + ## The following resources apply to child hThor pods when useChildProcesses=false, otherwise they apply to hThor pod. + resources: + cpu: "1" + memory: "1G" + stubResources: + cpu: "100m" + memory: "100Mi" + #egress: engineEgress + +- name: roxie-workunit + replicas: 1 + prefix: roxie_workunit + maxActive: 20 + useChildProcesses: true + type: roxie + #resources: + # cpu: "1" + # memory: "1G" + #egress: engineEgress + resources: + cpu: "1" + memory: "1G" + stubResources: + cpu: "100m" + memory: "100Mi" + +eclccserver: +- name: myeclccserver + replicas: 1 + ## Set to false if you want to launch each workunit compile in its own container, true to run as child processes in eclccserver pod. 
+ useChildProcesses: false + ## If non-zero, and useChildProcesses is false, try spending up to this number of seconds compiling using a child process before switching to + ## a separate container. Speeds up throughput of small jobs. + childProcessTimeLimit: 10 + ## maxActive controls how many workunit compiles may be active at once (per replica) + maxActive: 4 + ## Specify a list of queues to listen on if you don't want this eclccserver listening on all queues. If empty or missing, listens on all queues + listen: [] + ## The following allows eclcc options (names start with a -) and debug options to be defined for each of the workunits that are compiled. + #options: + #- name: globalAutoHoist + # value: false + # cluster: name # optional cluster this is applied to + + # used to configure the authentication for git when using the option to compile from a repo. Also requires an associated secret. + #gitUsername: + + ## The following resources apply to child compile pods when useChildProcesses=false, otherwise they apply to eclccserver pod. + resources: + cpu: "1" + memory: "20Gi" + timedChildResources: + cpu: "1" + memory: "798Mi" + +esp: +- name: eclwatch + ## Pre-configured esp applications include eclwatch, eclservices, and eclqueries + application: eclwatch + auth: none + replicas: 1 + resources: + cpu: "4" + memory: "8G" + ## The following 'corsAllowed' section is used to configure CORS support + ## origin - the origin to support CORS requests from + ## headers - the headers to allow for the given origin via CORS + ## methods - the HTTP methods to allow for the given origin via CORS + ## + #corsAllowed: + ## origin starting with https will only allow https CORS + #- origin: https://*.my.com + # headers: + # - "X-X" + # methods: + # - "GET" + # - "OPTIONS" + ## origin starting with http will allow http or https CORS + #- origin: http://www.example.com + # headers: + # - "*" + # methods: + # - "GET" + # - "POST" + # - "OPTIONS" + +# Add remote clients to generated client certificates and make the ESP require that one of the generated certificates is provided by a client in order to connect +# When setting up remote clients make sure that certificates.issuers.remote.enabled is set to true. +# remoteClients: +# - name: petfoodapplicationprod +# organization: petfoodDept +# secretTemplate: +# annotations: +# kubed.appscode.com/sync: "hpccenv=petfoodAppProd" # use kubed config-syncer to replicate certificate to namespace with matching annotation (also supports syncing with separate aks clusters) + +# trustClients and remoteClients can be combined. Trust is far easier to manage and should now be the preferred mechanism. +# Trust is similar to remoteClients, but unlike remoteClients, the client certificates are generated elsewhere. +# If trust is present then esp will use mtls, with trust controlled by certificates.issuers.remote, which must be enabled. +# When using trustClients the remote issuer of each environment should point to the same certifate authority. +# Verification of identity is automatic if the CA matches, but only the clients listed here are actually allowed access +# trustClients: +# - commonName: rabc.example.com + + service: + ## port can be used to change the local port used by the pod. If omitted, the default port (8880) is used + port: 8888 + ## servicePort controls the port that this service will be exposed on, either internally to the cluster, or externally + servicePort: 8010 + ## wsdlAddress should be set to the host and port which clients can use to hit this service. 
+ # This address is added to the service wsdl files which simplify setting up a SOAP client to hit this service. There may be many external factors determining the address + # that is accible to clients. + # wsdlAddress: clientfacingaddress:8010 + ## Specify visibility: local (or global) if you want the service available from outside the cluster. Typically, eclwatch and wsecl are published externally, while eclservices is designed for internal use. + visibility: local + ## Annotations can be specified on a service - for example to specify provider-specific information such as service.beta.kubernetes.io/azure-load-balancer-internal-subnet + #annotations: + # service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "mysubnet" + # The service.annotations prefixed with hpcc.eclwatch.io should not be declared here. They can be declared + # in other services in order to be exposed in the ECLWatch interface. Similar function can be used by other + # applications. For other applications, the "eclwatch" inside the service.annotations should be replaced by + # their application names. + # hpcc.eclwatch.io/enabled: "true" + # hpcc.eclwatch.io/description: "some description" + ## You can also specify labels on a service + #labels: + # mylabel: "3" + ## Links specify the web links for a service. The web links may be shown on ECLWatch. + #links: + #- name: linkname + # description: "some description" + # url: "http://abc.com/def?g=1" + ## CIDRS allowed to access this service. + #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32] + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 70M + #resources: + # cpu: "1" + # memory: "2G" +- name: eclservices + application: eclservices + auth: none + replicas: 1 + service: + servicePort: 8010 + visibility: cluster + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M + #resources: + # cpu: "250m" + # memory: "1G" +- name: eclqueries + application: eclqueries + auth: none + replicas: 1 + service: + visibility: local + servicePort: 8002 + #annotations: + # hpcc.eclwatch.io/enabled: "true" + # hpcc.eclwatch.io/description: "Roxie Test page" + # hpcc.eclwatch.io/port: "8002" + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M + #resources: + # cpu: "250m" + # memory: "1G" +- name: esdl-sandbox + application: esdl-sandbox + auth: none + replicas: 1 + service: + visibility: local + servicePort: 8899 + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M + #resources: + # cpu: "250m" + # memory: "1G" +- name: sql2ecl + application: sql2ecl + auth: none + replicas: 1 + service: + visibility: local + servicePort: 8510 + #domain: hpccsql.com + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M + #resources: + # 
cpu: "250m" + # memory: "1G" +- name: dfs + application: dfs + auth: none + replicas: 1 + service: + visibility: local + servicePort: 8520 + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M + #resources: + # cpu: "250m" + # memory: "1G" + + +#- name: ldapenvironment + #ldapenvironment is a stand alone ESP service used to help stand up new HPCC LDAP Environments +# application: ldapenvironment +# auth: ldap +# #specify the hpcc branch Root Name +# hpccRootName: ou=hpcc,dc=myldap,dc=com +# #specify all BaseDN with your LDAP Server's "dc=" settings +# sharedFilesBaseDN: ou=files,ou=hpcc,dc=myldap,dc=com +# sharedGroupsBaseDN: ou=groups,ou=hpcc,dc=myldap,dc=com +# sharedUsersBaseDN: ou=users,ou=hpcc,dc=myldap,dc=com +# sharedResourcesBaseDN: ou=smc,ou=espservices,ou=hpcc,dc=myldap,dc=com +# sharedWorkunitsBaseDN: ou=workunits,ou=hpcc,dc=myldap,dc=com +# adminGroupName: HPCCAdmins +# replicas: 1 +# service: +# visibility: local +# servicePort: 8511 + +roxie: +- name: roxie + disabled: false + prefix: roxie + services: + - name: roxie + servicePort: 9876 + listenQueue: 200 + numThreads: 30 + visibility: local +# trustClients: +# - commonName: rabc.example.com +# - commonName: rbcd.example.com + # Can override ingress rules for each service if desired - for example to add no additional ingress permissions you can use + # ingress: [] + +# Trust is similar to remoteClients, but unlike remoteClients, the client certificates are generated elsewhere. +# If trust is present then roxie will use mtls with trust controlled by certificates.issuer.remote. +# Using the trust section the remote issuer of each environment should point to the same certifate authority. +# Verification of identity is automatic if the CA matches, but only the clients listed here are actually allowed access +# trust: +# - commonName: abc.example.com +# - commonName: bcd.example.com + + ## replicas indicates the number of replicas per channel + replicas: 2 + numChannels: 2 + ## Set singleNode to true for a scalable cluster of "single-node" roxie servers, each implementing all channels locally + singleNode: false + ## Adjust traceLevel to taste (1 is default) + traceLevel: 1 + ## set totalMemoryLimit to indicate how much memory is preallocated for roxie row data + # totalMemoryLimit: "1Gi" # Default 1Gi, capped to 75% of resources.memory if defined. + ## Set mtuPayload to the maximum amount of data Roxie will put in a single packet. This should be just less than the system MTU. Default is 1400 + # mtuPayload: 3800 + + ## resources specifies the resources required by each agent pod + resources: + cpu: "8" + memory: "12G" + topoResources: + cpu: "789m" + memory: "543Mi" + serverResources: + cpu: "2" + memory: "8G" + channelResources: + cpu: "4" + memory: "6Gi" + + ## Set serverReplicas to indicate a separate replicaSet of roxie servers, with agent pods not acting as servers + serverReplicas: 0 + ## If serverReplicas is set, the resources required for the server pods can be configured separately from the agent (channel) pods + #serverResources: + # cpu: "1" + # memory: "4Gi" + #channelResources: + # cpu: "2" + # memory: "8Gi" + + # Roxie may take a while to start up if there are a lot of queries to load. 
You may need to + #override the default startup/readiness probing by setting these values + #minStartupTime: 30 # How long to wait before initiating startup probing + #maxStartupTime: 600 # Maximum time to wait for startup to complete before failing + topoServer: + replicas: 1 + #directAccessPlanes: [] #add direct access planes that roxie will read from without copying the data to its default data plane + #ldapUser: roxie_file_access #add system username for accessing files + #egress: engineEgress + +## The [manager/worker/eclAgent]Resources define the resource limits for each container. +## If numWorkersPerPod is >1 (must be a factor of numWorkers). +## NB: Each worker corresponds to a container, that will be resourced according to +## workerResources, meaning that if numWorkersPerPod>1, N * workerResources.cpu, +## N * workerResources.memory etc., will be required in total for the pod. +## +## By default the available Thor memory will be based on the resourced container memory. +## This can be overridden by setting [worker/manager]Memory.query and +## [worker/manager]Memory.thirdParty. +thor: +- name: thor + prefix: thor + numWorkers: 2 + maxJobs: 4 + maxGraphs: 2 + #maxGraphStartupTime: 600 + #numWorkersPerPod: 1 + managerResources: + cpu: "1" + memory: "2G" + workerResources: + cpu: "4" + memory: "4G" + #workerMemory: + # query: "3G" + # thirdParty: "500M" + eclAgentResources: + cpu: "1" + memory: "432M" + #egress: engineEgress + +eclscheduler: +- name: eclscheduler + resources: + cpu: "567m" + memory: "4321M" diff --git a/testing/regress/ecl/indexmerge.ecl b/testing/regress/ecl/indexmerge.ecl new file mode 100644 index 00000000000..e089e4c95a5 --- /dev/null +++ b/testing/regress/ecl/indexmerge.ecl @@ -0,0 +1,40 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +############################################################################## */ + +//class=file +//class=index +//version multiPart=false +//version multiPart=true + +import ^ as root; +multiPart := #IFDEFINED(root.multiPart, true); +useLocal := #IFDEFINED(root.useLocal, false); +useTranslation := #IFDEFINED(root.useTranslation, false); + +//--- end of version configuration --- + +import $.setup; +import setup.TS; +Files := setup.Files(multiPart, useLocal, useTranslation); + +//Read from an index, merging from a large number of cursor positions to generate a sorted output + +TS_searchIndex := Files.getSearchIndex(); +filtered := TS_searchIndex(KEYED(kind=1 AND word[1]='a')); +withOrder := STEPPED(filtered, doc, PRIORITY(3),HINT(maxseeklookahead(50))); +
OUTPUT(COUNT(NOFOLD(withOrder)) = 305475); diff --git a/testing/regress/ecl/key/indexmerge.xml b/testing/regress/ecl/key/indexmerge.xml new file mode 100644 index 00000000000..44a8709ad35 --- /dev/null +++ b/testing/regress/ecl/key/indexmerge.xml @@ -0,0 +1,3 @@ + + true +
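
As a quick local sanity check of the new test values file, the chart can be linted and rendered against it and the output inspected for the explicit resource blocks it sets (including the eclAgentResources and stubResources settings exercised above). This is only a sketch, not part of the change: it assumes a source checkout with the chart under helm/hpcc and Helm 3 on the PATH.

    # Basic chart/values validation with the test values
    helm lint helm/hpcc --values testing/helm/tests/resourced.yaml
    # Render the chart and look for the explicit resource requests in the generated manifests
    helm template hpcc helm/hpcc --values testing/helm/tests/resourced.yaml > rendered.yaml
    grep -c 'memory:' rendered.yaml

The indexmerge.ecl test and its key file follow the existing regression-suite conventions (the //class lines tag the test's classes and the //version lines define the multiPart variants), so they are intended to be picked up by the suite runner under testing/regress rather than run by hand.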