diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index c761ec0cb51..12ebea45e5a 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -107,6 +107,7 @@ jobs: cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic" - os: amazonlinux cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-amazonlinux-dynamic" + - os: rockylinux-8 - os: ubuntu-22.04 name: LN k8s ln: true @@ -128,6 +129,10 @@ jobs: name: LN cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic" ln: true + - os: rockylinux-8 + name: LN + cmake_options_extra: "" + ln: true - os: centos-7-rh-python38 name: LN Python 3.8 cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic -DCUSTOM_LABEL=_rh_python38" @@ -254,8 +259,8 @@ jobs: docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "\ cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/build -DVCPKG_FILES_DIR=/hpcc-dev -DMAKE_DOCS_ONLY=ON -DUSE_NATIVE_LIBRARIES=ON -DDOCS_AUTO=ON -DDOC_LANGS=ALL && \ cmake --build /hpcc-dev/build --parallel $(nproc) --target all" - docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/EN_US && zip ALL_HPCC_DOCS_EN_US-${{ needs.preamble.outputs.community_tag }}.zip *.pdf" - docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/PT_BR && zip ALL_HPCC_DOCS_PT_BR-${{ needs.preamble.outputs.community_tag }}.zip *.pdf" + docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/EN_US && zip ALL_HPCC_DOCS_EN_US-$(echo '${{ needs.preamble.outputs.community_tag }}' | sed 's/community_//' ).zip *.pdf" + docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cd /hpcc-dev/build/Release/docs/PT_BR && zip ALL_HPCC_DOCS_PT_BR-$(echo '${{ needs.preamble.outputs.community_tag }}' | sed 's/community_//' ).zip *.pdf" - name: Upload Artifacts for ECLIDE build if: ${{ !matrix.ln && !matrix.container && matrix.documentation }} diff --git a/cmake_modules/FindCBLAS.cmake b/cmake_modules/FindCBLAS.cmake deleted file mode 100644 index c799438cf79..00000000000 --- a/cmake_modules/FindCBLAS.cmake +++ /dev/null @@ -1,60 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2016 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-################################################################################ - -if(NOT CBLAS_FOUND) - if(WIN32) - set(cblas_lib "cblas") - else() - set(cblas_lib cblas tatlas satlas) - endif() - - find_path(CBLAS_INCLUDE_DIR NAMES cblas.h) - find_library(CBLAS_LIBRARIES NAMES ${cblas_lib} PATHS /usr/lib/atlas /usr/lib64/atlas) - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args(CBLAS - DEFAULT_MSG - CBLAS_LIBRARIES - CBLAS_INCLUDE_DIR) - - if (APPLE AND ${CMAKE_SYSTEM_VERSION} VERSION_LESS "18.2.0") # 18.2.0 is macOS Mojave (10.14) - set(LIB_TO_DO ${CBLAS_LIBRARIES}) - - set(CBLAS_DEPS_LIBS "") - foreach (lib libquadmath;libgfortran;libgcc_s) - message("otool -L ${LIB_TO_DO} | egrep ${lib}(.[0-9]{1,})*.dylib | sed \"s/^[[:space:]]//g\" | cut -d' ' -f1") - execute_process( - COMMAND bash "-c" "otool -L \"${LIB_TO_DO}\" | egrep \"${lib}(.[0-9]{1,})*.dylib\" | sed \"s/^[[:space:]]//g\" | cut -d' ' -f1" - OUTPUT_VARIABLE otoolOut - ERROR_VARIABLE otoolErr - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if (NOT "${otoolErr}" STREQUAL "") - message(FATAL_ERROR "Failed to check dependent lib ${lib} for ${LIB_TO_DO}") - endif() - - if ("${otoolOut}" STREQUAL "") - message(FATAL_ERROR "${LIB_TO_DO} dependencies changed. Run otool -L check manually and update file FindCBLAS.cmake") - endif() - list(APPEND CBLAS_DEPS_LIBS ${otoolOut}) - if ("${otoolOut}" MATCHES ".*libgfortran.*") - set(LIB_TO_DO "${otoolOut}") - endif() - endforeach() - endif() - - mark_as_advanced(CBLAS_INCLUDE_DIR CBLAS_LIBRARIES) -endif() diff --git a/cmake_modules/FindCOUCHBASE.cmake b/cmake_modules/FindCOUCHBASE.cmake deleted file mode 100644 index f61d15c51c4..00000000000 --- a/cmake_modules/FindCOUCHBASE.cmake +++ /dev/null @@ -1,52 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2016 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-################################################################################ - - -# - Try to find the Couchbase c library -# Once done this will define -# -# LIBCOUCHBASE_FOUND, if false, do not try to link with libev -# LIBCOUCHBASE_LIBRARIES, Library path and libs -# LIBCOUCHBASE_INCLUDE_DIR, where to find the libev headers - - -IF (NOT LIBCOUCHBASE_FOUND) - - #couchbase.h - #libcouchbase - - FIND_PATH ( - LIBCOUCHBASE_INCLUDE_DIR - NAMES couchbase.h - PATH_SUFFIXES libcouchbase - ) - - FIND_LIBRARY ( - LIBCOUCHBASE_LIBRARIES - NAMES couchbase libcouchbase - PATHS /usr/lib64 - PATH_SUFFIXES libcouchbase - ) - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args( - couchbase DEFAULT_MSG - LIBCOUCHBASE_LIBRARIES - LIBCOUCHBASE_INCLUDE_DIR - ) - - MARK_AS_ADVANCED(LIBCOUCHBASE_INCLUDE_DIR LIBCOUCHBASE_LIBRARIES) -ENDIF() diff --git a/cmake_modules/FindCPPUNIT.cmake b/cmake_modules/FindCPPUNIT.cmake deleted file mode 100644 index 3d2d5301a74..00000000000 --- a/cmake_modules/FindCPPUNIT.cmake +++ /dev/null @@ -1,74 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -################################################################################ - - -# - Try to find the CppUnit unit testing library -# Once done this will define -# -# CPPUNIT_FOUND - system has the CppUnit library -# CPPUNIT_INCLUDE_DIR - the CppUnit include directory -# CPPUNIT_LIBRARIES - The libraries needed to use CppUnit - -IF (NOT CPPUNIT_FOUND) - IF (WIN32) - SET (cppunit_dll "cppunit_dll") - ELSE() - SET (cppunit_dll "cppunit") - ENDIF() - - IF (NOT "${EXTERNALS_DIRECTORY}" STREQUAL "") - - IF (UNIX) - IF (${ARCH64BIT} EQUAL 1) - SET (osdir "linux64_gcc4.1.1") - ELSE() - SET (osdir "linux32_gcc4.1.1") - ENDIF() - ELSEIF(WIN32) - IF (${ARCH64BIT} EQUAL 1) - SET (osdir "win64") - ELSE() - SET (osdir "win32") - ENDIF() - ELSE() - SET (osdir "unknown") - ENDIF() - - IF (NOT ("${osdir}" STREQUAL "unknown")) - FIND_PATH (CPPUNIT_INCLUDE_DIR NAMES cppunit/TestFixture.h PATHS "${EXTERNALS_DIRECTORY}/cppunit/include" NO_DEFAULT_PATH) - FIND_LIBRARY (CPPUNIT_LIBRARIES NAMES ${cppunit_dll} PATHS "${EXTERNALS_DIRECTORY}/cppunit/lib/${osdir}" NO_DEFAULT_PATH) - ENDIF() - - ENDIF() - - # if we didn't find in externals, look in system include path - if (USE_NATIVE_LIBRARIES) - FIND_PATH (CPPUNIT_INCLUDE_DIR NAMES cppunit/TestFixture.h) - FIND_LIBRARY (CPPUNIT_LIBRARIES NAMES ${cppunit_dll}) - endif() - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args(CppUnit DEFAULT_MSG - CPPUNIT_LIBRARIES - CPPUNIT_INCLUDE_DIR - ) - - IF (CPPUNIT_FOUND AND WIN32) - STRING(REPLACE "cppunit_dll" "cppunitd_dll" CPPUNIT_DEBUG_LIBRARIES "${CPPUNIT_LIBRARIES}") - set (CPPUNIT_LIBRARIES optimized ${CPPUNIT_LIBRARIES} debug ${CPPUNIT_DEBUG_LIBRARIES}) - ENDIF() - MARK_AS_ADVANCED(CPPUNIT_INCLUDE_DIR CPPUNIT_LIBRARIES) -ENDIF() diff --git 
a/cmake_modules/FindHIREDIS.cmake b/cmake_modules/FindHIREDIS.cmake deleted file mode 100644 index 114f3e0ea75..00000000000 --- a/cmake_modules/FindHIREDIS.cmake +++ /dev/null @@ -1,63 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2015 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -################################################################################ - -# - Try to find the hiredis library -# Once done this will define -# -# HIREDIS_FOUND - system has the hiredis library -# HIREDIS_INCLUDE_DIR - the hiredis include directory(s) -# HIREDIS_LIBRARY - The library needed to use hiredis - -IF (NOT HIREDIS_FOUND) - IF (WIN32) - SET (libhiredis "libhiredis") - ELSE() - SET (libhiredis "hiredis") - ENDIF() - - FIND_PATH(HIREDIS_INCLUDE_DIR hiredis/hiredis.h PATHS /usr/include /usr/share/include /usr/local/include PATH_SUFFIXES hiredis) - FIND_LIBRARY(HIREDIS_LIBRARY NAMES ${libhiredis} PATHS /usr/lib /usr/share /usr/lib64 /usr/local/lib /usr/local/lib64) - - IF(EXISTS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h") - #MAJOR - FILE (STRINGS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h" major REGEX "#define HIREDIS_MAJOR") - STRING(REGEX REPLACE "#define HIREDIS_MAJOR " "" major "${major}") - STRING(REGEX REPLACE "\"" "" major "${major}") - #MINOR - FILE (STRINGS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h" minor REGEX "#define HIREDIS_MINOR") - STRING(REGEX REPLACE "#define HIREDIS_MINOR " "" minor "${minor}") - STRING(REGEX REPLACE "\"" "" minor "${minor}") - #PATCH - FILE (STRINGS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h" patch REGEX "#define HIREDIS_PATCH") - STRING(REGEX REPLACE "#define HIREDIS_PATCH " "" patch "${patch}") - STRING(REGEX REPLACE "\"" "" patch "${patch}") - - SET(HIREDIS_VERSION_STRING "${major}.${minor}.${patch}") - IF ("${HIREDIS_VERSION_STRING}" VERSION_LESS "${HIREDIS_FIND_VERSION}") - MESSAGE("WARNING - connection caching not avaliable with libhiredis version '${HIREDIS_VERSION_STRING}' as incompatible with min version>=${HIREDIS_FIND_VERSION}") - ENDIF() - ENDIF() - - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args(hiredis DEFAULT_MSG - HIREDIS_LIBRARY - HIREDIS_INCLUDE_DIR - ) - - MARK_AS_ADVANCED(HIREDIS_INCLUDE_DIR HIREDIS_LIBRARY) -ENDIF() - diff --git a/cmake_modules/FindMYSQL.cmake b/cmake_modules/FindMYSQL.cmake deleted file mode 100644 index db946772b7a..00000000000 --- a/cmake_modules/FindMYSQL.cmake +++ /dev/null @@ -1,38 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -################################################################################ - - -# - Try to find the MYSQL library -# Once done this will define -# -# MYSQL_FOUND - system has the MYSQL library -# MYSQL_INCLUDE_DIR - the MYSQL include directory -# MYSQL_LIBRARIES - The libraries needed to use MYSQL - -IF (NOT MYSQL_FOUND) - FIND_PATH (MYSQL_INCLUDE_DIR NAMES mysql.h PATH_SUFFIXES mysql) - - SET (MYSQL_NAMES ${MYSQL_NAMES} mysqlclient libmysql) - FIND_LIBRARY (MYSQL_LIBRARIES NAMES ${MYSQL_NAMES}) - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args(MYSQL DEFAULT_MSG - MYSQL_LIBRARIES - MYSQL_INCLUDE_DIR - ) - - MARK_AS_ADVANCED(MYSQL_INCLUDE_DIR MYSQL_LIBRARIES) -ENDIF() diff --git a/cmake_modules/FindOPENLDAP.cmake b/cmake_modules/FindOPENLDAP.cmake deleted file mode 100644 index c67ee886a32..00000000000 --- a/cmake_modules/FindOPENLDAP.cmake +++ /dev/null @@ -1,72 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-################################################################################ - - -# - Try to find the OpenLDAP libraries -# Once done this will define -# -# OPENLDAP_FOUND - system has the OpenLDAP library -# OPENLDAP_INCLUDE_DIR - the OpenLDAP include directory -# OPENLDAP_LIBRARIES - The libraries needed to use OpenLDAP -# -# Note that we use winldap for windows builds at present -# -IF (NOT OPENLDAP_FOUND) - IF (WIN32) - SET (ldap_dll "wldap32") - SET (lber_dll "netapi32") - SET (ldap_inc "Winldap.h") - ELSE() - SET (ldap_dll "ldap") - SET (lber_dll "lber") - SET (ldap_inc "ldap.h") - ENDIF() - - IF (NOT "${EXTERNALS_DIRECTORY}" STREQUAL "") - IF (UNIX) - IF (${ARCH64BIT} EQUAL 1) - SET (osincdir "openldap/linux64_gcc4.1.1/include") - SET (oslibdir "openldap/linux64_gcc4.1.1") - ELSE() - SET (osincdir "openldap/linux32_gcc4.1.1/include") - SET (oslibdir "openldap/linux32_gcc4.1.1") - ENDIF() - ELSEIF(WIN32) - SET (osincdir "winldap/include") - IF (${ARCH64BIT} EQUAL 1) - SET (oslibdir "winldap/lib64") - ELSE() - SET (oslibdir "winldap/lib32") - ENDIF() - ELSE() - SET (osincdir "unknown") - ENDIF() - IF (NOT ("${osincdir}" STREQUAL "unknown")) - FIND_PATH (OPENLDAP_INCLUDE_DIR NAMES ${ldap_inc} PATHS "${EXTERNALS_DIRECTORY}/${osincdir}" NO_DEFAULT_PATH) - FIND_LIBRARY (OPENLDAP_LIBRARIES NAMES ${ldap_dll} PATHS "${EXTERNALS_DIRECTORY}/${oslibdir}" NO_DEFAULT_PATH) - ENDIF() - ENDIF() - - # if we didn't find in externals, look in system include path - FIND_PATH (OPENLDAP_INCLUDE_DIR NAMES ${ldap_inc}) - FIND_LIBRARY (LDAP_LIBRARY ${ldap_dll}) - FIND_LIBRARY (LBER_LIBRARY ${lber_dll}) - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args(OPENLDAP DEFAULT_MSG OPENLDAP_INCLUDE_DIR LDAP_LIBRARY LBER_LIBRARY) - set(OPENLDAP_LIBRARIES ${LDAP_LIBRARY} ${LBER_LIBRARY}) - mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES LDAP_LIBRARY LBER_LIBRARY) -ENDIF() diff --git a/cmake_modules/FindSQLITE3.cmake b/cmake_modules/FindSQLITE3.cmake deleted file mode 100644 index eb1bad4da65..00000000000 --- a/cmake_modules/FindSQLITE3.cmake +++ /dev/null @@ -1,41 +0,0 @@ -################################################################################ -# HPCC SYSTEMS software Copyright (C) 2014 HPCC Systems®. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-################################################################################ - -# - Try to find the Sqlite3 headers and libraries -# Once done this will define -# -# SQLITE3_FOUND - system has the SQLITE3 headers and library -# SQLITE3_INCLUDE_DIR - the SQLITE3 include directory -# SQLITE3_LIBRARIES - The libraries needed to use SQLITE3 - -IF (NOT SQLITE3_FOUND) - IF (WIN32) - SET (sqlite3_lib "libsqlite3") - ELSE() - SET (sqlite3_lib "sqlite3") - ENDIF() - - FIND_PATH (SQLITE3_INCLUDE_DIR NAMES sqlite3.h) - FIND_LIBRARY (SQLITE3_LIBRARIES NAMES ${sqlite3_lib}) - - include(FindPackageHandleStandardArgs) - find_package_handle_standard_args(SQLITE3 DEFAULT_MSG - SQLITE3_LIBRARIES - SQLITE3_INCLUDE_DIR - ) - - MARK_AS_ADVANCED(SQLITE3_INCLUDE_DIR SQLITE3_LIBRARIES) -ENDIF() diff --git a/cmake_modules/commonSetup.cmake b/cmake_modules/commonSetup.cmake index b62d4cc6905..82a950b3d7f 100644 --- a/cmake_modules/commonSetup.cmake +++ b/cmake_modules/commonSetup.cmake @@ -135,6 +135,7 @@ IF ("${COMMONSETUP_DONE}" STREQUAL "") if ( CLIENTTOOLS_ONLY ) set(PLATFORM OFF) set(DEVEL OFF) + set(USE_PARQUET OFF) endif() # The following options need to be set after the project() command @@ -721,27 +722,23 @@ IF ("${COMMONSETUP_DONE}" STREQUAL "") IF ( NOT MAKE_DOCS_ONLY ) IF (USE_OPENLDAP) - find_package(OPENLDAP) - IF (OPENLDAP_FOUND) - add_definitions (-D_USE_OPENLDAP) - ELSE() - message(FATAL_ERROR "OPENLDAP requested but package not found") - ENDIF() + find_package(PkgConfig) + pkg_check_modules(OPENLDAP REQUIRED IMPORTED_TARGET ldap) + add_definitions (-D_USE_OPENLDAP) ELSE() add_definitions (-D_NO_LDAP) ENDIF(USE_OPENLDAP) IF (USE_CPPUNIT) - find_package(CPPUNIT) - IF (CPPUNIT_FOUND) + find_package(CppUnit CONFIG REQUIRED) + IF (CppUnit_FOUND) add_definitions (-D_USE_CPPUNIT) - include_directories(${CPPUNIT_INCLUDE_DIR}) ELSE() message(FATAL_ERROR "CPPUNIT requested but package not found") ENDIF() ELSE() SET(CPPUNIT_INCLUDE_DIR "") - SET(CPPUNIT_LIBRARIES "") + SET(CppUnit_LIBRARIES "") ENDIF(USE_CPPUNIT) IF (CONTAINERIZED) diff --git a/cmake_modules/distrocheck.sh b/cmake_modules/distrocheck.sh index 8ab1807f561..e9cbad62edf 100755 --- a/cmake_modules/distrocheck.sh +++ b/cmake_modules/distrocheck.sh @@ -70,6 +70,14 @@ if [ $VALUE -ge 1 ]; then exit 1; fi +# Distribution is Rocky +VALUE=`grep -c -i 'rocky' temp.txt` +if [ $VALUE -ge 1 ]; then + echo -n "RPM" + rm temp.txt + exit 1; +fi + ############### DEB DISTROS ################## # Distribution is Ubuntu diff --git a/cmake_modules/getpackagerevisionarch.sh b/cmake_modules/getpackagerevisionarch.sh index 63e378246b9..2dad702e18f 100755 --- a/cmake_modules/getpackagerevisionarch.sh +++ b/cmake_modules/getpackagerevisionarch.sh @@ -85,7 +85,7 @@ elif [ -e /etc/redhat-release ]; then OS_GROUP=`/bin/rpm -q --qf "%{NAME}" --whatprovides /etc/redhat-release | sed 's/-release.*//' | tr '[A-Z]' '[a-z]'` REDHAT_VERSION=`/bin/rpm -q --qf "%{VERSION}" --whatprovides /etc/redhat-release | cut -f1 -d"."` case "$OS_GROUP" in - "centos"* | "fedora" | "rocky") + "centos"* | "fedora") if [ ${NOARCH} -eq 0 ]; then OUTPUT="el${REDHAT_VERSION}.${ARCH}" else @@ -100,6 +100,13 @@ elif [ -e /etc/redhat-release ]; then OUTPUT="el${REDHAT_VERSION}" fi ;; + "rocky") + if [ ${NOARCH} -eq 0 ]; then + OUTPUT="rocky${REDHAT_VERSION}.${ARCH}" + else + OUTPUT="rocky${REDHAT_VERSION}" + fi + ;; esac fi elif [ -e /etc/SuSE-release ]; then diff --git a/common/fileview2/fvtransform.cpp b/common/fileview2/fvtransform.cpp index ead6ba5f284..57c0d269bb0 100644 --- 
a/common/fileview2/fvtransform.cpp +++ b/common/fileview2/fvtransform.cpp @@ -295,7 +295,7 @@ void ViewTransformerRegistry::addPlugins(const char * name) loadedPlugins->loadFromList(name); Owned errorReporter = createThrowingErrorReceiver(); - EclRepositoryManager collection; + EclRepositoryManager collection(nullptr); collection.addSharedSourceFileEclRepository(errorReporter, name, ESFallowplugins|ESFnodependencies, 0, false); dataServer.setown(collection.createPackage(nullptr)); diff --git a/common/remote/CMakeLists.txt b/common/remote/CMakeLists.txt index 9ef20874dbb..2758f5b02ca 100644 --- a/common/remote/CMakeLists.txt +++ b/common/remote/CMakeLists.txt @@ -71,7 +71,7 @@ target_link_libraries ( remote hql mp ${URIPARSER_LIBRARIES} - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) IF (USE_OPENSSL) diff --git a/common/thorhelper/thorread.cpp b/common/thorhelper/thorread.cpp index d1dd5169a69..591d5d553cc 100644 --- a/common/thorhelper/thorread.cpp +++ b/common/thorhelper/thorread.cpp @@ -1767,7 +1767,7 @@ bool ParquetDiskRowReader::matches(const char * _format, bool _streamRemote, IDi bool ParquetDiskRowReader::setInputFile(const char * localFilename, const char * logicalFilename, unsigned partNumber, offset_t baseOffset, const IPropertyTree * inputOptions, const FieldFilterArray & expectedFilter) { DBGLOG(0, "Opening File: %s", localFilename); - parquetFileReader = new parquetembed::ParquetReader("read", localFilename, 50000, nullptr, parquetActivityCtx, &mapping->queryExpectedMeta()->queryRecordAccessor(true)); + parquetFileReader = new parquetembed::ParquetReader("read", localFilename, 50000, nullptr, parquetActivityCtx, mapping->queryExpectedMeta()->queryTypeInfo()); auto st = parquetFileReader->processReadFile(); if (!st.ok()) throw MakeStringException(0, "%s: %s.", st.CodeAsString().c_str(), st.message().c_str()); @@ -2037,7 +2037,7 @@ IDiskRowReader * doCreateLocalDiskReader(const char * format, IDiskReadMapping * { auto foundReader = genericFileTypeMap.find(format); - if (foundReader != genericFileTypeMap.end()) + if (foundReader != genericFileTypeMap.end() && foundReader->second) return foundReader->second(_mapping); UNIMPLEMENTED; @@ -2087,6 +2087,8 @@ MODULE_INIT(INIT_PRIORITY_STANDARD) genericFileTypeMap.emplace("xml", [](IDiskReadMapping * _mapping) { return new XmlDiskRowReader(_mapping); }); #ifdef _USE_PARQUET genericFileTypeMap.emplace(PARQUET_FILE_TYPE_NAME, [](IDiskReadMapping * _mapping) { return new ParquetDiskRowReader(_mapping); }); +#else + genericFileTypeMap.emplace(PARQUET_FILE_TYPE_NAME, [](IDiskReadMapping * _mapping) { return nullptr; }); #endif // Stuff the file type names that were just instantiated into a list; diff --git a/common/thorhelper/thorread.hpp b/common/thorhelper/thorread.hpp index ab6101e2a5b..8179354c70b 100644 --- a/common/thorhelper/thorread.hpp +++ b/common/thorhelper/thorread.hpp @@ -27,6 +27,8 @@ #include "jrowstream.hpp" #include "rtlkey.hpp" +#define PARQUET_FILE_TYPE_NAME "parquet" + //--- Classes and interfaces for reading instances of files //The following is constant for the life of a disk read activity interface IDiskReadOutputMapping : public IInterface diff --git a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp index 3d3f16ddb04..4e84d075bf2 100644 --- a/common/workunit/workunit.cpp +++ b/common/workunit/workunit.cpp @@ -13911,7 +13911,7 @@ extern WORKUNIT_API void associateLocalFile(IWUQuery * query, WUFileType type, c OwnedIFile target = createIFile(destPathName); if (!target->exists()) { - source->copyTo(target, 0, 
NULL, true); + source->copyTo(target, 0, NULL, false); } query->addAssociatedFile(type, destPathName, "localhost", description, crc, minActivity, maxActivity); // Should we delete the local files? No - they may not be finished with diff --git a/dali/base/dadfs.cpp b/dali/base/dadfs.cpp index 2b6fa5281f3..f64f40c9ca2 100644 --- a/dali/base/dadfs.cpp +++ b/dali/base/dadfs.cpp @@ -3566,23 +3566,33 @@ class CDistributedFile: public CDistributedFileBase offset_t maxPartSz = 0, minPartSz = (offset_t)-1, totalPartSz = 0; - maxSkewPart = 0; - minSkewPart = 0; - for (unsigned p=0; p maxPartSz) - { - maxPartSz = size; - maxSkewPart = p; - } - if (size < minPartSz) + maxSkewPart = 0; + minSkewPart = 0; + for (unsigned p=0; p maxPartSz) + { + maxPartSz = size; + maxSkewPart = p; + } + if (size < minPartSz) + { + minPartSz = size; + minSkewPart = p; + } + totalPartSz += size; } - totalPartSz += size; + } + catch (IException *e) + { + // guard against getFileSize throwing an exception (if parts missing) + EXCLOG(e); + e->Release(); + return false; } offset_t avgPartSz = totalPartSz / np; if (0 == avgPartSz) diff --git a/dali/daliadmin/daadmin.cpp b/dali/daliadmin/daadmin.cpp index 26954d26776..f3f25fad57b 100644 --- a/dali/daliadmin/daadmin.cpp +++ b/dali/daliadmin/daadmin.cpp @@ -2264,6 +2264,101 @@ void getxref(const char *dst) conn->close(); } +void checkFileSizeOne(IUserDescriptor *user, const char *lfn, bool fix) +{ + try + { + CDfsLogicalFileName dlfn; + dlfn.set(lfn); + Owned dFile = queryDistributedFileDirectory().lookup(dlfn, user, AccessMode::tbdRead, false, false, nullptr, defaultPrivilegedUser, 30000); // 30 sec timeout + if (dFile) + { + if (dFile->querySuperFile()) + WARNLOG("Skipping: file '%s' is a superfile", lfn); + else + { + bool fileLocked = false; + COnScopeExit ensureFileUnlock([&]() { if (fileLocked) dFile->unlockProperties(); }); + unsigned numParts = dFile->numParts(); + for (unsigned p=0; pqueryPart(p); + IPropertyTree &attrs = part.queryAttributes(); + if (!attrs.hasProp("@size")) + { + if (fix) + { + offset_t partSize; + try + { + partSize = part.getFileSize(true, true); + if (!fileLocked) + { + // we lock the file once, so that the individual part lock/unlocks are effectively a NOP + dFile->lockProperties(30000); + fileLocked = true; + PROGLOG("File '%s' has missing @size attributes", lfn); + } + part.lockProperties(30000); + } + catch (IException *e) + { + EXCLOG(e); + e->Release(); + continue; + } + COnScopeExit ensurePartUnlock([&]() { part.unlockProperties(); }); + PROGLOG("Part %u: Setting @size to %" I64F "u", p+1, partSize); + attrs.setPropInt64("@size", partSize); + } + else + PROGLOG("File '%s' missing @size on part %u", lfn, p+1); + } + } + } + } + else + WARNLOG("File '%s' not found", lfn); + } + catch (IException *e) + { + EXCLOG(e); + e->Release(); + } +} + +void checkFileSize(IUserDescriptor *user, const char *lfnPattern, bool fix) +{ + if (containsWildcard(lfnPattern)) + { + unsigned count = 0; + Owned iter = queryDistributedFileDirectory().getDFAttributesIterator(lfnPattern, user, true, false); // no supers + CCycleTimer timer; + if (iter->first()) + { + while (true) + { + IPropertyTree &attr = iter->query(); + const char *lfn = attr.queryProp("@name"); + checkFileSizeOne(user, lfn, fix); + ++count; + + if (!iter->next()) + break; + else if (timer.elapsedCycles() >= queryOneSecCycles()*10) // log every 10 secs + { + PROGLOG("Processed %u files", count); + timer.reset(); + } + } + } + PROGLOG("Total files processed %u files", count); + } + else + 
checkFileSizeOne(user, lfnPattern, fix);
+}
+
+
 struct CTreeItem : public CInterface
 {
     String *tail;
diff --git a/dali/daliadmin/daadmin.hpp b/dali/daliadmin/daadmin.hpp
index 7a86e86ab30..687d0d882b2 100644
--- a/dali/daliadmin/daadmin.hpp
+++ b/dali/daliadmin/daadmin.hpp
@@ -73,6 +73,7 @@ extern DALIADMIN_API void listmatches(const char *path, const char *match, const
 extern DALIADMIN_API void dfsreplication(const char *clusterMask, const char *lfnMask, unsigned redundancy, bool dryRun);
 extern DALIADMIN_API void migrateFiles(const char *srcGroup, const char *tgtGroup, const char *filemask, const char *_options);
 extern DALIADMIN_API void getxref(const char *dst);
+extern DALIADMIN_API void checkFileSize(IUserDescriptor *user, const char *lfnPattern, bool fix);
 extern DALIADMIN_API void listworkunits(const char *test, const char *min, const char *max);
 extern DALIADMIN_API void workunittimings(const char *wuid);
diff --git a/dali/daliadmin/daliadmin.cpp b/dali/daliadmin/daliadmin.cpp
index 26c002fbb0f..0491b7d42f1 100644
--- a/dali/daliadmin/daliadmin.cpp
+++ b/dali/daliadmin/daliadmin.cpp
@@ -55,34 +55,35 @@ void usage(const char *exe)
     printf(" count <xpath> -- counts xpath matches\n");
     printf("\n");
     printf("Logical File meta information commands:\n");
-    printf(" dfsfile <logicalname> -- get meta information for file\n");
-    printf(" dfsmeta <logicalname> <storage> -- get new meta information for file\n");
-    printf(" setdfspartattr <logicalname> <part> <attribute> [<value>] -- set attribute of a file part to value, or delete the attribute if not provided\n");
-    printf(" dfspart <logicalname> <part> -- get meta information for part num\n");
+    printf(" checksuperfile <superfilename> [fix=true|false] -- check superfile links consistent and optionally fix\n");
+    printf(" checksubfile <subfilename> -- check subfile links to parent consistent\n");
+    printf(" checkfilesize <logicalfilemask> [fix=true|false] -- check file size attributes and optionally fix\n");
+    printf(" cleanscopes -- remove empty scopes\n");
+    printf(" clusternodes <clustergroupname> [filename] -- get IPs for cluster group. Written to optional filename if provided\n");
     printf(" dfscheck <logicalname> -- verify dfs file information is valid\n");
+    printf(" dfscompratio <logicalname> -- returns compression ratio of file\n");
     printf(" dfscsv <logicalnamemask> -- get csv info. for files matching mask\n");
+    printf(" dfsexists <logicalname> -- sets return value to 0 if file exists\n");
+    printf(" dfsfile <logicalname> -- get meta information for file\n");
     printf(" dfsgroup <logicalgroupname> [filename] -- get IPs for logical group (aka cluster). Written to optional filename if provided\n");
-    printf(" clusternodes <clustergroupname> [filename] -- get IPs for cluster group. Written to optional filename if provided\n");
     printf(" dfsls [<logicalname>] [options]-- get list of files within a scope (options=lrs)\n");
     printf(" dfsmap <logicalname> -- get part files (primary and replicates)\n");
-    printf(" dfsexists <logicalname> -- sets return value to 0 if file exists\n");
     printf(" dfsparents <logicalname> -- list superfiles containing file\n");
+    printf(" dfspart <logicalname> <part> -- get meta information for part num\n");
+    printf(" dfsperm <logicalname> -- returns LDAP permission for file\n");
+    printf(" dfsreplication <clustermask> <logicalnamemask> <redundancy-count> [dryrun] -- set redundancy for files matching mask, on specified clusters only\n");
+    printf(" dfsscopes <mask> -- lists logical scopes (mask = * for all)\n");
     printf(" dfsunlink <logicalname> -- unlinks file from all super parents\n");
     printf(" dfsverify <logicalname> -- verifies parts exist, returns 0 if ok\n");
-    printf(" setprotect <logicalname> <id> -- overwrite protects logical file\n");
-    printf(" unprotect <logicalname> <id> -- unprotect (if id=* then clear all)\n");
-    printf(" listprotect <logicalnamemask> <id-mask> -- list protected files\n");
-    printf(" checksuperfile <superfilename> [fix=true|false] -- check superfile links consistent and optionally fix\n");
-    printf(" checksubfile <subfilename> -- check subfile links to parent consistent\n");
+    printf(" holdlock <logicalfile> <read|write> -- hold a lock to the logical-file until a key is pressed\n");
     printf(" listexpires <logicalnamemask> -- lists logical files with expiry value\n");
+    printf(" listprotect <logicalnamemask> <id-mask> -- list protected files\n");
     printf(" listrelationships <primary> <secondary>\n");
-    printf(" dfsperm <logicalname> -- returns LDAP permission for file\n");
-    printf(" dfscompratio <logicalname> -- returns compression ratio of file\n");
-    printf(" dfsscopes <mask> -- lists logical scopes (mask = * for all)\n");
-    printf(" cleanscopes -- remove empty scopes\n");
     printf(" normalizefilenames [<logicalnamemask>] -- normalize existing logical filenames that match, e.g. .::.::scope::.::name -> scope::name\n");
-    printf(" dfsreplication <clustermask> <logicalnamemask> <redundancy-count> [dryrun] -- set redundancy for files matching mask, on specified clusters only\n");
-    printf(" holdlock <logicalfile> <read|write> -- hold a lock to the logical-file until a key is pressed");
+    printf(" setdfspartattr <logicalname> <part> <attribute> [<value>] -- set attribute of a file part to value, or delete the attribute if not provided\n");
+    printf(" setprotect <logicalname> <id> -- overwrite protects logical file\n");
+    printf(" unprotect <logicalname> <id> -- unprotect (if id=* then clear all)\n");
     printf("\n");
     printf("Workunit commands:\n");
     printf(" listworkunits [<prop>=<val> [<lower> [<upper>]]] -- list workunits that match prop=val in workunit name range lower to upper\n");
@@ -90,14 +91,17 @@ void usage(const char *exe)
     printf(" workunittimings <wuid>\n");
     printf("\n");
     printf("Other dali server and misc commands:\n");
-    printf(" serverlist <mask> -- list server IPs (mask optional)\n");
-    printf(" clusterlist <mask> -- list clusters (mask optional)\n");
     printf(" auditlog <fromdate> <todate> <match>\n");
+    printf(" cleanglobalwuid [dryrun] [noreconstruct]\n");
+    printf(" clusterlist <mask> -- list clusters (mask optional)\n");
     printf(" coalesce -- force transaction coalesce\n");
-    printf(" mpping <server-ip> -- time MP connect\n");
+    printf(" dalilocks [ <ip-pattern> ] [ files ] -- get all locked files/xpaths\n");
     printf(" daliping [ <num> ] -- time dali server connect\n");
     printf(" getxref <destxmlfile> -- get all XREF information\n");
-    printf(" dalilocks [ <ip-pattern> ] [ files ] -- get all locked files/xpaths\n");
+    printf(" migratefiles <src-group> <target-group> [<filemask>] [dryrun] [createmaps] [listonly] [verbose]\n");
+    printf(" mpping <server-ip> -- time MP connect\n");
+    printf(" serverlist <mask> -- list server IPs (mask optional)\n");
+    printf(" translatetoxpath logicalfile [File|SuperFile|Scope]\n");
     printf(" unlock <[path|file]> -- unlocks either matching xpath(s) or matching logical file(s), can contain wildcards\n");
     printf(" validatestore [fix=<true|false>]\n"
            "               [verbose=<true|false>]\n"
@@ -106,9 +110,6 @@
void usage(const char *exe)
     printf(" wuidcompress <wildcard> <type> -- scan workunits that match <wildcard> and compress resources of <type>\n");
     printf(" wuiddecompress <wildcard> <type> -- scan workunits that match <wildcard> and decompress resources of <type>\n");
     printf(" xmlsize <filename> [<percentage>] -- analyse size usage in xml file, display individual items above 'percentage' \n");
-    printf(" migratefiles <src-group> <target-group> [<filemask>] [dryrun] [createmaps] [listonly] [verbose]\n");
-    printf(" translatetoxpath logicalfile [File|SuperFile|Scope]\n");
-    printf(" cleanglobalwuid [dryrun] [noreconstruct]\n");
     printf("\n");
     printf("Common options\n");
     printf(" server=<dali-server-ip> -- server ip\n");
@@ -148,6 +149,8 @@ int main(int argc, const char* argv[])
     StringBuffer tmps;
     for (int i=1;igetPropBool("fix");
+                checkFileSize(userDesc, params.item(1), fix);
+            }
             else if (strieq(cmd,"dalilocks")) {
                 CHECKPARAMS(0,2);
                 bool filesonly = false;
diff --git a/dali/datest/datest.cmake b/dali/datest/datest.cmake
index 99e3a5e1ab4..60ae18153b6 100644
--- a/dali/datest/datest.cmake
+++ b/dali/datest/datest.cmake
@@ -51,7 +51,7 @@ target_link_libraries ( datest
     eclrtl
     wsdfuaccess
     dalibase
-    ${CPPUNIT_LIBRARIES}
+    ${CppUnit_LIBRARIES}
 )
diff --git a/dali/daunittest/CMakeLists.txt b/dali/daunittest/CMakeLists.txt
index f540961be1c..d8d6fe5c718 100644
--- a/dali/daunittest/CMakeLists.txt
+++ b/dali/daunittest/CMakeLists.txt
@@ -49,7 +49,7 @@ target_link_libraries ( daunittest
     hrpc
     remote
     dalibase
-    ${CPPUNIT_LIBRARIES}
+    ${CppUnit_LIBRARIES}
 )
diff --git a/devdoc/UserBuildAssets.md b/devdoc/UserBuildAssets.md
new file mode 100644
index 00000000000..e38375ac754
--- /dev/null
+++ b/devdoc/UserBuildAssets.md
@@ -0,0 +1,146 @@
+# Build Assets for the individual developer
+
+## Build Assets
+The modern tool used for generating all our official assets is the Github Actions build-assets workflow on the hpcc-systems/HPCC-Platform repository, located [here](https://github.com/hpcc-systems/HPCC-Platform/actions/workflows/build-assets.yml). Developers and contributors can utilize this same workflow on their own forked repository, allowing them to quickly create assets for testing changes and to catch errors before the peer review process.
+
+Build assets will generate every available project under the HPCC-Platform namespace. There is currently no option to control which packages in the build matrix get generated, but most packages are built in parallel and __released__ as each individual matrix job completes, so there is no waiting on packages you don't need. The exceptions are packages that require other builds to complete first, such as the __ECLIDE__.
+
+Upon completion of each step and matrix job in the workflow, the assets are published to the repository's tags tab. An example for the `hpcc-systems` user repository is [hpcc-systems/HPCC-Platform/tags](https://github.com/hpcc-systems/HPCC-Platform/tags).
+
+![Tag tab screenshot](/devdoc/resources/images/repository-tag-tab.png)
+
+## Dependent variables
+The build-assets workflow requires that several __repository secrets__ be configured on the developer's forked repository in order to run properly. You can access these secrets and variables by going to the `Settings` tab of your forked repository, then clicking the `Secrets and Variables - Actions` drop-down under `Security` on the left-hand side of the settings screen.
+
+![Actions secrets and variables](/devdoc/resources/images/actions-secrets-and-variables.png)
+
+Create a secret by clicking the green `New Repository Secret` button.
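+
+Alternatively, as a minimal sketch assuming the GitHub CLI (`gh`) is installed and authenticated against your fork (`<your-github-user>` is a hypothetical placeholder), secrets can also be set from a terminal:
+```
+# Hypothetical values shown; repeat for each secret in the list below
+gh secret set LNB_ACTOR --repo <your-github-user>/HPCC-Platform --body "<your-github-user>"
+# Larger values, such as the base64-encoded certificate, can be read from a file via stdin
+gh secret set SIGNING_CERTIFICATE --repo <your-github-user>/HPCC-Platform < hpcc_sign_cert.base64
+```
+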
+The following secrets are needed:
+
+* LNB_ACTOR - Your Github username
+* LNB_TOKEN - Classic Github token for your user with LN repo access
+* DOCKER_USERNAME - Your docker.io username
+* DOCKER_PASSWORD - Your docker.io password
+* SIGNING_CERTIFICATE - PKCS#12 self-signed cert encoded to base64 for windows signing
+* SIGNING_CERTIFICATE_PASSPHRASE - passphrase for the PKCS#12 cert
+* SIGNING_SECRET - GnuPG (gpg) private key for signing linux builds
+* SIGN_MODULES_KEYID - email used to generate the key
+* SIGN_MODULES_PASSPHRASE - passphrase for the private key
+
+### Generating the windows signing certificate
+To generate the self-signed certificate for windows packages, you will need to do the following steps.
+
+1. Generate a root certificate authority
+
+```openssl req -x509 -sha256 -days 365 -nodes -newkey rsa:2048 -subj "/CN=example.com/C=US/L=Boca Raton" -keyout rootCA.key -out rootCA.crt```
+
+2. Create the server secret key
+
+`openssl genrsa -out server.key 2048`
+
+3. Generate a csr.conf file
+```
+cat > csr.conf < cert.conf < hpcc_sign_cert.base64`
+```
+
+On MacOS:
+`base64 -i hpcc_sign_cert.pfx -o hpcc_sign_cert.base64`
+
+From here you can `cat` the output of hpcc_sign_cert.base64 and copy the output into the variable SIGNING_CERTIFICATE in Github Actions.
+
+### Generating a signing key for linux builds
+For linux builds we're going to generate a private key using GnuPG (gpg).
+
+Start the process by entering a terminal and running the command: `gpg --full-generate-key`
+
+You will be given several options in this process.
+
+For type of key, select `RSA and RSA default`.
+
+For keysize, enter `4096`.
+
+For expiration date, select `0 = key does not expire`.
+
+Input your real name.
+
+Input your company email address.
+
+For comment, input something like `Github actions key for signing linux builds`.
+
+Then it will ask you to enter a passphrase for the key, and confirm the passphrase. Do not leave this blank.
+
+A key is generated and added to your gpg keyring. Now we need to export the key for use in the Github actions secret.
+
+To extract your key run `gpg --output private.pgp --armor --export-secret-key <email>`.
+
+Now open private.pgp, copy all, and go to Github actions secrets. Paste the output into the secret "SIGNING_SECRET".
+
+## Starting a build
+The build-assets workflow is kicked off by a tag being pushed to the developer's HPCC-Platform repository. Before we push the tag to our HPCC-Platform repository, we will want to have the other tags in place if we want the LN and ECLIDE builds to function correctly. Suggested tag patterns are `community_HPCC-12345-rc1` or `HPCC-12345-rc1`.
+
+If you choose not to tag the LN and ECLIDE builds, the community builds will generate, but errors will be thrown for any build utilizing the LN repository. ECLIDE will not even attempt a build unless you are also successfully building LN, due to the dependency scheme we use. The 'Baremetal' builds are designed to generate our clienttools targets for the windows-2022 and macos-12 distributions. These jobs contain both the COMMUNITY and LN builds. If the LN build is not tagged, the COMMUNITY section of the job will run and the assets will be uploaded, but the job will fail when it tries to build LN.
+
+If you choose to precede your Jira number with `community_` then you must tag LN with `internal_` and ECLIDE with `eclide_`. Otherwise just use the Jira tag in all three repositories.
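+
+As a minimal sketch (assuming local clones of the three forks in sibling directories, and a hypothetical Jira number HPCC-12345 with a first release candidate), the tagging sequence might look like this:
+```
+# Tag and push the dependent repositories first, using the matching prefixes
+cd LN && git tag internal_HPCC-12345-rc1 && git push origin internal_HPCC-12345-rc1
+cd ../ECLIDE && git tag eclide_HPCC-12345-rc1 && git push origin eclide_HPCC-12345-rc1
+# Pushing the HPCC-Platform tag last is what kicks off the build-assets workflow
+cd ../HPCC-Platform && git tag community_HPCC-12345-rc1 && git push origin community_HPCC-12345-rc1
+```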
+
+Once the LN and ECLIDE repository tags have been created and pushed on the same base branch that your HPCC-Platform work is based on, you are free to push the HPCC-Platform tag, which will initiate the build process.
+
+The summary of the build-assets workflow can then be viewed for progress, and individual jobs can be selected to check build outputs.
+![Build Summary HPCC-12345](/devdoc/resources/images/HPCC-12345-build-in-progress.png)
+
+## Asset output
+
+Assets from the workflow will be released into the corresponding tag location: either the HPCC-Platform repository for all community-based builds, or the LN repository for any builds containing proprietary plugins. Simply browse to the releases or tags tab of your repository and select the tag name you just built. The assets will show up there as the build completes. An example of this on the hpcc-systems repository is [hpcc-systems/HPCC-Platform/releases](https://github.com/hpcc-systems/HPCC-Platform/releases).
\ No newline at end of file
diff --git a/devdoc/resources/images/HPCC-12345-build-in-progress.png b/devdoc/resources/images/HPCC-12345-build-in-progress.png
new file mode 100644
index 00000000000..70185a9e6ac
Binary files /dev/null and b/devdoc/resources/images/HPCC-12345-build-in-progress.png differ
diff --git a/devdoc/resources/images/actions-secrets-and-variables.png b/devdoc/resources/images/actions-secrets-and-variables.png
new file mode 100644
index 00000000000..1c267910fc5
Binary files /dev/null and b/devdoc/resources/images/actions-secrets-and-variables.png differ
diff --git a/devdoc/resources/images/repository-tag-tab.png b/devdoc/resources/images/repository-tag-tab.png
new file mode 100644
index 00000000000..16bdb37acbc
Binary files /dev/null and b/devdoc/resources/images/repository-tag-tab.png differ
diff --git a/dockerfiles/vcpkg/rockylinux-8.dockerfile b/dockerfiles/vcpkg/rockylinux-8.dockerfile
index 3ba849a08dc..17c76d889b3 100644
--- a/dockerfiles/vcpkg/rockylinux-8.dockerfile
+++ b/dockerfiles/vcpkg/rockylinux-8.dockerfile
@@ -3,4 +3,8 @@ FROM hpccsystems/platform-build-base-rockylinux-8:$VCPKG_REF
 
 ENTRYPOINT ["/bin/bash", "--login", "-c"]
 
+RUN yum install -y \
+    rpm-build && \
+    yum -y clean all && rm -rf /var/cache
+
 CMD ["/bin/bash"]
diff --git a/ecl/eclagent/CMakeLists.txt b/ecl/eclagent/CMakeLists.txt
index 14a335b1d6a..b3dda9aaab7 100644
--- a/ecl/eclagent/CMakeLists.txt
+++ b/ecl/eclagent/CMakeLists.txt
@@ -48,7 +48,7 @@ install ( TARGETS hthor RUNTIME DESTINATION ${EXEC_DIR} )
 target_link_libraries ( hthor
     hthorlib
     wuanalysis
-    ${CPPUNIT_LIBRARIES}
+    ${CppUnit_LIBRARIES}
 )
 Install ( PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/start_eclagent DESTINATION ${EXEC_DIR} COMPONENT Runtime )
diff --git a/ecl/eclcc/eclcc.cpp b/ecl/eclcc/eclcc.cpp
index ece9f7717d7..d4c904c9945 100644
--- a/ecl/eclcc/eclcc.cpp
+++ b/ecl/eclcc/eclcc.cpp
@@ -207,6 +207,8 @@ struct EclCompileInstance : public CInterfaceOf<ICodegenContextCallback>
     virtual IHqlExpression *lookupDFSlayout(const char *filename, IErrorReceiver &errs, const ECLlocation &location, bool isOpt) const override;
     virtual unsigned lookupClusterSize() const override;
     virtual void getTargetPlatform(StringBuffer & result) override;
+    virtual IInterface * getGitUpdateLock(const char * key) override;
+
 public:
     EclCC & eclcc;
@@ -239,11 +241,11 @@ struct EclCompileInstance : public CInterfaceOf<ICodegenContextCallback>
     Linked<IErrorReceiver> errorProcessor;
 };
 
-class EclCC
+class EclCC final : implements CUnsharedInterfaceOf<ICodegenContextCallback>
 {
 public:
     EclCC(int _argc, const char **_argv)
-        : programName(_argv[0])
+        :
programName(_argv[0]), repositoryManager(this) { argc = _argc; argv = _argv; @@ -284,12 +286,14 @@ class EclCC // interface ICodegenContextCallback - void pushCluster(const char *clusterName); - void popCluster(); - bool allowAccess(const char * category, bool isSigned); - IHqlExpression *lookupDFSlayout(const char *filename, IErrorReceiver &errs, const ECLlocation &location, bool isOpt) const; - unsigned lookupClusterSize() const; - void getTargetPlatform(StringBuffer & result); + virtual void noteCluster(const char *clusterName) override; + virtual void pushCluster(const char *clusterName) override; + virtual void popCluster() override; + virtual bool allowAccess(const char * category, bool isSigned) override; + virtual IHqlExpression *lookupDFSlayout(const char *filename, IErrorReceiver &errs, const ECLlocation &location, bool isOpt) const override; + virtual unsigned lookupClusterSize() const override; + virtual void getTargetPlatform(StringBuffer & result) override; + virtual IInterface * getGitUpdateLock(const char * key) override; protected: void appendNeverSimplifyList(const char *attribsList); @@ -387,6 +391,7 @@ class EclCC StringAttr optMetaLocation; StringBuffer neverSimplifyRegEx; StringAttr optDefaultGitPrefix; + StringAttr optGitLock; // A key used to lock access to git updates StringAttr optGitUser; StringAttr optGitPasswordPath; @@ -1719,7 +1724,7 @@ void EclCC::processXmlFile(EclCompileInstance & instance, const char *archiveXML if (optCheckEclVersion) instance.checkEclVersionCompatible(); - EclRepositoryManager localRepositoryManager; + EclRepositoryManager localRepositoryManager(&instance); processDefinitions(localRepositoryManager); localRepositoryManager.inherit(repositoryManager); // Definitions, plugins, std library etc. Owned contents; @@ -1839,7 +1844,7 @@ void EclCC::processFile(EclCompileInstance & instance) attributePackage = optDefaultRepo.str(); } - EclRepositoryManager localRepositoryManager; + EclRepositoryManager localRepositoryManager(&instance); processDefinitions(localRepositoryManager); localRepositoryManager.inherit(repositoryManager); // don't include -I if (!optNoBundles) @@ -2066,7 +2071,7 @@ void EclCC::processReference(EclCompileInstance & instance, const char * queryAt if (optArchive || optGenerateDepend || optSaveQueryArchive) instance.archive.setown(createAttributeArchive()); - EclRepositoryManager localRepositoryManager; + EclRepositoryManager localRepositoryManager(&instance); processDefinitions(localRepositoryManager); localRepositoryManager.inherit(repositoryManager); if (!optNoBundles) @@ -2381,6 +2386,7 @@ bool EclCompileInstance::reportErrorSummary() void EclCompileInstance::noteCluster(const char *clusterName) { + eclcc.noteCluster(clusterName); } void EclCompileInstance::pushCluster(const char *clusterName) @@ -2398,6 +2404,12 @@ unsigned EclCompileInstance::lookupClusterSize() const return eclcc.lookupClusterSize(); } +IInterface * EclCompileInstance::getGitUpdateLock(const char * key) +{ + return eclcc.getGitUpdateLock(key); +} + + bool EclCompileInstance::allowAccess(const char * category, bool isSigned) { return eclcc.allowAccess(category, isSigned); @@ -2443,6 +2455,10 @@ void EclCC::appendNeverSimplifyList(const char *attribsList) } } +void EclCC::noteCluster(const char *clusterName) +{ +} + void EclCC::pushCluster(const char *clusterName) { clusters.append(clusterName); @@ -2460,6 +2476,9 @@ bool EclCC::checkDaliConnected() const { if (!daliConnected) { + if (isEmptyString(optDFS) || disconnectReported) + return false; + try { 
Owned serverGroup = createIGroup(optDFS.str(), DALI_SERVER_PORT); @@ -2489,7 +2508,7 @@ unsigned EclCC::lookupClusterSize() const { CriticalBlock b(dfsCrit); // Overkill at present but maybe one day codegen will start threading? If it does the stack is also iffy! #ifndef _CONTAINERIZED - if (!optDFS || disconnectReported || !checkDaliConnected()) + if (!checkDaliConnected()) return 0; #endif if (prevClusterSize != -1) @@ -2515,10 +2534,56 @@ unsigned EclCC::lookupClusterSize() const return prevClusterSize; } +IInterface * EclCC::getGitUpdateLock(const char * path) +{ + if (optGitLock.isEmpty()) + return nullptr; + + VStringBuffer lockPath("/GitUpdateLocks/%s/hash%llx", optGitLock.str(), rtlHash64VStr(path, HASH64_INIT)); + + CriticalBlock b(dfsCrit); + if (!checkDaliConnected()) + return nullptr; + + DBGLOG("Get git update lock for '%s':'%s'", optGitLock.str(), path); + const unsigned lockTimeout = 30 * 60 * 1000; // 30 minutes - fetches from git can take a long time + const unsigned connectTimeout = 3 * 1000; + unsigned traceTimeout = connectTimeout * 2; + CCycleTimer elapsed; + for (;;) + { + try + { + unsigned remaining = elapsed.remainingMs(lockTimeout); + if (remaining == 0) + break; + + Owned connection = querySDS().connect(lockPath, myProcessSession(), RTM_LOCK_WRITE|RTM_CREATE_QUERY, connectTimeout); + if (connection) + return connection.getClear(); + } + catch (IException * e) + { + unsigned errcode = e->errorCode(); + e->Release(); + if (errcode != SDSExcpt_LockTimeout) + break; + } + if (elapsed.elapsedMs() >= traceTimeout) + { + DBGLOG("Blocked waiting for a git update lock on '%s' for %u seconds", path, elapsed.elapsedMs() / 1000); + traceTimeout *= 2; + } + } + DBGLOG("Failed to get git update lock for '%s'", path); + return nullptr; +} + + IHqlExpression *EclCC::lookupDFSlayout(const char *filename, IErrorReceiver &errs, const ECLlocation &location, bool isOpt) const { CriticalBlock b(dfsCrit); // Overkill at present but maybe one day codegen will start threading? - if (!optDFS || disconnectReported) + if (isEmptyString(optDFS) || disconnectReported) { // Dali lookup disabled, yet translation requested. Should we report if OPT set? if (!(optArchive || optGenerateDepend || optSyntax || optGenerateMeta || optEvaluateResult || disconnectReported)) @@ -2842,6 +2907,9 @@ int EclCC::parseCommandLineOptions(int argc, const char* argv[]) { optScope.set(tempArg); } + else if (iter.matchOption(optGitLock, "--gitlock")) + { + } else if (iter.matchOption(optGitUser, "--gituser")) { } diff --git a/ecl/eclcc/eclcc.hpp b/ecl/eclcc/eclcc.hpp index 8ff08bfaa1b..01f2bcbd59f 100644 --- a/ecl/eclcc/eclcc.hpp +++ b/ecl/eclcc/eclcc.hpp @@ -96,6 +96,7 @@ const char * const helpText[] = { "?! --fastsyntax Delay expanding functions when parsing. May speed up processing for some queries", "? --fetchrepos Automatically download missing repositories associated with dependencies", "! --gituser=x Which user should be used for accessing git repositories (for servers)", + "! --gitlock=key The dali key (e.g. plane name) that should be used to protect updates to git repositories", " -help, --help Display this message", " -help -v Display verbose help message", "! 
--ignoresimplified Do not use simplified expressions when syntax checking", diff --git a/ecl/eclccserver/eclccserver.cpp b/ecl/eclccserver/eclccserver.cpp index 5758b5eaf9a..18c20951dfa 100644 --- a/ecl/eclccserver/eclccserver.cpp +++ b/ecl/eclccserver/eclccserver.cpp @@ -235,6 +235,45 @@ static bool getHomeFolder(StringBuffer & homepath) return true; } +static bool guardGitUpdates = false; +static StringBuffer gitLockKey; +static void configGitLock() +{ + Owned config = getComponentConfig(); + if (config->getPropBool("@enableEclccDali", true)) + { + if (config->getPropBool("@guardGitUpdates", true)) + { + if (isContainerized()) + { + //Containerized: each git plane needs to be protected independently + gitLockKey.append(config->queryProp("@gitPlane")); + } + else + { + //Bare metal - git repos are fetched locally, so protect per host-ip + const char * hostname = GetCachedHostName(); + if (hostname) + { + gitLockKey.append("host"); + + for (const byte * cur = (const byte *)hostname; *cur; cur++) + { + //remove '.' and other unsupported characters from the key name + if (isalnum(*cur)) + gitLockKey.append(*cur); + else + gitLockKey.append("_"); + } + } + } + + if (!gitLockKey.isEmpty()) + guardGitUpdates = true; + } + } +} + class EclccCompileThread : implements IPooledThread, implements IErrorReporter, public CInterface { StringAttr wuid; @@ -644,6 +683,9 @@ class EclccCompileThread : implements IPooledThread, implements IErrorReporter, if (!repoRootPath.isEmpty()) eclccCmd.appendf(" \"--repocachepath=%s\"", repoRootPath.str()); + if (guardGitUpdates) + eclccCmd.appendf(" \"--gitlock=%s\"", gitLockKey.str()); + if (config->queryProp("@defaultRepo")) eclccCmd.appendf(" --defaultrepo=%s", config->queryProp("@defaultRepo")); if (config->queryProp("@defaultRepoVersion")) @@ -835,7 +877,13 @@ class EclccCompileThread : implements IPooledThread, implements IErrorReporter, if (GetCurrentDirectory(sizeof(dir), dir)) repoRootPath.append(dir); } - if (repoRootPath.length()) + + if (guardGitUpdates) + { + addPathSepChar(repoRootPath).append("repos"); + recursiveCreateDirectory(repoRootPath.str()); + } + else if (repoRootPath.length()) { addPathSepChar(repoRootPath).append("repos_").append(idxStr); recursiveCreateDirectory(repoRootPath.str()); @@ -1421,6 +1469,7 @@ int main(int argc, const char *argv[]) { initClientProcess(serverGroup, DCR_EclCCServer); openLogFile(); + configGitLock(); const char *wuid = globals->queryProp("@workunit"); if (wuid) { diff --git a/ecl/hql/CMakeLists.txt b/ecl/hql/CMakeLists.txt index 32ea50dadaa..928067568b1 100644 --- a/ecl/hql/CMakeLists.txt +++ b/ecl/hql/CMakeLists.txt @@ -185,7 +185,7 @@ target_link_libraries ( hql nbcd eclrtl deftype - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) IF (USE_ZLIB) diff --git a/ecl/hql/hql.hpp b/ecl/hql/hql.hpp index 1ab43ae1b65..325efbf37a9 100644 --- a/ecl/hql/hql.hpp +++ b/ecl/hql/hql.hpp @@ -226,6 +226,9 @@ interface ICodegenContextCallback : public IInterface * Which platform was this query originally targeted to? 
*/ virtual void getTargetPlatform(StringBuffer & result) = 0; + /* + */ + virtual IInterface * getGitUpdateLock(const char * key) = 0; }; diff --git a/ecl/hql/hqlplugininfo.cpp b/ecl/hql/hqlplugininfo.cpp index e0d1cd409c7..c48ab0fd236 100644 --- a/ecl/hql/hqlplugininfo.cpp +++ b/ecl/hql/hqlplugininfo.cpp @@ -29,7 +29,7 @@ namespace repositoryCommon { IEclPackage * loadPlugins(const char * pluginPath) { MultiErrorReceiver errs; - EclRepositoryManager collection; + EclRepositoryManager collection(nullptr); collection.addQuerySourceFileEclRepository(&errs, pluginPath, ESFallowplugins|ESFnodependencies, (unsigned) -1);//Preload implicits/dlls if (errs.errCount()) { diff --git a/ecl/hql/hqlrepository.cpp b/ecl/hql/hqlrepository.cpp index 8b7c5a47649..78b0edc575f 100644 --- a/ecl/hql/hqlrepository.cpp +++ b/ecl/hql/hqlrepository.cpp @@ -793,6 +793,7 @@ IEclSourceCollection * EclRepositoryManager::resolveGitCollection(const char * r throw makeStringExceptionV(99, "Unsupported repository link format '%s'", defaultUrl); bool alreadyExists = false; + Owned gitUpdateLock(getGitUpdateLock(repoPath)); if (checkDirExists(repoPath)) { if (options.cleanRepos) @@ -855,6 +856,10 @@ IEclSourceCollection * EclRepositoryManager::resolveGitCollection(const char * r ok = true; } } + //All following operations are read-only and should not be affected if the git repo is updated behind the scenes + //this could become a read/write lock if that proved to be an issue. + gitUpdateLock.clear(); + gitDownloadCycles += gitDownloadTimer.elapsedCycles(); if (error) { diff --git a/ecl/hql/hqlrepository.hpp b/ecl/hql/hqlrepository.hpp index a69febb6159..9bac5fd7626 100644 --- a/ecl/hql/hqlrepository.hpp +++ b/ecl/hql/hqlrepository.hpp @@ -38,7 +38,9 @@ class EclRepositoryMapping : public CInterface class HQL_API EclRepositoryManager { public: - EclRepositoryManager() = default; + EclRepositoryManager(ICodegenContextCallback * _callback) : callback(_callback) + { + } EclRepositoryManager(const EclRepositoryManager & other) = delete; void addNestedRepository(IIdAtom * scopeId, IEclSourceCollection * source, bool includeInArchive); @@ -86,10 +88,17 @@ class HQL_API EclRepositoryManager unsigned runGitCommand(StringBuffer * output, const char *args, const char * cwd, bool needCredentials); IEclPackage * queryRepository(IIdAtom * name, const char * defaultUrl, IEclSourceCollection * overrideSource, bool includeDefinitions); + IInterface * getGitUpdateLock(const char * path) + { + if (!callback) + return nullptr; + return callback->getGitUpdateLock(path); + } private: mutable IErrorReceiver * errorReceiver = nullptr; // mutable to allow const methods to set it, it logically doesn't change the object using DependencyInfo = std::pair>; + ICodegenContextCallback * callback; CIArrayOf repos; std::vector dependencies; IArrayOf sharedSources; // plugins, std library, bundles diff --git a/ecl/hqlcpp/hqlecl.cpp b/ecl/hqlcpp/hqlecl.cpp index 673ad1ecbf9..fd0e554b2d9 100644 --- a/ecl/hqlcpp/hqlecl.cpp +++ b/ecl/hqlcpp/hqlecl.cpp @@ -150,6 +150,8 @@ class NullContextCallback : implements ICodegenContextCallback, public CInterfac virtual bool allowAccess(const char * category, bool isSigned) override { return true; } virtual IHqlExpression *lookupDFSlayout(const char *filename, IErrorReceiver &errs, const ECLlocation &location, bool isOpt) const override { return nullptr; } virtual unsigned lookupClusterSize() const override { return 0; } + virtual IInterface * getGitUpdateLock(const char * key) override { return nullptr; } + virtual void 
getTargetPlatform(StringBuffer & result) override { workunit->getDebugValue("targetClusterType", StringBufferAdaptor(result)); diff --git a/ecllibrary/std/DataPatterns/Profile.ecl b/ecllibrary/std/DataPatterns/Profile.ecl index 49c9f834dcf..65643ed6317 100644 --- a/ecllibrary/std/DataPatterns/Profile.ecl +++ b/ecllibrary/std/DataPatterns/Profile.ecl @@ -615,9 +615,9 @@ EXPORT Profile(inFile, // Pattern mapping a UNICODE datatype; using regex due to the complexity // of the character set #UNIQUENAME(_MapUpperCharUni); - LOCAL %_MapUpperCharUni%(UNICODE s) := REGEXREPLACE(u'\\p{Uppercase_Letter}', s, u'A'); + LOCAL %_MapUpperCharUni%(UNICODE s) := REGEXREPLACE(u'\\p{Lu}', s, u'A'); #UNIQUENAME(_MapLowerCharUni); - LOCAL %_MapLowerCharUni%(UNICODE s) := REGEXREPLACE(u'[[\\p{Lowercase_Letter}][\\p{Titlecase_Letter}][\\p{Modifier_Letter}][\\p{Other_Letter}]]', s, u'a'); + LOCAL %_MapLowerCharUni%(UNICODE s) := REGEXREPLACE(u'[[\\p{Ll}][\\p{Lt}][\\p{Lm}][\\p{Lo}]]', s, u'a'); #UNIQUENAME(_MapDigitUni); LOCAL %_MapDigitUni%(UNICODE s) := REGEXREPLACE(u'[1-9]', s, u'9'); // Leave '0' as-is and replace with '9' later #UNIQUENAME(_MapAllUni); diff --git a/esp/clients/wsdfuaccess/CMakeLists.txt b/esp/clients/wsdfuaccess/CMakeLists.txt index 844e8ac6897..f6ba409df4b 100644 --- a/esp/clients/wsdfuaccess/CMakeLists.txt +++ b/esp/clients/wsdfuaccess/CMakeLists.txt @@ -73,7 +73,7 @@ target_link_libraries ( wsdfuaccess dafsstream thorhelper dalibase - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) if (NOT CONTAINERIZED) diff --git a/esp/scm/ws_access.ecm b/esp/scm/ws_access.ecm index da34ed9dd5d..a00a43e95e8 100644 --- a/esp/scm/ws_access.ecm +++ b/esp/scm/ws_access.ecm @@ -1006,7 +1006,7 @@ ESPresponse [nil_remove] UserAccountExportResponse [http_content("application/octet-stream")] binary Result; }; -ESPservice [version("1.17"), auth_feature("NONE"), exceptions_inline("./smc_xslt/exceptions.xslt")] ws_access +ESPservice [version("1.17"), generated_client_version("0.0"), auth_feature("NONE"), exceptions_inline("./smc_xslt/exceptions.xslt")] ws_access { ESPmethod [client_xslt("/esp/xslt/access_users.xslt")] Users(UserRequest, UserResponse); ESPmethod [client_xslt("/esp/xslt/access_useredit.xslt")] UserEdit(UserEditRequest, UserEditResponse); diff --git a/esp/scm/ws_cloud.ecm b/esp/scm/ws_cloud.ecm index f50f0e5b2ae..2b662bc6934 100644 --- a/esp/scm/ws_cloud.ecm +++ b/esp/scm/ws_cloud.ecm @@ -35,7 +35,7 @@ ESPresponse [encode(0)] GetServicesResponse [json_inline(1)] string Result; }; -ESPservice [auth_feature("CloudAccess:ACCESS"), version("1.01"), exceptions_inline("./smc_xslt/exceptions.xslt")] WsCloud +ESPservice [auth_feature("CloudAccess:ACCESS"), version("1.01"), generated_client_version("0.0"), exceptions_inline("./smc_xslt/exceptions.xslt")] WsCloud { ESPmethod [auth_feature("CloudAccess:READ")] GetPODs(GetPODsRequest, GetPODsResponse); ESPmethod [auth_feature("CloudAccess:READ"), min_ver("1.01")] GetServices(GetServicesRequest, GetServicesResponse); diff --git a/esp/scm/ws_configmgr.ecm b/esp/scm/ws_configmgr.ecm index 455b49c97f6..fa72a3588f7 100644 --- a/esp/scm/ws_configmgr.ecm +++ b/esp/scm/ws_configmgr.ecm @@ -440,7 +440,7 @@ ESPrequest WizardTestRequest -ESPservice [auth_feature("DEFERRED"),version("2.0"), default_client_version("2.0"), exceptions_inline("xslt/exceptions.xslt"), disable_profile_execution] ws_configmgr +ESPservice [auth_feature("DEFERRED"), version("2.0"), default_client_version("2.0"), exceptions_inline("xslt/exceptions.xslt"), disable_profile_execution] ws_configmgr { 
ESPMethod [ diff --git a/esp/scm/ws_esdlconfig.ecm b/esp/scm/ws_esdlconfig.ecm index aa2214cd4fa..a3dec5c9854 100644 --- a/esp/scm/ws_esdlconfig.ecm +++ b/esp/scm/ws_esdlconfig.ecm @@ -306,7 +306,7 @@ ESPresponse [exceptions_inline] ListESDLBindingsResponse }; #define VERSION_FOR_ESDLCMD "1.5" -ESPservice [auth_feature("ESDLConfigAccess:ACCESS"), version("1.5"), exceptions_inline("./smc_xslt/exceptions.xslt")] WsESDLConfig +ESPservice [auth_feature("ESDLConfigAccess:ACCESS"), version("1.5"), generated_client_version("0.0"), exceptions_inline("./smc_xslt/exceptions.xslt")] WsESDLConfig { ESPmethod Echo(EchoRequest, EchoResponse); ESPmethod [auth_feature("ESDLConfigAccess:WRITE")] PublishESDLDefinition(PublishESDLDefinitionRequest, PublishESDLDefinitionResponse); diff --git a/esp/scm/ws_loggingservice.ecm b/esp/scm/ws_loggingservice.ecm index be329b88dc6..59a43fead84 100644 --- a/esp/scm/ws_loggingservice.ecm +++ b/esp/scm/ws_loggingservice.ecm @@ -52,7 +52,7 @@ ESPresponse [exceptions_inline] UpdateLogResponse string StatusMessage; }; -ESPService [auth_feature("DEFERRED"), version("1.0"), noforms, use_method_name] WsLoggingService +ESPService [auth_feature("DEFERRED"), version("1.0"), generated_client_version("0.0"), noforms, use_method_name] WsLoggingService { ESPmethod GetTransactionSeed(GetTransactionSeedRequest, GetTransactionSeedResponse); ESPmethod UpdateLog(UpdateLogRequest, UpdateLogResponse); diff --git a/esp/scm/ws_machine.ecm b/esp/scm/ws_machine.ecm index 5224ff02b7e..78322b0d307 100644 --- a/esp/scm/ws_machine.ecm +++ b/esp/scm/ws_machine.ecm @@ -457,7 +457,7 @@ ESPresponse [encode(0), nil_remove, exceptions_inline] GetNodeGroupUsageResponse }; //-------- service --------- -ESPservice [auth_feature("DEFERRED"), version("1.18")] ws_machine +ESPservice [auth_feature("DEFERRED"), version("1.18"), generated_client_version("0.0")] ws_machine { ESPmethod [resp_xsl_default("./smc_xslt/clusterprocesses.xslt"), exceptions_inline("./smc_xslt/exceptions.xslt")] GetTargetClusterInfo(GetTargetClusterInfoRequest, GetTargetClusterInfoResponse); diff --git a/esp/scm/ws_resources.ecm b/esp/scm/ws_resources.ecm index f41d77d19ba..12f100b0cc4 100644 --- a/esp/scm/ws_resources.ecm +++ b/esp/scm/ws_resources.ecm @@ -113,7 +113,7 @@ ESPresponse [nil_remove, exceptions_inline] TargetQueryResponse ESParray Roxies; }; -ESPservice [auth_feature("ResourceQueryAccess:ACCESS"), version("1.03"), exceptions_inline("./smc_xslt/exceptions.xslt"), disable_profile_execution] WsResources +ESPservice [auth_feature("ResourceQueryAccess:ACCESS"), version("1.03"), generated_client_version("0.0"), exceptions_inline("./smc_xslt/exceptions.xslt"), disable_profile_execution] WsResources { ESPmethod [auth_feature("ResourceQueryAccess:READ"), min_ver("1.03")] TargetQuery(TargetQueryRequest, TargetQueryResponse); ESPmethod [auth_feature("ResourceQueryAccess:READ")] ServiceQuery(ServiceQueryRequest, ServiceQueryResponse); diff --git a/esp/scm/ws_topology.ecm b/esp/scm/ws_topology.ecm index ca67e8bcf78..a7e7e33c3cc 100644 --- a/esp/scm/ws_topology.ecm +++ b/esp/scm/ws_topology.ecm @@ -665,7 +665,7 @@ ESPresponse [exceptions_inline] TpListLogFilesResponse ESParray Files; }; -ESPservice [auth_feature("DEFERRED"), noforms, version("1.32"), cache_group("ESPWsTP"), exceptions_inline("./smc_xslt/exceptions.xslt")] WsTopology +ESPservice [auth_feature("DEFERRED"), noforms, version("1.32"), generated_client_version("0.0"), cache_group("ESPWsTP"), exceptions_inline("./smc_xslt/exceptions.xslt")] WsTopology { ESPmethod 
[cache_seconds(180), cache_global(1), resp_xsl_default("/esp/xslt/targetclusters.xslt")] TpTargetClusterQuery(TpTargetClusterQueryRequest, TpTargetClusterQueryResponse); ESPmethod [cache_seconds(180), cache_global(1), resp_xsl_default("/esp/xslt/topology.xslt")] TpClusterQuery(TpClusterQueryRequest, TpClusterQueryResponse); diff --git a/esp/services/ws_access/CMakeLists.txt b/esp/services/ws_access/CMakeLists.txt index 0024175be69..a0b4c1d3547 100644 --- a/esp/services/ws_access/CMakeLists.txt +++ b/esp/services/ws_access/CMakeLists.txt @@ -51,11 +51,6 @@ include_directories ( ${HPCC_SOURCE_DIR}/common/thorhelper ) -if(USE_OPENLDAP) - # NOTE - this should not be needed, it's the result of poor encapsulation and using CLdapSecManager directly - include_directories ( ${OPENLDAP_INCLUDE_DIR} ) -endif() - ADD_DEFINITIONS( -D_USRDLL -Dws_access_API_LOCAL -DESP_SERVICE_ws_access) HPCC_ADD_LIBRARY( ws_access SHARED ${SRCS} ) diff --git a/esp/services/ws_account/CMakeLists.txt b/esp/services/ws_account/CMakeLists.txt index 546b893a316..549c4a9de4e 100644 --- a/esp/services/ws_account/CMakeLists.txt +++ b/esp/services/ws_account/CMakeLists.txt @@ -49,11 +49,6 @@ include_directories ( ${HPCC_SOURCE_DIR}/common/thorhelper ) -if (USE_OPENLDAP) - # NOTE - this should not be needed, it's the result of poor encapsulation and using CLdapSecManager directly - include_directories ( ${OPENLDAP_INCLUDE_DIR} ) -endif () - ADD_DEFINITIONS( -D_USRDLL -Dws_account_API_LOCAL -DESP_SERVICE_ws_account) HPCC_ADD_LIBRARY( ws_account SHARED ${SRCS} ) diff --git a/esp/services/ws_cloud/CMakeLists.txt b/esp/services/ws_cloud/CMakeLists.txt index cbe496ae9a0..84f20635ac2 100644 --- a/esp/services/ws_cloud/CMakeLists.txt +++ b/esp/services/ws_cloud/CMakeLists.txt @@ -51,11 +51,6 @@ include_directories ( ${HPCC_SOURCE_DIR}/esp/smc/SMCLib ) -if (USE_OPENLDAP) - # NOTE - this should not be needed, it's the result of poor encapsulation and using CLdapSecManager directly - include_directories ( ${OPENLDAP_INCLUDE_DIR} ) -endif () - ADD_DEFINITIONS( -D_USRDLL -DWS_CLOUD_EXPORTS -DESP_SERVICE_WsCloud) HPCC_ADD_LIBRARY( ws_cloud SHARED ${SRCS} ) diff --git a/esp/services/ws_fs/ws_fsService.cpp b/esp/services/ws_fs/ws_fsService.cpp index 4a83c2cc65c..43e500e94b2 100644 --- a/esp/services/ws_fs/ws_fsService.cpp +++ b/esp/services/ws_fs/ws_fsService.cpp @@ -331,19 +331,13 @@ static void DeepAssign(IEspContext &context, IConstDFUWorkUnit *src, IEspDFUWork dest.setStateMessage(statemsg.str()); CDateTime startAt; - CDateTime stoppAt; + CDateTime stopAt; prog->getTimeStarted(startAt); - prog->getTimeStopped(stoppAt); + prog->getTimeStopped(stopAt); + StringBuffer tmpstr; - startAt.getDateString(tmpstr); - tmpstr.append(" "); - startAt.getTimeString(tmpstr); - dest.setTimeStarted(tmpstr.str()); - tmpstr.clear(); - stoppAt.getDateString(tmpstr); - tmpstr.append(" "); - stoppAt.getTimeString(tmpstr); - dest.setTimeStopped(tmpstr.str()); + dest.setTimeStarted(startAt.getString(tmpstr).str()); + dest.setTimeStopped(stopAt.getString(tmpstr.clear()).str()); StringBuffer prgmsg; prog->formatProgressMessage(prgmsg); @@ -1115,7 +1109,14 @@ bool CFileSprayEx::onGetDFUWorkunits(IEspContext &context, IEspGetDFUWorkunits & resultWU->setID(wu->queryId()); StringBuffer jobname, user, cluster; resultWU->setJobName(wu->getJobName(jobname).str()); - resultWU->setCommand(wu->getCommand()); + DFUcmd command = wu->getCommand(); + resultWU->setCommand(command); + if (version >= 1.03) + { + StringBuffer cmdStr; + encodeDFUcommand(command, cmdStr); + 
resultWU->setCommandMessage(cmdStr.str()); + } resultWU->setUser(wu->getUser(user).str()); const char* clusterName = wu->getClusterName(cluster).str(); diff --git a/esp/services/ws_smc/CMakeLists.txt b/esp/services/ws_smc/CMakeLists.txt index 4614135ffd4..09d416fdcc3 100644 --- a/esp/services/ws_smc/CMakeLists.txt +++ b/esp/services/ws_smc/CMakeLists.txt @@ -65,11 +65,6 @@ include_directories ( ${HPCC_SOURCE_DIR}/esp/espcommon ) -if (USE_OPENLDAP) - # NOTE - this should not be needed, it's the result of poor encapsulation and using CLdapSecManager directly - include_directories ( ${OPENLDAP_INCLUDE_DIR} ) -endif () - ADD_DEFINITIONS( -D_USRDLL -DWS_SMC_EXPORTS -DWSSMC_API_LOCAL -DESP_SERVICE_WsSMC) HPCC_ADD_LIBRARY( ws_smc SHARED ${SRCS} ) diff --git a/esp/services/ws_workunits/CMakeLists.txt b/esp/services/ws_workunits/CMakeLists.txt index 9356d5d1a41..8358ed771ed 100644 --- a/esp/services/ws_workunits/CMakeLists.txt +++ b/esp/services/ws_workunits/CMakeLists.txt @@ -135,7 +135,7 @@ target_link_libraries ( ws_workunits pkgfiles wuanalysis ws_dfsclient - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ${COMMON_ESP_SERVICE_LIBS} ) diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index 650354ec8f3..83e594ec3dc 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -20,7 +20,7 @@ "@hpcc-js/common": "2.71.16", "@hpcc-js/comms": "2.92.0", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.0", + "@hpcc-js/eclwatch": "2.74.2", "@hpcc-js/graph": "2.85.14", "@hpcc-js/html": "2.42.19", "@hpcc-js/layout": "2.49.21", @@ -1906,20 +1906,20 @@ } }, "node_modules/@hpcc-js/dgrid": { - "version": "2.32.17", - "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid/-/dgrid-2.32.17.tgz", - "integrity": "sha512-M0QP4vvylMlAMl5iAWKe94zx6xK7SjeQt+iAsN7izwJrZ4PlAPym/bn05VLGfI7iQLT72d/6TRrku/Lh2PyDSg==", + "version": "2.32.19", + "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid/-/dgrid-2.32.19.tgz", + "integrity": "sha512-nFKWjepBJIceN2sTMk8N283OFvU5zwfFAeGqBnT3iQRO2vQRaJzZt4G+9xtgVPbnyWuGiqHhIxYoGJLUOMpLbQ==", "dependencies": { "@hpcc-js/common": "^2.71.16", "@hpcc-js/ddl-shim": "^2.20.6", - "@hpcc-js/dgrid-shim": "^2.24.8", + "@hpcc-js/dgrid-shim": "^2.24.10", "@hpcc-js/util": "^2.51.0" } }, "node_modules/@hpcc-js/dgrid-shim": { - "version": "2.24.8", - "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid-shim/-/dgrid-shim-2.24.8.tgz", - "integrity": "sha512-04+r+7Qa2LSc/aWx+d/QzdRoerPCIpiCXcrXPBf7tBHxOzU8gAIW0WU7wiilUmL2ZdHyLXQrzcT0gKVHkKlJaQ==" + "version": "2.24.10", + "resolved": "https://registry.npmjs.org/@hpcc-js/dgrid-shim/-/dgrid-shim-2.24.10.tgz", + "integrity": "sha512-4PD4GvKn2/HQvgzeP+Gd0Halj4KySk0QW1C7dqfyNWV8AUaseT9SSUvyu2ftGPUrzq65sJ0fSaq4zh3Js9dbaQ==" }, "node_modules/@hpcc-js/dgrid2": { "version": "2.3.18", @@ -1928,18 +1928,18 @@ "dependencies": { "@hpcc-js/common": "^2.71.16", "@hpcc-js/preact-shim": "^2.16.10", - "@hpcc-js/util": "^2.51.0" + "@hpcc-js/util": "^2.50.6" } }, "node_modules/@hpcc-js/eclwatch": { - "version": "2.74.0", - "resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.0.tgz", - "integrity": "sha512-l33wC724CKZ/XCeErt6fGNbXrUHFJAY8TInl7KhpYRbimYW/rdLEQ8DuqzPFqHGm1ev2ym8HGn8tIk84M/3g8g==", + "version": "2.74.2", + "resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.2.tgz", + "integrity": "sha512-FY5CQ/Pezq5enRZtVXzmxV2utv+Fiq7Gn7guMz2IhYWmenNDgclIrfKHeXL8nISJsPNl/VJOHCwyxBWuhuGBdw==", "dependencies": { "@hpcc-js/codemirror": "^2.61.3", "@hpcc-js/common": "^2.71.16", "@hpcc-js/comms": 
"^2.92.0", - "@hpcc-js/dgrid": "^2.32.17", + "@hpcc-js/dgrid": "^2.32.19", "@hpcc-js/graph": "^2.85.14", "@hpcc-js/layout": "^2.49.21", "@hpcc-js/phosphor": "^2.18.7", @@ -2058,12 +2058,12 @@ "resolved": "https://registry.npmjs.org/@hpcc-js/timeline/-/timeline-2.51.24.tgz", "integrity": "sha512-QNgXhJ6/hQHfP2Lge2zL1X5ERI813KKpFN+DNFqufhWoZIT/7x3kr1If8r1mC74hYt4xqkFAdoveEepFT+lYhQ==", "dependencies": { - "@hpcc-js/api": "^2.12.16", - "@hpcc-js/chart": "^2.83.2", - "@hpcc-js/common": "^2.71.16", - "@hpcc-js/html": "^2.42.19", - "@hpcc-js/layout": "^2.49.21", - "@hpcc-js/react": "^2.53.15" + "@hpcc-js/api": "^2.12.15", + "@hpcc-js/chart": "^2.83.1", + "@hpcc-js/common": "^2.71.15", + "@hpcc-js/html": "^2.42.18", + "@hpcc-js/layout": "^2.49.20", + "@hpcc-js/react": "^2.53.14" } }, "node_modules/@hpcc-js/tree": { diff --git a/esp/src/package.json b/esp/src/package.json index 745acde3fe6..a7685259e97 100644 --- a/esp/src/package.json +++ b/esp/src/package.json @@ -46,7 +46,7 @@ "@hpcc-js/common": "2.71.16", "@hpcc-js/comms": "2.92.0", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.0", + "@hpcc-js/eclwatch": "2.74.2", "@hpcc-js/graph": "2.85.14", "@hpcc-js/html": "2.42.19", "@hpcc-js/layout": "2.49.21", diff --git a/esp/src/src-react/components/Activities.tsx b/esp/src/src-react/components/Activities.tsx index 0f1103c7a1f..3339d5c1a82 100644 --- a/esp/src/src-react/components/Activities.tsx +++ b/esp/src/src-react/components/Activities.tsx @@ -178,10 +178,18 @@ export const Activities: React.FunctionComponent = ({ key: "open", text: nlsHPCC.Open, disabled: !uiState.wuSelected && !uiState.thorClusterSelected, iconProps: { iconName: "WindowEdit" }, onClick: () => { if (selection.length === 1) { - window.location.href = `#/operations/clusters/${selection[0].ClusterName}`; + let url = `#/operations/clusters/${selection[0].ClusterName}`; + if (selection[0].Wuid) { + url = `#/workunits/${selection[0].Wuid}`; + } + window.location.href = url; } else { for (let i = selection.length - 1; i >= 0; --i) { - window.open(`#/operations/clusters/${selection[i].ClusterName}`, "_blank"); + let url = `#/operations/clusters/${selection[i].ClusterName}`; + if (selection[i].Wuid) { + url = `#/workunits/${selection[i].Wuid}`; + } + window.open(url, "_blank"); } } } diff --git a/esp/src/src-react/components/ECLArchive.tsx b/esp/src/src-react/components/ECLArchive.tsx index e48267c3114..7f43a368726 100644 --- a/esp/src/src-react/components/ECLArchive.tsx +++ b/esp/src/src-react/components/ECLArchive.tsx @@ -53,8 +53,9 @@ export const ECLArchive: React.FunctionComponent = ({ }, [archive, metrics]); React.useEffect(() => { - if (metrics.length) { - setSelectionText(archive?.content(selection) ?? ""); + const text = archive?.content(selection) ?? ""; + if (text) { + setSelectionText(text); setMarkers(archive?.markers(selection) ?? []); setSelectedMetrics(archive?.metrics(selection) ?? 
[]); } else { diff --git a/esp/src/src-react/components/Helpers.tsx b/esp/src/src-react/components/Helpers.tsx index 7e391bc9223..458308debd8 100644 --- a/esp/src/src-react/components/Helpers.tsx +++ b/esp/src/src-react/components/Helpers.tsx @@ -230,7 +230,7 @@ export const Helpers: React.FunctionComponent = ({ = ({ const [warningChecked, setWarningChecked] = React.useState(true); const [infoChecked, setInfoChecked] = React.useState(true); const [otherChecked, setOtherChecked] = React.useState(true); - const [filterCounts, setFilterCounts] = React.useState({ cost: 0, error: 0, warning: 0, info: 0, other: 0 }); + const [filterCounts, setFilterCounts] = React.useState({ cost: 0, penalty: 0, error: 0, warning: 0, info: 0, other: 0 }); const [exceptions] = useWorkunitExceptions(wuid); const [data, setData] = React.useState([]); const { @@ -64,7 +65,7 @@ export const InfoGrid: React.FunctionComponent = ({ const columns = React.useMemo((): FluentColumns => { return { Severity: { - label: nlsHPCC.Severity, field: "", width: 72, sortable: false, + label: nlsHPCC.Severity, width: 72, sortable: false, className: (value, row) => { switch (value) { case "Error": @@ -79,19 +80,28 @@ export const InfoGrid: React.FunctionComponent = ({ return ""; } }, - Source: { - label: `${nlsHPCC.Source} / ${nlsHPCC.Cost}`, field: "", width: 144, sortable: false, + Priority: { + label: `${nlsHPCC.Source} / ${nlsHPCC.Cost}`, width: 144, formatter: (Source, row) => { if (Source === "Cost Optimizer") { - return formatCost(+row.Priority); + return formatCost(+row.Cost); } return Source; } }, - Code: { label: nlsHPCC.Code, field: "", width: 45, sortable: false }, + Priority: { + label: `${nlsHPCC.Priority} / ${nlsHPCC.TimePenalty}`, width: 144, sortable: false, + formatter: (Priority, row) => { + if (row.Source === "Cost Optimizer") { + return `${formatTwoDigits(+row.Priority / 1000)} (${nlsHPCC.Seconds})`; + } + return Priority; + } + }, + Code: { label: nlsHPCC.Code, width: 45 }, Message: { - label: nlsHPCC.Message, field: "", - sortable: false, + label: nlsHPCC.Message, + sortable: true, formatter: (Message, idx) => { const info = extractGraphInfo(Message); if (info.graphID && info.subgraphID) { @@ -104,15 +114,15 @@ export const InfoGrid: React.FunctionComponent = ({ return Message; } }, - Column: { label: nlsHPCC.Col, field: "", width: 36, sortable: false }, - LineNo: { label: nlsHPCC.Line, field: "", width: 36, sortable: false }, + Column: { label: nlsHPCC.Col, width: 36 }, + LineNo: { label: nlsHPCC.Line, width: 36 }, Activity: { - label: nlsHPCC.Activity, field: "", width: 56, sortable: false, + label: nlsHPCC.Activity, width: 56, formatter: (activityId, row) => { return activityId ? a{activityId} : ""; } }, - FileName: { label: nlsHPCC.FileName, field: "", width: 360, sortable: false } + FileName: { label: nlsHPCC.FileName, width: 360 } }; }, [wuid]); @@ -121,6 +131,7 @@ export const InfoGrid: React.FunctionComponent = ({ React.useEffect(() => { const filterCounts: FilterCounts = { cost: 0, + penalty: 0, error: 0, warning: 0, info: 0, @@ -209,7 +220,6 @@ export const InfoGrid: React.FunctionComponent = ({ = ({ }) .on("click", (row, col, sel) => { setTimelineFilter(sel ? 
row[7].ScopeName : ""); + if (sel) { + setSelectedMetricsSource("scopesTable"); + pushUrl(`${parentUrl}/${row[7].Id}`); + } }) ); @@ -516,6 +520,10 @@ export const Metrics: React.FunctionComponent = ({ key: "refresh", text: nlsHPCC.Refresh, iconProps: { iconName: "Refresh" }, onClick: () => { refresh(); + timeline + .clear() + .lazyRender() + ; } }, { @@ -536,7 +544,7 @@ export const Metrics: React.FunctionComponent = ({ setShowMetricOptions(true); } } - ], [dockpanel, hotspots, onHotspot, options, refresh, setOptions, showTimeline]); + ], [dockpanel, hotspots, onHotspot, options, refresh, setOptions, showTimeline, timeline]); const formatColumns = React.useMemo((): Utility.ColumnMap => { const copyColumns: Utility.ColumnMap = {}; @@ -584,6 +592,17 @@ export const Metrics: React.FunctionComponent = ({ } ], [dot, formatColumns, fullscreen, metrics, wuid]); + const setShowMetricOptionsHook = React.useCallback((show: boolean) => { + setShowMetricOptions(show); + scopesTable + .metrics(metrics, options, timelineFilter, scopeFilter) + .render(() => { + updateScopesTable(selectedMetrics); + }) + ; + + }, [metrics, options, scopeFilter, scopesTable, selectedMetrics, timelineFilter, updateScopesTable]); + return @@ -618,13 +637,13 @@ export const Metrics: React.FunctionComponent = ({ /> - + - + } />; diff --git a/esp/src/src-react/components/MetricsPropertiesTables.tsx b/esp/src/src-react/components/MetricsPropertiesTables.tsx index 031b5daf8ad..470516b55f7 100644 --- a/esp/src/src-react/components/MetricsPropertiesTables.tsx +++ b/esp/src/src-react/components/MetricsPropertiesTables.tsx @@ -6,13 +6,19 @@ import nlsHPCC from "src/nlsHPCC"; import { AutosizeHpccJSComponent } from "../layouts/HpccJSAdapter"; interface MetricsPropertiesTablesProps { + scopesTableColumns?: string[]; scopes?: IScope[]; } export const MetricsPropertiesTables: React.FunctionComponent = ({ + scopesTableColumns = [], scopes = [] }) => { + const sortByColumns = React.useMemo(() => { + return ["id", "type", "name", ...scopesTableColumns]; + }, [scopesTableColumns]); + // Props Table --- const propsTable = useConst(() => new Table() .columns([nlsHPCC.Property, nlsHPCC.Value, "Avg", "Min", "Max", "Delta", "StdDev", "SkewMin", "SkewMax", "NodeMin", "NodeMax"]) @@ -22,19 +28,34 @@ export const MetricsPropertiesTables: React.FunctionComponent { const props = []; scopes.forEach((item, idx) => { + const scopeProps = []; for (const key in item.__groupedProps) { const row = item.__groupedProps[key]; - props.push([row.Key, row.Value, row.Avg, row.Min, row.Max, row.Delta, row.StdDev, row.SkewMin, row.SkewMax, row.NodeMin, row.NodeMax]); + scopeProps.push([row.Key, row.Value, row.Avg, row.Min, row.Max, row.Delta, row.StdDev, row.SkewMin, row.SkewMax, row.NodeMin, row.NodeMax]); } + scopeProps.sort((l, r) => { + const lIdx = sortByColumns.indexOf(l[0]); + const rIdx = sortByColumns.indexOf(r[0]); + if (lIdx >= 0 && rIdx >= 0) { + return lIdx <= rIdx ? 
-1 : 1; + } else if (lIdx >= 0) { + return -1; + } else if (rIdx >= 0) { + return 1; + } + return 0; + }); if (idx < scopes.length - 1) { - props.push(["------------------------------", "------------------------------"]); + scopeProps.push(["------------------------------", "------------------------------"]); } + props.push(...scopeProps); }); + propsTable ?.data(props) ?.lazyRender() ; - }, [propsTable, scopes]); + }, [propsTable, scopes, sortByColumns]); return ; }; diff --git a/esp/src/src-react/components/Resources.tsx b/esp/src/src-react/components/Resources.tsx index 08797dc3315..d3cb0a73a53 100644 --- a/esp/src/src-react/components/Resources.tsx +++ b/esp/src/src-react/components/Resources.tsx @@ -117,7 +117,7 @@ export const Resources: React.FunctionComponent = ({ = ({ = ({ = ({ const [showZapForm, setShowZapForm] = React.useState(false); const [showThorSlaveLogs, setShowThorSlaveLogs] = React.useState(false); - const [showMessageBar, setShowMessageBar] = React.useState(false); - const dismissMessageBar = React.useCallback(() => setShowMessageBar(false), []); + const [messageBarContent, setMessageBarContent] = React.useState(); + const dismissMessageBar = React.useCallback(() => setMessageBarContent(undefined), []); + const showMessageBar = React.useCallback((content: MessageBarContent) => { + setMessageBarContent(content); + const t = window.setTimeout(function () { + dismissMessageBar(); + window.clearTimeout(t); + }, 2400); + }, [dismissMessageBar]); React.useEffect(() => { setJobname(workunit?.Jobname); @@ -69,7 +84,40 @@ export const WorkunitSummary: React.FunctionComponent = ({ }, [workunit]) }); + const nextWuid = React.useCallback((wuids: WUQuery.ECLWorkunit[]) => { + let found = false; + for (const wu of wuids) { + if (wu.Wuid !== wuid) { + pushUrl(`/workunits/${wu.Wuid}`); + found = true; + break; + } + } + if (!found) { + showMessageBar({ type: MessageBarType.warning, message: nlsHPCC.WorkunitNotFound }); + } + }, [showMessageBar, wuid]); + const buttons = React.useMemo((): ICommandBarItemProps[] => [ + { + key: "next", iconOnly: true, tooltipHostProps: { content: nlsHPCC.NextWorkunit }, iconProps: { iconName: "Previous" }, + onClick: () => { + const now = new Date(Date.now()); + const tomorrow = new Date(now.getTime() + (24 * 60 * 60 * 1000)); + workunitService.WUQuery({ StartDate: `${wuidToDate(wuid)}T${wuidToTime(wuid)}Z`, EndDate: tomorrow.toISOString(), Sortby: "Wuid", Descending: false, Count: 2 } as WUQuery.Request).then(response => { + nextWuid(response?.Workunits?.ECLWorkunit || []); + }).catch(err => logger.error(err)); + } + }, + { + key: "previous", iconOnly: true, tooltipHostProps: { content: nlsHPCC.PreviousWorkunit }, iconProps: { iconName: "Next" }, + onClick: () => { + workunitService.WUQuery({ EndDate: `${wuidToDate(wuid)}T${wuidToTime(wuid)}Z`, Count: 2 } as WUQuery.Request).then(response => { + nextWuid(response?.Workunits?.ECLWorkunit || []); + }).catch(err => logger.error(err)); + } + }, + { key: "divider_0", itemType: ContextualMenuItemType.Divider, onRender: () => }, { key: "refresh", text: nlsHPCC.Refresh, iconProps: { iconName: "Refresh" }, onClick: () => { @@ -91,15 +139,9 @@ export const WorkunitSummary: React.FunctionComponent = ({ Jobname: jobname, Description: description, Protected: _protected - }) - .then(_ => { - setShowMessageBar(true); - const t = window.setTimeout(function () { - setShowMessageBar(false); - window.clearTimeout(t); - }, 2400); - }) - .catch(err => logger.error(err)); + }).then(_ => { + showMessageBar({ type: 
MessageBarType.success, message: nlsHPCC.SuccessfullySaved }); + }).catch(err => logger.error(err)); } }, { @@ -163,7 +205,7 @@ export const WorkunitSummary: React.FunctionComponent = ({ key: "slaveLogs", text: nlsHPCC.SlaveLogs, disabled: !workunit?.ThorLogList, onClick: () => setShowThorSlaveLogs(true) }, - ], [_protected, canDelete, canDeschedule, canReschedule, canSave, description, jobname, refresh, refreshSavings, setShowDeleteConfirm, workunit, wuid]); + ], [_protected, canDelete, canDeschedule, canReschedule, canSave, description, jobname, nextWuid, refresh, refreshSavings, setShowDeleteConfirm, showMessageBar, workunit, wuid]); const serviceNames = React.useMemo(() => { return workunit?.ServiceNames?.Item?.join("\n") || ""; @@ -177,8 +219,8 @@ export const WorkunitSummary: React.FunctionComponent = ({ const potentialSavings = React.useMemo(() => { return exceptions.reduce((prev, cur) => { - if (isNumeric(cur.Priority)) { - prev += cur.Priority; + if (isNumeric(cur.Cost)) { + prev += cur.Cost; } return prev; }, 0) || 0; @@ -191,13 +233,9 @@ export const WorkunitSummary: React.FunctionComponent = ({ - {showMessageBar && - - {nlsHPCC.SuccessfullySaved} + {messageBarContent && + + {messageBarContent.message} } diff --git a/esp/src/src-react/components/Workunits.tsx b/esp/src/src-react/components/Workunits.tsx index 88b3236c38a..504f1f698c4 100644 --- a/esp/src/src-react/components/Workunits.tsx +++ b/esp/src/src-react/components/Workunits.tsx @@ -55,9 +55,6 @@ function formatQuery(_filter): { [id: string]: any } { if (filter.Type === true) { filter.Type = "archived workunits"; } - if (filter.Type === true) { - filter.Type = "archived workunits"; - } if (filter.Protected === true) { filter.Protected = "Protected"; } diff --git a/esp/src/src-react/components/forms/AddPackageMap.tsx b/esp/src/src-react/components/forms/AddPackageMap.tsx index 068c51aa3c5..c5e667a4815 100644 --- a/esp/src/src-react/components/forms/AddPackageMap.tsx +++ b/esp/src/src-react/components/forms/AddPackageMap.tsx @@ -1,6 +1,7 @@ import * as React from "react"; -import { Checkbox, DefaultButton, Dropdown, PrimaryButton, Stack, TextField, } from "@fluentui/react"; +import { Checkbox, DefaultButton, Dropdown, IDropdownOption, PrimaryButton, Stack, TextField, } from "@fluentui/react"; import { useForm, Controller } from "react-hook-form"; +import { FileSprayService } from "@hpcc-js/comms"; import { scopedLogger } from "@hpcc-js/util"; import * as WsPackageMaps from "src/WsPackageMaps"; import { TypedDropdownOption } from "../PackageMaps"; @@ -15,6 +16,7 @@ interface AddPackageMapValues { Target: string; Process: string; DaliIp: string; + RemoteStorage: string; Activate: boolean OverWrite: boolean; } @@ -25,10 +27,13 @@ const defaultValues: AddPackageMapValues = { Target: "", Process: "", DaliIp: "", + RemoteStorage: "", Activate: true, OverWrite: false }; +const fileSprayService = new FileSprayService({ baseUrl: "" }); + interface AddPackageMapProps { showForm: boolean; setShowForm: (_: boolean) => void; @@ -47,6 +52,14 @@ export const AddPackageMap: React.FunctionComponent = ({ const { handleSubmit, control, reset } = useForm({ defaultValues }); + const [remoteTargets, setRemoteTargets] = React.useState([]); + + React.useEffect(() => { + fileSprayService.GetRemoteTargets({}).then(response => { + setRemoteTargets(response?.TargetNames?.Item?.map(item => { return { key: item, text: item }; })); + }).catch(err => logger.error(err)); + }, []); + const closeForm = React.useCallback(() => { setShowForm(false); }, 
[setShowForm]); @@ -168,6 +181,21 @@ export const AddPackageMap: React.FunctionComponent = ({ value={value} />} /> + { + onChange(option.key); + }} + />} + />
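A few hunks below, Utility.ts replaces the old GPL-licensed alphanum.js chunking sort with a streaming comparator adapted from the MIT-licensed alphanum-sort package: it walks both strings once and, whenever both sides sit on a digit, compares the embedded numbers by sign, significant-digit count, value, and finally leading-zero count, falling back to character codes elsewhere. As an illustration of the core algorithm only (the shipped version is TypeScript and additionally skips leading whitespace and handles signs), here is a condensed, unsigned-only C++ rendition:

    #include <cassert>
    #include <cctype>
    #include <string>

    // Condensed sketch of the natural-order comparison in the new compare():
    // digit runs are compared as numbers, other characters by code.
    static int naturalCompare(const std::string & a, const std::string & b)
    {
        size_t ia = 0, ib = 0;
        while (ia < a.size() && ib < b.size())
        {
            bool da = std::isdigit((unsigned char)a[ia]) != 0;
            bool db = std::isdigit((unsigned char)b[ib]) != 0;
            if (da != db)
                return da ? -1 : 1;               // numbers sort before non-numbers
            if (!da)
            {
                if (a[ia] != b[ib])
                    return a[ia] < b[ib] ? -1 : 1;
                ++ia, ++ib;
                continue;
            }
            // Both sides are numbers: strip leading zeros, then compare the runs.
            size_t zsa = ia, zsb = ib;
            while (ia < a.size() && a[ia] == '0') ++ia;
            while (ib < b.size() && b[ib] == '0') ++ib;
            size_t sa = ia, sb = ib;
            while (ia < a.size() && std::isdigit((unsigned char)a[ia])) ++ia;
            while (ib < b.size() && std::isdigit((unsigned char)b[ib])) ++ib;
            if (ia - sa != ib - sb)               // more significant digits => larger number
                return ia - sa < ib - sb ? -1 : 1;
            int cmp = a.compare(sa, ia - sa, b, sb, ib - sb);
            if (cmp != 0)
                return cmp < 0 ? -1 : 1;
            if (sa - zsa != sb - zsb)             // equal value: more leading zeros sorts first
                return sa - zsa > sb - zsb ? -1 : 1;
        }
        if (a.size() - ia != b.size() - ib)       // otherwise the shorter string sorts first
            return a.size() - ia < b.size() - ib ? -1 : 1;
        return 0;
    }

    int main()
    {
        assert(naturalCompare("file2", "file10") < 0);  // 2 < 10, unlike plain string order
        assert(naturalCompare("a01", "a1") < 0);        // leading zeros break ties
        assert(naturalCompare("wu100", "wu100") == 0);
    }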
{ dojoConfig.currencyCode = info["currencyCode"] ?? ""; }); -const formatTwoDigits = d3Format(",.2f"); +export const formatTwoDigits = d3Format(",.2f"); const formatSixDigits = d3Format(",.6f"); export function formatCost(value): string { if (isNaN(value)) { diff --git a/esp/src/src/Utility.ts b/esp/src/src/Utility.ts index 86e29851547..15a55881a0c 100644 --- a/esp/src/src/Utility.ts +++ b/esp/src/src/Utility.ts @@ -336,115 +336,194 @@ export function isObjectEmpty(obj) { } return true; } +// ----------------------------------------------------------------------------------------------- +// Modified from alphanum-sort: https://github.com/TrySound/alphanum-sort © Bogdan Chadkin +// The MIT License (MIT) +const zero = "0".charCodeAt(0); +const plus = "+".charCodeAt(0); +const minus = "-".charCodeAt(0); + +function isWhitespace(code: number) { + return code <= 32; +} + +function isDigit(code: number) { + return 48 <= code && code <= 57; +} -/* alphanum.js (C) Brian Huisman - * Based on the Alphanum Algorithm by David Koelle - * The Alphanum Algorithm is discussed at http://www.DaveKoelle.com - * - * Distributed under same license as original - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* ******************************************************************** - * Alphanum sort() function version - case sensitive - * - Slower, but easier to modify for arrays of objects which contain - * string properties - * - */ -export function alphanum(a, b) { - function chunkify(t) { - const tz = []; - let x = 0; - let y = -1; - let n = false; - let i; - let j; - - // eslint-disable-next-line no-cond-assign - while (i = (j = t.charAt(x++)).charCodeAt(0)) { - // tslint:disable-next-line: triple-equals - const m = (i == 46 || (i >= 48 && i <= 57)); - if (m !== n) { - tz[++y] = ""; - n = m; +function isSign(code: number) { + return code === minus || code === plus; +} + +function compare(a, b, opts: { sign: boolean }) { + const checkSign = opts.sign; + let ia = 0; + let ib = 0; + const ma = a.length; + const mb = b.length; + let ca, cb; // character code + let za, zb; // leading zero count + let na, nb; // number length + let sa, sb; // number sign + let ta, tb; // temporary + let bias; + + while (ia < ma && ib < mb) { + ca = a.charCodeAt(ia); + cb = b.charCodeAt(ib); + za = zb = 0; + na = nb = 0; + sa = sb = true; + bias = 0; + + // skip over leading spaces + while (isWhitespace(ca)) { + ia += 1; + ca = a.charCodeAt(ia); + } + while (isWhitespace(cb)) { + ib += 1; + cb = b.charCodeAt(ib); + } + + // skip and save sign + if (checkSign) { + ta = a.charCodeAt(ia + 1); + if (isSign(ca) && isDigit(ta)) { + if (ca === minus) { + sa = false; + } + ia += 1; + ca = ta; + } + tb = b.charCodeAt(ib + 1); + if (isSign(cb) && isDigit(tb)) { + if (cb === minus) { + sb = false; + } + ib += 1; + cb = tb; } - tz[y] += j; } - return tz; - } - 
const aa = chunkify(a); - const bb = chunkify(b); - - for (let x = 0; aa[x] && bb[x]; x++) { - if (aa[x] !== bb[x]) { - const c = Number(aa[x]); - const d = Number(bb[x]); - // tslint:disable-next-line: triple-equals - if (c == aa[x] && d == bb[x]) { - return c - d; - } else return (aa[x] > bb[x]) ? 1 : -1; + // compare digits with other symbols + if (isDigit(ca) && !isDigit(cb)) { + return -1; + } + if (!isDigit(ca) && isDigit(cb)) { + return 1; + } + + // compare negative and positive + if (!sa && sb) { + return -1; + } + if (sa && !sb) { + return 1; } - } - return aa.length - bb.length; -} -/* ******************************************************************** - * Alphanum sort() function version - case insensitive - * - Slower, but easier to modify for arrays of objects which contain - * string properties - * - */ -export function alphanumCase(a, b) { - function chunkify(t) { - const tz = []; - let x = 0; - let y = -1; - let n = false; - let i; - let j; - - // eslint-disable-next-line no-cond-assign - while (i = (j = t.charAt(x++)).charCodeAt(0)) { - // tslint:disable-next-line: triple-equals - const m = (i == 46 || (i >= 48 && i <= 57)); // jshint ignore:line - if (m !== n) { - tz[++y] = ""; - n = m; + // count leading zeros + while (ca === zero) { + za += 1; + ia += 1; + ca = a.charCodeAt(ia); + } + while (cb === zero) { + zb += 1; + ib += 1; + cb = b.charCodeAt(ib); + } + + // count numbers + while (isDigit(ca) || isDigit(cb)) { + if (isDigit(ca) && isDigit(cb) && bias === 0) { + if (sa) { + if (ca < cb) { + bias = -1; + } else if (ca > cb) { + bias = 1; + } + } else { + if (ca > cb) { + bias = -1; + } else if (ca < cb) { + bias = 1; + } + } + } + if (isDigit(ca)) { + ia += 1; + na += 1; + ca = a.charCodeAt(ia); + } + if (isDigit(cb)) { + ib += 1; + nb += 1; + cb = b.charCodeAt(ib); } - tz[y] += j; } - return tz; - } - const aa = chunkify(a.toLowerCase()); - const bb = chunkify(b.toLowerCase()); - - for (let x = 0; aa[x] && bb[x]; x++) { - if (aa[x] !== bb[x]) { - const c = Number(aa[x]); - const d = Number(bb[x]); - // tslint:disable-next-line: triple-equals - if (c == aa[x] && d == bb[x]) { // jshint ignore:line - return c - d; - } else return (aa[x] > bb[x]) ? 1 : -1; + // compare number length + if (sa) { + if (na < nb) { + return -1; + } + if (na > nb) { + return 1; + } + } else { + if (na > nb) { + return -1; + } + if (na < nb) { + return 1; + } + } + + // compare numbers + if (bias) { + return bias; + } + + // compare leading zeros + if (sa) { + if (za > zb) { + return -1; + } + if (za < zb) { + return 1; + } + } else { + if (za < zb) { + return -1; + } + if (za > zb) { + return 1; + } + } + + // compare ascii codes + if (ca < cb) { + return -1; + } + if (ca > cb) { + return 1; } + + ia += 1; + ib += 1; + } + + // compare length + if (ma < mb) { + return -1; } - return aa.length - bb.length; + if (ma > mb) { + return 1; + } + return 0; } +// ----------------------------------------------------------------------------------------------- export function onDomMutate(domNode, callback, observerOpts) { observerOpts = observerOpts || { attributes: true, attributeFilter: ["style"] }; @@ -458,8 +537,10 @@ export function onDomMutate(domNode, callback, observerOpts) { observer.observe(domNode, observerOpts); } -export function alphanumCompare(l, r, caseInsensitive: boolean = true, reverse: boolean = true): number { - const cmp = caseInsensitive ? 
alphanumCase(l, r) : alphanum(l, r); +export function alphanumCompare(_l, _r, caseInsensitive: boolean = true, reverse: boolean = true): number { + const l = caseInsensitive && typeof _l === "string" ? _l.toLocaleLowerCase() : _l; + const r = caseInsensitive && typeof _r === "string" ? _r.toLocaleLowerCase() : _r; + const cmp = compare(l, r, { sign: false }); if (cmp !== 0) { return cmp * (reverse ? -1 : 1); } diff --git a/esp/src/src/nls/bs/hpcc.ts b/esp/src/src/nls/bs/hpcc.ts index 9cf35e59f1a..7a4097a5a93 100644 --- a/esp/src/src/nls/bs/hpcc.ts +++ b/esp/src/src/nls/bs/hpcc.ts @@ -56,8 +56,10 @@ AppendCluster: "Dodajte Klaster", Apply: "Primjenite", Apps: "Aplikacije", + ArchiveDFUWorkunit: "Arhiviraj DFU radnu jedinicu", ArchivedOnly: "Samo Arhiviran", ArchivedWarning: "Upozorenje: koristite kratak vremenski period. Ako koristite duži vremenski period, pretraživanje radnih jedinica može trajati duže od dozviljenog vremena za pretraživanje .", + ArchiveECLWorkunit: "Arhiviraj ECL radnu jedinicu", AreYouSureYouWantToResetTheme: "Jeste li sigurni da se želite vratiti na zadanu temu", Attach: "Prikačite", Attribute: "Atribut", @@ -69,6 +71,8 @@ AutoRefreshEvery: "Automatsko osvježivanje svakih x minuta", AutoRefreshIncrement: "Automatski korak osvježivanja", Back: "Nazad", + BackupDFUWorkunit: "Rezervna DFU radna jedinica", + BackupECLWorkunit: "Rezervna ECL radna jedinica", BannerColor: "Boja Reklamnog Bloka", BannerColorTooltip: "Promijenite boju pozadine gornje navigacije", BannerMessage: "Poruka za Reklamni Blok", @@ -87,6 +91,7 @@ BoundBy: "ograničen sa:", Branches: "Grane", BrowserStats: "Statistika Pregledača", + BuildDate: "Datum izgradnje", Busy: "Zauzet", CallerID: "ID Pozivaoca", Cancel: "Poništite", @@ -160,6 +165,7 @@ CopyWUIDs: "Kopiraj WUID-ove u klipbord", CopyWUIDToClipboard: "Kopirajte WUID u klipbord", Cost: "Cijena", + Costs: "Cijena", Count: "Izbrojte", CountFailed: "Brojanje nije uspjelo", CountTotal: "Ukupan broj", @@ -254,6 +260,7 @@ DownloadSelectionAsCSV: "Preuzmite odabir kao CSV", DownloadToCSV: "Preuzmite u CSV formatu", DownloadToCSVNonFlatWarning: "Napomena: preuzimanje datoteka koje sadrže ugniježđene skupove podataka kao podatke razdvojene zarezima možda neće biti formatirani kako se očekuje", + DownloadToDOT: "Preuzmi na DOT", DropZone: "Zona Prijema", DueToInctivity: "Bićete odjavljeni iz svih ECL Watch sesija za 3 minuta zbog neaktivnosti.", Duration: "Trajanje", @@ -329,6 +336,7 @@ FileParts: "Dio Datoteke", FilePath: "Lokacija Datoteke", FilePermission: "Dozvola Za Pristup Datoteci", + FilePermissionError: "Došlo je do greške prilikom pristupa datoteci dozvola", Files: "Datoteke", FileScopeDefaultPermissions: "Unaprijed Definisane Dozvole za Prostor Datoteka", FileScopes: "Skop Datoteka", @@ -367,12 +375,14 @@ GetDFSCSV: "DFS CSV", GetDFSMap: "DFS Mapa", GetDFSParents: "DFS Roditelji", + GetLastServerMessage: "Preuzmi posljednju poruku servera", GetLogicalFile: "Logički Fajl", GetLogicalFilePart: "Logički Dio Fajla", GetPart: "Dobavite Dio", GetProtectedList: "Zaštićena Lista", GetSoftwareInformation: "Želite li dobiti informacije o softveru", GetValue: "Vrijednost", + GetVersion: "Preuzmi verziju", Graph: "Graf", GraphControl: "Kontrola Grafova", Graphs: "Grafikoni", @@ -437,6 +447,7 @@ Largest: "Najveći", LargestFile: "Najveća Datoteka", LargestSize: "Najveća veličina", + LastAccessed: "Posljednji pristup", LastEdit: "Poslednja Izmjena", LastEditedBy: "Autor Poslednje Izmjene", LastEditTime: "Vrijeme Poslednje Izmjene", @@ -446,6 +457,7 @@ 
LastNHours: "Posljednjih N Sati", LastNRows: "Posljednjih N Redova", LastRun: "Zadnji Ran", + LatestReleases: "Najnovija izdanja", Layout: "Raspored", LDAPWarning: "Greška LDAP Servica: ‘Previše korisnika’ - Molimo koristite filter.", LearnMore: "Naučite više", @@ -573,6 +585,7 @@ NextSelection: "Slijedeća Selekcija", NoCommon: "Nema uobičajenog", NoContent: "(Bez sadržaja)", + NoContentPleaseSelectItem: "Nema sadržaja - odaberite stavku", noDataMessage: "...Nema Redova...", Node: "Čvor (Node)", NodeGroup: "Grupa Čvorova", @@ -673,6 +686,7 @@ PlaceholderFirstName: "Jovan", PlaceholderLastName: "Smit", Platform: "Platforma", + PlatformBuildIsNNNDaysOld: "Verzija platforme je stara NNN dana", Playground: "Igralište", PleaseEnableCookies: "ECL Watch zahtijeva da kolačići budu omogućeni za nastavak.", PleaseEnterANumber: "Unestite Broj 1 -", @@ -685,10 +699,13 @@ PleaseSelectATopologyItem: "Izaberite ciljnu platformu, servis ili mašinu.", PleaseSelectAUserOrGroup: "Izaberite Korisnika ili Grupu zajedno sa Imenom Datoteke", PleaseSelectAUserToAdd: "Izaberite korisnika kojeg želite da dodate", + PleaseUpgradeToLaterPointRelease: "Molimo nadogradite na kasnije izdanje", Plugins: "Dopune", + PodName: "Naziv pod-a", Pods: "Grupe Kontejnera", PodsAccessError: "Nije moguće dohvatiti listu Podova", Port: "Port", + PotentialSavings: "Potencijalne uštede", Prefix: "Prefiks", PrefixPlaceholder: "filename{:length}, filesize{:[B|L][1-8]}", Preflight: "Provjera prije isporuke", @@ -760,6 +777,7 @@ RemoteCopy: "Kopija sa udaljenog servera", RemoteDali: "Daleki Dali", RemoteDaliIP: "Daleki Dali IP Adresa", + RemoteStorage: "Udaljena pohrana", Remove: "Uklonite", RemoveAttributeQ: "Izabrani atribut će biti uklonjen. Da li ste sigurni da to želite?", RemoveAtttributes: "Uklonite Atribut(e)", @@ -792,6 +810,8 @@ Restarted: "Ponovo Pokternut", Restarts: "Ponovno pokretanje", Restore: "Vratite na Staro Stanje", + RestoreDFUWorkunit: "Vrati DFU radnu jedinicu", + RestoreECLWorkunit: "Vrati ECL radnu jedinicu", Restricted: "Ograničen", Resubmit: "Ponovo Podnesite", Resubmitted: "Ponovo Poslat", @@ -812,6 +832,7 @@ Sample: "Primjer", SampleRequest: "Primjer Zahtjeva", SampleResponse: "Primjer Odgovora", + Sasha: "Saša", Save: "Sačuvajte", Scope: "Područje", SearchResults: "Rezultati Pretraživanja", @@ -821,6 +842,8 @@ SecurityWarning: "Sigurnosno Upozorenje", SeeConfigurationManager: "Pogledajte konfiguiraciju", SelectA: "Odaberite", + SelectAnOption: "Odaberite opciju", + Selected: "Odabrano", SelectEllipsis: "Odaberite ...", SelectPackageFile: "Izaberi Paket", SelectValue: "Odaberite vrijednost", @@ -878,6 +901,7 @@ Starting: "Polazak", StartTime: "Vrijeme početka", State: "Stanje", + Statistics: "Statistika", Stats: "Statistike", Status: "Status", Stopped: "Zaustavljen", @@ -918,6 +942,7 @@ TargetClustersLegacy: "Ciljni Klaster (Kako je ranije bilo)", TargetName: "Naziv Cilja", TargetNamePlaceholder: "neko::logicko::ime", + TargetPlane: "Destinacija za posao", TargetRowTagRequired: "Morate označiti ciljni red u tabeli", Targets: "Ciljne Platforme", TargetScope: "Ciljni Opseg", diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index 5d22070f349..9d413f84644 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -586,6 +586,7 @@ export = { Newest: "Newest", NewPassword: "New Password", NextSelection: "Next Selection", + NextWorkunit: "Next Workunit", NoContent: "(No content)", NoContentPleaseSelectItem: "No content - please select an item", NoCommon: "No Common", @@ -716,6 +717,7 @@ 
export = { PressCtrlCToCopy: "Press ctrl+c to copy.", Preview: "Preview", PreviousSelection: "Previous Selection", + PreviousWorkunit: "Previous Workunit", PrimaryLost: "Primary Lost", PrimaryMonitoring: "Primary Monitoring", Priority: "Priority", @@ -835,6 +837,7 @@ export = { Save: "Save", Scope: "Scope", SearchResults: "Search Results", + Seconds: "Seconds", SecondsRemaining: "Seconds Remaining", Security: "Security", SecurityWarning: "Security Warning", @@ -959,6 +962,7 @@ export = { TimeMaxTotalExecuteMinutes: "Time Max Total Execute Minutes", TimeMeanTotalExecuteMinutes: "Time Mean Total Execute Minutes", TimeMinTotalExecuteMinutes: "Time Min Total Execute Minutes", + TimePenalty: "Time Penalty", TimeStamp: "Time Stamp", TimeSeconds: "Time (Seconds)", TimeStarted: "Time Started", @@ -1126,6 +1130,7 @@ export = { WildcardFilter: "Wildcard Filter", Workflows: "Workflows", Workunit: "Workunit", + WorkunitNotFound: "Workunit not found", Workunits: "Workunits", WorkUnitScopeDefaultPermissions: "Workunit Scope Default Permissions", Wrap: "Wrap", diff --git a/esp/src/src/nls/hr/hpcc.ts b/esp/src/src/nls/hr/hpcc.ts index 8a6cbfee0cf..72c1c4b2558 100644 --- a/esp/src/src/nls/hr/hpcc.ts +++ b/esp/src/src/nls/hr/hpcc.ts @@ -56,8 +56,10 @@ AppendCluster: "Dodajte Klaster", Apply: "Primjenite", Apps: "Aplikacije", + ArchiveDFUWorkunit: "Arhiviraj radnu jedinicu DFU", ArchivedOnly: "Samo Arhiviran", ArchivedWarning: "Upozorenje: koristite kratak vremenski period. Ako koristite duži vremenski period, pretraživanje radnih jedinica može trajati duže od dozviljenog vremena za pretraživanje .", + ArchiveECLWorkunit: "Arhiviraj radnu jedinicu ECL", AreYouSureYouWantToResetTheme: "Jeste li sigurni da se želite vratiti na zadanu temu", Attach: "Prikačite", Attribute: "Atribut", @@ -69,6 +71,8 @@ AutoRefreshEvery: "Automatsko osvježivanje svakih x minuta", AutoRefreshIncrement: "Automatski korak osvježivanja", Back: "Natrag", + BackupDFUWorkunit: "Sigurnosna DFU radna jedinica", + BackupECLWorkunit: "Sigurnosna ECL radna jedinica", BannerColor: "Boja Reklamnog Bloka", BannerColorTooltip: "Promijenite boju pozadine gornje navigacije", BannerMessage: "Poruka za Reklamni Blok", @@ -87,6 +91,7 @@ BoundBy: "ograničen sa:", Branches: "Grane", BrowserStats: "Statistika Preglednika", + BuildDate: "Datum gradnje", Busy: "Zauzet", CallerID: "ID Pozivatelja", Cancel: "Poništite", @@ -160,6 +165,7 @@ CopyWUIDs: "Kopiraj WUID-ove u međuspremnik", CopyWUIDToClipboard: "Kopirajte WUID u međuspremnik", Cost: "Cijena", + Costs: "Cijena", Count: "Izbrojte", CountFailed: "Brojanje nije uspjelo", CountTotal: "Ukupan broj", @@ -254,6 +260,7 @@ DownloadSelectionAsCSV: "Preuzmite odabir kao CSV", DownloadToCSV: "Preuzmite u CSV formatu", DownloadToCSVNonFlatWarning: "Napomena: preuzimanje datoteka koje sadrže ugniježđene skupove podataka kao podatke odvojene zarezima možda neće biti formatirane kako se očekuje", + DownloadToDOT: "Preuzmi na DOT", DropZone: "Zona Prijema", DueToInctivity: "Bit ćete odjavljeni iz svih ECL Watch sjednica za 3 minute zbog neaktivnosti.", Duration: "Trajanje", @@ -329,6 +336,7 @@ FileParts: "Dio Datoteke", FilePath: "Lokacija Datoteke", FilePermission: "Dozvola Za Pristup Datoteci", + FilePermissionError: "Došlo je do pogreške prilikom pristupa fajlu sa dozvolama", Files: "Datoteke", FileScopeDefaultPermissions: "Unaprijed Definisane Dozvole za Prostor Datoteka", FileScopes: "Skop Datoteka", @@ -367,12 +375,14 @@ GetDFSCSV: "DFS CSV", GetDFSMap: "DFS Mapa", GetDFSParents: "DFS Roditelji", + 
GetLastServerMessage: "Dohvati posljednju poruku poslužitelja", GetLogicalFile: "Logička Datoteka", GetLogicalFilePart: "Logički Dio Datoteke", GetPart: "Dobavite Dio", GetProtectedList: "Zaštićeni Popis", GetSoftwareInformation: "Želite li dobiti informacije o softveru", GetValue: "Vrijednost", + GetVersion: "Preuzmi verziju", Graph: "Graf", GraphControl: "Kontrola Grafikona", Graphs: "Grafikoni", @@ -437,6 +447,7 @@ Largest: "Najveći", LargestFile: "Najveća Datoteka", LargestSize: "Najveća veličina", + LastAccessed: "Zadnji pristup", LastEdit: "Posljednja Izmjena", LastEditedBy: "Autor Zadnje Izmjene", LastEditTime: "Vrijeme Posljednje Izmjene", @@ -446,6 +457,7 @@ LastNHours: "Posljednjih N Sati", LastNRows: "Posljednjih N Redova", LastRun: "Zadnji Ran", + LatestReleases: "Najnovija izdanja", Layout: "Raspored", LDAPWarning: "Greška LDAP Servica: ‘Previše korisnika’ - Molimo koristite filter.", LearnMore: "Naučite više", @@ -573,6 +585,7 @@ NextSelection: "Sljedeći Odabir", NoCommon: "Nema uobičajenog", NoContent: "(Bez sadržaja)", + NoContentPleaseSelectItem: "Nema sadržaja - odaberite stavku", noDataMessage: "...Nema Redova...", Node: "Čvor (Node)", NodeGroup: "Grupa Čvorova", @@ -673,6 +686,7 @@ PlaceholderFirstName: "Jovan", PlaceholderLastName: "Smit", Platform: "Platforma", + PlatformBuildIsNNNDaysOld: "Verzija platforme je stara NNN dana", Playground: "Igralište", PleaseEnableCookies: "ECL Watch zahtijeva da kolačići budu omogućeni za nastavak.", PleaseEnterANumber: "Unestite Broj 1 -", @@ -685,10 +699,13 @@ PleaseSelectATopologyItem: "Izaberite ciljnu platformu, servis ili mašinu.", PleaseSelectAUserOrGroup: "Izaberite Korisnika ili Grupu zajedno sa Imenom Datoteke", PleaseSelectAUserToAdd: "Izaberite korisnika koga želite da dodate", + PleaseUpgradeToLaterPointRelease: "Molimo koristite noviji point rilis", Plugins: "Dodatci", + PodName: "Naziv pod-a", Pods: "Grupe Kontejnera", PodsAccessError: "Nije moguće dohvatiti popis podova", Port: "Port", + PotentialSavings: "Potencijalne uštede", Prefix: "Prefiks", PrefixPlaceholder: "filename{:length}, filesize{:[B|L][1-8]}", Preflight: "Provjera prije isporuke", @@ -760,6 +777,7 @@ RemoteCopy: "Kopija sa udaljenog servera", RemoteDali: "Daleki Dali", RemoteDaliIP: "Daleki Dali IP Adresa", + RemoteStorage: "Udaljena pohrana", Remove: "Uklonite", RemoveAttributeQ: "Izabrani atribut će biti uklonjen. 
Da li ste sigurni da to želite?", RemoveAtttributes: "Uklonite Atribut(e)", @@ -792,6 +810,8 @@ Restarted: "Ponovo Pokternut", Restarts: "Ponovno pokretanje", Restore: "Vratite na Staro Stanje", + RestoreDFUWorkunit: "Vrati DFU radnu jedinicu", + RestoreECLWorkunit: "Vrati ECL radnu jedinicu", Restricted: "Ograničen", Resubmit: "Ponovo Podnesite", Resubmitted: "Ponovo Poslat", @@ -812,6 +832,7 @@ Sample: "Primjer", SampleRequest: "Primjer Zahtjeva", SampleResponse: "Primjer Odgovora", + Sasha: "Saša", Save: "Sačuvajte", Scope: "Područje", SearchResults: "Rezultati Pretraživanja", @@ -821,6 +842,8 @@ SecurityWarning: "Sigurnosno Upozorenje", SeeConfigurationManager: "Pogledajte konfiguiraciju", SelectA: "Odaberite", + SelectAnOption: "Odaberite opciju", + Selected: "Odabran", SelectEllipsis: "Odaberite ...", SelectPackageFile: "Izaberi Paket", SelectValue: "Odaberite vrijednost", @@ -878,6 +901,7 @@ Starting: "Polazak", StartTime: "Vrijeme početka", State: "Stanje", + Statistics: "Statistika", Stats: "Statistike", Status: "Status", Stopped: "Zaustavljen", @@ -918,6 +942,7 @@ TargetClustersLegacy: "Ciljni klaster (kao ranije)", TargetName: "Naziv Cilja", TargetNamePlaceholder: "neko::logicko::ime", + TargetPlane: "Odrediste za rad", TargetRowTagRequired: "Morate označiti ciljni red u tabeli", Targets: "Ciljne Platforme", TargetScope: "Ciljni Opseg", diff --git a/esp/src/src/nls/sr/hpcc.ts b/esp/src/src/nls/sr/hpcc.ts index 62ef1336e4c..a1924b198c1 100644 --- a/esp/src/src/nls/sr/hpcc.ts +++ b/esp/src/src/nls/sr/hpcc.ts @@ -56,8 +56,10 @@ AppendCluster: "Додајте Кластер", Apply: "Примените", Apps: "Апликације", + ArchiveDFUWorkunit: "Архивирај ДФУ радну јединицу", ArchivedOnly: "Само Архивиран", ArchivedWarning: "Упозорење: користите кратак временски период. 
Ако користите дужи временски период, претраживање радних јединица може трајати дуже од дозвиљеног времена за претраживање.", + ArchiveECLWorkunit: "Архивирај ЕЦЛ радну јединицу", AreYouSureYouWantToResetTheme: "Јесте ли сигурни да се желите вратити на задану тему", Attach: "Причврстите", Attribute: "Атрибут", @@ -69,6 +71,8 @@ AutoRefreshEvery: "Аутоматско освеживање сваких к минута", AutoRefreshIncrement: "Аутоматски корак освежавања", Back: "Назад", + BackupDFUWorkunit: "Резервна ДФУ радна јединица", + BackupECLWorkunit: "Резервна ЕЦЛ радна јединица", BannerColor: "Боја Рекламног Блока", BannerColorTooltip: "Промените боју позадине горње навигације", BannerMessage: "Порука за Рекламни Блок", @@ -87,6 +91,7 @@ BoundBy: "ограничено од:", Branches: "Филијале", BrowserStats: "Статистика Прегледача", + BuildDate: "Датум изградње", Busy: "заузет", CallerID: "ИД позиваоца", Cancel: "Поништите", @@ -161,6 +166,7 @@ CopyWUIDs: "Копирај ВУИД-ове у клипборд", CopyWUIDToClipboard: "Копирај ВУИД у клипборд", Cost: "Цена", + Costs: "Цене", Count: "Избројте", CountFailed: "Бројање није успело", CountTotal: "Укупно броји", @@ -255,6 +261,7 @@ DownloadSelectionAsCSV: "Преузми избор као ЦСВ", DownloadToCSV: "Преузмите у ЦСВ формату", DownloadToCSVNonFlatWarning: "Напомена: преузимање датотека које садрже угнежђене скупове података као податке раздвојене зарезима можда неће бити форматирано како је очекивано", + DownloadToDOT: "Преузми на ДОТ", DropZone: "Зона Пријема", DueToInctivity: "Бићете одјављени из свих ЕЦЛ Вач сесија за 3 минута због неактивности.", Duration: "Трајање", @@ -330,6 +337,7 @@ FileParts: "Дио Датотеке", FilePath: "Локација Датотеке", FilePermission: "Дозвола за приступ фајлу", + FilePermissionError: "Дошло је до грешке приликом приступа датотеци дозвола", Files: "Датотеке", FileScopeDefaultPermissions: "Предефинисанe дозволе за простор Фајлова", FileScopes: "Скоп Датотека", @@ -368,12 +376,14 @@ GetDFSCSV: "ДФС ЦСВ", GetDFSMap: "ДФС Mапа", GetDFSParents: "ДФС Родитељи", + GetLastServerMessage: "Преузми последњу поруку сервера", GetLogicalFile: "Логички Фајл", GetLogicalFilePart: "Део Логичке Датотеке", GetPart: "Добавите Део", GetProtectedList: "Заштићена Листа", GetSoftwareInformation: "Желите ли да добијетe информације о софтверу", GetValue: "Вредност", + GetVersion: "Преузмите верзију", Graph: "Граф", GraphControl: "Контрола графика", Graphs: "Графикони", @@ -438,6 +448,7 @@ Largest: "Највећи", LargestFile: "Највећа Датотека", LargestSize: "Највећа величина", + LastAccessed: "Последњи приступ", LastEdit: "Последња измена", LastEditedBy: "Аутор последње измене", LastEditTime: "Време последњих промена", @@ -447,6 +458,7 @@ LastNHours: "Последњих Н Сати", LastNRows: "Последњих Н Редова", LastRun: "Задњи Ран", + LatestReleases: "Најновија издања", Layout: "Распоред", LDAPWarning: "<б>Грешка ЛДАП Сервица: ‘Превише корисника’ - Молимо користите филтер.", LearnMore: "Научите више", @@ -574,6 +586,7 @@ NextSelection: "Следећи Избор", NoCommon: "Није уобичајено", NoContent: "(Без садржаја)", + NoContentPleaseSelectItem: "Нема садржаја - изаберите ставку", noDataMessage: "...Нема Редова...", Node: "Чвор (Нод)", NodeGroup: "Група Чворова", @@ -674,6 +687,7 @@ PlaceholderFirstName: "Џон", PlaceholderLastName: "Смит", Platform: "Платформа", + PlatformBuildIsNNNDaysOld: "Верзија платформе је стара ННН дана", Playground: "Игралиште", PleaseEnableCookies: "ЕЦЛ Вач захтијева да колачићи буду омогућени за наставак", PleaseEnterANumber: "Унестите Број 1 -", @@ -686,10 +700,13 @@ 
PleaseSelectATopologyItem: "Одаберите циљну платформу, сервис или машину.", PleaseSelectAUserOrGroup: "Изаберите Корисника или Групу заједно са Именом Фајла", PleaseSelectAUserToAdd: "Изаберите корисника којeг желите да додате", + PleaseUpgradeToLaterPointRelease: "Пређите на касније издање", Plugins: "Допуне", + PodName: "Подназив", Pods: "Групе контејнера", PodsAccessError: "Није могуће преузети листу подова", Port: "Порт", + PotentialSavings: "Потенцијалне уштеде", Prefix: "Префикс", PrefixPlaceholder: "имедатотеке{:дужина}, величинадатотеке{:[B|L][1-8]}", Preflight: "Провера пре испоруке", @@ -761,6 +778,7 @@ RemoteCopy: "Копија са удаљеног сервера", RemoteDali: "Далеки Дали", RemoteDaliIP: "Далеки Дали ИП Адреса", + RemoteStorage: "Даљинско складиштење", Remove: "Уклоните", RemoveAttributeQ: "Изабрани атрибута ће бити уклоњен. Да ли сте сигурни да то желите?", RemoveAtttributes: "Уклоните Атрибут(е)", @@ -793,6 +811,8 @@ Restarted: "Поново Поктернут", Restarts: "Поново покрени", Restore: "Вратите на Старо Стање", + RestoreDFUWorkunit: "Врати ДФУ радну јединицу", + RestoreECLWorkunit: "Врати ЕЦЛ радну јединицу", Restricted: "Ограничен", Resubmit: "Поново Поднесите", Resubmitted: "Поново Послат", @@ -813,6 +833,7 @@ Sample: "Пример", SampleRequest: "Пример Захтева", SampleResponse: "Пример Одговора", + Sasha: "Саша", Save: "Сачувајте", Scope: "Подручје", SearchResults: "Резултати Претраживања", @@ -822,6 +843,8 @@ SecurityWarning: "Безбедносно упозорење", SeeConfigurationManager: "Погледајте конфигурацију", SelectA: "Изаберите", + SelectAnOption: "Изаберите опцију", + Selected: "Изабрано", SelectEllipsis: "Изабери ...", SelectPackageFile: "Изаберитe Пакет", SelectValue: "Изаберите вредност", @@ -879,6 +902,7 @@ Starting: "Полазак", StartTime: "Почетно време", State: "Стање", + Statistics: "Статистика", Stats: "Статистике", Status: "Статус", Stopped: "Заустављен", @@ -919,6 +943,7 @@ TargetClustersLegacy: "Циљни кластер (као раније)", TargetName: "Назив Циља", TargetNamePlaceholder: "неко::логићко::име", + TargetPlane: "Дестинација за посао", TargetRowTagRequired: "Морате ознацити циљни ред у табели", Targets: "Циљне Платформе", TargetScope: "Циљни Обим", diff --git a/esp/src/src/store/util/SimpleQueryEngine.ts b/esp/src/src/store/util/SimpleQueryEngine.ts index e7b8a9e9bc4..e9f74c8f863 100644 --- a/esp/src/src/store/util/SimpleQueryEngine.ts +++ b/esp/src/src/store/util/SimpleQueryEngine.ts @@ -2,7 +2,7 @@ import { alphanumCompare } from "../../Utility"; import { BaseRow, QueryOptions, QueryRequest, QuerySort } from "../Store"; function createSortFunc(sortSet: QuerySort, alphanumColumns: { [id: string]: boolean }) { - return typeof sortSet == "function" ? sortSet : function (a, b) { + return typeof sortSet == "function" ? sortSet : function (a: any, b: any) { for (let i = 0; sortSet[i]; i++) { const sort = sortSet[i]; if (alphanumColumns[sort.attribute as string]) { @@ -16,6 +16,9 @@ function createSortFunc(sortSet: QuerySort, alphanumColumn // valueOf enables proper comparison of dates aValue = aValue != null ? aValue.valueOf() : aValue; bValue = bValue != null ? bValue.valueOf() : bValue; + if (typeof aValue === "string" && typeof bValue === "string") { + return aValue.localeCompare(bValue, undefined, { sensitivity: "base" }) * (sort.descending ? -1 : 1); + } if (aValue != bValue) { return !!sort.descending == (aValue == null || aValue > bValue) ? 
-1 : 1; } diff --git a/fs/dafsserver/CMakeLists.txt b/fs/dafsserver/CMakeLists.txt index 7a144e10da9..3286906a4ea 100644 --- a/fs/dafsserver/CMakeLists.txt +++ b/fs/dafsserver/CMakeLists.txt @@ -64,7 +64,7 @@ target_link_libraries ( dafsserver dalibase thorhelper ftslavelib - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) IF (USE_OPENSSL) diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ed38a9c4aaa..be24dd5a427 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1315,9 +1315,10 @@ Add resource object Pass in a dictionary with me defined */}} {{- define "hpcc.addResources" }} -{{- if .me }} - {{- $limits := omit .me "cpu" }} - {{- $requests := pick .me "cpu" }} +{{- $resources := .me | default .defaults }} +{{- if $resources }} + {{- $limits := omit $resources "cpu" }} + {{- $requests := pick $resources "cpu" }} resources: {{- if $limits }} limits: @@ -1335,17 +1336,16 @@ Add resources object for stub pods Pass in dict with root, me and instances defined */}} {{- define "hpcc.addStubResources" -}} -{{- $stubInstanceResources := .root.Values.global.stubInstanceResources | default dict -}} -{{- $milliCPUPerInstance := $stubInstanceResources.cpu | default "50m" -}} -{{- $memPerInstance := $stubInstanceResources.memory | default "200Mi" -}} -{{- $milliCPUs := int (include "hpcc.k8sCPUStringToMilliCPU" $milliCPUPerInstance) -}} -{{- $bytes := int64 (include "hpcc.k8sMemoryStringToBytes" $memPerInstance) -}} -{{- $totalBytes := mul .instances $bytes }} +{{- $stubInstanceResources := .stubResources | default .root.Values.global.stubInstanceResources | default dict }} +{{- $milliCPUText := $stubInstanceResources.cpu | default "200m" }} +{{- $milliCPUs := int (include "hpcc.k8sCPUStringToMilliCPU" $milliCPUText) }} +{{- $memoryText := $stubInstanceResources.memory | default "50Mi" }} +{{- $memory := int64 (include "hpcc.k8sMemoryStringToBytes" $memoryText) }} resources: limits: - memory: {{ include "hpcc.bytesToK8sMemoryString" $totalBytes | quote }} + memory: {{ include "hpcc.bytesToK8sMemoryString" $memory | quote }} requests: - cpu: {{ printf "%dm" (mul .instances $milliCPUs) | quote }} + cpu: {{ printf "%dm" $milliCPUs | quote }} {{- end -}} {{/* diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 6a3f4b3ebb8..0323a6ace7b 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -172,7 +172,7 @@ spec: {{- if .useChildProcesses }} {{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} {{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxActive)) | indent 8 }} +{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} {{- end }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 77329a31e09..0ec46cbeb9d 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -27,6 +27,7 @@ Pass in dict with root and me */}} {{- define "hpcc.eclccServerConfigMap" -}} {{- $compileJobName := printf "compile-job-_HPCC_JOBNAME_" }} +{{- $gitPlane := .me.gitPlane | default (include "hpcc.getDefaultGitPlane" .root) }} apiVersion: v1 metadata: name: {{ .me.name }}-configmap @@ -37,6 +38,9 @@ data: {{ toYaml (omit .me "logging" "tracing") | indent 6 }} {{- include "hpcc.generateLoggingConfig" . 
| indent 6 }} {{- include "hpcc.generateTracingConfig" . | indent 6 }} +{{- if $gitPlane }} + gitPlane: {{ $gitPlane }} +{{- end }} queues: {{ include "hpcc.generateConfigMapQueues" .root | indent 6 }} {{ include "hpcc.generateVaultConfig" . | indent 6 }} @@ -180,7 +184,8 @@ spec: {{- if .useChildProcesses }} {{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} {{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxActive)) | indent 8 }} +{{- $defaultResources := dict "cpu" "1" "memory" "1Gi" }} +{{- include "hpcc.addResources" (dict "me" .timedChildResources "defaults" $defaultResources) | indent 8 }} {{- end }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 2e869205aa8..e58848e1ac2 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -95,7 +95,8 @@ spec: {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} {{- $omitResources := hasKey $.Values.global "omitResources" | ternary $.Values.global.omitResources $.Values.global.privileged }} {{- if not $omitResources }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" 1)) | indent 8 }} +{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} +{{- include "hpcc.addResources" (dict "me" .resources "defaults" $defaultResources) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index f765940ba3b..80dda41882e 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -148,7 +148,8 @@ spec: {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} {{- $omitResources := hasKey $.Values.global "omitResources" | ternary $.Values.global.omitResources $.Values.global.privileged }} {{- if not $omitResources }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" 1)) | indent 8 }} +{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} +{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} workingDir: /var/lib/HPCCSystems diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index b16ca975c78..399cc0e9da3 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -395,7 +395,7 @@ spec: {{- if $commonCtx.eclAgentUseChildProcesses }} {{- include "hpcc.addResources" (dict "me" .eclAgentResources) | indent 8 }} {{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxJobs)) | indent 8 }} +{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} {{- end }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} @@ -458,7 +458,7 @@ spec: {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} {{- $omitResources := hasKey $.Values.global "omitResources" | ternary $.Values.global.omitResources $.Values.global.privileged }} {{- if not $omitResources }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "instances" .maxGraphs)) | indent 8 }} +{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: diff --git 
a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json
index 22cdcf51205..17826c12af7 100644
--- a/helm/hpcc/values.schema.json
+++ b/helm/hpcc/values.schema.json
@@ -1421,12 +1421,19 @@
         "type": "string",
         "description": "The default repo version used if not supplied for the defaultRepo"
       },
+      "guardGitUpdates": {
+        "type": "boolean",
+        "description": "If enabled, all updates of the git repositories are protected by holding a lock in dali"
+      },
       "terminationGracePeriodSeconds": {
         "$ref": "#/definitions/terminationGracePeriodSeconds"
       },
       "resources": {
         "$ref": "#/definitions/resources"
       },
+      "timedChildResources": {
+        "$ref": "#/definitions/resources"
+      },
       "cost": {
         "$ref" : "#/definitions/componentCost"
       },
@@ -1613,6 +1620,9 @@
       "resources": {
         "$ref": "#/definitions/resources"
       },
+      "stubResources": {
+        "$ref": "#/definitions/resources"
+      },
       "jobMemory": {
         "$ref": "#/definitions/memory"
       },
@@ -1721,6 +1731,9 @@
       "channelResources": {
         "$ref": "#/definitions/resources"
       },
+      "topoResources": {
+        "$ref": "#/definitions/resources"
+      },
       "annotations": {
         "type": "object",
         "additionalProperties": { "type": "string" }
@@ -2593,6 +2606,9 @@
       "eclAgentResources": {
         "$ref": "#/definitions/resources"
       },
+      "stubResources": {
+        "$ref": "#/definitions/resources"
+      },
       "cost": {
         "$ref" : "#/definitions/componentCost"
       },
diff --git a/plugins/fileservices/fileservices.cpp b/plugins/fileservices/fileservices.cpp
index 7ccfd26ffc9..a8dea33ee96 100644
--- a/plugins/fileservices/fileservices.cpp
+++ b/plugins/fileservices/fileservices.cpp
@@ -611,7 +611,7 @@ static void blockUntilComplete(const char * label, IClientFileSpray &server, ICo
     VStringBuffer reason("Blocked by fileservice activity: %s, workunit: %s", label, wuid);
     setWorkunitState(ctx, WUStateBlocked, reason.str());
-
+    bool isStartTimeRecorded = false;
     while(true)
     {
@@ -630,6 +630,7 @@ static void blockUntilComplete(const char * label, IClientFileSpray &server, ICo
         }
         IConstDFUWorkunit & dfuwu = result->getResult();
+        DFUstate state = (DFUstate)dfuwu.getState();
         bool aborting = false;
         Owned<IWorkUnit> wu = ctx->updateWorkUnit(); // may return NULL
         if (wu.get()) { // if updatable (e.g. not hthor with no agent context)
@@ -645,11 +646,32 @@ static void blockUntilComplete(const char * label, IClientFileSpray &server, ICo
             stat_type costFileAccess = money2cost_type(dfuwu.getFileAccessCost());
             updateWorkunitStat(wu, SSTdfuworkunit, wuScope, StCostFileAccess, "", costFileAccess);
             wu->setApplicationValue(label, dfuwu.getID(), dfuwu.getSummaryMessage(), true);
+            if (!isStartTimeRecorded)
+            {
+                switch (state)
+                {
+                case DFUstate_started:
+                case DFUstate_aborting:
+                case DFUstate_monitoring:
+                case DFUstate_aborted:
+                case DFUstate_failed:
+                case DFUstate_finished:
+
+                    const char * whenStarted = dfuwu.getTimeStarted();
+                    if (!isEmptyString(whenStarted))
+                    {
+                        CDateTime startedAt;
+                        startedAt.setString(whenStarted);
+                        updateWorkunitStat(wu, SSTdfuworkunit, wuScope, StWhenStarted, 0, startedAt.getTimeStamp());
+                        isStartTimeRecorded = true;
+                    }
+                    break;
+                }
+            }
             wu->commit();
             wu.clear();
         }
-        DFUstate state = (DFUstate)dfuwu.getState();
         if (stateout)
             stateout->clear().append(dfuwu.getStateMessage());
         switch(state)
diff --git a/plugins/mongodb/README.md b/plugins/mongodb/README.md
index 6c9747080d9..c9f2317c0e4 100755
--- a/plugins/mongodb/README.md
+++ b/plugins/mongodb/README.md
@@ -125,10 +125,14 @@ Not every ECL or MongoDB datatype translates seamlessly to the other side.
| MongoDB datatypes | ECL equivalent |
 | ----------------- | -------------- |
 | b_date | STRING, INTEGER |
-| b_regex | Unsupported |
-| b_timestamp | Unsupported |
+| b_regex | {String pattern, String options} |
+| b_timestamp | {Unsigned t, Unsigned i} |
 
-The MongoDB date datatype can be converted to an integer in MongoDB or it will automatically be converted to a STRING by the plugin. Typically Dates before 1970 get returned by MongoDB as INTEGERS. Also, Unsigned Integers are unsupported in MongoDB. This means that in order to insert UINTEGERs into the database the plugin converts them to b_int64 which is a 64 bit signed integer.
+The MongoDB date datatype can be converted to an integer in MongoDB, or it will automatically be converted to a STRING by the plugin. Dates before 1970 are typically returned by MongoDB as INTEGERs.
+
+Because the regex and timestamp types are returned by MongoDB as objects, ECL records that map to these types are defined in the mongodb.ecllib file for your use. For information about the regex and timestamp types, see the [Manual](https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/#bson-data-types-and-associated-representations).
+
+Unsigned Integers are unsupported in MongoDB. This means that in order to insert UINTEGERs into the database, the plugin converts them to b_int64, which is a 64-bit signed integer.
 
 ### Inserting Documents
 
diff --git a/plugins/mongodb/examples/mongodb-test.ecl b/plugins/mongodb/examples/mongodb-test.ecl
index 08af1447304..902dbe4cf34 100644
--- a/plugins/mongodb/examples/mongodb-test.ecl
+++ b/plugins/mongodb/examples/mongodb-test.ecl
@@ -57,6 +57,18 @@ layoutDates := {STRING bucket_start_date, STRING bucket_end_date};
 layoutEmployee := {INTEGER1 id, STRING25 first, STRING25 last, REAL salary};
 layoutperson := {String username, String address, String email};
 
+layoutRegex := RECORD
+    STRING name;
+    INTEGER uniqueID;
+    mongodb.regexType regex;
+END;
+
+layoutTimestamp := RECORD
+    STRING name;
+    INTEGER uniqueID;
+    mongodb.timestampType timestamp;
+END;
+
 // Example/Test functions
 
 // Returns the unique _id and name of every document in the listingsAndReviews collection
@@ -161,6 +173,16 @@ dataset(layoutEmployee) findInfo(BOOLEAN mybool) := EMBED(mongodb : user(user),
 );
 ENDEMBED;
 
+// Gets all the documents from the regexTest collection for testing the conversion of MongoDB regex data to ECL
+dataset(layoutRegex) getRegex() := EMBED(mongodb : user(user), password(pwd), server(server), database('mydb'), collection('regexTest'))
+    find({});
+ENDEMBED;
+
+// Gets all the documents from the timestampTest collection for testing the conversion of MongoDB timestamp data to ECL
+dataset(layoutTimestamp) getTimestamp() := EMBED(mongodb : user(user), password(pwd), server(server), database('mydb'), collection('timestampTest'))
+    find({});
+ENDEMBED;
+
 // $or is not allowed in the M0 tier of MongoDB atlas
 INTEGER ppl := 8;
 // Matches all the documents that match either expression. Then it groups them by the number of beds they have and counts the number of documents in each group.
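(Editorial aside: to make the new b_regex/b_timestamp mapping concrete, here is a minimal standalone sketch of the unwrapping idea — the `$regularExpression`/`$timestamp` wrapper is dropped and only its child object is kept, which the `regexType`/`timestampType` records then map onto. `unwrapEjsonType` is a hypothetical helper written for this note, not the plugin's parser; the real code works incrementally on a streaming buffer, as the mongodbembed.cpp hunk further down shows, and makes the same flat-child-object assumption as this sketch.)

```cpp
#include <iostream>
#include <string>

// Hypothetical helper for illustration only: given an EJSON value such as
// {"$timestamp": {"t": 5, "i": 1}}, keep just the child object, which the
// ECL records regexType/timestampType then map onto.
// Assumes a flat child object with no nested braces, as in these two types.
static std::string unwrapEjsonType(const std::string &ejson)
{
    size_t open = ejson.find('{', ejson.find(':')); // first '{' after the "$type" key
    size_t close = (open == std::string::npos) ? std::string::npos : ejson.find('}', open);
    if (close == std::string::npos)
        return ejson; // not a wrapped object; leave untouched
    return ejson.substr(open, close - open + 1);
}

int main()
{
    std::cout << unwrapEjsonType(R"({"$timestamp": {"t": 5, "i": 1}})") << "\n";
    // -> {"t": 5, "i": 1}
    std::cout << unwrapEjsonType(R"({"$regularExpression": {"pattern": "a*", "options": "i"}})") << "\n";
    // -> {"pattern": "a*", "options": "i"}
}
```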
@@ -193,6 +215,8 @@ SEQUENTIAL
     OUTPUT(insertMany(employeeDS), NAMED('InsertMany'));
     createIndex(1);
     OUTPUT(findInfo(mybool), NAMED('RemoveOnQuery'));
+    OUTPUT(getRegex(), NAMED('TestRegexSupport'));
+    OUTPUT(getTimestamp(), NAMED('TestTimestampSupport'));
     OUTPUT(findCountOR(nights,ppl), NAMED('OrCountAggregate'));
     OUTPUT('Done', Named('Status'));
 );
diff --git a/plugins/mongodb/mongodb.ecllib b/plugins/mongodb/mongodb.ecllib
index 576dabe89a2..39ea5e08450 100644
--- a/plugins/mongodb/mongodb.ecllib
+++ b/plugins/mongodb/mongodb.ecllib
@@ -26,3 +26,7 @@ EXPORT boolean supportsScript := true;
 EXPORT updateResultRecord := {INTEGER matched_count, INTEGER modified_count};
 EXPORT insertManyResultRecord := {INTEGER inserted_count};
 EXPORT deleteResultRecord := {INTEGER deleted_count};
+
+// For information about the regex and timestamp types: https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/#bson-data-types-and-associated-representations
+EXPORT regexType := {STRING pattern, STRING options};
+EXPORT timestampType := {UNSIGNED t, UNSIGNED i};
diff --git a/plugins/mongodb/mongodbembed.cpp b/plugins/mongodb/mongodbembed.cpp
index 9b7c3a548c6..6333734a8f1 100755
--- a/plugins/mongodb/mongodbembed.cpp
+++ b/plugins/mongodb/mongodbembed.cpp
@@ -149,17 +149,16 @@ namespace mongodbembed
             std::string key = std::string(start, end - start); // Get datatype
             result += std::string(row, lastBrkt - row); // Add everything before we went into nested document
             // Some data types are unsupported as they are not straightforward to deserialize
-            if (key == "$regularExpression")
+            // Regex and timestamp both get deserialized to their child objects
+            if (key == "$regularExpression" || key == "$timestamp")
             {
-                UNSUPPORTED("Regular Expressions"); // TO DO handle unsupported types by not throwing an exception.
-            }
-            else if (key == "$timestamp")
-            {
-                while (*end && *end != '}')
-                    end++; // Skip over timestamp
-                row = ++end;
+                // remove type identifier and create a nested object for the regex or timestamp values
+                while (*end && *end != '{')
+                    end++;
                 start = end;
-                result += "\"\"";
+                while (*end && *end != '}')
+                    end++;
+                result += std::string(start, ++end - start);
             }
             // Both of these get deserialized to strings and are surrounded by quotation marks
             else if (key == "$date" || key == "$oid")
@@ -192,14 +191,6 @@
                 while (*end && *end != '}')
                     end++; // Get out of both nested documents
                 end++;
-
-                while (*end && *end != '}')
-                    end++;
-                end++;
-
-                depth--;
-                row = end; // Set row to just after the nested document
-                start = end; // move start to the next place for parsing
             }
             else
             {
@@ -208,13 +199,6 @@
                     end++;
                 result += std::string(start, ++end - start); // Only add the data inside the quotation marks to result string
-
-                while (*end && *end != '}')
-                    end++; // Only have to get out of one nested document
-                end++;
-                depth--;
-                row = end; // Set row to just after the nested document
-                start = end; // move start to the next place for parsing
             }
         }
         else if (key == "$numberDouble" || key == "$numberDecimal" || key == "$numberLong")
         {
@@ -229,17 +213,23 @@
                 end++;
             result += std::string(start, end++ - start); // Only add the data inside the quotation marks to result string
-            while (*end && *end != '}')
-                end++; // Only have to get out of one nested document
-            end++;
-            depth--;
-            row = end;
-            start = end;
         }
         else
         {
             failx("EJSON datatype error: '%s' is not supported in the current version.", key.c_str());
         }
+
+        // Get out of nested object.
+        while (*end && *end != '}')
+            end++;
+
+        if (*end)
+            end++;
+        else
+            failx("Read past the end of stream while converting EJSON types to ECL.");
+        depth--;
+        row = end; // Set row to just after the nested document
+        start = end; // move start to the next place for parsing
     }
 
     /**
diff --git a/plugins/mysql/CMakeLists.txt b/plugins/mysql/CMakeLists.txt
index 313444d2a23..766cf2f2ec6 100644
--- a/plugins/mysql/CMakeLists.txt
+++ b/plugins/mysql/CMakeLists.txt
@@ -27,7 +27,7 @@ project(mysqlembed)
 if(MYSQLEMBED)
     ADD_PLUGIN(mysqlembed)
     if(MAKE_MYSQLEMBED)
-        find_package(MYSQL REQUIRED)
+        find_package(unofficial-libmysql REQUIRED)
         find_package(ZLIB REQUIRED)
         set( SRCS
@@ -41,7 +41,7 @@ if(MYSQLEMBED)
             ./../../rtl/nbcd
             ./../../common/deftype
             ./../../system/jlib
-            ${MYSQL_INCLUDE_DIR})
+            )
 
         add_definitions(-D_USRDLL -DMYSQLEMBED_EXPORTS)
@@ -58,10 +58,10 @@ if(MYSQLEMBED)
             CALC_DEPS)
         target_link_libraries( mysqlembed
-            ${MYSQL_LIBRARIES}
             eclrtl
             roxiemem
             jlib
+            unofficial::libmysql::libmysql
             ZLIB::ZLIB
         )
     endif()
diff --git a/plugins/mysql/mysqlembed.cpp b/plugins/mysql/mysqlembed.cpp
index af829a92ad3..31cc34a5172 100644
--- a/plugins/mysql/mysqlembed.cpp
+++ b/plugins/mysql/mysqlembed.cpp
@@ -16,8 +16,8 @@
 ############################################################################## */
 
 #include "platform.h"
-#include "mysql.h"
-#include "mysqld_error.h"
+#include <mysql/mysql.h>
+#include <mysql/mysqld_error.h>
 #include "jexcept.hpp"
 #include "jthread.hpp"
 #include "hqlplugins.hpp"
diff --git a/plugins/parquet/parquetembed.cpp b/plugins/parquet/parquetembed.cpp
index e247c3fb07c..ee6a9e53377 100644
--- a/plugins/parquet/parquetembed.cpp
+++ b/plugins/parquet/parquetembed.cpp
@@ -95,6 +95,42 @@ extern void fail(const char *message)
     rtlFail(0, msg.str());
 }
 
+/**
+ * @brief Utility function for getting the xpath or field name from an RtlFieldInfo object.
+ *
+ * @param outXPath The buffer for storing output.
+ * @param field RtlFieldInfo object storing metadata for field.
+ */
+void xpathOrName(StringBuffer &outXPath, const RtlFieldInfo *field)
+{
+    outXPath.clear();
+
+    if (field->xpath)
+    {
+        if (field->xpath[0] == xpathCompoundSeparatorChar)
+        {
+            outXPath.append(field->xpath + 1);
+        }
+        else
+        {
+            const char *sep = strchr(field->xpath, xpathCompoundSeparatorChar);
+
+            if (!sep)
+            {
+                outXPath.append(field->xpath);
+            }
+            else
+            {
+                outXPath.append(field->xpath, 0, static_cast<size32_t>(sep - field->xpath));
+            }
+        }
+    }
+    else
+    {
+        outXPath.append(field->name);
+    }
+}
+
 /**
  * @brief Constructs a ParquetReader for a specific file location.
*
@@ -107,7 +143,7 @@ ParquetReader::ParquetReader(const char *option, const char *_location, int _max
     : ParquetReader(option, _location, _maxRowCountInTable, _partitionFields, _activityCtx, nullptr) {}
 
 // Constructs a ParquetReader with the expected record layout of the Parquet file
-ParquetReader::ParquetReader(const char *option, const char *_location, int _maxRowCountInTable, const char *_partitionFields, const IThorActivityContext *_activityCtx, const RtlRecord *_expectedRecord)
+ParquetReader::ParquetReader(const char *option, const char *_location, int _maxRowCountInTable, const char *_partitionFields, const IThorActivityContext *_activityCtx, const RtlTypeInfo * _expectedRecord)
     : partOption(option), location(_location), expectedRecord(_expectedRecord)
 {
     maxRowCountInTable = _maxRowCountInTable;
@@ -274,14 +310,17 @@ void divide_row_groups(const IThorActivityContext *activityCtx, __int64 totalRow
 __int64 ParquetReader::readColumns(__int64 currTable)
 {
     auto rowGroupReader = queryCurrentTable(currTable); // Sets currentTableMetadata
-    for (int i = 0; i < expectedRecord->getNumFields(); i++)
+    int numFields = getNumFields(expectedRecord);
+    for (int i = 0; i < numFields; i++)
     {
-        int columnIndex = currentTableMetadata->schema()->ColumnIndex(expectedRecord->queryName(i));
+        StringBuffer fieldName;
+        xpathOrName(fieldName, expectedRecord->queryFields()[i]);
+        int columnIndex = currentTableMetadata->schema()->group_node()->FieldIndex(fieldName.str());
         if (columnIndex >= 0)
         {
             std::shared_ptr<arrow::ChunkedArray> column;
             reportIfFailure(rowGroupReader->Column(columnIndex)->Read(&column));
-            parquetTable.insert(std::make_pair(expectedRecord->queryName(i), column->chunk(0)));
+            parquetTable.insert(std::make_pair(fieldName.str(), column->chunk(0)));
         }
     }
 
@@ -814,7 +853,7 @@ void ParquetWriter::beginSet(const char *fieldName)
     }
     arrow::ArrayBuilder *childBuilder;
     arrow::FieldPath match = getNestedFieldBuilder(fieldName, childBuilder);
-    fieldBuilderStack.push_back(std::make_shared<ArrayBuilderTracker>(fieldName, childBuilder, CPNTSet, match));
+    fieldBuilderStack.push_back(std::make_shared<ArrayBuilderTracker>(fieldName, childBuilder, CPNTSet, std::move(match)));
 
     arrow::ListBuilder *listBuilder = static_cast<arrow::ListBuilder *>(childBuilder);
     reportIfFailure(listBuilder->Append());
@@ -833,7 +872,7 @@ void ParquetWriter::beginRow(const char *fieldName)
 {
     arrow::ArrayBuilder *childBuilder;
     arrow::FieldPath match = getNestedFieldBuilder(fieldName, childBuilder);
-    fieldBuilderStack.push_back(std::make_shared<ArrayBuilderTracker>(fieldName, childBuilder, CPNTDataset, match));
+    fieldBuilderStack.push_back(std::make_shared<ArrayBuilderTracker>(fieldName, childBuilder, CPNTDataset, std::move(match)));
 
     arrow::StructBuilder *structBuilder = static_cast<arrow::StructBuilder *>(childBuilder);
     reportIfFailure(structBuilder->Append());
@@ -1013,42 +1052,6 @@ void ParquetRowStream::stop()
     shouldRead = false;
 }
 
-/**
- * @brief Utility function for getting the xpath or field name from an RtlFieldInfo object.
- *
- * @param outXPath The buffer for storing output.
- * @param field RtlFieldInfo object storing metadata for field.
- */
-void ParquetRowBuilder::xpathOrName(StringBuffer &outXPath, const RtlFieldInfo *field) const
-{
-    outXPath.clear();
-
-    if (field->xpath)
-    {
-        if (field->xpath[0] == xpathCompoundSeparatorChar)
-        {
-            outXPath.append(field->xpath + 1);
-        }
-        else
-        {
-            const char *sep = strchr(field->xpath, xpathCompoundSeparatorChar);
-
-            if (!sep)
-            {
-                outXPath.append(field->xpath);
-            }
-            else
-            {
-                outXPath.append(field->xpath, 0, static_cast<size32_t>(sep - field->xpath));
-            }
-        }
-    }
-    else
-    {
-        outXPath.append(field->name);
-    }
-}
-
 /**
  * @brief Gets the current array index taking into account the nested status of the row.
  *
@@ -1961,7 +1964,6 @@ ParquetEmbedFunctionContext::ParquetEmbedFunctionContext(const IContextLogger &_
 bool ParquetEmbedFunctionContext::getBooleanResult()
 {
     UNIMPLEMENTED_X("Parquet Scalar Return Type BOOLEAN");
-    return false;
 }
 
 void ParquetEmbedFunctionContext::getDataResult(size32_t &len, void *&result)
@@ -1972,19 +1974,16 @@ void ParquetEmbedFunctionContext::getDataResult(size32_t &len, void *&result)
 double ParquetEmbedFunctionContext::getRealResult()
 {
     UNIMPLEMENTED_X("Parquet Scalar Return Type REAL");
-    return 0.0;
 }
 
 __int64 ParquetEmbedFunctionContext::getSignedResult()
 {
     UNIMPLEMENTED_X("Parquet Scalar Return Type SIGNED");
-    return 0;
 }
 
 unsigned __int64 ParquetEmbedFunctionContext::getUnsignedResult()
 {
     UNIMPLEMENTED_X("Parquet Scalar Return Type UNSIGNED");
-    return 0;
 }
 
 void ParquetEmbedFunctionContext::getStringResult(size32_t &chars, char *&result)
@@ -2036,7 +2035,6 @@ byte *ParquetEmbedFunctionContext::getRowResult(IEngineRowAllocator *_resultAllo
 size32_t ParquetEmbedFunctionContext::getTransformResult(ARowBuilder &rowBuilder)
 {
     UNIMPLEMENTED_X("Parquet Transform Result");
-    return 0;
 }
 
 /**
@@ -2229,10 +2227,7 @@ class ParquetEmbedContext : public CInterfaceOf<IEmbedContext>
     virtual IEmbedFunctionContext *createFunctionContextEx(ICodeContext *ctx, const IThorActivityContext *activityCtx, unsigned flags, const char *options) override
     {
         if (flags & EFimport)
-        {
             UNSUPPORTED("IMPORT");
-            return nullptr;
-        }
         else
             return new ParquetEmbedFunctionContext(ctx ? ctx->queryContextLogger() : queryDummyContextLogger(), activityCtx, options, flags);
     }
@@ -2240,7 +2235,6 @@ class ParquetEmbedContext : public CInterfaceOf<IEmbedContext>
     virtual IEmbedServiceContext *createServiceContext(const char *service, unsigned flags, const char *options) override
     {
         throwUnexpected();
-        return nullptr;
     }
 };
 
diff --git a/plugins/parquet/parquetembed.hpp b/plugins/parquet/parquetembed.hpp
index 39a3e3c5aef..0c9fec5d774 100644
--- a/plugins/parquet/parquetembed.hpp
+++ b/plugins/parquet/parquetembed.hpp
@@ -45,8 +45,6 @@ extern void UNSUPPORTED(const char *feature) __attribute__((noreturn));
 extern void failx(const char *msg, ...)
__attribute__((noreturn)) __attribute__((format(printf, 1, 2)));
 extern void fail(const char *msg) __attribute__((noreturn));
 
-#define PARQUET_FILE_TYPE_NAME "parquet"
-
 #define reportIfFailure(st)                                \
     if (!st.ok())                                          \
     {                                                      \
@@ -107,8 +105,12 @@ struct ArrayBuilderTracker
     unsigned int childCount = 0;
     unsigned int childrenProcessed = 0;
 
-    ArrayBuilderTracker(const char *_nodeName, arrow::ArrayBuilder *_struct, PathNodeType _nodeType, arrow::FieldPath _nodePath)
-        : nodeName(_nodeName), nodeType(_nodeType), structPtr(_struct), nodePath(_nodePath) { if (nodeType == CPNTDataset) childCount == structPtr->num_children(); }
+    ArrayBuilderTracker(const char *_nodeName, arrow::ArrayBuilder *_struct, PathNodeType _nodeType, arrow::FieldPath && _nodePath)
+        : nodeName(_nodeName), nodeType(_nodeType), structPtr(_struct), nodePath(std::move(_nodePath))
+    {
+        if (nodeType == CPNTDataset)
+            childCount = structPtr->num_children();
+    }
 
     bool finishedChildren() { return childrenProcessed < childCount; }
 };
@@ -361,7 +363,7 @@ class PARQUETEMBED_PLUGIN_API ParquetReader
 {
 public:
     ParquetReader(const char *option, const char *_location, int _maxRowCountInTable, const char *_partitionFields, const IThorActivityContext *_activityCtx);
-    ParquetReader(const char *option, const char *_location, int _maxRowCountInTable, const char *_partitionFields, const IThorActivityContext *_activityCtx, const RtlRecord *_expectedRecord);
+    ParquetReader(const char *option, const char *_location, int _maxRowCountInTable, const char *_partitionFields, const IThorActivityContext *_activityCtx, const RtlTypeInfo *_expectedRecord);
     ~ParquetReader();
 
     arrow::Status processReadFile();
@@ -397,7 +399,7 @@ class PARQUETEMBED_PLUGIN_API ParquetReader
     size_t maxRowCountInTable = 0;                             // Max table size set by user.
     std::string partOption;                                    // Begins with either read or write and ends with the partitioning type if there is one i.e. 'readhivepartition'.
     std::string location;                                      // Full path to location for reading parquet files. Can be a filename or directory.
-    const RtlRecord *expectedRecord = nullptr;                 // Expected record layout of Parquet file. Only available when used in the platform i.e. not available when used as a plugin.
+    const RtlTypeInfo * expectedRecord = nullptr;              // Expected record layout of Parquet file. Only available when used in the platform i.e. not available when used as a plugin.
     const IThorActivityContext *activityCtx = nullptr;         // Context about the thor worker configuration.
     std::shared_ptr<arrow::dataset::Scanner> scanner = nullptr;        // Scanner for reading through partitioned files.
     std::shared_ptr<arrow::RecordBatchReader> rbatchReader = nullptr;  // RecordBatchReader reads a dataset one record batch at a time. Must be kept alive for rbatchItr.
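(Editorial aside: the ArrayBuilderTracker hunk above is easy to read past — the old constructor body used `==` where `=` was intended, so `childCount` was compared against `num_children()` and the result discarded, leaving the count at zero. A tiny self-contained reproduction of the pitfall, unrelated to Arrow itself:)

```cpp
#include <iostream>

// Minimal reproduction of the bug the ArrayBuilderTracker hunk above fixes:
// '==' where '=' was intended compiles cleanly, but the comparison result is
// discarded, so the member silently keeps its initial value.
struct Tracker
{
    unsigned childCount = 0;
    void initBuggy(unsigned n) { childCount == n; } // no-op comparison (compilers warn with -Wunused-value)
    void initFixed(unsigned n) { childCount = n; }  // the intended assignment, as in the patched constructor
};

int main()
{
    Tracker t;
    t.initBuggy(4);
    std::cout << t.childCount << "\n"; // prints 0 - the latent bug
    t.initFixed(4);
    std::cout << t.childCount << "\n"; // prints 4
}
```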
@@ -513,7 +515,6 @@ class PARQUETEMBED_PLUGIN_API ParquetRowBuilder : public CInterfaceOf roxieServer; if (port) { - const char *protocol = roxieFarm.queryProp("@protocol"); - bool serviceTLS = roxieFarm.getPropBool("@tls") || (protocol && streq(protocol, "ssl")); StringBuffer certFileName; StringBuffer keyFileName; StringBuffer passPhraseStr; diff --git a/roxie/roxiemem/CMakeLists.txt b/roxie/roxiemem/CMakeLists.txt index 1bc22b5c9ef..c0e020a43e2 100644 --- a/roxie/roxiemem/CMakeLists.txt +++ b/roxie/roxiemem/CMakeLists.txt @@ -57,7 +57,7 @@ endif() target_link_libraries ( roxiemem jlib - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) diff --git a/rtl/nbcd/CMakeLists.txt b/rtl/nbcd/CMakeLists.txt index 83fd323061c..98cf0264457 100644 --- a/rtl/nbcd/CMakeLists.txt +++ b/rtl/nbcd/CMakeLists.txt @@ -46,7 +46,7 @@ if (NOT PLUGIN) endif() target_link_libraries ( nbcd jlib - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) diff --git a/system/jhtree/CMakeLists.txt b/system/jhtree/CMakeLists.txt index dffe4b7caca..e7bc9e51727 100644 --- a/system/jhtree/CMakeLists.txt +++ b/system/jhtree/CMakeLists.txt @@ -68,7 +68,7 @@ target_link_libraries ( jhtree eclrtl zcrypt jlib - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) diff --git a/system/jlib/jsocket.cpp b/system/jlib/jsocket.cpp index a06315afcb4..4142a6bd125 100644 --- a/system/jlib/jsocket.cpp +++ b/system/jlib/jsocket.cpp @@ -1931,6 +1931,8 @@ void CSocket::readtms(void* buf, size32_t min_size, size32_t max_size, size32_t if (state != ss_open) THROWJSOCKEXCEPTION(JSOCKERR_not_opened); + // NB: The semantics here, effectively mean min_size is always >0, because it first waits on wait_read + // i.e. something has to be on socket to continue (or error/graceful close). CCycleTimer timer; while (true) { diff --git a/system/security/LdapSecurity/CMakeLists.txt b/system/security/LdapSecurity/CMakeLists.txt index 30d9aaba0db..a766fbe6e92 100644 --- a/system/security/LdapSecurity/CMakeLists.txt +++ b/system/security/LdapSecurity/CMakeLists.txt @@ -47,7 +47,6 @@ include_directories ( ./../../../dali/base ./../../../system/mp ./../../../common/workunit - ${OPENLDAP_INCLUDE_DIR} ) ADD_DEFINITIONS( -DLDAPSECURITY_EXPORTS -D_USRDLL ) @@ -58,7 +57,7 @@ target_link_libraries ( LdapSecurity jlib dalibase workunit - ${OPENLDAP_LIBRARIES} + PkgConfig::OPENLDAP ) diff --git a/system/security/securesocket/securesocket.cpp b/system/security/securesocket/securesocket.cpp index de953a56769..2b276bef1cc 100644 --- a/system/security/securesocket/securesocket.cpp +++ b/system/security/securesocket/securesocket.cpp @@ -834,6 +834,12 @@ void CSecureSocket::readtms(void* buf, size32_t min_size, size32_t max_size, siz sizeRead = 0; CCycleTimer timer; + // for semantics to work with a timeout, have to be non-blocking when reading SSL + // because wait_read can't guarantee that there are bytes ready to read, only that + // there are bytes pending on the underlying socket. + // We put in non-blocking mode, so that if after wait_read says there's something, + // SSL_read won't block and will respond with a SSL_ERROR_WANT_READ/SSL_ERROR_WANT_WRITE + // if not ready. 
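(Editorial aside: the comment block added above describes a standard non-blocking TLS read pattern. The sketch below is illustrative only — it is not CSecureSocket's implementation and ignores the min_size/timeout bookkeeping discussed in the next hunk — but it shows why SSL_ERROR_WANT_READ/WANT_WRITE must mean "wait, then retry" rather than "data" or "error": SSL_read can need more bytes from the wire, or even need to write first, before it can return application data.)

```cpp
#include <openssl/ssl.h>
#include <sys/select.h>

// Illustrative only: not CSecureSocket. With 'fd' (the descriptor under 'ssl')
// in non-blocking mode, SSL_read() may legitimately fail with WANT_READ even
// after the socket polled readable (e.g. only part of a TLS record arrived),
// or with WANT_WRITE (e.g. a renegotiation must send first).
int readWithRetry(SSL *ssl, int fd, void *buf, int len)
{
    while (true)
    {
        int rc = SSL_read(ssl, buf, len);
        if (rc > 0)
            return rc; // decrypted application data is available
        int err = SSL_get_error(ssl, rc);
        if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE)
            return -1; // real error, or the peer closed the connection
        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(fd, &fds);
        if (err == SSL_ERROR_WANT_READ)
            select(fd + 1, &fds, nullptr, nullptr, nullptr); // wait for readability, then retry
        else
            select(fd + 1, nullptr, &fds, nullptr, nullptr); // wait for writability, then retry
    }
}
```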
ScopedNonBlockingMode scopedNonBlockingMode; if (WAIT_FOREVER != timeoutMs) scopedNonBlockingMode.init(this); @@ -870,10 +876,13 @@ void CSecureSocket::readtms(void* buf, size32_t min_size, size32_t max_size, siz else { ssl_err = SSL_get_error(m_ssl, rc); + // NB: if timeout != WAIT_FOREVER, nonBlocking should always be true here if (nonBlocking && (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE)) // NB: SSL_read can cause SSL_ERROR_WANT_WRITE { - if (0 == min_size) // if here, implies nothing read, since it would have exited already in (rc > 0) block. - break; + // NB: we must be below min_size if here (otherwise would have exited in (rc > 0) block above) + + // To maintain consistent semantics with jsocket, we continue waiting even in the min_size = 0 case. + // NB: jsocket::readtms always blocks (wait_read) initially, meaning in effect min_size is always treated as >0 } else { diff --git a/system/xmllib/CMakeLists.txt b/system/xmllib/CMakeLists.txt index a85a2842472..f8f8c286b92 100644 --- a/system/xmllib/CMakeLists.txt +++ b/system/xmllib/CMakeLists.txt @@ -73,5 +73,5 @@ target_link_libraries ( xmllib ) IF (USE_CPPUNIT) - target_link_libraries (xmllib ${CPPUNIT_LIBRARIES}) + target_link_libraries (xmllib ${CppUnit_LIBRARIES}) ENDIF() diff --git a/testing/esp/esdlcmd/inputs/ws_usemethodname.ecm b/testing/esp/esdlcmd/inputs/ws_usemethodname.ecm index 6a51a81a902..34c2172cdb0 100644 --- a/testing/esp/esdlcmd/inputs/ws_usemethodname.ecm +++ b/testing/esp/esdlcmd/inputs/ws_usemethodname.ecm @@ -25,7 +25,7 @@ ESPresponse OrangeResponse bool Peels; }; -ESPservice[version("1"), use_method_name] WsUseMethodName +ESPservice[version("1"), generated_client_version("0.0"), use_method_name] WsUseMethodName { ESPmethod Unique(FooRequest, FooResponse); ESPmethod Apple(FruitRequest, FruitResponse); diff --git a/testing/esp/esdlcmd/inputs/ws_userequestname.ecm b/testing/esp/esdlcmd/inputs/ws_userequestname.ecm index c587485a189..db0812a3095 100644 --- a/testing/esp/esdlcmd/inputs/ws_userequestname.ecm +++ b/testing/esp/esdlcmd/inputs/ws_userequestname.ecm @@ -25,7 +25,7 @@ ESPresponse OrangeResponse bool Peels; }; -ESPservice[version("1")] WsUseRequestName +ESPservice[version("1"), generated_client_version("0.0")] WsUseRequestName { ESPmethod Unique(FooRequest, FooResponse); ESPmethod Apple(FruitRequest, FruitResponse); diff --git a/testing/helm/tests/resourced.yaml b/testing/helm/tests/resourced.yaml new file mode 100644 index 00000000000..a8d3a0053e6 --- /dev/null +++ b/testing/helm/tests/resourced.yaml @@ -0,0 +1,867 @@ +# Default values for hpcc. + +global: + # Settings in the global section apply to all HPCC components in all subcharts + + image: + ## It is recommended to name a specific version rather than latest, for any non-trivial deployment + ## For best results, the helm chart version and platform version should match, which is the default if version is + ## not specified. Do not override without good reason as undefined behavior may result. 
+ ## version: x.y.z + root: "hpccsystems" # change this if you want to pull your images from somewhere other than DockerHub hpccsystems + pullPolicy: IfNotPresent + ## If you need to provide credentials to pull your image, they should be added as a k8s secret, and the secret name provided here + # imagePullSecrets: xxx + + ## busybox image is used for some initialization/termination tasks - you can override the location here + #busybox: "myrepo/busybox:stable" + + ## It is possible (but not recommended) to change the uid/gid that the HPCC containers run under + ## user: + ## uid: 10000 + ## gid: 10001 + + # logging sets the default logging information for all components. Can be overridden locally + logging: + detail: 80 + + # tracing sets the default tracing information for all components. Can be overridden locally + tracing: + disabled: false + alwaysCreateTraceIds: true + + ## resource settings for stub components + #stubInstanceResources: + # memory: "200Mi" + # cpu: "20m" + + ## env adds default environment variables for all components. Environment settings can also be added or overridden locally + #env: + #- name: SMTPserver + # value: mysmtpserver + + # Specify a defaultEsp to control which eclservices service is returned from Std.File.GetEspURL, and other uses + # If not specified, the first esp component that exposes eclservices application is assumed. + # Can also be overridden locally in individual components + ## defaultEsp: eclservices + + egress: + ## If restricted is set, NetworkPolicies will include egress restrictions to allow connections from pods only to the minimum required by the system + ## Set to false to disable all egress policy restrictions (not recommended) + restricted: true + + ## The kube-system namespace is not generally labelled by default - to enable more restrictive egress control for dns lookups we need to be told the label + ## If not provided, DNS lookups on port 53 will be allowed to connect anywhere + ## The namespace may be labelled using a command such as "kubectl label namespace kube-system name=kube-system" + # kubeSystemLabel: "kube-system" + + ## To properly allow access to the kubectl API from pods that need it, the cidr of the kubectl endpoint needs to be supplied + ## This may be obtained via "kubectl get endpoints --namespace default kubernetes" + ## If these are not supplied, egress controls will allow access to any IPs/ports from any pod where API access is needed + # kubeApiCidr: 172.17.0.3/32 + # kubeApiPort: 7443 + + ## named egress sections defined here, can be referenced by components, or they can define their own egress section explicitly + #engineEgress: + #- to: + # - ipBlock: + # cidr: 10.9.8.7/32 + # ports: + # - protocol: TCP + # port: 443 + + + cost: + currencyCode: USD + # The following are example pricing based on standard Azure pricing and should be updated to reflect actual rates + perCpu: 0.0565000000001 # D64ds_v4 compute node ($2,639.68/month for 64 vCPU) + storageAtRest: 0.0208000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + storageReads: 0.00400000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + storageWrites: 0.0500000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + + # postJobCommand will execute at the end of a dynamically launched K8s job, + # when the main entrypoint process finishes, or if the readiness probes trigger a preStop event. + # This can be useful if injected sidecars are installed that need to be told to stop. 
+  # If they are not stopped, the pod continues running with the sidecar container only, in a "NotReady" state.
+  # An example of this is the Istio envoy sidecar. It can be stopped with the command below.
+  # Set postJobCommandViaSidecar to true if the command needs to run with privilege; this will enable the command
+  # to run as root in a sidecar in the same process space as other containers, allowing it to, for example, send signals
+  # to processes in sidecars
+  # misc:
+  #   postJobCommand: "curl -sf -XPOST http://127.0.0.1:15020/quitquitquit"
+  # Or example for linkerd
+  #   postJobCommand: "kill $(pgrep linkerd2-proxy)"
+  #   postJobCommandViaSidecar: true
+
+  ## visibilities section can be used to set labels, annotations and service type for any service with the specified visibility
+  visibilities:
+    cluster:
+      type: ClusterIP
+    local:
+      annotations:
+        # This annotation will make azure load balancer use an internal rather than an internet-visible address
+        # May want different values on different cloud providers or use-cases. For example on AWS you may want to use
+        #service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+        service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+      type: LoadBalancer
+      # If ingress is specified, an ingress Network Policy will be created for any pod implementing a service with this visibility
+      # Default allows ingress from anywhere, but more restrictive rules can be used if preferred.
+      # Ingress rules can also be overridden by individual services
+      ingress:
+      - {}
+    global:
+      #labels:
+      #  mylabel: "4"
+      type: LoadBalancer
+      ingress:
+      - {}
+      ## CIDRS allowed to access this service.
+      #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32]
+
+  # example expert section. The sysctl list will be applied to each pod in a privileged init container
+  # expert:
+  #   sysctl:
+  #   - kernel.dmesg_restrict=0
+
+# For pod placement instruction and examples please reference docs/placements.md
+# The following is for tolerations of Spot Node Pool on Azure. Other cloud providers
+# may have different taints for Spot Node Pool. The tolerations are harmless when
+# there is no taint on the node pool.
+#placements:
+#  - pods: ["all"]
+#    placement:
+#      tolerations:
+#      - key: "kubernetes.azure.com/scalesetpriority"
+#        operator: "Equal"
+#        value: "spot"
+#        effect: "NoSchedule"
+
+security:
+  eclSecurity:
+    # Possible values:
+    # allow - functionality is permitted
+    # deny - functionality is not permitted
+    # allowSigned - functionality permitted only if code signed
+    embedded: "allow"
+    pipe: "allow"
+    extern: "allow"
+    datafile: "allow"
+
+## storage:
+##
+## 1. If an engine component has the dataPlane property set, then that plane will be the default data location for that component.
+## 2. If there is a plane definition with a category of "data" then the first matching plane will be the default data location
+##
+## If a data plane contains the storageClass property then an implicit pvc will be created for that data plane.
+##
+## If plane.pvc is defined, a Persistent Volume Claim must exist with that name, storageClass and storageSize are not used.
+##
+## If plane.storageClass is defined, storageClassName:
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If set to "", choosing the default provisioner. (gp2 on AWS, standard on GKE, AWS & OpenStack)
+##
+## plane.forcePermissions=true is required by some types of provisioned
+## storage, where the mounted filing system has insufficient permissions to be
+## read by the hpcc pods.
Examples include using hostpath storage (e.g. on
+## minikube and docker for desktop), or using NFS mounted storage.
+
+storage:
+  planes:
+  #   name:
+  #   prefix:               # Root directory for accessing the plane (if pvc defined), or url to access plane.
+  #   category: data|dali|lz|dll|spill|temp # What category of data is stored on this plane?
+  #
+  #   For dynamic pvc creation:
+  #   storageClass: ''
+  #   storageSize: 1Gi
+  #
+  #   For persistent storage:
+  #   pvc:                  # The name of the persistent volume claim
+  #   forcePermissions: false
+  #   hosts: [ ]            # Inline list of hosts
+  #   hostGroup:            # Name of the host group for bare metal - must match the name of the storage plane.
+  #
+  #   Other options:
+  #   subPath:              # Optional sub directory within the prefix to use as the root directory
+  #   numDevices: 1         # number of devices that are part of the plane
+  #   secret:               # what secret is required to access the files. This could optionally become a list if required (or add secrets:).
+  #   defaultSprayParts: 4  # The number of partitions created when spraying (default: 1)
+  #   eclwatchVisible: true # Can the lz plane be visible from ECLWatch (default: true)
+  #   cost:                 # The storage cost
+  #     storageAtRest: 0.0135 # Storage at rest cost: cost per GiB/month
+  #   storageapi:           # Optional information to allow access to storage api
+  #     type: azurefile | azureblob
+  #     account:            # azure storage account name
+  #     secret:             # secret name (under secrets/storage) for accessing SAS token
+  #     containers: [ ]     # a list of containers
+
+  - name: dali
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/dalistorage"
+    category: dali
+  - name: sasha
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/sashastorage"
+    category: sasha
+  - name: dll
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/queries"
+    category: dll
+  - name: data
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/hpcc-data"
+    category: data
+  - name: mydropzone
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/mydropzone"
+    category: lz
+  - name: debug
+    disabled: False
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/debug"
+    category: debug
+
+## The certificates section can be used to enable cert-manager to generate TLS certificates for each component in the hpcc.
+## You must first install cert-manager to use this feature.
+## https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm
+##
+## The Certificate issuers are divided into "local" (those which will be used for local mutual TLS) and "public" those
+## which will be publicly accessible and therefore need to be recognized by browsers and/or other entities.
+##
+## Both public and local issuers have a spec section. The contents of the "spec" are documented in the cert-manager
+## "Issuer configuration" documentation. https://cert-manager.io/docs/configuration/#supported-issuer-types
+##
+## The default configuration is meant to provide reasonable functionality without additional dependencies.
+##
+## Public issuers can be tricky if you want browsers to recognize the certificates. This is a complex topic outside the scope
+## of this comment. The default for the public issuer generates self signed certificates. The expectation is that this will be
+## overridden by the configuration of an external certificate authority or vault in QA and production environments.
+##
+## The default for the local (mTLS) issuer is designed to act as our own local certificate authority.
We only need to recognize
+## what a component is, and that it belongs to this cluster.
+## But a kubernetes secret must be provided for the certificate authority key-pair. The default name for the secret
+## is "hpcc-local-issuer-key-pair". The secret is a standard kubernetes.io/tls secret and should provide data values for
+## "tls.crt" and "tls.key".
+##
+## The local issuer can also be configured to use an external certificate authority or vault.
+##
+certificates:
+  enabled: false
+  issuers:
+    local:
+      name: hpcc-local-issuer
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-local-issuer-key-pair
+    public:
+      name: hpcc-public-issuer
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        selfSigned: {}
+    vaultclient:
+      name: hpcc-vaultclient-issuer
+      enabled: false
+      ## domain: hpcc.example.com
+      rolePrefix: "hpcc-"
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-vaultclient-issuer-key-pair
+    remote:
+      name: hpcc-remote-issuer
+      ## set enabled to true if adding remoteClients for any components
+      enabled: false
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-remote-issuer-key-pair
+    signing: # intended to be used for signing/verification purposes only, e.g. by dafilesrv
+      name: hpcc-signing-issuer
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-signing-issuer-key-pair
+
+## The secrets section contains a set of categories, each of which contains a list of secrets.
The categories determine which
+## components have access to the secrets.
+## For each secret:
+##   name is the name that it is accessed by within the platform
+##   secret is the name of the secret that should be published
+secrets:
+  #timeout: 300 # timeout period for cached secrets. Should be similar to the k8s refresh period.
+
+  #Secret categories follow, remove the {} if a secret is defined in a section
+  storage: {}
+  ## Secrets that are required for accessing storage. Currently exposed in the engines, but in the future will
+  ## likely be restricted to esp (when it becomes the meta-data provider)
+  ## For example, to set the secret associated with the azure storage account "mystorageaccount" use
+  ##azure-mystorageaccount: storage-myazuresecret
+
+  authn: {}
+  ## Category to deploy authentication secrets to container, and to create a key name alias to reference those secrets
+  #ldapadmincredskey: "admincredssecretname"  ## Default k/v for LDAP authentication secrets
+  #testauthusercreds1: "testauthusercreds1"   ## Default k/v for test authentication secrets
+  #testauthusercreds2: "testauthusercreds2"   ## Default k/v for test authentication secrets
+
+  ecl: {}
+  ## Category for secrets published to all components that run ecl. These secrets are for use by internal
+  ## ECL processing. For example HTTPCALL and SOAPCALL have built in support for secrets that are not directly
+  ## accessible to users, that is, accessed directly via ECL code.
+
+  eclUser: {}
+  ## Category for secrets accessible via ecl code. These are secrets that users can access directly. Be cautious about
+  ## what secrets you add to this category as they are easily accessed by ECL code.
+
+  codeSign: {}
+  #gpg-private-key-1: codesign-gpg-key-1
+  #gpg-private-key-2: codesign-gpg-key-2
+
+  codeVerify: {}
+  #gpg-public-key-1: codesign-gpg-public-key-1
+  #gpg-public-key-2: codesign-gpg-public-key-2
+
+  system: {}
+  ## Category for secrets published to all components for system level usage
+
+  git: {}
+  ## Category to provide passwords for eclccserver to access private git repos
+
+## The vaults section mirrors the secret section but leverages vault for the storage of secrets.
+## There is an additional category for vaults named "eclUser". "eclUser" vault
+## secrets are readable directly from ECL code. Other secret categories are read internally
+## by system components and not exposed directly to ECL code.
+##
+## For each vault:
+##   name is the name that it is accessed by within the platform
+##   url is the url used to read a secret from the vault.
+##   kind is the type of vault being accessed, or the protocol to use to access the secrets
+##   client_secret is a kubernetes level secret that contains the client_token used to retrieve secrets.
+##     if a client_secret is not provided "vault kubernetes auth" will be attempted.
+
+vaults:
+  storage:
+  git:
+  authn:
+  ecl:
+    # vault using vault client certs or kubernetes auth depending on whether certificates.issuers.vaultclient.enabled is true
+    # to use approle authentication specify appRoleId and appRoleSecret
+    # - name: my-ecl-vault
+    #Note the data node in the URL is there for the REST APIs use.
The path inside the vault starts after /data
+    #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/ecl/${secret}
+    #   kind: kv-v2
+    #   namespace:
+  eclUser:
+    # vault using vault client certs or kubernetes auth depending on whether certificates.issuers.vaultclient.enabled is true
+    # to use approle authentication specify appRoleId and appRoleSecret
+    # - name: my-eclUser-vault
+    #Note the data node in the URL is there for the REST APIs use. The path inside the vault starts after /data
+    #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/eclUser/${secret}
+    #   kind: kv-v2
+  esp:
+
+  ## The keys for code signing may be imported from the vault. Multiple keys may be imported.
+  ## gpg keys may be imported as follows:
+  ## vault kv put secret/codeSign/gpg-private-key-1 passphrase=<passphrase> private=@<private key file>
+  ## vault kv put secret/codeSign/gpg-private-key-2 passphrase=<passphrase> private=@<private key file>
+  codeSign:
+  # - name: codesign-private-keys
+  #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/codeSign/${secret}
+  #   kind: kv-v2
+  #   namespace: mynamespace # for use with enterprise vaults segmented by namespaces
+  ## The keys for verifying signed code may be imported from the vault.
+  ## vault kv put secret/codeVerify/gpg-public-key-1 public=@<public key file>
+  ## vault kv put secret/codeVerify/gpg-public-key-2 public=@<public key file>
+  codeVerify:
+  # - name: codesign-public-keys
+  #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/codeVerify/${secret}
+  #   kind: kv-v2
+  #   namespace: mynamespace # for use with enterprise vaults segmented by namespaces
+
+bundles: []
+## Specifying bundles here will cause the indicated bundles to be downloaded and installed automatically
+## whenever an eclccserver pod is started
+# for example
+#   - name: DataPatterns
+
+# A dafilesrv 'stream' service is required to expose HPCC file access to 3rd parties (e.g. Spark / Java)
+# Access will only be granted to requests that have been signed by the DFUFileAccess service
+dafilesrv:
+- name: rowservice
+  disabled: true # disabled by default because requires cert-manager etc. (see certificates section)
+  application: stream
+  service:
+    servicePort: 7600
+    visibility: local
+
+# Enable if bare-metal systems require read access to this systems' data planes via ~remote::
+# If legacy ~foreign:: access is required, Dali will also need to be exposed via a service definition in the dali configuration
+# NB: ingress rules should be added to limit access.
+- name: direct-access
+  disabled: true
+  application: directio
+  service:
+    servicePort: 7200
+    visibility: local
+
+- name: spray-service
+  application: spray
+  service:
+    servicePort: 7300
+    visibility: cluster
+
+
+dali:
+- name: mydali
+  auth: none
+  services: # internal house keeping services
+    coalescer:
+      service:
+        servicePort: 8877
+      #interval: 2 # (hours)
+      #at: "* * * * *" # cron type schedule, i.e. Min(0-59) Hour(0-23) DayOfMonth(1-31) Month(1-12) DayOfWeek(0-6)
+      #minDeltaSize: 50 # (Kb) will not start coalescing until delta log is above this threshold
+      resources:
+        cpu: "1"
+        memory: "10G"
+
+  resources:
+    cpu: "2"
+    memory: "20G"
+
+sasha:
+  #disabled: true # disable all services.
Alternatively set sasha to null (sasha: null) + wu-archiver: + #disabled: true + service: + servicePort: 8877 + plane: sasha + #interval: 6 # (hours) + #limit: 1000 # threshold number of workunits before archiving starts (0 disables) + #cutoff: 8 # minimum workunit age to archive (days) + #backup: 0 # minimum workunit age to backup (days, 0 disables) + #at: "* * * * *" + #duration: 0 # (maxDuration) - Maximum duration to run WorkUnit archiving session (hours, 0 unlimited) + #throttle: 0 # throttle ratio (0-99, 0 no throttling, 50 is half speed) + #retryinterval: 7 # minimal time before retrying archive of failed WorkUnits (days) + #keepResultFiles: false # option to keep result files owned by workunits after workunit is archived + resources: + cpu: "1" + memory: "4Gi" + + dfuwu-archiver: + #disabled: true + service: + servicePort: 8877 + plane: sasha + #forcePermissions: false + #limit: 1000 # threshold number of DFU workunits before archiving starts (0 disables) + #cutoff: 14 # minimum DFU workunit age to archive (days) + #interval: 24 # minimum interval between running DFU recovery archiver (in hours, 0 disables) + #at: "* * * * *" # schedule to run DFU workunit archiver (cron format) + #duration: 0 # (maxDuration) maximum duration to run DFU WorkUnit archiving session (hours, 0 unlimited) + #throttle: 0 # throttle ratio (0-99, 0 no throttling, 50 is half speed) + resources: + cpu: "1" + memory: "4Gi" + + dfurecovery-archiver: + #disabled: true + #limit: 20 # threshold number of DFU recovery items before archiving starts (0 disables) + #cutoff: 4 # minimum DFU recovery item age to archive (days) + #interval: 12 # minimum interval between running DFU recovery archiver(in hours, 0 disables) + #at: "* * * * *" # schedule to run DFU recovery archiver (cron format) + resources: + cpu: "1" + memory: "4Gi" + + file-expiry: + #disabled: true + #interval: 1 + #at: "* 3 * * *" + #persistExpiryDefault: 7 + #expiryDefault: 4 + #user: sasha + resources: + cpu: "1" + memory: "4Gi" + +dfuserver: +- name: dfuserver + maxJobs: 1 + resources: + cpu: "1" + memory: "1800Mi" + +eclagent: +- name: hthor + ## replicas indicates how many eclagent pods should be started + replicas: 1 + ## maxActive controls how many workunits may be active at once (per replica) + maxActive: 4 + ## prefix may be used to set a filename prefix applied to any relative filenames used by jobs submitted to this queue + prefix: hthor + ## Set to false if you want to launch each workunit in its own container, true to run as child processes in eclagent pod + useChildProcesses: false + ## type may be 'hthor' (the default) or 'roxie', to specify that the roxie engine rather than the hthor engine should be used for eclagent workunit processing + type: hthor + ## The following resources apply to child hThor pods when useChildProcesses=false, otherwise they apply to hThor pod. + resources: + cpu: "1" + memory: "1G" + stubResources: + cpu: "100m" + memory: "100Mi" + #egress: engineEgress + +- name: roxie-workunit + replicas: 1 + prefix: roxie_workunit + maxActive: 20 + useChildProcesses: true + type: roxie + #resources: + # cpu: "1" + # memory: "1G" + #egress: engineEgress + resources: + cpu: "1" + memory: "1G" + stubResources: + cpu: "100m" + memory: "100Mi" + +eclccserver: +- name: myeclccserver + replicas: 1 + ## Set to false if you want to launch each workunit compile in its own container, true to run as child processes in eclccserver pod. 
+  useChildProcesses: false
+  ## If non-zero, and useChildProcesses is false, try spending up to this number of seconds compiling using a child process before switching to
+  ## a separate container. Speeds up throughput of small jobs.
+  childProcessTimeLimit: 10
+  ## maxActive controls how many workunit compiles may be active at once (per replica)
+  maxActive: 4
+  ## Specify a list of queues to listen on if you don't want this eclccserver listening on all queues. If empty or missing, listens on all queues
+  listen: []
+  ## The following allows eclcc options (names start with a -) and debug options to be defined for each of the workunits that are compiled.
+  #options:
+  #- name: globalAutoHoist
+  #  value: false
+  #  cluster: name # optional cluster this is applied to
+
+  # Used to configure the authentication for git when using the option to compile from a repo. Also requires an associated secret.
+  #gitUsername:
+
+  ## The following resources apply to child compile pods when useChildProcesses=false, otherwise they apply to the eclccserver pod.
+  resources:
+    cpu: "1"
+    memory: "20Gi"
+  timedChildResources:
+    cpu: "1"
+    memory: "798Mi"
+
+esp:
+- name: eclwatch
+  ## Pre-configured esp applications include eclwatch, eclservices, and eclqueries
+  application: eclwatch
+  auth: none
+  replicas: 1
+  resources:
+    cpu: "4"
+    memory: "8G"
+  ## The following 'corsAllowed' section is used to configure CORS support
+  ##   origin - the origin to support CORS requests from
+  ##   headers - the headers to allow for the given origin via CORS
+  ##   methods - the HTTP methods to allow for the given origin via CORS
+  ##
+  #corsAllowed:
+  ## origin starting with https will only allow https CORS
+  #- origin: https://*.my.com
+  #  headers:
+  #  - "X-X"
+  #  methods:
+  #  - "GET"
+  #  - "OPTIONS"
+  ## origin starting with http will allow http or https CORS
+  #- origin: http://www.example.com
+  #  headers:
+  #  - "*"
+  #  methods:
+  #  - "GET"
+  #  - "POST"
+  #  - "OPTIONS"
+
+# Add remote clients to generated client certificates and make the ESP require that one of the generated certificates is provided by a client in order to connect
+# When setting up remote clients make sure that certificates.issuers.remote.enabled is set to true.
+#  remoteClients:
+#  - name: petfoodapplicationprod
+#    organization: petfoodDept
+#    secretTemplate:
+#      annotations:
+#        kubed.appscode.com/sync: "hpccenv=petfoodAppProd" # use kubed config-syncer to replicate certificate to namespace with matching annotation (also supports syncing with separate aks clusters)
+
+# trustClients and remoteClients can be combined. Trust is far easier to manage and should now be the preferred mechanism.
+# Trust is similar to remoteClients, but unlike remoteClients, the client certificates are generated elsewhere.
+# If trust is present then esp will use mtls, with trust controlled by certificates.issuers.remote, which must be enabled.
+# When using trustClients the remote issuer of each environment should point to the same certificate authority.
+# Verification of identity is automatic if the CA matches, but only the clients listed here are actually allowed access
+#  trustClients:
+#  - commonName: rabc.example.com
+
+  service:
+    ## port can be used to change the local port used by the pod. If omitted, the default port (8880) is used
+    port: 8888
+    ## servicePort controls the port that this service will be exposed on, either internally to the cluster, or externally
+    servicePort: 8010
+    ## wsdlAddress should be set to the host and port which clients can use to hit this service.
+    # This address is added to the service WSDL files, which simplifies setting up a SOAP client to hit this service. There may be many external factors determining the address
+    # that is accessible to clients.
+    # wsdlAddress: clientfacingaddress:8010
+    ## Specify visibility: local (or global) if you want the service available from outside the cluster. Typically, eclwatch and wsecl are published externally, while eclservices is designed for internal use.
+    visibility: local
+    ## Annotations can be specified on a service - for example to specify provider-specific information such as service.beta.kubernetes.io/azure-load-balancer-internal-subnet
+    #annotations:
+    #  service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "mysubnet"
+    # The service.annotations prefixed with hpcc.eclwatch.io should not be declared here. They can be declared
+    # in other services in order to be exposed in the ECLWatch interface. Similar functionality can be used by other
+    # applications. For other applications, the "eclwatch" inside the service.annotations should be replaced by
+    # their application names.
+    #  hpcc.eclwatch.io/enabled: "true"
+    #  hpcc.eclwatch.io/description: "some description"
+    ## You can also specify labels on a service
+    #labels:
+    #  mylabel: "3"
+    ## Links specify the web links for a service. The web links may be shown on ECLWatch.
+    #links:
+    #- name: linkname
+    #  description: "some description"
+    #  url: "http://abc.com/def?g=1"
+    ## CIDRs allowed to access this service.
+    #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32]
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 70M
+  #resources:
+  #  cpu: "1"
+  #  memory: "2G"
+- name: eclservices
+  application: eclservices
+  auth: none
+  replicas: 1
+  service:
+    servicePort: 8010
+    visibility: cluster
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: eclqueries
+  application: eclqueries
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8002
+    #annotations:
+    #  hpcc.eclwatch.io/enabled: "true"
+    #  hpcc.eclwatch.io/description: "Roxie Test page"
+    #  hpcc.eclwatch.io/port: "8002"
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: esdl-sandbox
+  application: esdl-sandbox
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8899
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: sql2ecl
+  application: sql2ecl
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8510
+    #domain: hpccsql.com
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: dfs
+  application: dfs
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8520
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+
+
+#- name: ldapenvironment
+  #ldapenvironment is a stand-alone ESP service used to help stand up new HPCC LDAP Environments
+#  application: ldapenvironment
+#  auth: ldap
+#  #specify the hpcc branch Root Name
+#  hpccRootName: ou=hpcc,dc=myldap,dc=com
+#  #specify all BaseDNs with your LDAP Server's "dc=" settings
+#  sharedFilesBaseDN: ou=files,ou=hpcc,dc=myldap,dc=com
+#  sharedGroupsBaseDN: ou=groups,ou=hpcc,dc=myldap,dc=com
+#  sharedUsersBaseDN: ou=users,ou=hpcc,dc=myldap,dc=com
+#  sharedResourcesBaseDN: ou=smc,ou=espservices,ou=hpcc,dc=myldap,dc=com
+#  sharedWorkunitsBaseDN: ou=workunits,ou=hpcc,dc=myldap,dc=com
+#  adminGroupName: HPCCAdmins
+#  replicas: 1
+#  service:
+#    visibility: local
+#    servicePort: 8511
+
+roxie:
+- name: roxie
+  disabled: false
+  prefix: roxie
+  services:
+  - name: roxie
+    servicePort: 9876
+    listenQueue: 200
+    numThreads: 30
+    visibility: local
+#    trustClients:
+#    - commonName: rabc.example.com
+#    - commonName: rbcd.example.com
+    # Can override ingress rules for each service if desired - for example, to grant no additional ingress permissions you can use
+    # ingress: []
+
+# Trust is similar to remoteClients, but unlike remoteClients, the client certificates are generated elsewhere.
+# If trust is present then roxie will use mtls with trust controlled by certificates.issuers.remote.
+# When using the trust section, the remote issuer of each environment should point to the same certificate authority.
+# Verification of identity is automatic if the CA matches, but only the clients listed here are actually allowed access
+#  trust:
+#  - commonName: abc.example.com
+#  - commonName: bcd.example.com
+
+  ## replicas indicates the number of replicas per channel
+  replicas: 2
+  numChannels: 2
+  ## Set singleNode to true for a scalable cluster of "single-node" roxie servers, each implementing all channels locally
+  singleNode: false
+  ## Adjust traceLevel to taste (1 is default)
+  traceLevel: 1
+  ## set totalMemoryLimit to indicate how much memory is preallocated for roxie row data
+  # totalMemoryLimit: "1Gi" # Default 1Gi, capped to 75% of resources.memory if defined.
+  ## Set mtuPayload to the maximum amount of data Roxie will put in a single packet. This should be just less than the system MTU. Default is 1400
+  # mtuPayload: 3800
+
+  ## resources specifies the resources required by each agent pod
+  resources:
+    cpu: "8"
+    memory: "12G"
+  topoResources:
+    cpu: "789m"
+    memory: "543Mi"
+  serverResources:
+    cpu: "2"
+    memory: "8G"
+  channelResources:
+    cpu: "4"
+    memory: "6Gi"
+
+  ## Set serverReplicas to indicate a separate replicaSet of roxie servers, with agent pods not acting as servers
+  serverReplicas: 0
+  ## If serverReplicas is set, the resources required for the server pods can be configured separately from the agent (channel) pods
+  #serverResources:
+  #  cpu: "1"
+  #  memory: "4Gi"
+  #channelResources:
+  #  cpu: "2"
+  #  memory: "8Gi"
+
+  # Roxie may take a while to start up if there are a lot of queries to load.
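+  # (for example, a large set of published queries can take several minutes to load, longer than the default probes allow)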
+  # You may need to override the default startup/readiness probing by setting these values
+  #minStartupTime: 30 # How long to wait before initiating startup probing
+  #maxStartupTime: 600 # Maximum time to wait for startup to complete before failing
+  topoServer:
+    replicas: 1
+  #directAccessPlanes: [] #add direct access planes that roxie will read from without copying the data to its default data plane
+  #ldapUser: roxie_file_access #add system username for accessing files
+  #egress: engineEgress
+
+## The [manager/worker/eclAgent]Resources define the resource limits for each container.
+## numWorkersPerPod may be >1 (it must be a factor of numWorkers).
+## NB: Each worker corresponds to a container that will be resourced according to
+## workerResources, meaning that if numWorkersPerPod>1, N * workerResources.cpu,
+## N * workerResources.memory etc., will be required in total for the pod.
+##
+## By default the available Thor memory will be based on the resourced container memory.
+## This can be overridden by setting [worker/manager]Memory.query and
+## [worker/manager]Memory.thirdParty.
+thor:
+- name: thor
+  prefix: thor
+  numWorkers: 2
+  maxJobs: 4
+  maxGraphs: 2
+  #maxGraphStartupTime: 600
+  #numWorkersPerPod: 1
+  managerResources:
+    cpu: "1"
+    memory: "2G"
+  workerResources:
+    cpu: "4"
+    memory: "4G"
+  #workerMemory:
+  #  query: "3G"
+  #  thirdParty: "500M"
+  eclAgentResources:
+    cpu: "1"
+    memory: "432M"
+  #egress: engineEgress
+
+eclscheduler:
+- name: eclscheduler
+  resources:
+    cpu: "567m"
+    memory: "4321M"
diff --git a/testing/regress/ecl/indexmerge.ecl b/testing/regress/ecl/indexmerge.ecl
new file mode 100644
index 00000000000..e089e4c95a5
--- /dev/null
+++ b/testing/regress/ecl/indexmerge.ecl
@@ -0,0 +1,40 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+//class=file
+//class=index
+//version multiPart=false
+//version multiPart=true
+
+import ^ as root;
+multiPart := #IFDEFINED(root.multiPart, true);
+useLocal := #IFDEFINED(root.useLocal, false);
+useTranslation := #IFDEFINED(root.useTranslation, false);
+
+//--- end of version configuration ---
+
+import $.setup;
+import setup.TS;
+Files := setup.Files(multiPart, useLocal, useTranslation);
+
+//Read from an index, merging from a large number of cursor positions to generate a sorted output
+
+TS_searchIndex := Files.getSearchIndex();
+filtered := TS_searchIndex(KEYED(kind=1 AND word[1]='a'));
+withOrder := STEPPED(filtered, doc, PRIORITY(3), HINT(maxseeklookahead(50)));
+
+OUTPUT(COUNT(NOFOLD(withOrder)) = 305475);
diff --git a/testing/regress/ecl/key/indexmerge.xml b/testing/regress/ecl/key/indexmerge.xml
new file mode 100644
index 00000000000..44a8709ad35
--- /dev/null
+++ b/testing/regress/ecl/key/indexmerge.xml
@@ -0,0 +1,3 @@
+<Dataset name='Result 1'>
+ <Row><Result_1>true</Result_1></Row>
+</Dataset>
diff --git a/testing/regress/hpcc/util/ecl/file.py b/testing/regress/hpcc/util/ecl/file.py
index 9c5da6897f3..25eced0c77a 100644
--- a/testing/regress/hpcc/util/ecl/file.py
+++ b/testing/regress/hpcc/util/ecl/file.py
@@ -566,7 +566,10 @@ def setJobnameVersion(self, version):
         #  'multiPart=false,useSequential=true'
         # to this
         #  'multiPart(false)-useSequential(true)'
-        self.jobnameVersion += '-' +version.replace('=', '(').replace(',', ')-')+')'
+        # need to handle this kind of value as well:
+        #  url='http://.:9876'
+        # where the ':', '.' and '/' can cause problems later in the version result check
+        self.jobnameVersion += '-' +version.replace('=', '(').replace(',', ')-').replace('.','-dot-').replace(':','-colon-').replace('/','-slash-').replace('?','-qmark-').replace('*','-star-')+')'
         pass

     def setJobname(self, timestamp):
diff --git a/testing/regress/hpcc/util/util.py b/testing/regress/hpcc/util/util.py
index 6dd9a9432e6..814cda4c561 100644
--- a/testing/regress/hpcc/util/util.py
+++ b/testing/regress/hpcc/util/util.py
@@ -142,7 +142,8 @@ def queryWuid(jobname, taskId):
     result = 'OK'
     for resultItem in resultItems:
         resultItem = resultItem.strip()
-        [key, val] = resultItem.split(':')
+        logger.debug("%3d. 
resultItem: '%s'", taskId, resultItem) + [key, val] = resultItem.split(':', 1) if key == 'ID': wuid = val if key == 'state': diff --git a/testing/unittests/CMakeLists.txt b/testing/unittests/CMakeLists.txt index 58153493d35..ea5e6c9f59e 100644 --- a/testing/unittests/CMakeLists.txt +++ b/testing/unittests/CMakeLists.txt @@ -110,7 +110,7 @@ target_link_libraries ( unittests esphttp esdllib logginglib - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) if (NOT CONTAINERIZED) diff --git a/tools/hidl/hidlcomp.cpp b/tools/hidl/hidlcomp.cpp index b2328b909cb..37e22303ae6 100644 --- a/tools/hidl/hidlcomp.cpp +++ b/tools/hidl/hidlcomp.cpp @@ -4914,12 +4914,15 @@ void EspServInfo::write_esp_client_ipp() outs("\tIMPLEMENT_IINTERFACE;\n\n"); outf("\tCClient%s()\n\t{\n", name_); - outs("\t\tsoap_reqid=0;\n\t"); - outf("\t\tsoap_action.append(\"%s\");\n\t", name_); - const char *ver = getMetaString("default_client_version", NULL); + outs("\t\tsoap_reqid=0;\n"); + outf("\t\tsoap_action.append(\"%s\");\n", name_); + // use latest 'version' unless 'generated_client_version' provided + const char *ver = getMetaString("generated_client_version", nullptr); + if (!ver || !*ver) + ver = getMetaString("version", nullptr); if (ver && *ver) - outf("\t\tsoap_action.append(\"?ver_=\").append(%s);\n\t", ver); - outf("}\n\tvirtual ~CClient%s(){}\n", name_); + outf("\t\tsoap_action.append(\"?ver_=\").append(%s);\n", ver); + outf("\t}\n\tvirtual ~CClient%s(){}\n", name_); outs("\tvirtual void setProxyAddress(const char *address)\n\t{\n\t\tsoap_proxy.set(address);\n\t}\n"); outs("\tvirtual void addServiceUrl(const char *url)\n\t{\n\t\tsoap_url.set(url);\n\t}\n"); diff --git a/tools/wutool/CMakeLists.txt b/tools/wutool/CMakeLists.txt index 95cc20386bd..80cbec1dd57 100644 --- a/tools/wutool/CMakeLists.txt +++ b/tools/wutool/CMakeLists.txt @@ -64,7 +64,7 @@ target_link_libraries ( wutool deftype workunit wuanalysis - ${CPPUNIT_LIBRARIES} + ${CppUnit_LIBRARIES} ) if ( USE_CPPUNIT ) diff --git a/vcpkg_usage.txt b/vcpkg_usage.txt index a7888ec1e92..856a3a7fe6f 100644 --- a/vcpkg_usage.txt +++ b/vcpkg_usage.txt @@ -1,198 +1,228 @@ -[cmake] The package zlib is compatible with built-in CMake targets: -[cmake] -[cmake] find_package(ZLIB REQUIRED) -[cmake] target_link_libraries(main PRIVATE ZLIB::ZLIB) -[cmake] -[cmake] The package openssl is compatible with built-in CMake targets: -[cmake] -[cmake] find_package(OpenSSL REQUIRED) -[cmake] target_link_libraries(main PRIVATE OpenSSL::SSL OpenSSL::Crypto) -[cmake] -[cmake] curl provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(CURL CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE CURL::libcurl) -[cmake] -[cmake] The package aws-sdk-cpp:x64-linux-dynamic provides CMake targets: -[cmake] -[cmake] When using AWSSDK, AWSSDK_ROOT_DIR must be defined by the user. 
-[cmake] find_package(AWSSDK CONFIG COMPONENTS core dynamodb kinesis s3 REQUIRED)
-[cmake] target_include_directories(main PRIVATE ${AWSSDK_INCLUDE_DIRS})
-[cmake] target_link_libraries(main PRIVATE ${AWSSDK_LIBRARIES})
-[cmake]
-[cmake] OR
-[cmake]
-[cmake] find_package(aws-cpp-sdk-core REQUIRED)
-[cmake] target_include_directories(main PRIVATE aws-cpp-sdk-core)
-[cmake] target_link_libraries(main PRIVATE aws-cpp-sdk-core)
-[cmake]
-[cmake] The package libxml2 is compatible with built-in CMake targets:
-[cmake]
-[cmake] find_package(LibXml2 REQUIRED)
-[cmake] target_link_libraries(main PRIVATE LibXml2::LibXml2)
-[cmake]
-[cmake] azure-storage-blobs-cpp provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(azure-storage-blobs-cpp CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE Azure::azure-storage-blobs)
-[cmake]
-[cmake] azure-storage-files-shares-cpp provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(azure-storage-files-shares-cpp CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE Azure::azure-storage-files-shares)
-[cmake]
-[cmake] The package boost is compatible with built-in CMake targets:
-[cmake]
-[cmake] find_package(Boost REQUIRED [COMPONENTS <libs>...])
-[cmake] target_link_libraries(main PRIVATE Boost::boost Boost::<lib1> Boost::<lib2> ...)
-[cmake]
-[cmake] libuv provides CMake targets:
-[cmake]
-[cmake] find_package(libuv CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE $<IF:$<TARGET_EXISTS:uv_a>,uv_a,uv>)
-[cmake]
-[cmake] cpr provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(cpr CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE cpr::cpr)
-[cmake]
-[cmake] jsoncpp provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(jsoncpp CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE jsoncpp_lib jsoncpp_object JsonCpp::JsonCpp)
-[cmake]
-[cmake] h3 provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(h3 CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE h3::h3)
-[cmake]
-[cmake] hiredis provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(hiredis CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE hiredis::hiredis)
-[cmake]
-[cmake] jwt-cpp is header-only and can be used from CMake via:
-[cmake]
-[cmake] find_path(JWT_CPP_INCLUDE_DIRS "jwt-cpp/base.h")
-[cmake] target_include_directories(main PRIVATE ${JWT_CPP_INCLUDE_DIRS})
-[cmake]
-[cmake] The package libarchive is compatible with the CMake Find Module:
-[cmake]
-[cmake] find_package(LibArchive REQUIRED)
-[cmake] target_include_directories(main PRIVATE ${LibArchive_INCLUDE_DIRS})
-[cmake] target_link_libraries(main PRIVATE ${LibArchive_LIBRARIES})
-[cmake]
-[cmake] find_package(LibArchive REQUIRED)
-[cmake] target_link_libraries(main PRIVATE LibArchive::LibArchive) # CMake >= 3.17
-[cmake]
-[cmake] libevent provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(Libevent CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE libevent::core libevent::extra libevent::pthreads)
-[cmake]
-[cmake] libcouchbase-cxx is header-only and can be used from CMake via:
-[cmake]
-[cmake] find_path(LIBCOUCHBASE_CXX_INCLUDE_DIRS 
"libcouchbase/couchbase++.h") -[cmake] target_include_directories(main PRIVATE ${LIBCOUCHBASE_CXX_INCLUDE_DIRS}) -[cmake] -[cmake] libgit2 provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(unofficial-git2 CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE unofficial::git2::git2) -[cmake] -[cmake] lz4 provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(lz4 CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE lz4::lz4) -[cmake] -[cmake] The package libmysql provides CMake targets: -[cmake] -[cmake] find_package(libmysql REQUIRED) -[cmake] target_link_libraries(main PRIVATE ${MYSQL_LIBRARIES}) -[cmake] -[cmake] The package librdkafka:x64-linux-dynamic provides CMake targets: -[cmake] -[cmake] find_package(RdKafka CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE RdKafka::rdkafka RdKafka::rdkafka++) -[cmake] -[cmake] The package libxslt is compatible with built-in CMake targets: -[cmake] -[cmake] # xslt library -[cmake] find_package(LibXslt REQUIRED) -[cmake] target_link_libraries(main PRIVATE LibXslt::LibXslt) -[cmake] -[cmake] # exslt library -[cmake] find_package(LibXslt REQUIRED) -[cmake] target_link_libraries(main PRIVATE LibXslt::LibExslt) -[cmake] -[cmake] In order to use modules, you must set environment variable LIBXSLT_PLUGINS_PATH -[cmake] at runtime. -[cmake] -[cmake] libyaml provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(yaml CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE yaml) -[cmake] -[cmake] mongo-cxx-driver provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(bsoncxx CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE mongo::bsoncxx_shared) -[cmake] -[cmake] find_package(mongocxx CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE mongo::mongocxx_shared) -[cmake] -[cmake] The package nlohmann-json provides CMake targets: -[cmake] -[cmake] find_package(nlohmann_json CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE nlohmann_json::nlohmann_json) -[cmake] -[cmake] The package nlohmann-json can be configured to not provide implicit conversions via a custom triplet file: -[cmake] -[cmake] set(nlohmann-json_IMPLICIT_CONVERSIONS OFF) -[cmake] -[cmake] For more information, see the docs here: -[cmake] -[cmake] https://json.nlohmann.me/api/macros/json_use_implicit_conversions/ -[cmake] -[cmake] openblas provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(OpenBLAS CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE OpenBLAS::OpenBLAS) -[cmake] -[cmake] The package openldapp can be imported via CMake FindPkgConfig module: -[cmake] -[cmake] find_package(PkgConfig) -[cmake] pkg_check_modules(OPENLDAP REQUIRED IMPORTED_TARGET ldap) -[cmake] -[cmake] target_link_libraries(main PRIVATE PkgConfig::OPENLDAP) -[cmake] -[cmake] rapidjson provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(RapidJSON CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE rapidjson) -[cmake] -[cmake] sqlite3 provides CMake targets: -[cmake] -[cmake] # this is heuristically generated, and may not be correct -[cmake] find_package(unofficial-sqlite3 CONFIG REQUIRED) -[cmake] target_link_libraries(main PRIVATE 
unofficial::sqlite3::sqlite3)
-[cmake]
-[cmake] tbb provides CMake targets:
-[cmake]
-[cmake] # this is heuristically generated, and may not be correct
-[cmake] find_package(TBB CONFIG REQUIRED)
-[cmake] target_link_libraries(main PRIVATE TBB::tbb TBB::tbbmalloc TBB::tbbmalloc_proxy)
\ No newline at end of file
+rapidjson provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(RapidJSON CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE rapidjson)
+
+The package zlib is compatible with built-in CMake targets:
+
+    find_package(ZLIB REQUIRED)
+    target_link_libraries(main PRIVATE ZLIB::ZLIB)
+
+The package openssl is compatible with built-in CMake targets:
+
+    find_package(OpenSSL REQUIRED)
+    target_link_libraries(main PRIVATE OpenSSL::SSL OpenSSL::Crypto)
+
+The package boost is compatible with built-in CMake targets:
+
+    find_package(Boost REQUIRED [COMPONENTS <libs>...])
+    target_link_libraries(main PRIVATE Boost::boost Boost::<lib1> Boost::<lib2> ...)
+
+lz4 provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(lz4 CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE lz4::lz4)
+
+The package arrow provides CMake targets:
+
+    find_package(Arrow CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE "$<IF:$<BOOL:${ARROW_BUILD_STATIC}>,Arrow::arrow_static,Arrow::arrow_shared>")
+
+    find_package(Parquet CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE "$<IF:$<BOOL:${ARROW_BUILD_STATIC}>,Parquet::parquet_static,Parquet::parquet_shared>")
+
+    find_package(ArrowDataset CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE "$<IF:$<BOOL:${ARROW_BUILD_STATIC}>,ArrowDataset::arrow_dataset_static,ArrowDataset::arrow_dataset_shared>")
+
+    find_package(ArrowAcero CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE "$<IF:$<BOOL:${ARROW_BUILD_STATIC}>,ArrowAcero::arrow_acero_static,ArrowAcero::arrow_acero_shared>")
+
+curl is compatible with built-in CMake targets:
+
+    find_package(CURL REQUIRED)
+    target_link_libraries(main PRIVATE CURL::libcurl)
+
+The package aws-sdk-cpp:x64-linux-dynamic provides CMake targets:
+
+    When using AWSSDK, AWSSDK_ROOT_DIR must be defined by the user.
+    find_package(AWSSDK CONFIG COMPONENTS core dynamodb kinesis s3 REQUIRED)
+    target_include_directories(main PRIVATE ${AWSSDK_INCLUDE_DIRS})
+    target_link_libraries(main PRIVATE ${AWSSDK_LIBRARIES})
+
+    OR
+
+    find_package(aws-cpp-sdk-core REQUIRED)
+    target_include_directories(main PRIVATE aws-cpp-sdk-core)
+    target_link_libraries(main PRIVATE aws-cpp-sdk-core)
+
+azure-storage-blobs-cpp provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(azure-storage-blobs-cpp CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE Azure::azure-storage-blobs)
+
+azure-storage-files-shares-cpp provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(azure-storage-files-shares-cpp CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE Azure::azure-storage-files-shares)
+
+libuv provides CMake targets:
+
+    find_package(libuv CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE $<IF:$<TARGET_EXISTS:libuv::uv_a>,libuv::uv_a,libuv::uv>)
+
+cppunit provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(CppUnit CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE CppUnit)
+
+cpr provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(cpr CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE cpr::cpr)
+
+jsoncpp provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(jsoncpp CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE JsonCpp::JsonCpp)
+
+h3 provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(h3 CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE h3::h3)
+
+hiredis provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(hiredis CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE hiredis::hiredis)
+
+jwt-cpp is header-only and can be used from CMake via:
+
+    find_path(JWT_CPP_INCLUDE_DIRS "jwt-cpp/base.h")
+    target_include_directories(main PRIVATE ${JWT_CPP_INCLUDE_DIRS})
+
+The package libarchive is compatible with the CMake Find Module:
+
+    find_package(LibArchive REQUIRED)
+    target_include_directories(main PRIVATE ${LibArchive_INCLUDE_DIRS})
+    target_link_libraries(main PRIVATE ${LibArchive_LIBRARIES})
+
+    find_package(LibArchive REQUIRED)
+    target_link_libraries(main PRIVATE LibArchive::LibArchive) # CMake >= 3.17
+
+libcouchbase-cxx is header-only and can be used from CMake via:
+
+    find_path(LIBCOUCHBASE_CXX_INCLUDE_DIRS "libcouchbase/couchbase++.h")
+    target_include_directories(main PRIVATE ${LIBCOUCHBASE_CXX_INCLUDE_DIRS})
+
+libgit2 can be imported via CMake FindPkgConfig module:
+
+    find_package(PkgConfig REQUIRED)
+    pkg_check_modules(LIBGIT2 REQUIRED IMPORTED_TARGET libgit2)
+    target_link_libraries(main PRIVATE PkgConfig::LIBGIT2)
+
+vcpkg provides proprietary CMake targets:
+
+    find_package(unofficial-libgit2 CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE unofficial::libgit2::libgit2)
+
+
+libmysql provides CMake targets:
+
+    find_package(unofficial-libmysql REQUIRED)
+    target_link_libraries(main PRIVATE unofficial::libmysql::libmysql)
+
+The package librdkafka:x64-linux-dynamic provides CMake targets:
+
+    find_package(RdKafka CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE RdKafka::rdkafka RdKafka::rdkafka++)
+
+The package libxslt is compatible with built-in CMake targets:
+
+    # xslt library
+    find_package(LibXslt REQUIRED)
+    target_link_libraries(main PRIVATE LibXslt::LibXslt)
+
+    # exslt library
+    find_package(LibXslt REQUIRED)
+    target_link_libraries(main PRIVATE LibXslt::LibExslt)
+
+In order to use modules, you must set environment variable LIBXSLT_PLUGINS_PATH
+at runtime.
+
+libyaml provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(yaml CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE yaml)
+
+minizip provides CMake targets:
+
+    find_package(unofficial-minizip CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE unofficial::minizip::minizip)
+
+mongo-cxx-driver provides CMake targets:
+
+    find_package(bsoncxx CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE $<IF:$<TARGET_EXISTS:mongo::bsoncxx_static>,mongo::bsoncxx_static,mongo::bsoncxx_shared>)
+
+    find_package(mongocxx CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE $<IF:$<TARGET_EXISTS:mongo::mongocxx_static>,mongo::mongocxx_static,mongo::mongocxx_shared>)
+
+The package nlohmann-json provides CMake targets:
+
+    find_package(nlohmann_json CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE nlohmann_json::nlohmann_json)
+
+The package nlohmann-json can be configured to not provide implicit conversions via a custom triplet file:
+
+    set(nlohmann-json_IMPLICIT_CONVERSIONS OFF)
+
+For more information, see the docs here:
+
+    https://json.nlohmann.me/api/macros/json_use_implicit_conversions/
+
+nlp-engine provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(nlp-engine CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE nlp-engine)
+
+openblas provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(OpenBLAS CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE OpenBLAS::OpenBLAS)
+
+The package openldapp can be imported via CMake FindPkgConfig module:
+
+    find_package(PkgConfig)
+    pkg_check_modules(OPENLDAP REQUIRED IMPORTED_TARGET ldap)
+
+    target_link_libraries(main PRIVATE PkgConfig::OPENLDAP)
+
+opentelemetry-cpp provides CMake targets:
+
+    # this is heuristically generated, and may not be correct
+    find_package(opentelemetry-cpp CONFIG REQUIRED)
+    # note: 21 additional targets are not displayed.
+    target_link_libraries(main PRIVATE opentelemetry-cpp::api opentelemetry-cpp::ext opentelemetry-cpp::sdk opentelemetry-cpp::logs)
+
+sqlite3 provides pkgconfig bindings.
+sqlite3 provides CMake targets:
+
+    find_package(unofficial-sqlite3 CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE unofficial::sqlite3::sqlite3)
+
+wasmtime-cpp-api is header-only and can be used from CMake via:
+
+    find_path(WASMTIME_CPP_API_INCLUDE_DIRS "wasmtime-cpp-api/wasmtime.hh")
+    target_include_directories(main PRIVATE ${WASMTIME_CPP_API_INCLUDE_DIRS})
\ No newline at end of file
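
For reference, the usage fragments above combine into a minimal consumer CMakeLists.txt along these lines. This is only a sketch: the project name and main.cpp are hypothetical placeholders, it uses just a few of the packages listed above, and it assumes the vcpkg toolchain file is passed via -DCMAKE_TOOLCHAIN_FILE in the usual way:

    cmake_minimum_required(VERSION 3.17)
    project(vcpkg_usage_demo LANGUAGES CXX)   # hypothetical project name

    # Packages found via built-in CMake find modules (see the zlib and openssl sections above)
    find_package(ZLIB REQUIRED)
    find_package(OpenSSL REQUIRED)

    # Packages found via vcpkg-provided CONFIG files (see the lz4 and rapidjson sections above)
    find_package(lz4 CONFIG REQUIRED)
    find_package(RapidJSON CONFIG REQUIRED)

    add_executable(main main.cpp)             # main.cpp is a placeholder source file
    target_link_libraries(main PRIVATE
        ZLIB::ZLIB
        OpenSSL::SSL OpenSSL::Crypto
        lz4::lz4
        rapidjson)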