From 3e17095173e3df2065eee05d3360a78ca9c64f51 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 18 Jun 2024 14:06:22 +0100 Subject: [PATCH 01/31] HPCC-32054 Prevent global undefined due to out of order evaluation Signed-off-by: Gavin Halliday --- ecl/hql/hqltrans.ipp | 3 ++- ecl/hqlcpp/hqlttcpp.cpp | 24 ++++++++++++++++-------- ecl/hqlcpp/hqlttcpp.ipp | 6 ++---- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/ecl/hql/hqltrans.ipp b/ecl/hql/hqltrans.ipp index 89a49046473..b0cd886c17f 100644 --- a/ecl/hql/hqltrans.ipp +++ b/ecl/hql/hqltrans.ipp @@ -688,7 +688,8 @@ private: class HQL_API ConditionalHqlTransformer : public NewHqlTransformer { public: - enum { CTFnoteifactions = 0x0001, + enum { CTFnone = 0x0000, + CTFnoteifactions = 0x0001, CTFnoteifdatasets = 0x0002, CTFnoteifdatarows = 0x0004, CTFnoteifall = 0x0008, diff --git a/ecl/hqlcpp/hqlttcpp.cpp b/ecl/hqlcpp/hqlttcpp.cpp index 6dadd057d3f..49d13191769 100644 --- a/ecl/hqlcpp/hqlttcpp.cpp +++ b/ecl/hqlcpp/hqlttcpp.cpp @@ -8315,7 +8315,7 @@ IHqlDataset * queryRootDataset(IHqlExpression * dataset) //therefore, there is no need to special case if actions. Thor on the other hand will cause it to be executed unnecessarily. static HqlTransformerInfo newScopeMigrateTransformerInfo("NewScopeMigrateTransformer"); NewScopeMigrateTransformer::NewScopeMigrateTransformer(IWorkUnit * _wu, HqlCppTranslator & _translator) -: HoistingHqlTransformer(newScopeMigrateTransformerInfo, 0), translator(_translator) +: HoistingHqlTransformer(newScopeMigrateTransformerInfo, CTFnone), translator(_translator) { wu = _wu; isRoxie = translator.targetRoxie(); @@ -8622,7 +8622,7 @@ bool AutoScopeMigrateInfo::doAutoHoist(IHqlExpression * transformed, bool minimi static HqlTransformerInfo autoScopeMigrateTransformerInfo("AutoScopeMigrateTransformer"); AutoScopeMigrateTransformer::AutoScopeMigrateTransformer(IWorkUnit * _wu, HqlCppTranslator & _translator) -: NewHqlTransformer(autoScopeMigrateTransformerInfo), translator(_translator) +: HoistingHqlTransformer(autoScopeMigrateTransformerInfo, CTFnone), translator(_translator) { wu = _wu; isRoxie = (translator.getTargetClusterType() == RoxieCluster); @@ -8631,7 +8631,6 @@ AutoScopeMigrateTransformer::AutoScopeMigrateTransformer(IWorkUnit * _wu, HqlCpp hasCandidate = false; activityDepth = 0; curGraph = 1; - globalTarget = NULL; } //Ensure all input activities are marked as never hoisting, but child activities are unaffected @@ -8861,7 +8860,7 @@ IHqlExpression * AutoScopeMigrateTransformer::createTransformed(IHqlExpression * //else hoist it within the current graph, otherwise it can get hoisted before globals on datasets that //it is dependent on. 
if (extra->firstUseIsConditional) - globalTarget->append(*createWrapper(no_thor, setResult.getClear())); + appendToTarget(*createWrapper(no_thor, setResult.getClear())); else graphActions.append(*setResult.getClear()); transformed.setown(getResult.getClear()); @@ -8871,11 +8870,20 @@ IHqlExpression * AutoScopeMigrateTransformer::createTransformed(IHqlExpression * } -void AutoScopeMigrateTransformer::transformRoot(const HqlExprArray & in, HqlExprArray & out) +IHqlExpression * AutoScopeMigrateTransformer::doTransformIndependent(IHqlExpression * expr) { - globalTarget = &out; - NewHqlTransformer::transformRoot(in, out); - globalTarget = NULL; + AutoScopeMigrateTransformer transformer(wu, translator); + + HqlExprArray exprs; + unwindCommaCompound(exprs, expr); + transformer.analyseArray(exprs, 0); + transformer.analyseArray(exprs, 1); + if (!transformer.worthTransforming()) + return LINK(expr); + + HqlExprArray results; + transformer.transformRoot(exprs, results); + return createActionList(results); } diff --git a/ecl/hqlcpp/hqlttcpp.ipp b/ecl/hqlcpp/hqlttcpp.ipp index d5ca93bdca5..a5f1e72df27 100644 --- a/ecl/hqlcpp/hqlttcpp.ipp +++ b/ecl/hqlcpp/hqlttcpp.ipp @@ -729,13 +729,11 @@ public: bool neverHoist = false; }; -class AutoScopeMigrateTransformer : public NewHqlTransformer +class AutoScopeMigrateTransformer : public HoistingHqlTransformer { public: AutoScopeMigrateTransformer(IWorkUnit * _wu, HqlCppTranslator & _translator); - void transformRoot(const HqlExprArray & in, HqlExprArray & out); - bool worthTransforming() const { return hasCandidate; } protected: @@ -751,6 +749,7 @@ protected: IHqlExpression * transformCond(IHqlExpression * expr); void doAnalyseExpr(IHqlExpression * expr); void doAnalyseConditionalExpr(IHqlExpression * expr, unsigned firstConditional); + virtual IHqlExpression * doTransformIndependent(IHqlExpression * expr) override; inline AutoScopeMigrateInfo * queryBodyExtra(IHqlExpression * expr) { return static_cast(queryTransformExtra(expr->queryBody())); } @@ -765,7 +764,6 @@ private: unsigned graphDepth = 0; HqlExprArray graphActions; unsigned activityDepth; - HqlExprArray * globalTarget; }; //--------------------------------------------------------------------------- From d76308621ee70672dfb666aaccdc03348470eab1 Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Wed, 19 Jun 2024 14:38:39 -0400 Subject: [PATCH 02/31] HPCC-32098 Modify incorrect file name in Containerized book Signed-off-by: Jim DeFabia --- .../ContainerizedMods/ConfigureValues.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index f3b52e78f51..d424a8d8a54 100644 --- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -22,7 +22,7 @@ The entire HPCC Systems configuration in the container space, is governed by a single file, a values.yaml file, and - its associated schema (values-schema.json) + its associated schema (values.schema.json) file. @@ -509,7 +509,7 @@ components - + @@ -845,7 +845,7 @@ Preferred Storage The preferredReadPlanes option is available - for each type of cluster--hThor, Thor, and Roxie. + for each type of cluster--hThor, Thor, and Roxie. This option is only significant for logical files which reside on multiple storage planes. 
When specified, the HPCC Systems platform @@ -1332,9 +1332,9 @@ thor: - + - + From e4082eb1ebe9932ed334fe6d71e6da230d287420 Mon Sep 17 00:00:00 2001 From: Shamser Ahmed Date: Thu, 13 Jun 2024 11:55:16 +0100 Subject: [PATCH 03/31] HPCC-31647 spill stats for join Signed-off-by: Shamser Ahmed --- thorlcr/activities/lookupjoin/thlookupjoinslave.cpp | 6 ++++-- thorlcr/thorutil/thmem.cpp | 9 +++++---- thorlcr/thorutil/thmem.hpp | 2 +- thorlcr/thorutil/thormisc.cpp | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp index 3db86c42edc..09662742d09 100644 --- a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp +++ b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp @@ -1751,7 +1751,7 @@ class CLookupJoinActivityBase : public CInMemJoinBase overflowWriteFile; - Owned overflowWriteStream; + Owned overflowWriteStream; rowcount_t overflowWriteCount; OwnedMalloc channelDistributors; unsigned nextRhsToSpill = 0; @@ -1881,7 +1881,7 @@ class CLookupJoinActivityBase : public CInMemJoinBasecreateOwnedTempFile(tempName.str())); VStringBuffer spillPrefixStr("clearAllNonLocalRows(%d)", SPILL_PRIORITY_SPILLABLE_STREAM); // 3rd param. is skipNulls = true, the row arrays may have had the non-local rows delete already. - rows.save(file->queryIFile(), spillCompInfo, true, spillPrefixStr.str()); // saves committed rows + rows.save(*file, spillCompInfo, true, spillPrefixStr.str()); // saves committed rows rows.flushMarker = 0; // reset because array will be moved as a consequence of further adds, so next scan must be from start } @@ -2900,6 +2900,7 @@ class CLookupJoinActivityBase : public CInMemJoinBaseputRow(rhsInRowsTemp.getClear(r)); + overflowWriteFile->noteSize(overflowWriteStream->getStatistic(StSizeDiskWrite)); return true; } if (hasFailedOverToLocal()) @@ -2949,6 +2950,7 @@ class CLookupJoinActivityBase : public CInMemJoinBaseputRow(rhsInRowsTemp.getClear(r)); + overflowWriteFile->noteSize(overflowWriteStream->getStatistic(StSizeDiskWrite)); return true; } virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const diff --git a/thorlcr/thorutil/thmem.cpp b/thorlcr/thorutil/thmem.cpp index c3d2d795a88..299f1e3118a 100644 --- a/thorlcr/thorutil/thmem.cpp +++ b/thorlcr/thorutil/thmem.cpp @@ -247,7 +247,7 @@ class CSpillableStreamBase : public CSpillable GetTempFilePath(tempName, tempPrefix.str()); spillFile.setown(activity.createOwnedTempFile(tempName.str())); VStringBuffer spillPrefixStr("SpillableStream(%u)", spillPriority); - rows.save(spillFile->queryIFile(), spillCompInfo, false, spillPrefixStr.str()); // saves committed rows + rows.save(*spillFile, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows rows.kill(); // no longer needed, readers will pull from spillFile. 
NB: ok to kill array as rows is never written to or expanded spillFile->noteSize(spillFile->queryIFile().size()); return true; @@ -1375,7 +1375,7 @@ static int callbackSortRev(IInterface * const *cb2, IInterface * const *cb1) return 1; } -rowidx_t CThorSpillableRowArray::save(IFile &iFile, unsigned _spillCompInfo, bool skipNulls, const char *_tracingPrefix) +rowidx_t CThorSpillableRowArray::save(CFileOwner &iFileOwner, unsigned _spillCompInfo, bool skipNulls, const char *_tracingPrefix) { rowidx_t n = numCommitted(); if (0 == n) @@ -1405,7 +1405,7 @@ rowidx_t CThorSpillableRowArray::save(IFile &iFile, unsigned _spillCompInfo, boo nextCB = &cbCopy.popGet(); nextCBI = nextCB->queryRecordNumber(); } - Owned writer = createRowWriter(&iFile, rowIf, rwFlags, nullptr, compBlkSz); + Owned writer = createRowWriter(&iFileOwner.queryIFile(), rowIf, rwFlags, nullptr, compBlkSz); rowidx_t i=0; rowidx_t rowsWritten=0; try @@ -1444,6 +1444,7 @@ rowidx_t CThorSpillableRowArray::save(IFile &iFile, unsigned _spillCompInfo, boo ++i; } writer->flush(NULL); + iFileOwner.noteSize(writer->getStatistic(StSizeDiskWrite)); } catch (IException *e) { @@ -1656,7 +1657,7 @@ class CThorRowCollectorBase : public CSpillable GetTempFilePath(tempName, tempPrefix.str()); VStringBuffer spillPrefixStr("%sRowCollector(%d)", tracingPrefix.str(), spillPriority); Owned tempFileOwner = activity.createOwnedTempFile(tempName.str()); - spillableRows.save(tempFileOwner->queryIFile(), spillCompInfo, false, spillPrefixStr.str()); // saves committed rows + spillableRows.save(*tempFileOwner, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows spillFiles.append(tempFileOwner.getLink()); ++overflowCount; statOverflowCount.fastAdd(1); // NB: this is total over multiple uses of this class diff --git a/thorlcr/thorutil/thmem.hpp b/thorlcr/thorutil/thmem.hpp index ac7a1dd60ee..8e4f1b896a8 100644 --- a/thorlcr/thorutil/thmem.hpp +++ b/thorlcr/thorutil/thmem.hpp @@ -480,7 +480,7 @@ class graph_decl CThorSpillableRowArray : private CThorExpandingRowArray, implem //A thread calling the following functions must own the lock, or guarantee no other thread will access void sort(ICompare & compare, unsigned maxcores); - rowidx_t save(IFile &file, unsigned _spillCompInfo, bool skipNulls, const char *tracingPrefix); + rowidx_t save(CFileOwner &file, unsigned _spillCompInfo, bool skipNulls, const char *tracingPrefix); inline rowidx_t numCommitted() const { return commitRows - firstRow; } //MORE::Not convinced this is very safe! 
inline rowidx_t queryTotalRows() const { return CThorExpandingRowArray::ordinality(); } // includes uncommited rows diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index e3759811a48..48dc1231ee1 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -83,7 +83,7 @@ const StatisticsMapping indexReadActivityStatistics({StNumRowsProcessed}, indexR const StatisticsMapping indexWriteActivityStatistics({StPerReplicated, StNumLeafCacheAdds, StNumNodeCacheAdds, StNumBlobCacheAdds }, basicActivityStatistics, diskWriteRemoteStatistics); const StatisticsMapping keyedJoinActivityStatistics({ StNumIndexAccepted, StNumPreFiltered, StNumDiskSeeks, StNumDiskAccepted, StNumDiskRejected}, basicActivityStatistics, indexReadFileStatistics); const StatisticsMapping loopActivityStatistics({StNumIterations}, basicActivityStatistics); -const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, basicActivityStatistics); +const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, spillStatistics, basicActivityStatistics); const StatisticsMapping joinActivityStatistics({StNumLeftRows, StNumRightRows}, basicActivityStatistics, spillStatistics); const StatisticsMapping diskReadActivityStatistics({StNumDiskRowsRead, }, basicActivityStatistics, diskReadRemoteStatistics); const StatisticsMapping diskWriteActivityStatistics({StPerReplicated}, basicActivityStatistics, diskWriteRemoteStatistics); From 0e0849ec5611689bf0398df81f5a95d12d8dbd77 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 16:37:37 +0100 Subject: [PATCH 04/31] HPCC-31931 Manual copy/paste of WU errors Fix flicker + poor layout of the error/warning pane. Signed-off-by: Gordon Smith --- esp/src/src-react/components/InfoGrid.tsx | 51 ++++++++----- .../src-react/components/controls/Grid.tsx | 72 +++++++++++++------ 2 files changed, 83 insertions(+), 40 deletions(-) diff --git a/esp/src/src-react/components/InfoGrid.tsx b/esp/src/src-react/components/InfoGrid.tsx index 0e13e1d0e7b..1bcca1178b9 100644 --- a/esp/src/src-react/components/InfoGrid.tsx +++ b/esp/src/src-react/components/InfoGrid.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { Checkbox, CommandBar, ICommandBarItemProps, Link } from "@fluentui/react"; +import { Checkbox, CommandBar, ICommandBarItemProps, Link, SelectionMode } from "@fluentui/react"; import { SizeMe } from "react-sizeme"; import { formatCost, formatTwoDigits } from "src/Session"; import nlsHPCC from "src/nlsHPCC"; @@ -123,6 +123,11 @@ export const InfoGrid: React.FunctionComponent = ({ return <>{info?.prefix}{txt}{info?.message}; } return Message; + }, + fluentColumn: { + flexGrow: 1, + minWidth: 320, + isResizable: true } }, Column: { label: nlsHPCC.Col, width: 36 }, @@ -133,7 +138,14 @@ export const InfoGrid: React.FunctionComponent = ({ return activityId ? 
a{activityId} : ""; } }, - FileName: { label: nlsHPCC.FileName, width: 360 } + FileName: { + label: nlsHPCC.FileName, + fluentColumn: { + flexGrow: 2, + minWidth: 320, + isResizable: true + } + } }; }, [wuid]); @@ -210,7 +222,8 @@ export const InfoGrid: React.FunctionComponent = ({ }); setData(filteredExceptions); setFilterCounts(filterCounts); - }, [costChecked, errorChecked, errors, infoChecked, otherChecked, warningChecked]); + setSelection(filteredExceptions); + }, [costChecked, errorChecked, errors, infoChecked, otherChecked, setSelection, warningChecked]); React.useEffect(() => { if (data.length) { @@ -224,19 +237,23 @@ export const InfoGrid: React.FunctionComponent = ({ } }, [data.length]); - return {({ size }) => -
- -
- + return
+ + {({ size }) => +
+
+ { }} + setTotal={setTotal} + refresh={refreshTable} + height={`${size.height - (44 + 8 + 45 + 12)}px`} + selectionMode={SelectionMode.none} + > +
-
- }; + } +
; }; diff --git a/esp/src/src-react/components/controls/Grid.tsx b/esp/src/src-react/components/controls/Grid.tsx index fff30e920ca..634cb318627 100644 --- a/esp/src/src-react/components/controls/Grid.tsx +++ b/esp/src/src-react/components/controls/Grid.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { DetailsList, DetailsListLayoutMode, Dropdown, IColumn as _IColumn, ICommandBarItemProps, IDetailsHeaderProps, IDetailsListStyles, mergeStyleSets, Selection, Stack, TooltipHost, TooltipOverflowMode, IDetailsList, IRenderFunction, IDetailsRowProps } from "@fluentui/react"; +import { DetailsList, DetailsListLayoutMode, Dropdown, IColumn as _IColumn, ICommandBarItemProps, IDetailsHeaderProps, IDetailsListStyles, mergeStyleSets, Selection, Stack, TooltipHost, TooltipOverflowMode, IDetailsList, IRenderFunction, IDetailsRowProps, SelectionMode, ConstrainMode } from "@fluentui/react"; import { Pagination } from "@fluentui/react-experiments/lib/Pagination"; import { useConst, useId, useMount, useOnEvent } from "@fluentui/react-hooks"; import { BaseStore, Memory, QueryRequest, QuerySortItem } from "src/store/Memory"; @@ -34,6 +34,7 @@ export interface FluentColumn { formatter?: (value: any, row: any) => any; csvFormatter?: (value: any, row: any) => string; className?: (value: any, row: any) => string; + fluentColumn?: Partial<_IColumn>; } export type FluentColumns = { [key: string]: FluentColumn }; @@ -72,25 +73,42 @@ function columnsAdapter(columns: FluentColumns, columnWidths: Map): const column = columns[key]; const width = columnWidths.get(key) ?? column.width; if (column?.selectorType === undefined && column?.hidden !== true) { - retVal.push({ - key, - name: column.label ?? key, - fieldName: column.field ?? key, - minWidth: width ?? 70, - maxWidth: width, - isResizable: true, - isSorted: false, - isSortedDescending: false, - iconName: column.headerIcon, - isIconOnly: !!column.headerIcon, - data: column, - styles: { root: { width, ":hover": { cursor: column?.sortable !== false ? "pointer" : "default" } } }, - onRender: (item: any, index: number, col: IColumn) => { - col.minWidth = column.width ?? 70; - col.maxWidth = column.width; - return tooltipItemRenderer(item, index, col); - } - } as IColumn); + if (column?.fluentColumn) { + retVal.push({ + key, + name: column.label ?? key, + fieldName: column.field ?? key, + iconName: column.headerIcon, + isIconOnly: !!column.headerIcon, + data: column, + styles: { root: { width, ":hover": { cursor: column?.sortable !== false ? "pointer" : "default" } } }, + onRender: (item: any, index: number, col: IColumn) => { + col.minWidth = column.width ?? 70; + return tooltipItemRenderer(item, index, col); + }, + ...column.fluentColumn + } as IColumn); + } else { + retVal.push({ + key, + name: column.label ?? key, + fieldName: column.field ?? key, + minWidth: width ?? 70, + maxWidth: width, + isResizable: true, + isSorted: false, + isSortedDescending: false, + iconName: column.headerIcon, + isIconOnly: !!column.headerIcon, + data: column, + styles: { root: { width, ":hover": { cursor: column?.sortable !== false ? "pointer" : "default" } } }, + onRender: (item: any, index: number, col: IColumn) => { + col.minWidth = column.width ?? 
70; + col.maxWidth = column.width; + return tooltipItemRenderer(item, index, col); + } + } as IColumn); + } } } return retVal; @@ -191,6 +209,7 @@ interface FluentStoreGridProps { columns: FluentColumns, height: string, refresh: RefreshTable, + selectionMode?: SelectionMode, setSelection: (selection: any[]) => void, setTotal: (total: number) => void, onRenderRow?: IRenderFunction @@ -205,6 +224,7 @@ const FluentStoreGrid: React.FunctionComponent = ({ columns, height, refresh, + selectionMode = SelectionMode.multiple, setSelection, setTotal, onRenderRow @@ -315,7 +335,8 @@ const FluentStoreGrid: React.FunctionComponent = ({ compact={true} items={items} columns={fluentColumns} - layoutMode={DetailsListLayoutMode.justified} + layoutMode={DetailsListLayoutMode.fixedColumns} + constrainMode={ConstrainMode.unconstrained} selection={selectionHandler} isSelectedOnFocus={false} selectionPreservedOnEmptyClick={true} @@ -324,6 +345,7 @@ const FluentStoreGrid: React.FunctionComponent = ({ onColumnResize={columnResize} onRenderRow={onRenderRow} styles={gridStyles(height)} + selectionMode={selectionMode} />
; }; @@ -335,6 +357,7 @@ interface FluentGridProps { sort?: QuerySortItem, columns: FluentColumns, height?: string, + selectionMode?: SelectionMode, setSelection: (selection: any[]) => void, setTotal: (total: number) => void, refresh: RefreshTable, @@ -348,6 +371,7 @@ export const FluentGrid: React.FunctionComponent = ({ sort, columns, height, + selectionMode = SelectionMode.multiple, setSelection, setTotal, refresh, @@ -362,7 +386,7 @@ export const FluentGrid: React.FunctionComponent = ({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [constStore, data, /*refresh*/]); - return + return ; }; @@ -375,6 +399,7 @@ interface FluentPagedGridProps { total: number, columns: FluentColumns, height?: string, + selectionMode?: SelectionMode, setSelection: (selection: any[]) => void, setTotal: (total: number) => void, refresh: RefreshTable, @@ -390,6 +415,7 @@ export const FluentPagedGrid: React.FunctionComponent = ({ total, columns, height, + selectionMode = SelectionMode.multiple, setSelection, setTotal, refresh, @@ -414,7 +440,7 @@ export const FluentPagedGrid: React.FunctionComponent = ({ setPage(_page); }, [pageNum]); - return + return ; }; From ee7c3bfab83e528e04588378dbe137415e0c07d0 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 16:59:29 +0100 Subject: [PATCH 05/31] Split off 9.4.72 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index c8177a82bfb..c194d77e8d6 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.71-closedown0 +version: 9.4.73-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.71-closedown0 +appVersion: 9.4.73-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 3af14c427c0..ab3e33f4178 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 732a1114341..a50440cf06b 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index c7b1a0a3b7c..21ade987987 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index eadf65d08fd..36fdadbb6cf 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 05ebf461b28..7b86a7449d1 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 135798849d6..882b2b7f63c 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 1afedb255f5..c69b739f350 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index b2d4f24bfc6..c28acbaaa8f 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 8063096a70e..e5621197105 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index ecd46f7112a..1c740401a65 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index a6359bb8cd2..01d5361a5b5 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index f5ef8bf3d6d..8656e97f866 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index e313885be18..22b14233235 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 71 ) +set ( HPCC_POINT 73 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:03:56Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-20T15:59:29Z" ) ### From 13038aa7885739502ebf52b7ca264a26450bd863 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 17:00:57 +0100 Subject: [PATCH 06/31] Split off 9.2.98 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 08b98c94298..6fb3b6188af 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.97-closedown0 +version: 9.2.99-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.97-closedown0 +appVersion: 9.2.99-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 444d2af71db..4658ca74316 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 381d7fbddb0..d9ab3304dae 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index d840198008d..2c88ddbe102 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 6de6c7b87c3..faa5493a225 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 520cded722f..1f5a8da4b07 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index e665c1e1c0b..991b88ec767 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index d0b54cef1b7..713eec2275a 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index d7ee54fc3c3..c11fc1ac990 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 9ef61b02d47..5d75783d0e0 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index a39049fa504..735c7d17117 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 34a1a660472..a660b392ae3 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index caf98186b0e..a99ee0d0356 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 5a0d035376d..9f37c6d68fd 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 97 ) +set ( HPCC_POINT 99 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:02:29Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-20T16:00:57Z" ) ### From aad4f52027a6e0cc7eb209343294b9aed5f6f4c0 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Mon, 24 Jun 2024 15:15:43 +0100 Subject: [PATCH 07/31] Split off 9.8.0 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 8 ++++---- 14 files changed, 27 insertions(+), 27 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 25f78c664c5..de086da65e6 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.7.0-trunk0 +version: 9.8.1-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.7.0-trunk0 +appVersion: 9.8.1-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 1ccb9d41b5a..e571448a037 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 275aa8d6b0c..3b4046deafa 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index b458e708383..c92f9d4bb17 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 88e3ee9a5be..3c39acd576c 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index da0207f7fc8..430ec35143d 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 01030cbbfb0..1b6c580be70 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 9513b95f939..11f3edb0628 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 160be59be49..14e9ae0d5cb 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 55ee65b9385..fd3ad929130 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 1849d559ec7..03cd7d3effc 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0aad43a7328..d7558cdcf30 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 863e7cb3e4c..706dfacaf2f 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -353,7 +353,7 @@ spec: accessEsp: {{ 
$commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index a2c38c499f0..92e98514d49 100644 --- a/version.cmake +++ b/version.cmake @@ -4,9 +4,9 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) -set ( HPCC_MINOR 7 ) -set ( HPCC_POINT 0 ) -set ( HPCC_MATURITY "trunk" ) +set ( HPCC_MINOR 8 ) +set ( HPCC_POINT 1 ) +set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-03-08T17:36:05Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-24T14:15:43Z" ) ### From 99add4c267554e30a9fe34da5fb3a391b1620e78 Mon Sep 17 00:00:00 2001 From: "Dan S. Camper" Date: Fri, 21 Jun 2024 11:22:20 -0500 Subject: [PATCH 08/31] HPCC-32091 Improve REGEXREPLACE performance Leverage PCRE2-specific opaque data structure search/replace usage pattern to improve average 'replace' performance. --- rtl/eclrtl/eclregex.cpp | 132 ++++++++++++++++++++++++++-------------- 1 file changed, 88 insertions(+), 44 deletions(-) diff --git a/rtl/eclrtl/eclregex.cpp b/rtl/eclrtl/eclregex.cpp index 275aa19cd4d..bab37da8ea4 100644 --- a/rtl/eclrtl/eclregex.cpp +++ b/rtl/eclrtl/eclregex.cpp @@ -386,41 +386,63 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr size32_t sourceSize = (isUTF8Enabled ? rtlUtf8Size(slen, str) : slen); size32_t replaceSize = (isUTF8Enabled ? 
rtlUtf8Size(rlen, replace) : rlen); - uint32_t replaceOptions = PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; + // Execute an explicit match first to see if we match at all; if we do, matchData will be populated + // with data that can be used by pcre2_substitute to bypass some work + int numMatches = pcre2_match_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, 0, matchData, pcre2MatchContext8); - // Call it once to get the size of the output, then allocate memory for it; - // Note that pcreLen will include space for a terminating null character; - // we have to allocate memory for that byte to avoid a buffer overrun, - // but we won't count that terminating byte - int replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, nullptr, &pcreLen); - - if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) + if (numMatches < 0 && numMatches != PCRE2_ERROR_NOMATCH) { - // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output + // Treat everything other than PCRE2_ERROR_NOMATCH as an error pcre2_match_data_free_8(matchData); - failWithPCRE2Error(replaceResult, "Error in regex replace: "); + failWithPCRE2Error(numMatches, "Error in regex replace: "); } - if (pcreLen > 0) + if (numMatches > 0) { - out = (char *)rtlMalloc(pcreLen); - - replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, (PCRE2_UCHAR8 *)out, &pcreLen); + uint32_t replaceOptions = PCRE2_SUBSTITUTE_MATCHED|PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; - // Note that, weirdly, pcreLen will now contain the number of code points - // in the result *excluding* the null terminator, so pcreLen will - // become our final result length + // Call substitute once to get the size of the output, then allocate memory for it; + // Note that pcreLen will include space for a terminating null character; + // we have to allocate memory for that byte to avoid a buffer overrun, + // but we won't count that terminating byte + int replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, nullptr, &pcreLen); - if (replaceResult < 0) + if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) { + // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output pcre2_match_data_free_8(matchData); failWithPCRE2Error(replaceResult, "Error in regex replace: "); } - } - pcre2_match_data_free_8(matchData); - // We need to return the number of characters here, not the byte count - outlen = (isUTF8Enabled ? 
rtlUtf8Length(pcreLen, out) : pcreLen); + if (pcreLen > 0) + { + out = (char *)rtlMalloc(pcreLen); + + replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, (PCRE2_UCHAR8 *)out, &pcreLen); + + // Note that, weirdly, pcreLen will now contain the number of code points + // in the result *excluding* the null terminator, so pcreLen will + // become our final result length + + if (replaceResult < 0) + { + pcre2_match_data_free_8(matchData); + failWithPCRE2Error(replaceResult, "Error in regex replace: "); + } + } + + pcre2_match_data_free_8(matchData); + // We need to return the number of characters here, not the byte count + outlen = (isUTF8Enabled ? rtlUtf8Length(pcreLen, out) : pcreLen); + } + else + { + // No match found; return the original string + out = (char *)rtlMalloc(sourceSize); + memcpy(out, str, sourceSize); + outlen = slen; + pcre2_match_data_free_8(matchData); + } } IStrRegExprFindInstance * find(const char * str, size32_t from, size32_t len, bool needToKeepSearchString) const @@ -763,41 +785,63 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr outlen = 0; pcre2_match_data_16 * matchData = pcre2_match_data_create_from_pattern_16(compiledRegex.get(), pcre2GeneralContext16); - uint32_t replaceOptions = PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; + // Execute an explicit match first to see if we match at all; if we do, matchData will be populated + // with data that can be used by pcre2_substitute to bypass some work + int numMatches = pcre2_match_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, 0, matchData, pcre2MatchContext16); - // Call it once to get the size of the output, then allocate memory for it; - // Note that pcreLen will include space for a terminating null character; - // we have to allocate memory for that byte to avoid a buffer overrun, - // but we won't count that terminating byte - int replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, nullptr, &pcreLen); - - if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) + if (numMatches < 0 && numMatches != PCRE2_ERROR_NOMATCH) { - // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output + // Treat everything other than PCRE2_ERROR_NOMATCH as an error pcre2_match_data_free_16(matchData); - failWithPCRE2Error(replaceResult, "Error in regex replace: "); + failWithPCRE2Error(numMatches, "Error in regex replace: "); } - if (pcreLen > 0) + if (numMatches > 0) { - out = (UChar *)rtlMalloc(pcreLen * sizeof(UChar)); - - replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, (PCRE2_UCHAR16 *)out, &pcreLen); + uint32_t replaceOptions = PCRE2_SUBSTITUTE_MATCHED|PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; - // Note that, weirdly, pcreLen will now contain the number of code points - // in the result *excluding* the null terminator, so pcreLen will - // become our final result length + // Call substitute once to get the size of the output, then allocate memory for it; + // Note that pcreLen will include space for a terminating null character; + // we have to allocate memory for that byte to avoid a buffer overrun, + // but we won't count that terminating byte + int replaceResult = 
pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, nullptr, &pcreLen);
 
-    if (replaceResult < 0)
+            if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY)
             {
+                // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output
                 pcre2_match_data_free_16(matchData);
                 failWithPCRE2Error(replaceResult, "Error in regex replace: ");
             }
-    }
 
-    pcre2_match_data_free_16(matchData);
-    // We need to return the number of characters here, not the byte count
-    outlen = pcreLen;
+            if (pcreLen > 0)
+            {
+                out = (UChar *)rtlMalloc(pcreLen * sizeof(UChar));
+
+                replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, (PCRE2_UCHAR16 *)out, &pcreLen);
+
+                // Note that, weirdly, pcreLen will now contain the number of code points
+                // in the result *excluding* the null terminator, so pcreLen will
+                // become our final result length
+
+                if (replaceResult < 0)
+                {
+                    pcre2_match_data_free_16(matchData);
+                    failWithPCRE2Error(replaceResult, "Error in regex replace: ");
+                }
+            }
+
+            pcre2_match_data_free_16(matchData);
+            // We need to return the number of characters here, not the byte count
+            outlen = pcreLen;
+        }
+        else
+        {
+            // No match found; return the original string
+            out = (UChar *)rtlMalloc(slen * sizeof(UChar));
+            memcpy(out, str, slen * sizeof(UChar));
+            outlen = slen;
+            pcre2_match_data_free_16(matchData);
+        }
     }
 
     IUStrRegExprFindInstance * find(const UChar * str, size32_t from, size32_t len) const

From f6408046078b26ce59b8de6a93501d8b9d6c37f3 Mon Sep 17 00:00:00 2001
From: "Dan S. Camper"
Date: Mon, 24 Jun 2024 12:55:42 -0500
Subject: [PATCH 09/31] HPCC-32126 Performance improvements in regex code (via
 Coverity)

---
 rtl/eclrtl/eclregex.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/rtl/eclrtl/eclregex.cpp b/rtl/eclrtl/eclregex.cpp
index 275aa19cd4d..8943f88d101 100644
--- a/rtl/eclrtl/eclregex.cpp
+++ b/rtl/eclrtl/eclregex.cpp
@@ -153,11 +153,11 @@ class RegexCacheEntry
     RegexCacheEntry() = delete;
 
     RegexCacheEntry(size32_t _patternSize, const char * _pattern, uint32_t _options, std::shared_ptr<pcre2_code_8> _compiledRegex8)
-    : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex8(_compiledRegex8)
+    : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex8(std::move(_compiledRegex8))
     {}
 
     RegexCacheEntry(size32_t _patternSize, const char * _pattern, uint32_t _options, std::shared_ptr<pcre2_code_16> _compiledRegex16)
-    : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex16(_compiledRegex16)
+    : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex16(std::move(_compiledRegex16))
     {}
 
     RegexCacheEntry(const RegexCacheEntry & other) = delete;
@@ -254,7 +254,7 @@ class CStrRegExprFindInstance : implements IStrRegExprFindInstance
 public:
     CStrRegExprFindInstance(std::shared_ptr<pcre2_code_8> _compiledRegex, const char * _subject, size32_t _from, size32_t _len, bool _keep)
-        : compiledRegex(_compiledRegex)
+        : compiledRegex(std::move(_compiledRegex))
     {
         // See if UTF-8 is enabled on this compiled regex
         uint32_t option_bits;
@@ -663,7 +663,7 @@ class CUStrRegExprFindInstance : implements IUStrRegExprFindInstance
 public:
     CUStrRegExprFindInstance(std::shared_ptr<pcre2_code_16> _compiledRegex, const UChar * _subject, size32_t _from, size32_t _len)
-        :
compiledRegex(std::move(_compiledRegex)) { subject = _subject + _from; matched = false; From 29ecc71185f45934e381ec9adbe21233ce7cc69b Mon Sep 17 00:00:00 2001 From: Michael Gardner Date: Mon, 24 Jun 2024 15:10:50 -0400 Subject: [PATCH 10/31] HPCC-32110-1 Fixing issue with sec3, sect2, programlisting Signed-off-by: Michael Gardner --- .../ContainerizedMods/ConfigureValues.xml | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index 0e926af95bf..4f37c591c36 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -1034,7 +1034,7 @@ https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md - + Placement O Placement é responsável por encontrar o melhor nó para um @@ -1096,21 +1096,9 @@ posicionamentos para garantir que os pods com requisitos específicos sejam colocados nos nós apropriados. -
- - - Environment Values - - Você pode definir variáveis de ambiente em um arquivo YAML. Os - valores do ambiente são definidos na parte global.env - do arquivo HPCC Systems values.yaml fornecido. Esses - valores são especificados como uma lista de pares de valor de nome - conforme ilustrado abaixo. - - global: + global: -bbe9bd8001 (HPCC-32050 -HPCC Portuguese language Update 9.6) env: - name: SMTPserver value: mysmtpserver From 5946df191d96bb10d57f56d4ddd7932274bc13ab Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 25 Jun 2024 13:15:15 +0100 Subject: [PATCH 11/31] HPCC-32136 Allow the input and output to be replaced in a buffered class Signed-off-by: Gavin Halliday --- system/jlib/jstream.cpp | 16 ++++++++++++++++ system/jlib/jstream.hpp | 6 +++++- testing/unittests/jstreamtests.cpp | 1 + 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/system/jlib/jstream.cpp b/system/jlib/jstream.cpp index fa76a9632f8..64641763a3a 100644 --- a/system/jlib/jstream.cpp +++ b/system/jlib/jstream.cpp @@ -405,6 +405,11 @@ class CBlockedSerialInputStream : public CInterfaceOfreset(_offset, _flen); } + virtual void replaceInput(ISerialInputStream * newInput) override + { + input.set(newInput); + } + protected: inline byte * data(size32_t offset) { return (byte *)buffer.get() + offset; } inline size32_t available() const { return dataLength - bufferOffset; } @@ -792,6 +797,11 @@ class CBlockedSerialOutputStream final : public CInterfaceOfwanted then got is size available in buffer }; -using IBufferedSerialInputStream = ISerialStream; +interface IBufferedSerialInputStream : extends ISerialStream +{ + virtual void replaceInput(ISerialInputStream * newInput) = 0; +}; /* example of reading a nul terminated string using ISerialStream peek and skip { @@ -100,6 +103,7 @@ interface IBufferedSerialOutputStream : extends ISerialOutputStream virtual void commit(size32_t written) = 0 ; // commit the data written to the block returned by reserve virtual void suspend(size32_t wanted) = 0; // Reserve some bytes and prevent data being flushed to the next stage until endNested is called. May nest. virtual void resume(size32_t len, const void * ptr) = 0; // update the data allocated by suspend and allow flushing. 
+    virtual void replaceOutput(ISerialOutputStream * newOutput) = 0;
 };
 
 interface ICompressor;
diff --git a/testing/unittests/jstreamtests.cpp b/testing/unittests/jstreamtests.cpp
index 98303578ccc..7e13a495e04 100644
--- a/testing/unittests/jstreamtests.cpp
+++ b/testing/unittests/jstreamtests.cpp
@@ -292,6 +292,7 @@ class NullOuputStream : public CInterfaceOf<IBufferedSerialOutputStream>
     virtual void suspend(size32_t wanted) {}
     virtual void resume(size32_t len, const void * ptr) {}
     virtual offset_t tell() const override { return 0; }
+    virtual void replaceOutput(ISerialOutputStream * newOutput) override {}
 };
 
 class JlibStreamStressTest : public CppUnit::TestFixture

From 6835477778a17332da0284334271c3b793feead7 Mon Sep 17 00:00:00 2001
From: Gavin Halliday
Date: Tue, 25 Jun 2024 16:17:22 +0100
Subject: [PATCH 12/31] HPCC-32136 Minor modifications to streaming classes

Signed-off-by: Gavin Halliday
---
 system/jlib/jstream.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/system/jlib/jstream.cpp b/system/jlib/jstream.cpp
index 64641763a3a..8f31718a8e1 100644
--- a/system/jlib/jstream.cpp
+++ b/system/jlib/jstream.cpp
@@ -290,7 +290,7 @@ IByteInputStream *createInputStream(int handle)
 // This means the buffer size is likely to be bigger than the block size - the class is passed
 // an initial estimate for the potential overlap.
 
-class CBlockedSerialInputStream : public CInterfaceOf<IBufferedSerialInputStream>
+class CBlockedSerialInputStream final : public CInterfaceOf<IBufferedSerialInputStream>
 {
 public:
     CBlockedSerialInputStream(ISerialInputStream * _input, size32_t _blockReadSize)
@@ -318,7 +318,7 @@ class CBlockedSerialInputStream : public CInterfaceOf<IBufferedSerialInputStream>
Date: Mon, 24 Jun 2024 23:09:46 -0400
Subject: [PATCH 13/31] HPCC-32131 Jtrace exporters batch config support

- Enables span batch export mode by default for remote exporters
- Exposes batch configuration options
- Updates sample otel export values files to include batch config
- Updates helm schema to expose batch config

Signed-off-by: Rodrigo Pastrana
---
 .../tracing/otlp-grpc-collector-default.yaml  |  5 +++
 .../tracing/otlp-grpc-collector-k8s.yaml      |  5 +++
 .../tracing/otlp-http-collector-default.yaml  |  7 +++-
 .../tracing/otlp-http-collector-k8s.yaml      |  5 +++
 helm/hpcc/values.schema.json                  | 12 +++++++
 system/jlib/jtrace.cpp                        | 35 +++++++++++++------
 6 files changed, 57 insertions(+), 12 deletions(-)

diff --git a/helm/examples/tracing/otlp-grpc-collector-default.yaml b/helm/examples/tracing/otlp-grpc-collector-default.yaml
index 90ca78a56b0..e038dedeb4f 100644
--- a/helm/examples/tracing/otlp-grpc-collector-default.yaml
+++ b/helm/examples/tracing/otlp-grpc-collector-default.yaml
@@ -4,3 +4,8 @@ global:
     - type: OTLP-GRPC
       endpoint: "localhost:4317"
       useSslCredentials: false
+      batch:
+        enabled: true
+        maxQueueSize: 4096
+        scheduledDelayMillis: 6000
+        maxExportBatchSize: 512
\ No newline at end of file
diff --git a/helm/examples/tracing/otlp-grpc-collector-k8s.yaml b/helm/examples/tracing/otlp-grpc-collector-k8s.yaml
index a5aa01b2dd6..2730b415a1c 100644
--- a/helm/examples/tracing/otlp-grpc-collector-k8s.yaml
+++ b/helm/examples/tracing/otlp-grpc-collector-k8s.yaml
@@ -4,3 +4,8 @@ global:
     - type: OTLP-GRPC
       endpoint: "http://myotelcollector-opentelemetry-collector.default.svc.cluster.local:4317"
       useSslCredentials: false
+      batch:
+        enabled: true
+        maxQueueSize: 4096
+        scheduledDelayMillis: 6000
+        maxExportBatchSize: 512
diff --git a/helm/examples/tracing/otlp-http-collector-default.yaml b/helm/examples/tracing/otlp-http-collector-default.yaml
index c48979473d6..361d1afe126 100644
---
a/helm/examples/tracing/otlp-http-collector-default.yaml
+++ b/helm/examples/tracing/otlp-http-collector-default.yaml
@@ -3,4 +3,9 @@ global:
     exporters:
     - type: OTLP-HTTP
       endpoint: "localhost:4318/v1/traces"
-      consoleDebug: true
\ No newline at end of file
+      consoleDebug: true
+      batch:
+        enabled: true
+        maxQueueSize: 4096
+        scheduledDelayMillis: 6000
+        maxExportBatchSize: 512
\ No newline at end of file
diff --git a/helm/examples/tracing/otlp-http-collector-k8s.yaml b/helm/examples/tracing/otlp-http-collector-k8s.yaml
index d4f77ba86a5..74eb0e40e0d 100644
--- a/helm/examples/tracing/otlp-http-collector-k8s.yaml
+++ b/helm/examples/tracing/otlp-http-collector-k8s.yaml
@@ -4,3 +4,8 @@ global:
     - type: OTLP-HTTP
       endpoint: "http://myotelcollector-opentelemetry-collector.default.svc.cluster.local:4318/v1/traces"
       consoleDebug: true
+      batch:
+        enabled: true
+        maxQueueSize: 4096
+        scheduledDelayMillis: 6000
+        maxExportBatchSize: 512
diff --git a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json
index e028ee27f92..49cfc60bfca 100644
--- a/helm/hpcc/values.schema.json
+++ b/helm/hpcc/values.schema.json
@@ -1165,6 +1165,18 @@
                 "enabled": {
                   "type": "boolean",
                   "description": "If true, trace data is processed in a batch, if false, trace data is processed immediately"
+                },
+                "maxQueueSize": {
+                  "type": "number",
+                  "description": "The maximum buffer/queue size. After the size is reached, spans are dropped."
+                },
+                "scheduledDelayMillis": {
+                  "type": "number",
+                  "description": "The time interval between two consecutive exports."
+                },
+                "maxExportBatchSize": {
+                  "type": "number",
+                  "description": " The maximum batch size of every export. It must be smaller or equal to max_queue_size."
                 }
               },
               "additionalProperties": { "type": ["integer", "string", "boolean"] }
diff --git a/system/jlib/jtrace.cpp b/system/jlib/jtrace.cpp
index d166ad06eaa..ce6f7f68fd5 100644
--- a/system/jlib/jtrace.cpp
+++ b/system/jlib/jtrace.cpp
@@ -494,7 +494,7 @@ class CTraceManager : implements ITraceManager, public CInterface
     void initTracerProviderAndGlobalInternals(const IPropertyTree * traceConfig);
     void initTracer(const IPropertyTree * traceConfig);
     void cleanupTracer();
-    std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> createExporter(const IPropertyTree * exportConfig);
+    std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> createExporter(const IPropertyTree * exportConfig, bool & shouldBatch);
     std::unique_ptr<opentelemetry::sdk::trace::SpanProcessor> createProcessor(const IPropertyTree * exportConfig);
 
 public:
@@ -1159,10 +1159,11 @@ IProperties * getSpanContext(const ISpan * span)
 
 //---------------------------------------------------------------------------------------------------------------------
 
-std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> CTraceManager::createExporter(const IPropertyTree * exportConfig)
+std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> CTraceManager::createExporter(const IPropertyTree * exportConfig, bool & shouldBatch)
 {
     assertex(exportConfig);
 
+    shouldBatch = true;
     StringBuffer exportType;
     exportConfig->getProp("@type", exportType);
 
@@ -1172,6 +1173,7 @@ std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> CTraceManager::createEx
     if (stricmp(exportType.str(), "OS")==0) //To stdout/err
     {
         LOG(MCoperatorInfo, "Tracing exporter set OS");
+        shouldBatch = false;
         return opentelemetry::exporter::trace::OStreamSpanExporterFactory::Create();
     }
     else if (stricmp(exportType.str(), "OTLP")==0 || stricmp(exportType.str(), "OTLP-HTTP")==0)
@@ -1255,6 +1257,7 @@ std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> CTraceManager::createEx
         if (logFlags == SpanLogFlags::LogNone)
             logFlags = DEFAULT_SPAN_LOG_FLAGS;
 
+        shouldBatch = false;
         LOG(MCoperatorInfo, "Tracing exporter set to JLog: logFlags( LogAttributes LogParentInfo %s)", logFlagsStr.str());
         return
JLogSpanExporterFactory::Create(logFlags);
     }
@@ -1268,10 +1271,11 @@ std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> CTraceManager::createEx
 
 std::unique_ptr<opentelemetry::sdk::trace::SpanProcessor> CTraceManager::createProcessor(const IPropertyTree * exportConfig)
 {
+    bool batchDefault; //to be determined by the createExporter function
     std::unique_ptr<opentelemetry::sdk::trace::SpanExporter> exporter;
     try
     {
-        exporter = createExporter(exportConfig);
+        exporter = createExporter(exportConfig, batchDefault);
     }
     catch(const std::exception& e) //polymorphic type std::exception
     {
@@ -1285,16 +1289,25 @@ std::unique_ptr<opentelemetry::sdk::trace::SpanProcessor> CTraceManager::createP
     if (!exporter)
         return nullptr;
 
-    if (exportConfig->getPropBool("batch/@enabled", false))
+    if (exportConfig->getPropBool("batch/@enabled", batchDefault))
     {
         //Groups several spans together, before sending them to an exporter.
-        //MORE: These options should be configurable from batch/@option
-        opentelemetry::v1::sdk::trace::BatchSpanProcessorOptions options; //size_t max_queue_size = 2048;
-        //The time interval between two consecutive exports
-        //std::chrono::milliseconds(5000);
-        //The maximum batch size of every export. It must be smaller or
-        //equal to max_queue_size.
-        //size_t max_export_batch_size = 512
+        opentelemetry::v1::sdk::trace::BatchSpanProcessorOptions options;
+        /**
+         * The maximum buffer/queue size. After the size is reached, spans are
+         * dropped.
+         */
+        options.max_queue_size = exportConfig->getPropInt("batch/@maxQueueSize", 2048);
+
+        /* The time interval between two consecutive exports. */
+        options.schedule_delay_millis = std::chrono::milliseconds(exportConfig->getPropInt("batch/@scheduledDelayMillis", 5000));
+
+        /**
+         * The maximum batch size of every export. It must be smaller or
+         * equal to max_queue_size.
+         */
+        options.max_export_batch_size = exportConfig->getPropInt("batch/@maxExportBatchSize", 512);
+
         return opentelemetry::sdk::trace::BatchSpanProcessorFactory::Create(std::move(exporter), options);
     }

From c309fd15a1271c5886b220af6c66d0885cc9092e Mon Sep 17 00:00:00 2001
From: "Dan S. Camper"
Date: Tue, 25 Jun 2024 12:33:37 -0500
Subject: [PATCH 14/31] HPCC-32140 eclcc should expand embedded archives within
 an ECL archive file

Embedded archives will be unpacked into subdirectories based upon their
original package values (typically git branch names) or, if package values
are not found, an ascending numeric archive_NNNNNN name.
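For illustration (hypothetical archive content and target path; only the
naming rule comes from the code below), expanding an archive such as

    <Archive>
     <Archive package="mybranch">...</Archive>
     <Archive>...</Archive>
     <Module>...</Module>
    </Archive>

into /tmp/out would place the first embedded archive's contents under
/tmp/out/mybranch/ and the second's under /tmp/out/archive_000001/, while the
outer archive's own modules continue to expand directly under /tmp/out/. Note
that the numeric counter only advances for embedded archives that carry no
@package value.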
---
 ecl/hql/hqlcache.cpp | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ecl/hql/hqlcache.cpp b/ecl/hql/hqlcache.cpp
index 1e9cd46ebd0..1389f456b8c 100644
--- a/ecl/hql/hqlcache.cpp
+++ b/ecl/hql/hqlcache.cpp
@@ -507,6 +507,24 @@ extern HQL_API void expandArchive(const char * path, IPropertyTree * archive, bo
     StringBuffer baseFilename;
     makeAbsolutePath(path, baseFilename, false);
     addPathSepChar(baseFilename);
+    unsigned int embeddedArchiveNum = 0;
+
+    // Look for embedded archives and recursively expand them
+    Owned<IPropertyTreeIterator> embeddedArchives = archive->getElements("Archive");
+    ForEach(*embeddedArchives)
+    {
+        // Append the package value to the path, if it exists
+        StringBuffer embeddedFilename(baseFilename);
+        if (embeddedArchives->query().hasProp("@package"))
+        {
+            embeddedFilename.append(embeddedArchives->query().queryProp("@package"));
+        }
+        else
+        {
+            embeddedFilename.appendf("archive_%0*d", 6, ++embeddedArchiveNum);
+        }
+        expandArchive(embeddedFilename, &embeddedArchives->query(), includePlugins);
+    }
 
     Owned<IPropertyTreeIterator> modules = archive->getElements("Module");
     ForEach(*modules)

From f3116659a55e1af6a99d5cbf5208211879f46cd3 Mon Sep 17 00:00:00 2001
From: Gavin Halliday
Date: Wed, 26 Jun 2024 15:28:35 +0100
Subject: [PATCH 15/31] HPCC-32148 Add an option to gather metrics by default
 for service calls in esp

Signed-off-by: Gavin Halliday
---
 cmake_modules/options.cmake                 | 1 +
 initfiles/componentfiles/configxml/dali.xsl | 2 +-
 tools/hidl/CMakeLists.txt                   | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/cmake_modules/options.cmake b/cmake_modules/options.cmake
index 1919ae71b07..6a39ffd369e 100644
--- a/cmake_modules/options.cmake
+++ b/cmake_modules/options.cmake
@@ -70,6 +70,7 @@ option(USE_ADDRESS_SANITIZER "Use address sanitizer to spot leaks" OFF)
 option(INSTALL_VCPKG_CATALOG "Install vcpkg-catalog.txt" ON)
 option(PORTALURL "Set url to hpccsystems portal download page")
 option(PROFILING "Set to true if planning to profile so stacks are informative" OFF)
+option(COLLECT_SERVICE_METRICS "Set to true to gather metrics for HIDL services by default" OFF)
 
 set(CUSTOM_LABEL "" CACHE STRING "Appends a custom label to the final package name")
 
diff --git a/initfiles/componentfiles/configxml/dali.xsl b/initfiles/componentfiles/configxml/dali.xsl
index 5983537297d..a8fa70c2aab 100644
--- a/initfiles/componentfiles/configxml/dali.xsl
+++ b/initfiles/componentfiles/configxml/dali.xsl
@@ -346,8 +346,8 @@
 
+
-
 
diff --git a/tools/hidl/CMakeLists.txt b/tools/hidl/CMakeLists.txt
index ac69965966a..a67cf109401 100644
--- a/tools/hidl/CMakeLists.txt
+++ b/tools/hidl/CMakeLists.txt
@@ -23,7 +23,7 @@
 
 project( hidl )
 
-if(CMAKE_BUILD_TYPE STREQUAL "Debug")
+if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR COLLECT_SERVICE_METRICS)
     add_definitions(-DENABLE_DEFAULT_EXECUTION_PROFILING)
 endif()

From bcdbc49316455ed8c3df96b8c79167c45c74e10b Mon Sep 17 00:00:00 2001
From: Richard Chapman
Date: Wed, 19 Jun 2024 14:19:12 +0100
Subject: [PATCH 16/31] HPCC-32031 Generate summary information in workunit to
 speed up file list operations

Track whether references are used in conjunction with OPT and signed code.
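For illustration, the stored form looks like the following (file names are
hypothetical): each summary type becomes a newline-separated list under a
Summaries element in the workunit, with every entry prefixed by a hex flags
digit:

    <Summaries>
     <ReadFile>0:thor::inputfile
    1:thor::optionalfile</ReadFile>
    </Summaries>

Flag bit 0x01 records an OPT reference and bit 0x02 a signed one. When the
same name is recorded more than once the flags are ANDed together, so a file
only remains marked optional (or signed) if every reference to it was.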
Signed-off-by: Richard Chapman
---
 common/pkgfiles/referencedfilelist.cpp | 87 ++++++++++++++++++--------
 common/workunit/workunit.cpp           | 69 +++++++++++++++++++-
 common/workunit/workunit.hpp           | 39 ++++++++++++
 common/workunit/workunit.ipp           |  3 +
 ecl/hqlcpp/hqlckey.cpp                 | 14 +++--
 ecl/hqlcpp/hqlcpp.ipp                  | 26 +++++++-
 ecl/hqlcpp/hqlhtcpp.cpp                | 45 +++++++++----
 ecl/hqlcpp/hqlsource.cpp               | 26 +++++++-
 8 files changed, 259 insertions(+), 50 deletions(-)

diff --git a/common/pkgfiles/referencedfilelist.cpp b/common/pkgfiles/referencedfilelist.cpp
index 837a1ee1e4f..9a246966413 100644
--- a/common/pkgfiles/referencedfilelist.cpp
+++ b/common/pkgfiles/referencedfilelist.cpp
@@ -965,34 +965,15 @@ void ReferencedFileList::addFilesFromPackageMap(IPropertyTree *pm)
 
 bool ReferencedFileList::addFilesFromQuery(IConstWorkUnit *cw, const IHpccPackage *pkg)
 {
-    Owned<IConstWUGraphIterator> graphs = &cw->getGraphs(GraphTypeActivities);
-    ForEach(*graphs)
+    SummaryMap files;
+    if (cw->getSummary(SummaryType::ReadFile, files) &&
+        cw->getSummary(SummaryType::ReadIndex, files))
     {
-        Owned<IPropertyTree> xgmml = graphs->query().getXGMMLTree(false, false);
-        Owned<IPropertyTreeIterator> iter = xgmml->getElements("//node[att/@name='_*ileName']");
-        ForEach(*iter)
+        for (const auto& [lName, summaryFlags] : files)
         {
-            IPropertyTree &node = iter->query();
-            bool isOpt = false;
-            const char *logicalName = node.queryProp("att[@name='_fileName']/@value");
-            if (!logicalName)
-                logicalName = node.queryProp("att[@name='_indexFileName']/@value");
-            if (!logicalName)
-                continue;
-
-            isOpt = node.getPropBool("att[@name='_isIndexOpt']/@value");
-            if (!isOpt)
-                isOpt = node.getPropBool("att[@name='_isOpt']/@value");
-
-            ThorActivityKind kind = (ThorActivityKind) node.getPropInt("att[@name='_kind']/@value", TAKnone);
-            //not likely to be part of roxie queries, but for forward compatibility:
-            if(kind==TAKdiskwrite || kind==TAKspillwrite || kind==TAKindexwrite || kind==TAKcsvwrite || kind==TAKxmlwrite || kind==TAKjsonwrite)
-                continue;
-            if (node.getPropBool("att[@name='_isSpill']/@value") ||
-                node.getPropBool("att[@name='_isTransformSpill']/@value"))
-                continue;
+            const char *logicalName = lName.c_str();
             StringArray subfileNames;
-            unsigned flags = isOpt ? RefFileOptional : RefFileNotOptional;
+            unsigned flags = (summaryFlags & SummaryFlags::IsOpt) ?
RefFileOptional : RefFileNotOptional;
             if (pkg)
             {
                 const char *pkgid = pkg->locateSuperFile(logicalName);
                 if (pkgid)
                 {
                     flags |= (RefFileSuper | RefFileInPackage);
                     Owned<ISimpleSuperFileEnumerator> ssfe = pkg->resolveSuperFile(logicalName);
                     if (ssfe && ssfe->numSubFiles()>0)
                     {
                         unsigned count = ssfe->numSubFiles();
                         while (count--)
                         {
                             StringBuffer subfile;
                             ssfe->getSubFileName(count, subfile);
                             ensureFile(subfile, RefSubFile | RefFileInPackage, pkgid, false, nullptr);
                             subfileNames.append(subfile);
                         }
                     }
                 }
                 ensureFile(logicalName, flags, pkgid, pkg->isCompulsory(), &subfileNames);
             }
             else
                 ensureFile(logicalName, flags, NULL, false, &subfileNames);
         }
     }
+    else
+    {
+        Owned<IConstWUGraphIterator> graphs = &cw->getGraphs(GraphTypeActivities);
+        ForEach(*graphs)
+        {
+            Owned<IPropertyTree> xgmml = graphs->query().getXGMMLTree(false, false);
+            Owned<IPropertyTreeIterator> iter = xgmml->getElements("//node[att/@name='_*ileName']");
+            ForEach(*iter)
+            {
+                IPropertyTree &node = iter->query();
+                bool isOpt = false;
+                const char *logicalName = node.queryProp("att[@name='_fileName']/@value");
+                if (!logicalName)
+                    logicalName = node.queryProp("att[@name='_indexFileName']/@value");
+                if (!logicalName)
+                    continue;
+
+                isOpt = node.getPropBool("att[@name='_isIndexOpt']/@value");
+                if (!isOpt)
+                    isOpt = node.getPropBool("att[@name='_isOpt']/@value");
+
+                ThorActivityKind kind = (ThorActivityKind) node.getPropInt("att[@name='_kind']/@value", TAKnone);
+                //not likely to be part of roxie queries, but for forward compatibility:
+                if(kind==TAKdiskwrite || kind==TAKspillwrite || kind==TAKindexwrite || kind==TAKcsvwrite || kind==TAKxmlwrite || kind==TAKjsonwrite)
+                    continue;
+                if (node.getPropBool("att[@name='_isSpill']/@value") ||
+                    node.getPropBool("att[@name='_isTransformSpill']/@value"))
+                    continue;
+                StringArray subfileNames;
+                unsigned flags = isOpt ? RefFileOptional : RefFileNotOptional;
+                if (pkg)
+                {
+                    const char *pkgid = pkg->locateSuperFile(logicalName);
+                    if (pkgid)
+                    {
+                        flags |= (RefFileSuper | RefFileInPackage);
+                        Owned<ISimpleSuperFileEnumerator> ssfe = pkg->resolveSuperFile(logicalName);
+                        if (ssfe && ssfe->numSubFiles()>0)
+                        {
+                            unsigned count = ssfe->numSubFiles();
+                            while (count--)
+                            {
+                                StringBuffer subfile;
+                                ssfe->getSubFileName(count, subfile);
+                                ensureFile(subfile, RefSubFile | RefFileInPackage, pkgid, false, nullptr);
+                                subfileNames.append(subfile);
+                            }
+                        }
+                    }
+                    ensureFile(logicalName, flags, pkgid, pkg->isCompulsory(), &subfileNames);
+                }
+                else
+                    ensureFile(logicalName, flags, NULL, false, &subfileNames);
+            }
+        }
+    }
     return pkg ?
pkg->isCompulsory() : false; } diff --git a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp index 87d6842ba7c..1337d27a276 100644 --- a/common/workunit/workunit.cpp +++ b/common/workunit/workunit.cpp @@ -4437,6 +4437,8 @@ class CLockedWorkUnit : implements ILocalWorkUnit, implements IExtendedWUInterfa { return c->getFileAccessCost(); } virtual cost_type getCompileCost() const { return c->getCompileCost(); } + virtual bool getSummary(SummaryType type, SummaryMap &map) const override + { return c->getSummary(type, map); } virtual void import(IPropertyTree *wuTree, IPropertyTree *graphProgressTree) { return c->import(wuTree, graphProgressTree); } @@ -4503,6 +4505,8 @@ class CLockedWorkUnit : implements ILocalWorkUnit, implements IExtendedWUInterfa { c->setUser(value); } virtual void setWuScope(const char * value) { c->setWuScope(value); } + virtual void setSummary(SummaryType type, const SummaryMap &map) override + { c->setSummary(type, map); } virtual IWorkflowItem* addWorkflowItem(unsigned wfid, WFType type, WFMode mode, unsigned success, unsigned failure, unsigned recovery, unsigned retriesAllowed, unsigned contingencyFor) { return c->addWorkflowItem(wfid, type, mode, success, failure, recovery, retriesAllowed, contingencyFor); } virtual void syncRuntimeWorkflow(IWorkflowItemArray * array) @@ -8721,6 +8725,65 @@ void CLocalWorkUnit::setDebugValue(const char *propname, const char *value, bool } } +static const char *summaryTypeName(SummaryType type) +{ + switch (type) + { + case SummaryType::ReadFile: return "ReadFile"; + case SummaryType::ReadIndex: return "ReadIndex"; + case SummaryType::WriteFile: return "WriteFile"; + case SummaryType::WriteIndex: return "WriteIndex"; + case SummaryType::PersistFile: return "PersistFile"; + case SummaryType::SpillFile: return "SpillFile"; + case SummaryType::JobTemp: return "JobTemp"; + case SummaryType::Service: return "Service"; + default: + throwUnexpected(); + } +}; + +bool CLocalWorkUnit::getSummary(SummaryType type, SummaryMap &map) const +{ + VStringBuffer xpath("Summaries/%s", summaryTypeName(type)); + CriticalBlock block(crit); + const char *list = p->queryProp(xpath); + if (!list) + return false; + StringArray s; + s.appendList(list, "\n"); + ForEachItemIn(idx, s) + { + const char *name = s.item(idx); + if (name && *name) + { + char *end = nullptr; + SummaryFlags flags = (SummaryFlags) strtol(name, &end, 16); + if (*end!=':') + return false; // unrecognized format + name = end+1; + if (map.find(name) == map.end()) + map[name] = flags; + else + map[name] = map[name] & flags; + } + } + return true; +} + +void CLocalWorkUnit::setSummary(SummaryType type, const SummaryMap &map) +{ + StringBuffer list; + for (const auto& [name, flags] : map) + { + if (list.length()) + list.append('\n'); + list.appendf("%01x:%s", (unsigned) flags, name.c_str()); + } + CriticalBlock block(crit); + IPropertyTree *summaries = ensurePTree(p, "Summaries"); + summaries->setProp(summaryTypeName(type), list); +} + void CLocalWorkUnit::setDebugValueInt(const char *propname, int value, bool overwrite) { StringBuffer lower; @@ -13980,6 +14043,11 @@ extern WORKUNIT_API void descheduleWorkunit(char const * wuid) doDescheduleWorkkunit(wuid); } +extern WORKUNIT_API void addWorkunitSummary(IWorkUnit * wu, SummaryType summaryType, SummaryMap &map) +{ + wu->setSummary(summaryType, map); +} + extern WORKUNIT_API void updateWorkunitStat(IWorkUnit * wu, StatisticScopeType scopeType, const char * scope, StatisticKind kind, const char * description, unsigned __int64 value, 
unsigned wfid)
 {
     StringBuffer scopestr;
@@ -14008,7 +14076,6 @@ class WuTimingUpdater : implements ITimeReportInfo
     StatisticKind kind;
 };
 
-
 extern WORKUNIT_API void updateWorkunitTimings(IWorkUnit * wu, ITimeReporter *timer)
 {
     WuTimingUpdater target(wu, SSTsection, StTimeTotalExecute);
diff --git a/common/workunit/workunit.hpp b/common/workunit/workunit.hpp
index d96471b5077..4cd299961e6 100644
--- a/common/workunit/workunit.hpp
+++ b/common/workunit/workunit.hpp
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include <map>
 #include
 
 #define LEGACY_GLOBAL_SCOPE "workunit"
@@ -1179,6 +1180,40 @@ interface IConstWUScopeIterator : extends IScmIterator
 //---------------------------------------------------------------------------------------------------------------------
 //! IWorkUnit
 //! Provides high level access to WorkUnit "header" data.
+
+// Be sure to update summaryTypeName in workunit.cpp if adding anything here
+enum class SummaryType
+{
+    First,
+    ReadFile = First,
+    ReadIndex,
+    WriteFile,
+    WriteIndex,
+    PersistFile,
+    SpillFile,
+    JobTemp,
+    Service,
+    // Keep these at the end
+    NumItems,
+    None = NumItems
+};
+
+enum SummaryFlags : byte
+{
+    None = 0,
+    IsOpt = 0x01,
+    IsSigned = 0x02,
+};
+BITMASK_ENUM(SummaryFlags);
+
+struct ncasecomp {
+    bool operator() (const std::string& lhs, const std::string& rhs) const {
+        return stricmp(lhs.c_str(), rhs.c_str()) < 0;
+    }
+};
+
+typedef std::map<std::string, SummaryFlags, ncasecomp> SummaryMap;
+
 interface IWorkUnit;
 interface IUserDescriptor;
 
@@ -1267,6 +1302,7 @@ interface IConstWorkUnit : extends IConstWorkUnitInfo
     virtual unsigned queryFileUsage(const char * filename) const = 0;
     virtual IConstWUFileUsageIterator * getFieldUsage() const = 0;
     virtual bool getFieldUsageArray(StringArray & filenames, StringArray & columnnames, const char * clusterName) const = 0;
+    virtual bool getSummary(SummaryType type, SummaryMap &result) const = 0;
 
     virtual unsigned getCodeVersion() const = 0;
     virtual unsigned getWuidVersion() const = 0;
@@ -1400,6 +1436,7 @@ interface IWorkUnit : extends IConstWorkUnit
     virtual void setResultDecimal(const char *name, unsigned sequence, int len, int precision, bool isSigned, const void *val) = 0;
     virtual void setResultDataset(const char * name, unsigned sequence, size32_t len, const void *val, unsigned numRows, bool extend) = 0;
     virtual void import(IPropertyTree *wuTree, IPropertyTree *graphProgressTree = nullptr) = 0;
+    virtual void setSummary(SummaryType type, const SummaryMap &map) = 0;
     virtual IConstWorkUnit * unlock() = 0;
 };
 
@@ -1722,6 +1759,8 @@ extern WORKUNIT_API void gatherLibraryNames(StringArray &names, StringArray &unr
 //If we add any more parameters we should consider returning an object that can be updated
 extern WORKUNIT_API void associateLocalFile(IWUQuery * query, WUFileType type, const char * name, const char * description, unsigned crc, unsigned minActivity=0, unsigned maxActivity=0);
 
+extern WORKUNIT_API void addWorkunitSummary(IWorkUnit * wu, SummaryType summaryType, SummaryMap &map);
+
 interface ITimeReporter;
 extern WORKUNIT_API void updateWorkunitStat(IWorkUnit * wu, StatisticScopeType scopeType, const char * scope, StatisticKind kind, const char * description, unsigned __int64 value, unsigned wfid=0);
 extern WORKUNIT_API void updateWorkunitTimings(IWorkUnit * wu, ITimeReporter *timer);
diff --git a/common/workunit/workunit.ipp b/common/workunit/workunit.ipp
index f83956af090..8ac4acbc0aa 100644
--- a/common/workunit/workunit.ipp
+++ b/common/workunit/workunit.ipp
@@ -379,6 +379,9 @@ public:
     void setTimeScheduled(const IJlibDateTime &val);
virtual void subscribe(WUSubscribeOptions options) {}; + virtual bool getSummary(SummaryType type, SummaryMap &map) const override; + virtual void setSummary(SummaryType type, const SummaryMap &map) override; + // ILocalWorkUnit - used for debugging etc void loadXML(const char *xml); void serialize(MemoryBuffer &tgt); diff --git a/ecl/hqlcpp/hqlckey.cpp b/ecl/hqlcpp/hqlckey.cpp index f04c2cad29b..47099f3d328 100644 --- a/ecl/hqlcpp/hqlckey.cpp +++ b/ecl/hqlcpp/hqlckey.cpp @@ -162,6 +162,8 @@ class KeyedJoinInfo : public CInterface bool needToExtractJoinFields() const { return extractJoinFieldsTransform != NULL; } bool hasPostFilter() const { return monitors->queryExtraFilter() || fileFilter; } bool requireActivityForKey() const { return hasComplexIndex; } + bool isKeySigned() { return key->hasAttribute(_signed_Atom); } + bool isFileSigned() { return file && file->hasAttribute(_signed_Atom); } void reportFailureReason(IHqlExpression * cond) { monitors->reportFailureReason(cond); } bool useValueSets() const { return createValueSets; } @@ -1192,7 +1194,7 @@ void HqlCppTranslator::buildKeyedJoinExtra(ActivityInstance & instance, IHqlExpr //virtual const char * getFileName() = 0; // Returns filename of raw file fpos'es refer into if (info->isFullJoin()) - buildFilenameFunction(instance, instance.createctx, WaFilename, "getFileName", info->queryFileFilename(), hasDynamicFilename(info->queryFile())); + buildFilenameFunction(instance, instance.createctx, WaFilename, "getFileName", info->queryFileFilename(), hasDynamicFilename(info->queryFile()), SummaryType::ReadFile, info->isKeyOpt(), info->isFileSigned()); //virtual bool diskAccessRequired() = 0; if (info->isFullJoin()) @@ -1229,7 +1231,7 @@ void HqlCppTranslator::buildKeyJoinIndexReadHelper(ActivityInstance & instance, info->buildExtractIndexReadFields(instance.startctx); //virtual const char * getIndexFileName() = 0; - buildFilenameFunction(instance, instance.startctx, WaIndexname, "getIndexFileName", info->queryKeyFilename(), hasDynamicFilename(info->queryKey())); + buildFilenameFunction(instance, instance.startctx, WaIndexname, "getIndexFileName", info->queryKeyFilename(), hasDynamicFilename(info->queryKey()), SummaryType::ReadIndex, info->isKeyOpt(), info->isKeySigned()); //virtual IOutputMetaData * queryIndexRecordSize() = 0; LinkedHqlExpr indexExpr = info->queryOriginalKey(); @@ -1489,7 +1491,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityKeyedDistribute(BuildCtx & ctx doBuildUnsignedFunction(instance->classctx, "getFlags", flags.str()+1); //virtual const char * getIndexFileName() = 0; - buildFilenameFunction(*instance, instance->startctx, WaIndexname, "getIndexFileName", keyFilename, dynamic); + buildFilenameFunction(*instance, instance->startctx, WaIndexname, "getIndexFileName", keyFilename, dynamic, SummaryType::ReadIndex, info.isKeyOpt(), info.isKeySigned()); //virtual IOutputMetaData * queryIndexRecordSize() = 0; LinkedHqlExpr indexExpr = info.queryRawKey(); @@ -1583,7 +1585,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityKeyDiff(BuildCtx & ctx, IHqlEx noteAllFieldsUsed(updated); //virtual const char * getOutputName() = 0; - buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr)); + buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr), SummaryType::WriteFile, false, expr->hasAttribute(_signed_Atom)); //virtual int getSequence() = 0; doBuildSequenceFunc(instance->classctx, 
querySequence(expr), false);
@@ -1626,10 +1628,10 @@ ABoundActivity * HqlCppTranslator::doBuildActivityKeyPatch(BuildCtx & ctx, IHqlE
     noteAllFieldsUsed(original);
 
     //virtual const char * getPatchName() = 0;
-    buildFilenameFunction(*instance, instance->startctx, WaPatchFilename, "getPatchName", patch, true);
+    buildFilenameFunction(*instance, instance->startctx, WaPatchFilename, "getPatchName", patch, true, SummaryType::ReadFile, false, false);
 
     //virtual const char * getOutputName() = 0;
-    buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr));
+    buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr), SummaryType::WriteIndex, false, false);
 
     //virtual int getSequence() = 0;
     doBuildSequenceFunc(instance->classctx, querySequence(expr), false);
diff --git a/ecl/hqlcpp/hqlcpp.ipp b/ecl/hqlcpp/hqlcpp.ipp
index 16ef89f64e3..0607c583892 100644
--- a/ecl/hqlcpp/hqlcpp.ipp
+++ b/ecl/hqlcpp/hqlcpp.ipp
@@ -1886,8 +1886,8 @@ public:
     void doBuildFunctionReturn(BuildCtx & ctx, ITypeInfo * type, IHqlExpression * value);
     void doBuildUserFunctionReturn(BuildCtx & ctx, ITypeInfo * type, IHqlExpression * value);
 
-    void addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr);
-    void buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic);
+    void addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, SummaryType summaryType);
+    void buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned);
     void buildRefFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * dataset);
     void createAccessFunctions(StringBuffer & helperFunc, BuildCtx & declarectx, unsigned prio, const char * interfaceName, const char * object);
 
@@ -1911,7 +1911,7 @@ protected:
     void buildIteratorNext(BuildCtx & ctx, IHqlExpression * iter, IHqlExpression * row);
     bool shouldEvaluateSelectAsAlias(BuildCtx & ctx, IHqlExpression * expr);
     IWUResult * createWorkunitResult(int sequence, IHqlExpression * nameExpr);
-    void noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic);
+    void noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned);
     bool checkGetResultContext(BuildCtx & ctx, IHqlExpression * expr, CHqlBoundExpr & tgt);
     void buildGetResultInfo(BuildCtx & ctx, IHqlExpression * expr, CHqlBoundExpr * boundTarget, const CHqlBoundTarget * targetAssign);
     void buildGetResultSetInfo(BuildCtx & ctx, IHqlExpression * expr, CHqlBoundExpr * boundTarget, const CHqlBoundTarget * targetAssign);
@@ -2038,6 +2038,7 @@ protected:
     bool isNeverDistributed(IHqlExpression * expr);
 
     void ensureWorkUnitUpdated();
+    void addWorkunitSummaries();
     bool getDebugFlag(const char * name, bool defValue);
     void initOptions();
    void postProcessOptions();
@@ -2140,6 +2141,25 @@ protected:
     Owned<ITimeReporter> timeReporter;
     CIArrayOf<SourceFieldUsage> trackedSources;
     HqlExprArray tracedActivities;
+
+    // These are used to generate workunit summary info, to avoid having to walk the xgmml to get it
+    SummaryMap summaries[(int) SummaryType::NumItems];
+    void noteSummaryInfo(const char *name, SummaryType type,
bool isOpt, bool isSigned)
+    {
+        if (type != SummaryType::None)
+        {
+            SummaryMap &map = summaries[(int) type];
+            SummaryFlags flags = SummaryFlags::None;
+            if (isOpt)
+                flags |= SummaryFlags::IsOpt;
+            if (isSigned)
+                flags |= SummaryFlags::IsSigned;
+            if (map.find(name) == map.end())
+                map[name] = flags;
+            else
+                map[name] = map[name] & flags;
+        }
+    }
 };
diff --git a/ecl/hqlcpp/hqlhtcpp.cpp b/ecl/hqlcpp/hqlhtcpp.cpp
index 1c0a55836be..ea303805449 100644
--- a/ecl/hqlcpp/hqlhtcpp.cpp
+++ b/ecl/hqlcpp/hqlhtcpp.cpp
@@ -3382,21 +3382,21 @@ void HqlCppTranslator::doBuildFunction(BuildCtx & ctx, ITypeInfo * type, const c
     }
 }
 
-void HqlCppTranslator::addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr)
+void HqlCppTranslator::addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, SummaryType summaryType)
 {
     OwnedHqlExpr folded = foldHqlExpression(expr);
     instance.addConstructorParameter(folded);
-    noteFilename(instance, attr, folded, false);
+    noteFilename(instance, attr, folded, false, summaryType, false, false);
 }
 
-void HqlCppTranslator::buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic)
+void HqlCppTranslator::buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned)
 {
     OwnedHqlExpr folded = foldHqlExpression(expr);
     doBuildVarStringFunction(classctx, name, folded);
-    noteFilename(instance, attr, folded, isDynamic);
+    noteFilename(instance, attr, folded, isDynamic, summaryType, isOpt, isSigned);
 }
 
-void HqlCppTranslator::noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic)
+void HqlCppTranslator::noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned)
 {
     if (options.addFilesnamesToGraph)
     {
@@ -3417,6 +3417,7 @@ void HqlCppTranslator::noteFilename(ActivityInstance & instance, WuAttr attr, IH
                 StringBuffer propValue;
                 folded->queryValue()->getStringValue(propValue);
                 instance.addAttribute(attr, propValue);
+                noteSummaryInfo(propValue, summaryType, isOpt, isSigned);
             }
         }
         if (isDynamic)
@@ -3459,20 +3460,24 @@ void HqlCppTranslator::buildRefFilenameFunction(ActivityInstance & instance, Bui
     assertex(table);
 
     IHqlExpression * filename = NULL;
+    SummaryType summaryType = SummaryType::ReadFile;
     switch (table->getOperator())
     {
     case no_keyindex:
         filename = table->queryChild(2);
+        summaryType = SummaryType::ReadIndex;
         break;
     case no_newkeyindex:
         filename = table->queryChild(3);
+        summaryType = SummaryType::ReadIndex;
         break;
     case no_table:
         filename = table->queryChild(0);
+        summaryType = SummaryType::ReadFile;
         break;
     }
 
-    buildFilenameFunction(instance, classctx, attr, name, filename, hasDynamicFilename(table));
+    buildFilenameFunction(instance, classctx, attr, name, filename, hasDynamicFilename(table), summaryType, table->hasAttribute(optAtom), table->hasAttribute(_signed_Atom));
 }
 
 void HqlCppTranslator::buildConnectInputOutput(BuildCtx & ctx, ActivityInstance * instance, ABoundActivity * table, unsigned outputIndex, unsigned inputIndex, const char * label, bool nWay)
@@ -6236,12 +6241,17 @@ bool HqlCppTranslator::buildCpp(IHqlCppInstance & _code, HqlQueryContext & query
         ensureWorkUnitUpdated();
         throw;
     }
+    addWorkunitSummaries();
     ensureWorkUnitUpdated();
-
     return true;
 }
+void
HqlCppTranslator::addWorkunitSummaries()
+{
+    for (int i = (int) SummaryType::First; i < (int) SummaryType::NumItems; i++)
+        addWorkunitSummary(wu(), (SummaryType) i, summaries[i]);
+}
+
 void HqlCppTranslator::ensureWorkUnitUpdated()
 {
     if (timeReporter)
@@ -10659,7 +10669,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutputIndex(BuildCtx & ctx, IH
     buildInstancePrefix(instance);
 
     //virtual const char * getFileName() { return "x.d00"; }
-    buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(expr));
+    buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(expr), SummaryType::WriteIndex, false, expr->hasAttribute(_signed_Atom));
 
     //virtual unsigned getFlags() = 0;
     IHqlExpression * updateAttr = expr->queryAttribute(updateAtom);
@@ -10710,7 +10720,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutputIndex(BuildCtx & ctx, IH
 
     IHqlExpression * indexNameAttr = expr->queryAttribute(indexAtom);
     if (indexNameAttr)
-        buildFilenameFunction(*instance, instance->startctx, WaDistributeIndexname, "getDistributeIndexName", indexNameAttr->queryChild(0), hasDynamicFilename(expr));
+        buildFilenameFunction(*instance, instance->startctx, WaDistributeIndexname, "getDistributeIndexName", indexNameAttr->queryChild(0), hasDynamicFilename(expr), SummaryType::ReadIndex, false, expr->hasAttribute(_signed_Atom));
 
     buildExpiryHelper(instance->createctx, expr->queryAttribute(expireAtom));
     buildUpdateHelper(instance->createctx, *instance, dataset, updateAttr);
@@ -10942,15 +10952,18 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp
     Owned<ABoundActivity> boundDataset = buildCachedActivity(ctx, dataset);
     ThorActivityKind kind = TAKdiskwrite;
     const char * activityArgName = "DiskWrite";
+    SummaryType summaryType = SummaryType::WriteFile;
     if (expr->getOperator() == no_spill)
     {
         kind = TAKspill;
         activityArgName = "Spill";
+        summaryType = SummaryType::SpillFile;
     }
     else if (pipe)
     {
         kind = TAKpipewrite;
         activityArgName = "PipeWrite";
+        summaryType = SummaryType::None;
     }
     else if (csvAttr)
     {
@@ -10963,7 +10976,14 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp
         activityArgName = "XmlWrite";
     }
     else if (expr->hasAttribute(_spill_Atom))
+    {
         kind = TAKspillwrite;
+        summaryType = SummaryType::SpillFile;
+    }
+    if (expr->hasAttribute(jobTempAtom))
+        summaryType = SummaryType::JobTemp;
+    else if (expr->hasAttribute(_workflowPersist_Atom))
+        summaryType = SummaryType::PersistFile;
 
     bool useImplementationClass = options.minimizeActivityClasses && targetRoxie() && expr->hasAttribute(_spill_Atom);
     Owned<ActivityInstance> instance = new ActivityInstance(*this, ctx, kind, expr, activityArgName);
@@ -11061,7 +11081,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp
         if (filename && filename->getOperator() != no_pipe)
         {
             bool isDynamic = expr->hasAttribute(resultAtom) || hasDynamicFilename(expr);
-            buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, isDynamic);
+            buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, isDynamic, summaryType, false, expr->hasAttribute(_signed_Atom));
             if (!filename->isConstant())
                 constFilename = false;
         }
@@ -11163,7 +11183,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp
     {
         assertex(tempCount.get() && !hasDynamic(expr));
         instance->addConstructorParameter(tempCount);
-        addFilenameConstructorParameter(*instance, WaFilename,
filename); + addFilenameConstructorParameter(*instance, WaFilename, filename, summaryType); } instance->addSignedAttribute(expr->queryAttribute(_signed_Atom)); @@ -18050,6 +18070,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivitySOAP(BuildCtx & ctx, IHqlExpre StringBuffer serviceName; getUTF8Value(serviceName, service); instance->addAttribute(WaServiceName, serviceName); + noteSummaryInfo(serviceName, SummaryType::Service, false, false); } enum class ReqFormat { NONE, XML, JSON, FORM_ENCODED }; diff --git a/ecl/hqlcpp/hqlsource.cpp b/ecl/hqlcpp/hqlsource.cpp index 3c22a022168..55fca66803a 100644 --- a/ecl/hqlcpp/hqlsource.cpp +++ b/ecl/hqlcpp/hqlsource.cpp @@ -1174,7 +1174,27 @@ void SourceBuilder::rebindFilepositons(BuildCtx & ctx, IHqlExpression * dataset, void SourceBuilder::buildFilenameMember() { //---- virtual const char * getFileName() { return "x.d00"; } ---- - translator.buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", nameExpr, translator.hasDynamicFilename(tableExpr)); + SummaryType summaryType = SummaryType::ReadFile; + switch (activityKind) + { + case TAKindexread: + case TAKindexnormalize: + case TAKindexaggregate: + case TAKindexcount: + case TAKindexgroupaggregate: + summaryType = SummaryType::ReadIndex; + break; + case TAKspillread: + summaryType = SummaryType::SpillFile; + break; + } + if (tableExpr->hasAttribute(_spill_Atom)) + summaryType = SummaryType::SpillFile; + else if (tableExpr->hasAttribute(jobTempAtom)) + summaryType = SummaryType::JobTemp; + else if (tableExpr->hasAttribute(_workflowPersist_Atom)) + summaryType = SummaryType::PersistFile; + translator.buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", nameExpr, translator.hasDynamicFilename(tableExpr), summaryType, tableExpr->hasAttribute(optAtom), tableExpr->hasAttribute(_signed_Atom)); } void SourceBuilder::buildReadMembers(IHqlExpression * expr) @@ -2115,7 +2135,7 @@ ABoundActivity * SourceBuilder::buildActivity(BuildCtx & ctx, IHqlExpression * e else throwError1(HQLERR_ReadSpillBeforeWrite, spillName.str()); } - translator.addFilenameConstructorParameter(*instance, WaFilename, nameExpr); + translator.addFilenameConstructorParameter(*instance, WaFilename, nameExpr, SummaryType::SpillFile); } if (steppedExpr) @@ -4843,7 +4863,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityXmlRead(BuildCtx & ctx, IHqlEx fieldUsage->noteAll(); //---- virtual const char * getFileName() { return "x.d00"; } ---- - buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(tableExpr)); + buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(tableExpr), SummaryType::ReadIndex, tableExpr->hasAttribute(optAtom), tableExpr->hasAttribute(_signed_Atom)); buildEncryptHelper(instance->startctx, tableExpr->queryAttribute(encryptAtom)); bool usesContents = false; From 68198d1da4ed22216615b6952fdcac0866d1ed4b Mon Sep 17 00:00:00 2001 From: Richard Chapman Date: Wed, 26 Jun 2024 11:50:24 +0100 Subject: [PATCH 17/31] HPCC-30252 Optimize WuInfo::IncludeServiceNames Use new summary information from workunit if present. 
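The same fast path generalizes to the other summary types. A sketch of the
intended pattern (illustrative fragment only; ensureRequired is a
hypothetical consumer, cw an IConstWorkUnit as in the code below):

    SummaryMap files;
    if (cw->getSummary(SummaryType::ReadFile, files))
    {
        // Fast path: (name, flags) pairs recorded at code-generation time
        for (const auto & [name, flags] : files)
        {
            if (!(flags & SummaryFlags::IsOpt))
                ensureRequired(name.c_str()); // hypothetical helper
        }
    }
    else
    {
        // Older workunit without summaries: walk the graphs/scopes as before
    }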
Signed-off-by: Richard Chapman
---
 .../ws_workunits/ws_workunitsHelpers.cpp | 34 ++++++++++++------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/esp/services/ws_workunits/ws_workunitsHelpers.cpp b/esp/services/ws_workunits/ws_workunitsHelpers.cpp
index 212e90d7b4f..62000fcf450 100644
--- a/esp/services/ws_workunits/ws_workunitsHelpers.cpp
+++ b/esp/services/ws_workunits/ws_workunitsHelpers.cpp
@@ -1203,20 +1203,30 @@ void WsWuInfo::getServiceNames(IEspECLWorkunit &info, unsigned long flags)
 {
     if (!(flags & WUINFO_IncludeServiceNames))
         return;
-
     StringArray serviceNames;
-    WuScopeFilter filter;
-    filter.addScopeType("activity");
-    filter.addOutputAttribute(WaServiceName);
-    filter.addRequiredAttr(WaServiceName);
-    filter.finishedFilter();
-    Owned<IConstWUScopeIterator> it = &cw->getScopeIterator(filter);
-    ForEach(*it)
+    SummaryMap services;
+    if (cw->getSummary(SummaryType::Service, services))
+    {
+        for (const auto& [serviceName, flags] : services)
+            if (!serviceName.empty())
+                serviceNames.append(serviceName.c_str());
+    }
+    else
     {
-        StringBuffer serviceName;
-        const char *value = it->queryAttribute(WaServiceName, serviceName);
-        if (!isEmptyString(value))
-            serviceNames.append(value);
+        // Old method used if new information not present
+        WuScopeFilter filter;
+        filter.addScopeType("activity");
+        filter.addOutputAttribute(WaServiceName);
+        filter.addRequiredAttr(WaServiceName);
+        filter.finishedFilter();
+        Owned<IConstWUScopeIterator> it = &cw->getScopeIterator(filter);
+        ForEach(*it)
+        {
+            StringBuffer serviceName;
+            const char *value = it->queryAttribute(WaServiceName, serviceName);
+            if (!isEmptyString(value))
+                serviceNames.append(value);
+        }
     }
     info.setServiceNames(serviceNames);
 }

From a04e422437e02022b5e8c1f894505cd5cfafd302 Mon Sep 17 00:00:00 2001
From: Kunal Aswani
Date: Wed, 26 Jun 2024 11:49:23 -0400
Subject: [PATCH 18/31] HPCC-32150 ES Translations 9.8.x

Translations added for Spanish (ES).

Signed-off-by: Kunal Aswani
---
 esp/src/src/nls/es/hpcc.ts | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/esp/src/src/nls/es/hpcc.ts b/esp/src/src/nls/es/hpcc.ts
index 79938c65ca2..6ce86086867 100644
--- a/esp/src/src/nls/es/hpcc.ts
+++ b/esp/src/src/nls/es/hpcc.ts
@@ -412,6 +412,8 @@ export = {
     IgnoreGlobalStoreOutEdges: "Ignorar los bordes de salida de el almacén global",
     Import: "Importar",
     Inactive: "Inactivo",
+    IncludePerComponentLogs: "Incluir registros por componente",
+    IncludeRelatedLogs: "Incluir registros relacionados",
     IncludeSlaveLogs: "Incluir slave logs",
     IncludeSubFileInfo: "¿Incluir información de subarchivo?",
     Index: "Indice",
@@ -583,6 +585,7 @@ export = {
     Newest: "El Mas Nuevo",
     NewPassword: "Nueva Contraseña",
     NextSelection: "Siguiente selección",
+    NextWorkunit: "Siguiente unidad de trabajo",
     NoCommon: "No hay común",
     NoContent: "(No hay contenido)",
     NoContentPleaseSelectItem: "Sin contenido, por favor seleccione un elemento",
@@ -715,6 +718,7 @@ export = {
     PressCtrlCToCopy: "Oprima ctrl+c para copiar",
     Preview: "Presentación Preliminar",
     PreviousSelection: "Selección anterior",
+    PreviousWorkunit: "Unidad de trabajo anterior",
     PrimaryLost: "Primario perdido",
     PrimaryMonitoring: "Monitoreado Principal",
     Priority: "Prioridad",
@@ -836,6 +840,7 @@ export = {
     Save: "Guardar",
     Scope: "Ámbito",
     SearchResults: "Resultados de búsqueda",
+    Seconds: "Segundos",
     SecondsRemaining: "Segundos que faltan",
     Security: "Seguridad",
     SecurityMessageHTML: "Solo vea HTML de usuarios de confianza. Esta unidad de trabajo fue creada por '{__placeholder__}'.
¿Representar HTML?", @@ -935,6 +940,7 @@ export = { SVGSource: "Origen del SVG", Sync: "Sincronizar", SyncSelection: "Sincronizar", + Syntax: "Sintaxis", SystemServers: "Servidores de sistema", Table: "Tabla", tag: "etiqueta", @@ -962,6 +968,7 @@ export = { TimeMaxTotalExecuteMinutes: "Maximo tiempo total de ejecucion en minutos", TimeMeanTotalExecuteMinutes: "Total tiempo total de ejecucion en minutos", TimeMinTotalExecuteMinutes: "Minomo tiempo total de ejecucion en minutos", + TimePenalty: "Penalización de tiempo", Timers: "Cronómetros", TimeSeconds: "Tiempo (Segundos)", TimeStamp: "Marca de tiempo", @@ -1129,6 +1136,7 @@ export = { WildcardFilter: "Filtro de Comodín", Workflows: "Flujos de Trabajo", Workunit: "Unidad de trabajo", + WorkunitNotFound: "Unidad de trabajo no encontrada", Workunits: "Unidades de trabajo", WorkUnitScopeDefaultPermissions: "Permisos por defect de alcaces de Workunit", Wrap: "Envolver", From 80934a5af1629068628617dbeb25a4b96d3e6753 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Wed, 26 Jun 2024 18:12:22 +0100 Subject: [PATCH 19/31] HPCC-32159 Fix core stack capture Signed-off-by: Jake Smith --- initfiles/bin/check_executes | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/initfiles/bin/check_executes b/initfiles/bin/check_executes index 5214b767026..67d17c95564 100755 --- a/initfiles/bin/check_executes +++ b/initfiles/bin/check_executes @@ -101,11 +101,16 @@ if [ $PMD_ALWAYS = true ] || [ $retVal -ne 0 ]; then done cp `ls -rt /tmp/postmortem.$progPid.log.*` $POST_MORTEM_DIR rm /tmp/postmortem.$progPid.log.* - if [ -f core ]; then - echo "Generating info from core file to $POST_MORTEM_DIR/info.log" | tee -a $POST_MORTEM_DIR/info.log - gdb -batch -ix /opt/HPCCSystems/bin/.gdbinit -x /opt/HPCCSystems/bin/post-mortem-gdb ${PMD_PROGNAME} core 2>$POST_MORTEM_DIR/info.err >>$POST_MORTEM_DIR/info.log - echo "Generated info from core file" | tee -a $POST_MORTEM_DIR/info.log - rm core + + readarray -t core_files < <(find . -maxdepth 1 -type f -name 'core*' -print) + # we only expect one, but cater for multiple + if [[ ${#core_files[@]} -gt 0 ]]; then + for file in "${core_files[@]}"; do + echo "Generating info from core file($file) to $POST_MORTEM_DIR/info.log" | tee -a $POST_MORTEM_DIR/info.log + gdb -batch -ix /opt/HPCCSystems/bin/.gdbinit -x /opt/HPCCSystems/bin/post-mortem-gdb ${PMD_PROGNAME} $file 2>$POST_MORTEM_DIR/info.err >>$POST_MORTEM_DIR/info.log + echo "Generated info from core file($file)" | tee -a $POST_MORTEM_DIR/info.log + rm $file + done fi dmesg -xT > $POST_MORTEM_DIR/dmesg.log if [[ -n "${PMD_DALISERVER}" ]] && [[ -n "${PMD_WORKUNIT}" ]]; then From e3416762ca0b3a6db713e5f273ce9d3e73913403 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 26 Jun 2024 16:01:26 -0400 Subject: [PATCH 20/31] HPCC-32147 ECL Watch v9 WU details do not show blank TotalClusterTime If the TotalClusterTime for a WU returned by ESP is "", then display "0.00" instead. Also, fixed an issue where "Potential Savings" percentage was showing "NaN%" when totalCosts was 0. 
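A minimal sketch of the two guards (TypeScript, simplified from the component
change below; variable names are as in the diff):

    const savingsPct = totalCosts > 0
        ? Math.round((potentialSavings / totalCosts) * 10000) / 100
        : 0; // previously 0/0 evaluated to NaN, rendering "NaN%"
    const totalClusterTime = workunit?.TotalClusterTime ? workunit?.TotalClusterTime : "0.00";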
Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/WorkunitSummary.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/esp/src/src-react/components/WorkunitSummary.tsx b/esp/src/src-react/components/WorkunitSummary.tsx index e0b0b49da2e..8e74c0b008c 100644 --- a/esp/src/src-react/components/WorkunitSummary.tsx +++ b/esp/src/src-react/components/WorkunitSummary.tsx @@ -216,13 +216,13 @@ export const WorkunitSummary: React.FunctionComponent = ({ "owner": { label: nlsHPCC.Owner, type: "string", value: workunit?.Owner, readonly: true }, "jobname": { label: nlsHPCC.JobName, type: "string", value: jobname }, "description": { label: nlsHPCC.Description, type: "string", value: description }, - "potentialSavings": { label: nlsHPCC.PotentialSavings, type: "string", value: `${formatCost(potentialSavings)} (${Math.round((potentialSavings / totalCosts) * 10000) / 100}%)`, readonly: true }, + "potentialSavings": { label: nlsHPCC.PotentialSavings, type: "string", value: `${formatCost(potentialSavings)} (${totalCosts > 0 ? Math.round((potentialSavings / totalCosts) * 10000) / 100 : 0}%)`, readonly: true }, "compileCost": { label: nlsHPCC.CompileCost, type: "string", value: `${formatCost(workunit?.CompileCost)}`, readonly: true }, "executeCost": { label: nlsHPCC.ExecuteCost, type: "string", value: `${formatCost(workunit?.ExecuteCost)}`, readonly: true }, "fileAccessCost": { label: nlsHPCC.FileAccessCost, type: "string", value: `${formatCost(workunit?.FileAccessCost)}`, readonly: true }, "protected": { label: nlsHPCC.Protected, type: "checkbox", value: _protected }, "cluster": { label: nlsHPCC.Cluster, type: "string", value: workunit?.Cluster, readonly: true }, - "totalClusterTime": { label: nlsHPCC.TotalClusterTime, type: "string", value: workunit?.TotalClusterTime, readonly: true }, + "totalClusterTime": { label: nlsHPCC.TotalClusterTime, type: "string", value: workunit?.TotalClusterTime ? workunit?.TotalClusterTime : "0.00", readonly: true }, "abortedBy": { label: nlsHPCC.AbortedBy, type: "string", value: workunit?.AbortBy, readonly: true }, "abortedTime": { label: nlsHPCC.AbortedTime, type: "string", value: workunit?.AbortTime, readonly: true }, "ServiceNamesCustom": { label: nlsHPCC.Services, type: "string", value: serviceNames, readonly: true, multiline: true }, From ee5c128eabdb60c2c4ae53ab60953cf939b5f7d4 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Wed, 19 Jun 2024 12:44:19 +0100 Subject: [PATCH 21/31] HPCC-32132 New compressed spilling lookahead implementation Signed-off-by: Jake Smith --- system/jlib/jqueue.hpp | 54 ++ .../activities/nsplitter/thnsplitterslave.cpp | 4 +- thorlcr/activities/thactivityutil.cpp | 40 +- thorlcr/thorutil/thbuf.cpp | 622 +++++++++++++++++- thorlcr/thorutil/thbuf.hpp | 29 +- thorlcr/thorutil/thormisc.hpp | 7 +- 6 files changed, 707 insertions(+), 49 deletions(-) diff --git a/system/jlib/jqueue.hpp b/system/jlib/jqueue.hpp index e9447833c65..97ff9d91832 100644 --- a/system/jlib/jqueue.hpp +++ b/system/jlib/jqueue.hpp @@ -546,5 +546,59 @@ class DListOf } }; +// Lock-free Single Producer Single Consumer bounded queue implementation +// No mutexes are required to interact with the queue, as long as there's a single consumer thread and a single writer thread.
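+// Example usage (an illustrative sketch only, not part of the class; process() is a hypothetical consumer): +// CSPSCQueue<const void *> q(1024); // bounded to 1024 entries +// producer thread: while (!q.enqueue(row)) {} // enqueue returns false while the queue is full +// consumer thread: const void *row; while (q.dequeue(row)) process(row);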
+template <typename T> +class CSPSCQueue +{ + size32_t maxCapacity = 0; + std::vector<T> elements; + std::atomic<size32_t> head = 0; + std::atomic<size32_t> tail = 0; + + inline size32_t increment(size32_t idx) const + { + size32_t next = idx+1; + if (next == maxCapacity) + next = 0; + return next; + } +public: + CSPSCQueue() + { + // should set capacity before using + } + CSPSCQueue(size32_t _maxCapacity) + : maxCapacity(_maxCapacity + 1), // +1 to distinguish full vs empty + elements(maxCapacity) + { + } + void setCapacity(size32_t _maxCapacity) + { + maxCapacity = _maxCapacity + 1; + elements.resize(maxCapacity); + } + bool enqueue(const T e) + { + size32_t currentHead = head; + size32_t nextHead = increment(currentHead); + if (nextHead == tail) + return false; // full + + elements[currentHead] = std::move(e); + head = nextHead; + return true; + } + bool dequeue(T &res) + { + size32_t currentTail = tail; + if (currentTail == head) + return false; // empty + + res = std::move(elements[currentTail]); + tail = increment(currentTail); + return true; + } +}; #endif diff --git a/thorlcr/activities/nsplitter/thnsplitterslave.cpp b/thorlcr/activities/nsplitter/thnsplitterslave.cpp index 191d005fa9a..de22da08908 100644 --- a/thorlcr/activities/nsplitter/thnsplitterslave.cpp +++ b/thorlcr/activities/nsplitter/thnsplitterslave.cpp @@ -251,9 +251,9 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf if ((size32_t)-1 != blockedSequentialIOSize) options.storageBlockSize = blockedSequentialIOSize; } - options.totalCompressionBufferSize = getOptInt(THOROPT_SPLITTER_COMPRESSIONTOALK, options.totalCompressionBufferSize / 1024) * 1024; + options.totalCompressionBufferSize = getOptInt(THOROPT_SPLITTER_COMPRESSIONTOTALK, options.totalCompressionBufferSize / 1024) * 1024; options.inMemMaxMem = getOptInt(THOROPT_SPLITTER_MAXROWMEMK, options.inMemMaxMem / 1024) * 1024; - options.spillWriteAheadSize = getOptInt64(THOROPT_SPLITTER_WRITEAHEADK, options.spillWriteAheadSize / 1024) * 1024; + options.writeAheadSize = getOptInt64(THOROPT_SPLITTER_WRITEAHEADK, options.writeAheadSize / 1024) * 1024; options.inMemReadAheadGranularity = getOptInt(THOROPT_SPLITTER_READAHEADGRANULARITYK, options.inMemReadAheadGranularity / 1024) * 1024; options.inMemReadAheadGranularityRows = getOptInt(THOROPT_SPLITTER_READAHEADGRANULARITYROWS, options.inMemReadAheadGranularity); options.heapFlags = getOptInt("spillheapflags", options.heapFlags); diff --git a/thorlcr/activities/thactivityutil.cpp b/thorlcr/activities/thactivityutil.cpp index f5701672c18..fe9a960bb74 100644 --- a/thorlcr/activities/thactivityutil.cpp +++ b/thorlcr/activities/thactivityutil.cpp @@ -66,6 +66,8 @@ class CRowStreamLookAhead : public CSimpleInterfaceOf<IStartableEngineRowStream> rowcount_t required; Semaphore startSem; Owned<IException> getexception; + LookAheadOptions options; + bool newLookAhead = false; + class CThread: public Thread { @@ -94,12 +96,19 @@ { try { - StringBuffer temp; - if (allowspill) - GetTempFilePath(temp,"lookahd"); assertex(bufsize); if (allowspill) + { + StringBuffer temp; + GetTempFilePath(temp,"lookahd"); + if (newLookAhead) + { + ICompressHandler *compressHandler = options.totalCompressionBufferSize ?
queryDefaultCompressHandler() : nullptr; + smartbuf.setown(createCompressedSpillingRowStream(&activity, temp.str(), preserveGrouping, rowIf, options, compressHandler)); + } + else + smartbuf.setown(createSmartBuffer(&activity, temp.str(), bufsize, rowIf)); + } else smartbuf.setown(createSmartInMemoryBuffer(&activity, rowIf, bufsize)); startSem.signal(); @@ -207,6 +216,29 @@ running = true; required = _required; count = 0; + + newLookAhead = activity.getOptBool("newlookahead", false); + if (activity.getOptBool("forcenewlookahead")) + { + newLookAhead = true; + allowspill = true; + } + + // for "newlookahead" only + if (isContainerized()) + { + // JCSMORE - add CJobBase::getTempBlockSize() to calc. once. + StringBuffer planeName; + if (!getDefaultPlane(planeName, "@tempPlane", "temp")) + getDefaultPlane(planeName, "@spillPlane", "spill"); + size32_t blockedSequentialIOSize = getPlaneAttributeValue(planeName, BlockedSequentialIO, (size32_t)-1); + if ((size32_t)-1 != blockedSequentialIOSize) + options.storageBlockSize = blockedSequentialIOSize; + } + options.totalCompressionBufferSize = activity.getOptInt(THOROPT_LOOKAHEAD_COMPRESSIONTOTALK, options.totalCompressionBufferSize / 1024) * 1024; + options.inMemMaxMem = activity.getOptInt(THOROPT_LOOKAHEAD_MAXROWMEMK, options.inMemMaxMem / 1024) * 1024; + options.writeAheadSize = activity.getOptInt64(THOROPT_LOOKAHEAD_WRITEAHEADK, options.writeAheadSize / 1024) * 1024; + options.tempFileGranularity = activity.getOptInt64(THOROPT_LOOKAHEAD_TEMPFILE_GRANULARITY, options.tempFileGranularity / 0x100000) * 0x100000; } ~CRowStreamLookAhead() { diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp index 951b7db82d2..b1377f6db00 100644 --- a/thorlcr/thorutil/thbuf.cpp +++ b/thorlcr/thorutil/thbuf.cpp @@ -16,11 +16,14 @@ ############################################################################## */ #include +#include #include +#include #include "platform.h" #include #include #include "jlib.hpp" +#include "jqueue.hpp" #include "jmisc.hpp" #include "jio.hpp" #include "jlzw.hpp" @@ -606,6 +609,576 @@ class CSmartRowInMemoryBuffer: public CSimpleInterface, implements ISmartRowBuff } }; + +static std::tuple<IBufferedSerialInputStream *, IFileIO *> createSerialInputStream(IFile *iFile, ICompressHandler *compressHandler, const CommonBufferRowRWStreamOptions &options, unsigned numSharingCompressionBuffer) +{ + Owned<IFileIO> iFileIO = iFile->open(IFOread); + Owned<ISerialInputStream> in = createSerialInputStream(iFileIO); + Owned<IBufferedSerialInputStream> inputStream = createBufferedInputStream(in, options.storageBlockSize, 0); + if (compressHandler) + { + const char *decompressOptions = nullptr; // at least for now! + Owned<IExpander> decompressor = compressHandler->getExpander(decompressOptions); + Owned<ISerialInputStream> decompressed = createDecompressingInputStream(inputStream, decompressor); + + size32_t compressionBlockSize = (size32_t)(options.totalCompressionBufferSize / numSharingCompressionBuffer); + if (compressionBlockSize < options.minCompressionBlockSize) + { + WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of numSharingCompressionBuffer(%u).
Using minCompressionBlockSize(%u).", (unsigned __int64)options.totalCompressionBufferSize, numSharingCompressionBuffer, options.minCompressionBlockSize); + compressionBlockSize = options.minCompressionBlockSize; + } + inputStream.setown(createBufferedInputStream(decompressed, compressionBlockSize, 0)); + } + return { inputStream.getClear(), iFileIO.getClear() }; +} + +static std::tuple<IBufferedSerialOutputStream *, IFileIO *> createSerialOutputStream(IFile *iFile, ICompressHandler *compressHandler, const CommonBufferRowRWStreamOptions &options, unsigned numSharingCompressionBuffer) +{ + Owned<IFileIO> iFileIO = iFile->open(IFOcreate); // kept for stats purposes + Owned<ISerialOutputStream> out = createSerialOutputStream(iFileIO); + Owned<IBufferedSerialOutputStream> outputStream = createBufferedOutputStream(out, options.storageBlockSize); // preferred plane block size + if (compressHandler) + { + const char *compressOptions = nullptr; // at least for now! + Owned<ICompressor> compressor = compressHandler->getCompressor(compressOptions); + Owned<ISerialOutputStream> compressed = createCompressingOutputStream(outputStream, compressor); + size32_t compressionBlockSize = (size32_t)(options.totalCompressionBufferSize / numSharingCompressionBuffer); + if (compressionBlockSize < options.minCompressionBlockSize) + { + WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of numSharingCompressionBuffer(%u). Using minCompressionBlockSize(%u).", (unsigned __int64)options.totalCompressionBufferSize, numSharingCompressionBuffer, options.minCompressionBlockSize); + compressionBlockSize = options.minCompressionBlockSize; + } + + outputStream.setown(createBufferedOutputStream(compressed, compressionBlockSize)); + } + return { outputStream.getClear(), iFileIO.getClear() }; +} + +// #define TRACE_SPILLING_ROWSTREAM // traces each row read/written, and other events + +// based on a query that produces records with a single sequential (from 1) unsigned4 +// #define VERIFY_ROW_IDS_SPILLING_ROWSTREAM + +// for 'stressLookAhead' code. When enabled, reduces buffer sizes etc. to stress test the lookahead spilling +// #define STRESSTEST_SPILLING_ROWSTREAM + + + +/* CCompressedSpillingRowStream implementation details: + - Writer: + - The writer writes to an in-memory queue; when the queue is full, or a certain number of rows have been queued, it starts writing to temp files. + - The writer will always write to the queue if it can, even after it has started spilling. + - The writer commits to disk at LookAheadOptions::writeAheadSize granularity + - The writer creates a new temp file when the current one reaches LookAheadOptions::tempFileGranularity + - The writer pushes the current nextOutputRow to a queue when it creates the next output file (used by the reader to know when to move to the next file) + - NB: writer implements ISmartRowBuffer::flush() which has slightly weird semantics (blocks until everything is read or stopped) +- Reader: + - The reader will read from the queue until it is exhausted, then block until signalled that more rows are available. + - If the reader dequeues a row that is ahead of the expected 'nextInputRow', it will stash it, and read from disk until it catches up to that row. + - If the reader is reading from disk and it catches up with 'committedRows', it will block until the writer has committed more rows. + - When reading from a temp file, it will take ownership of the CFileOwner and dispose of the underlying file when it has consumed it. + - The reader will read from the stream until it hits 'currentTempFileEndRow' (initially 0), at which point it will open the next temp file.
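+ - Worked example (assuming the LookAheadOptions defaults declared in thbuf.hpp): with inMemMaxMem=~2MB the writer queues rows until ~2MB is buffered, then starts spilling, committing at ~2MB (writeAheadSize) granularity and rolling to a new temp file every ~1GB (tempFileGranularity); the reader always drains the queue first and only reads the temp files for rows it missed.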
+ */ + +// NB: Supports being read by 1 thread and written to by another only +class CCompressedSpillingRowStream: public CSimpleInterfaceOf<ISmartRowBuffer>, implements IRowWriter +{ + typedef std::tuple<const void *, rowcount_t, size32_t> RowEntry; + + CActivityBase &activity; // ctor input parameter + StringAttr baseTmpFilename; // ctor input parameter + LookAheadOptions options; // ctor input parameter + Linked<ICompressHandler> compressHandler; // ctor input parameter + + // derived from input parameter (IThorRowInterfaces *rowIf) + Linked<IOutputMetaData> meta; + Linked<IOutputRowSerializer> serializer; + Linked<IEngineRowAllocator> allocator; + Linked<IOutputRowDeserializer> deserializer; + memsize_t compressionBlockSize = 0; // filled in createOutputStream + + // in-memory related members + CSPSCQueue<RowEntry> inMemRows; + std::atomic<memsize_t> inMemRowsMemoryUsage = 0; // NB updated from writer and reader threads + Semaphore moreRows; + std::atomic<bool> readerWaitingForQ = false; // set by reader, cleared by writer + + // temp write related members + Owned<IBufferedSerialOutputStream> outputStream; + std::unique_ptr<COutputStreamSerializer> outputStreamSerializer; + memsize_t pendingFlushToDiskSz = 0; + offset_t currentTempFileSize = 0; + CFileOwner *currentOwnedOutputFile = nullptr; + Owned<IFileIO> currentOutputIFileIO; // keep for stats + CriticalSection outputFilesQCS; + std::queue<CFileOwner *> outputFiles; + unsigned writeTempFileNum = 0; + std::atomic<rowcount_t> nextOutputRow = 0; // read by reader, updated by writer + std::atomic<rowcount_t> committedRows = 0; // read by reader, updated by writer + std::atomic<bool> spilt = false; // set by createOutputStream, checked by reader + std::queue<rowcount_t> outputFileEndRowMarkers; + bool lastWriteWasEog = false; + bool outputComplete = false; // only accessed and modified by writer or reader within readerWriterCS + bool recentlyQueued = false; + CriticalSection outputStreamCS; + + // temp read related members + std::atomic<rowcount_t> currentTempFileEndRow = 0; + Owned<IFileIO> currentInputIFileIO; // keep for stats + Linked<CFileOwner> currentOwnedInputFile; + Owned<IBufferedSerialInputStream> inputStream; + CThorStreamDeserializerSource inputDeserializerSource; + rowcount_t nextInputRow = 0; + bool readerWaitingForCommit = false; + static constexpr unsigned readerWakeupGranularity = 32; // how often to wake up the reader if it is waiting for more rows + enum ReadState { rs_fromqueue, rs_frommarker, rs_endstream, rs_stopped } readState = rs_fromqueue; + RowEntry readFromStreamMarker = { nullptr, 0, 0 }; + + // misc + bool grouped = false; // ctor input parameter + CriticalSection readerWriterCS; +#ifdef STRESSTEST_SPILLING_ROWSTREAM + bool stressTest = false; +#endif + + // annoying flush semantics + bool flushWaiting = false; + Semaphore flushWaitSem; + + + void trace(const char *format, ...)
+ { +#ifdef TRACE_SPILLING_ROWSTREAM + va_list args; + va_start(args, format); + VALOG(MCdebugInfo, format, args); + va_end(args); +#endif + } + void createNextOutputStream() + { + VStringBuffer tmpFilename("%s.%u", baseTmpFilename.get(), writeTempFileNum++); + trace("WRITE: writing to %s", tmpFilename.str()); + Owned<IFile> iFile = createIFile(tmpFilename); + currentOwnedOutputFile = new CFileOwner(iFile, activity.queryTempFileSizeTracker()); // used by checkFlushToDisk to noteSize + { + CriticalBlock b(outputFilesQCS); + outputFiles.push(currentOwnedOutputFile); // NB: takes ownership + } + + auto res = createSerialOutputStream(iFile, compressHandler, options, 2); // (2) input & output sharing totalCompressionBufferSize + outputStream.setown(std::get<0>(res)); + currentOutputIFileIO.setown(std::get<1>(res)); + outputStreamSerializer = std::make_unique<COutputStreamSerializer>(outputStream); + } + void createNextInputStream() + { + CFileOwner *dequeuedOwnedIFile = nullptr; + { + CriticalBlock b(outputFilesQCS); + dequeuedOwnedIFile = outputFiles.front(); + outputFiles.pop(); + } + currentOwnedInputFile.setown(dequeuedOwnedIFile); + IFile *iFile = &currentOwnedInputFile->queryIFile(); + trace("READ: reading from %s", iFile->queryFilename()); + + auto res = createSerialInputStream(iFile, compressHandler, options, 2); // (2) input & output sharing totalCompressionBufferSize + inputStream.setown(std::get<0>(res)); + currentInputIFileIO.setown(std::get<1>(res)); + inputDeserializerSource.setStream(inputStream); + } + const void *readRowFromStream() + { + // readRowFromStream() called from readToMarker (which will block before calling this if behind committedRows), + // or when outputComplete. + // Either way, it will not enter this method until the writer has committed ahead of the reader nextInputRow + + // NB: currentTempFileEndRow will be 0 for the first input read + // nextInputRow can be > currentTempFileEndRow, because the writer/reader may have used the Q + // beyond this point; the next row in the stream could be anywhere above.
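+ // e.g. (illustrative) if temp file 1 ended at row 100 (currentTempFileEndRow=100) but rows 100..149 were served from the Q, + // a stream read for row 150 arrives with nextInputRow(150) >= currentTempFileEndRow(100), and the next file is opened first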
+ if (nextInputRow >= currentTempFileEndRow) + { + createNextInputStream(); + CriticalBlock b(outputStreamCS); + if (nextInputRow >= currentTempFileEndRow) + { + if (!outputFileEndRowMarkers.empty()) + { + currentTempFileEndRow = outputFileEndRowMarkers.front(); + outputFileEndRowMarkers.pop(); + assertex(currentTempFileEndRow > nextInputRow); + } + else + { + currentTempFileEndRow = (rowcount_t)-1; // unbounded for now, writer will set when it knows + trace("READ: setting currentTempFileEndRow: unbounded"); + } + } + } + if (grouped) + { + bool eog; + inputStream->read(sizeof(bool), &eog); + if (eog) + return nullptr; + } + RtlDynamicRowBuilder rowBuilder(allocator); + size32_t sz = deserializer->deserialize(rowBuilder, inputDeserializerSource); + const void *row = rowBuilder.finalizeRowClear(sz); + checkCurrentRow("S: ", row, nextInputRow); + return row; + } + void writeRowToStream(const void *row, size32_t rowSz) + { + if (!spilt) + { + spilt = true; + ActPrintLog(&activity, "Spilling to temp storage [file = %s]", baseTmpFilename.get()); + createNextOutputStream(); + } + if (grouped) + { + bool eog = (nullptr == row); + outputStream->put(sizeof(bool), &eog); + pendingFlushToDiskSz++; + if (nullptr == row) + return; + } + serializer->serialize(*outputStreamSerializer.get(), (const byte *)row); + pendingFlushToDiskSz += rowSz; + } + void checkReleaseQBlockReader() + { + if (readerWaitingForQ) + { + readerWaitingForQ = false; + moreRows.signal(); + } + } + void checkReleaseReaderCommitBlocked() + { + if (readerWaitingForCommit) + { + readerWaitingForCommit = false; + moreRows.signal(); + } + } + void handleInputComplete() + { + readState = rs_stopped; + if (flushWaiting) + { + flushWaiting = false; + flushWaitSem.signal(); + } + } + bool checkFlushToDisk(size32_t threshold) + { + if (pendingFlushToDiskSz <= threshold) + return false; + rowcount_t currentNextOutputRow = nextOutputRow.load(); + trace("WRITE: Flushed to disk. nextOutputRow = %" RCPF "u", currentNextOutputRow); + outputStream->flush(); + currentTempFileSize += pendingFlushToDiskSz; + currentOwnedOutputFile->noteSize(currentTempFileSize); + pendingFlushToDiskSz = 0; + if (currentTempFileSize > options.tempFileGranularity) + { + currentTempFileSize = 0; + { + CriticalBlock b(outputStreamCS); + // set if reader isn't bounded yet, or queue next boundary + if ((rowcount_t)-1 == currentTempFileEndRow) + { + currentTempFileEndRow = currentNextOutputRow; + trace("WRITE: setting currentTempFileEndRow: %" RCPF "u", currentTempFileEndRow.load()); + } + else + { + outputFileEndRowMarkers.push(currentNextOutputRow); + trace("WRITE: adding to tempFileEndRowMarker(size=%u): %" RCPF "u", (unsigned)outputFileEndRowMarkers.size(), currentNextOutputRow); + } + } + createNextOutputStream(); + } + committedRows = currentNextOutputRow; + return true; + } + void addRow(const void *row) + { + bool queued = false; + size32_t rowSz = row ? thorRowMemoryFootprint(serializer, row) : 0; + if (rowSz + inMemRowsMemoryUsage <= options.inMemMaxMem) + queued = inMemRows.enqueue({ row, nextOutputRow, rowSz }); // takes ownership of 'row' if successful + if (queued) + { + trace("WRITE: Q: nextOutputRow: %" RCPF "u", nextOutputRow.load()); + inMemRowsMemoryUsage += rowSz; + ++nextOutputRow; + recentlyQueued = true; + } + else + { + trace("WRITE: S: nextOutputRow: %" RCPF "u", nextOutputRow.load()); + writeRowToStream(row, rowSz); // JCSMORE - rowSz is memory not disk size... does it matter that much? 
+ ::ReleaseThorRow(row); + ++nextOutputRow; + if (checkFlushToDisk(options.writeAheadSize)) + { + CriticalBlock b(readerWriterCS); + checkReleaseReaderCommitBlocked(); + } + } + + // do not wake up reader every time a row is queued (but granularly) to avoid excessive flapping + if (recentlyQueued && (0 == (nextOutputRow % readerWakeupGranularity))) + { + recentlyQueued = false; + CriticalBlock b(readerWriterCS); + checkReleaseQBlockReader(); + } + } + const void *getQRow(RowEntry &e) + { + rowcount_t writeRow = std::get<1>(e); + inMemRowsMemoryUsage -= std::get<2>(e); + if (writeRow == nextInputRow) + { +#ifdef STRESSTEST_SPILLING_ROWSTREAM + if (stressTest && (0 == (nextInputRow % 100))) + MilliSleep(5); +#endif + + const void *row = std::get<0>(e); + checkCurrentRow("Q: ", row, nextInputRow); + ++nextInputRow; + return row; + } + else + { + // queued row is ahead of reader position, save marker and read from stream until marker + dbgassertex(writeRow > nextInputRow); + readFromStreamMarker = e; + readState = rs_frommarker; + return readToMarker(); + } + + } + inline void checkCurrentRow(const char *msg, const void *row, rowcount_t expectedId) + { +#ifdef VERIFY_ROW_IDS_SPILLING_ROWSTREAM + unsigned id; + memcpy(&id, row, sizeof(unsigned)); + assertex(id-1 == expectedId); + trace("READ: %s nextInputRow: %" RCPF "u", msg, expectedId); +#endif + } + const void *readToMarker() + { + rowcount_t markerRow = std::get<1>(readFromStreamMarker); + if (markerRow == nextInputRow) + { + const void *ret = std::get<0>(readFromStreamMarker); + checkCurrentRow("M: ", ret, nextInputRow); + readFromStreamMarker = { nullptr, 0, 0 }; + readState = rs_fromqueue; + ++nextInputRow; + return ret; + } + else if (nextInputRow >= committedRows) // the rows we need have not yet been committed to disk. + { + CLeavableCriticalBlock b(readerWriterCS); + if (nextInputRow >= committedRows) + { + // wait for writer to commit + readerWaitingForCommit = true; + b.leave(); + trace("READ: waiting for committedRows(currently = %" RCPF "u) to catch up to nextInputRow = %" RCPF "u", committedRows.load(), nextInputRow); + moreRows.wait(); + assertex(nextInputRow < committedRows); + } + } + const void *row = readRowFromStream(); + ++nextInputRow; + return row; + } +public: + IMPLEMENT_IINTERFACE_O_USING(CSimpleInterfaceOf<ISmartRowBuffer>); + + explicit CCompressedSpillingRowStream(CActivityBase *_activity, const char *_baseTmpFilename, bool _grouped, IThorRowInterfaces *rowIf, const LookAheadOptions &_options, ICompressHandler *_compressHandler) + : activity(*_activity), baseTmpFilename(_baseTmpFilename), grouped(_grouped), options(_options), compressHandler(_compressHandler), + meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()) + { + size32_t minSize = meta->getMinRecordSize(); + +#ifdef STRESSTEST_SPILLING_ROWSTREAM + stressTest = activity.getOptBool("stressLookAhead"); + if (stressTest) + { + options.inMemMaxMem = minSize * 4; + options.writeAheadSize = options.inMemMaxMem * 2; + options.tempFileGranularity = options.inMemMaxMem * 4; + if (options.tempFileGranularity < 0x10000) // stop silly sizes (NB: this would only be set so small for testing!)
+ options.tempFileGranularity = 0x10000; + } +#endif + + if (minSize < 16) + minSize = 16; // not too important, just using to cap inMemRows queue size + inMemRows.setCapacity(options.inMemMaxMem / minSize); + + assertex(options.writeAheadSize < options.tempFileGranularity); + } + ~CCompressedSpillingRowStream() + { + while (!outputFiles.empty()) + { + ::Release(outputFiles.front()); + outputFiles.pop(); + } + RowEntry e; + while (true) + { + if (!inMemRows.dequeue(e)) + break; + const void *row = std::get<0>(e); + if (row) + ReleaseThorRow(row); + } + const void *markerRow = std::get<0>(readFromStreamMarker); + if (markerRow) + ReleaseThorRow(markerRow); + } + +// ISmartRowBuffer + virtual IRowWriter *queryWriter() override + { + return this; + } +// IRowStream + virtual const void *nextRow() override + { + switch (readState) + { + case rs_fromqueue: + { + while (true) + { + RowEntry e; + if (inMemRows.dequeue(e)) + return getQRow(e); + else + { + { + CLeavableCriticalBlock b(readerWriterCS); + // Recheck Q now have CS, if reader here and writer ready to signal more, then it may have just released CS + if (inMemRows.dequeue(e)) + { + b.leave(); + return getQRow(e); + } + else if (outputComplete)// && (nextInputRow == nextOutputRow)) + { + if (nextInputRow == nextOutputRow) + { + handleInputComplete(); // sets readState to rs_stopped + return nullptr; + } + else + { + // writer has finished, nothing is on the queue or will be queued, rest is on disk + readState = rs_endstream; + const void *row = readRowFromStream(); + ++nextInputRow; + return row; + } + } + readerWaitingForQ = true; + } + trace("READ: waiting for Q'd rows @ %" RCPF "u (nextOutputRow = %" RCPF "u)", nextInputRow, nextOutputRow.load()); + moreRows.wait(); + } + } + return nullptr; + } + case rs_frommarker: + { + return readToMarker(); + } + case rs_endstream: + { + if (nextInputRow == nextOutputRow) + { + readState = rs_stopped; + return nullptr; + } + const void *row = readRowFromStream(); + ++nextInputRow; + return row; + } + case rs_stopped: + return nullptr; + } + throwUnexpected(); + } + virtual void stop() override + { + CriticalBlock b(readerWriterCS); + handleInputComplete(); + } +// IRowWriter + virtual void putRow(const void *row) override + { + if (outputComplete) + { + // should never get here, but guard against. + OwnedConstThorRow tmpRow(row); + assertex(!row); + return; + } + + if (row) + { + lastWriteWasEog = false; + addRow(row); + } + else // eog + { + if (lastWriteWasEog) // error, should not have two EOGs in a row + return; + else if (grouped) + { + lastWriteWasEog = true; + addRow(nullptr); + } + else // non-grouped nulls unexpected + throwUnexpected(); + } + } + virtual void flush() override + { + // semantics of ISmartRowBuffer::flush: + // - tell smartbuf that there will be no more rows written (BUT should only be called after finished writing) + // - wait for all rows to be read from smartbuf, or smartbuf stopped before returning. 
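+ // Typical call sequence (illustrative): the writer thread calls putRow() per row (nullptr for eog when grouped), then flush() once; + // flush() then blocks until the reader has consumed every row, or until the reader calls stop()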
+ + bool flushedToDisk = checkFlushToDisk(0); + { + CriticalBlock b(readerWriterCS); + outputComplete = true; + if (rs_stopped == readState) + return; + flushWaiting = true; + if (flushedToDisk) + checkReleaseReaderCommitBlocked(); + checkReleaseQBlockReader(); + } + flushWaitSem.wait(); + } +}; + + + ISmartRowBuffer * createSmartBuffer(CActivityBase *activity, const char * tempname, size32_t buffsize, IThorRowInterfaces *rowif) { Owned<IFile> file = createIFile(tempname); @@ -617,6 +1190,11 @@ ISmartRowBuffer * createSmartInMemoryBuffer(CActivityBase *activity, IThorRowInt return new CSmartRowInMemoryBuffer(activity, rowIf, buffsize); } +ISmartRowBuffer * createCompressedSpillingRowStream(CActivityBase *activity, const char * tempBaseName, bool grouped, IThorRowInterfaces *rowif, const LookAheadOptions &options, ICompressHandler *compressHandler) +{ + return new CCompressedSpillingRowStream(activity, tempBaseName, grouped, rowif, options, compressHandler); +} + class COverflowableBuffer : public CSimpleInterface, implements IRowWriterMultiReader { CActivityBase &activity; @@ -1844,6 +2422,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader> Linked<IRowStream> input; + unsigned numOutputs = 0; Linked<IOutputMetaData> meta; Linked<IOutputRowSerializer> serializer; Linked<IOutputRowDeserializer> deserializer; @@ -1863,7 +2442,6 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader> iFileIO.setown(iFile->open(IFOcreate)); // kept for stats purposes - Owned<ISerialOutputStream> out = createSerialOutputStream(iFileIO); - outputStream.setown(createBufferedOutputStream(out, options.storageBlockSize)); // preferred plane block size - if (compressHandler) - { - const char *compressOptions = nullptr; - Owned<ICompressor> compressor = compressHandler->getCompressor(compressOptions); - Owned<ISerialOutputStream> compressed = createCompressingOutputStream(outputStream, compressor); - outputStream.setown(createBufferedOutputStream(compressed, compressionBlockSize)); - } + auto res = createSerialOutputStream(iFile, compressHandler, options, numOutputs + 1); + outputStream.setown(std::get<0>(res)); + iFileIO.setown(std::get<1>(res)); totalInputRowsRead = inMemTotalRows; } void writeRowsFromInput() @@ -1940,7 +2511,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader> - if (serializedSz >= options.spillWriteAheadSize) + if (serializedSz >= options.writeAheadSize) break; } } @@ -1957,8 +2528,8 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf(row)); } public: - explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) - : activity(*_activity), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), + explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) + : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()) { assertex(input); @@ -1968,17 +2539,6 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader> if (inMemReadAheadGranularity > options.inMemMaxMem) inMemReadAheadGranularity = options.inMemMaxMem; - constexpr size32_t minCompressionBlockSize = 256 * 1024; - memsize_t
totalCompressionBufferSize = options.totalCompressionBufferSize; - if (totalCompressionBufferSize) - { - compressionBlockSize = (size32_t)(totalCompressionBufferSize / (numOutputs + 1)); // +1 for writer - if (compressionBlockSize < minCompressionBlockSize) - { - WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of outputs(%u). Using minCompressionBlockSize(%u) for writer and each reader.", (unsigned __int64)totalCompressionBufferSize, numOutputs, minCompressionBlockSize); - compressionBlockSize = minCompressionBlockSize; - } - } for (unsigned o=0; o getReadStream() // also pass back IFileIO for stats purposes { - Owned iFileIO = iFile->open(IFOread); - Owned in = createSerialInputStream(iFileIO); - Owned inputStream = createBufferedInputStream(in, options.storageBlockSize, 0); - if (compressHandler) - { - const char *decompressOptions = nullptr; - Owned decompressor = compressHandler->getExpander(decompressOptions); - Owned decompressed = createDecompressingInputStream(inputStream, decompressor); - inputStream.setown(createBufferedInputStream(decompressed, compressionBlockSize, 0)); - } - return { inputStream.getClear(), iFileIO.getClear() }; + return createSerialInputStream(iFile, compressHandler, options, numOutputs + 1); // +1 for writer } bool checkWriteAhead(rowcount_t &outputRowsAvailable) { diff --git a/thorlcr/thorutil/thbuf.hpp b/thorlcr/thorutil/thbuf.hpp index dc64aeb888d..1750f63b007 100644 --- a/thorlcr/thorutil/thbuf.hpp +++ b/thorlcr/thorutil/thbuf.hpp @@ -37,6 +37,25 @@ typedef QueueOf ThorRowQueue; +struct CommonBufferRowRWStreamOptions +{ + offset_t storageBlockSize = 256 * 1024; // block size of read/write streams + size32_t minCompressionBlockSize = 256 * 1024; // minimum block size for compression + memsize_t totalCompressionBufferSize = 3000 * 1024; // compression buffer size of read streams (split between writer and outputs) + memsize_t inMemMaxMem = 2000 * 1024; // before spilling begins. + offset_t writeAheadSize = 2000 * 1024; // once spilling, maximum size to write ahead + unsigned heapFlags = roxiemem::RHFunique|roxiemem::RHFblocked; +}; + +struct LookAheadOptions : CommonBufferRowRWStreamOptions +{ + LookAheadOptions() + { + // override defaults + totalCompressionBufferSize = 2000 * 1024; // compression buffer size of read streams (split between writer and outputs) + } + offset_t tempFileGranularity = 1000 * 0x100000; // 1GB +}; interface ISmartRowBuffer: extends IRowStream @@ -55,15 +74,13 @@ extern graph_decl ISmartRowBuffer * createSmartInMemoryBuffer(CActivityBase *act IThorRowInterfaces *rowIf, size32_t buffsize); -struct SharedRowStreamReaderOptions + +extern graph_decl ISmartRowBuffer * createCompressedSpillingRowStream(CActivityBase *activity, const char * tempBasename, bool grouped, IThorRowInterfaces *rowif, const LookAheadOptions &options, ICompressHandler *compressHandler); + +struct SharedRowStreamReaderOptions : public CommonBufferRowRWStreamOptions { - offset_t storageBlockSize = 256 * 1024; // block size of read/write streams - memsize_t totalCompressionBufferSize = 3000 * 1024; // compression buffer size of read streams (split between writer and outputs) - memsize_t inMemMaxMem = 2000 * 1024; // before spilling begins. memsize_t inMemReadAheadGranularity = 128 * 1024; // granularity (K) of read ahead rowcount_t inMemReadAheadGranularityRows = 64; // granularity (rows) of read ahead. 
NB: whichever granularity is hit first - offset_t spillWriteAheadSize = 2000 * 1024; // once spilling, maximum size to write ahead - unsigned heapFlags = roxiemem::RHFunique|roxiemem::RHFblocked; }; interface ISharedRowStreamReader : extends IInterface { diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index cb259a7053a..d760f3d06da 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -59,7 +59,7 @@ #define THOROPT_SPLITTER_READAHEADGRANULARITYK "inMemReadAheadGranularityK" // Splitter in memory read ahead granularity (K) (default = 128K) #define THOROPT_SPLITTER_READAHEADGRANULARITYROWS "inMemReadAheadGranularityRows" // Splitter in memory read ahead granularity (# rows) (default = 64) #define THOROPT_SPLITTER_WRITEAHEADK "splitterWriteAheadK" // Splitter spilling write ahead size (K) (default = 2MB) -#define THOROPT_SPLITTER_COMPRESSIONTOALK "splitterCompressionTotalK" // Splitter total compression buffer size (shared between writer and readers) (K) (default = 3MB) +#define THOROPT_SPLITTER_COMPRESSIONTOTALK "splitterCompressionTotalK" // Splitter total compression buffer size (shared between writer and readers) (K) (default = 3MB) #define THOROPT_LOOP_MAX_EMPTY "loopMaxEmpty" // Max # of iterations that LOOP can cycle through with 0 results before errors (default = 1000) #define THOROPT_SMALLSORT "smallSortThreshold" // Use minisort approach, if estimate size of data to sort is below this setting (default = 0) #define THOROPT_PARALLEL_FUNNEL "parallelFunnel" // Use parallel funnel impl. if !ordered (default = true) @@ -121,6 +121,11 @@ #define THOROPT_SORT_ALGORITHM "sortAlgorithm" // The algorithm used to sort records (quicksort/mergesort) #define THOROPT_COMPRESS_ALLFILES "compressAllOutputs" // Compress all output files (default: bare-metal=off, cloud=on) #define THOROPT_AVOID_RENAME "avoidRename" // Avoid rename, write directly to target physical filenames (no temp file) +#define THOROPT_LOOKAHEAD_MAXROWMEMK "readAheadRowMemK" // Lookahead max memory (K) to use before spilling (default = 2MB) +#define THOROPT_LOOKAHEAD_WRITEAHEADK "readAheadWriteAheadK" // Lookahead spilling write ahead size (K) (default = 2MB) +#define THOROPT_LOOKAHEAD_COMPRESSIONTOTALK "readAheadCompressionTotalK" // Lookahead total compression buffer size (shared between writer and reader) (K) (default = 2MB) +#define THOROPT_LOOKAHEAD_TEMPFILE_GRANULARITY "readAheadTempFileGranularity" // Lookahead temp file granularity (default = 1GB) + #define INITIAL_SELFJOIN_MATCH_WARNING_LEVEL 20000 // max of row matches before selfjoin emits warning From 3fc8be88960e949c4ff7fbc72e3566e9365c59f6 Mon Sep 17 00:00:00 2001 From: Attila Vamos Date: Thu, 27 Jun 2024 10:52:10 +0100 Subject: [PATCH 22/31] HPCC-32142 Add SVM to platform-ml and platform-gnn Docker images Add missing install instruction for SVM to the relevant Dockerfiles Signed-off-by: Attila Vamos --- dockerfiles/platform-gnn-gpu/Dockerfile | 2 ++ dockerfiles/platform-gnn/Dockerfile | 1 + dockerfiles/platform-ml/Dockerfile | 1 + 3 files changed, 4 insertions(+) diff --git a/dockerfiles/platform-gnn-gpu/Dockerfile b/dockerfiles/platform-gnn-gpu/Dockerfile index 181580d225f..e6df170e921 100644 --- a/dockerfiles/platform-gnn-gpu/Dockerfile +++ b/dockerfiles/platform-gnn-gpu/Dockerfile @@ -85,6 +85,8 @@ RUN apt clean && \ apt autoremove && \ apt-get update +RUN apt-get install -y libsvm-dev libsvm-tools + RUN apt-get install -y python3-pip --fix-missing RUN python3 -m pip --no-cache-dir install \ setuptools diff
--git a/dockerfiles/platform-gnn/Dockerfile b/dockerfiles/platform-gnn/Dockerfile index 20b3faba41e..3a735fa88f3 100644 --- a/dockerfiles/platform-gnn/Dockerfile +++ b/dockerfiles/platform-gnn/Dockerfile @@ -25,6 +25,7 @@ FROM ${DOCKER_REPO}/platform-core:${BUILD_LABEL} USER root RUN apt-get update -y && apt-get install -y python3-pip --fix-missing +RUN apt-get install -y libsvm-dev libsvm-tools RUN python3 -m pip install --upgrade pip RUN pip3 install \ diff --git a/dockerfiles/platform-ml/Dockerfile b/dockerfiles/platform-ml/Dockerfile index d6bfe648a85..0e0d4d8ce72 100644 --- a/dockerfiles/platform-ml/Dockerfile +++ b/dockerfiles/platform-ml/Dockerfile @@ -26,6 +26,7 @@ USER root # Machine Learning Dependencies RUN apt-get update && apt-get install -y python3 python3-pip --fix-missing +RUN apt-get install -y libsvm-dev libsvm-tools RUN python3 -m pip install --upgrade pip RUN pip3 install \ scikit-learn From d05ab65745fd6e3587eaa6632e6bb9f6f5c60be1 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 27 Jun 2024 10:19:22 +0100 Subject: [PATCH 23/31] HPCC-32164 Minor improvements to new filename gathering code Signed-off-by: Gavin Halliday --- common/workunit/workunit.cpp | 23 ++++++++++++++++------- ecl/hqlcpp/hqlcpp.ipp | 30 +++++++++++++++++------------- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp index 1337d27a276..4b5b7223a9d 100644 --- a/common/workunit/workunit.cpp +++ b/common/workunit/workunit.cpp @@ -8745,12 +8745,20 @@ static const char *summaryTypeName(SummaryType type) bool CLocalWorkUnit::getSummary(SummaryType type, SummaryMap &map) const { VStringBuffer xpath("Summaries/%s", summaryTypeName(type)); - CriticalBlock block(crit); - const char *list = p->queryProp(xpath); - if (!list) - return false; StringArray s; - s.appendList(list, "\n"); + { + CriticalBlock block(crit); + IPropertyTree * match = p->queryPropTree(xpath); + //If there is no entry then the information is not recorded in the workunit + if (!match) + return false; + + const char *list = match->queryProp(nullptr); + //If the information was recorded return true, even if there are no results + if (!list) + return true; + s.appendList(list, "\n"); + } ForEachItemIn(idx, s) { const char *name = s.item(idx); @@ -8761,10 +8769,11 @@ bool CLocalWorkUnit::getSummary(SummaryType type, SummaryMap &map) const if (*end!=':') return false; // unrecognized format name = end+1; - if (map.find(name) == map.end()) + auto match = map.find(name); + if (match == map.end()) map[name] = flags; else - map[name] = map[name] & flags; + match->second &= flags; } } return true; diff --git a/ecl/hqlcpp/hqlcpp.ipp b/ecl/hqlcpp/hqlcpp.ipp index 0607c583892..ce13dba6eef 100644 --- a/ecl/hqlcpp/hqlcpp.ipp +++ b/ecl/hqlcpp/hqlcpp.ipp @@ -2146,19 +2146,23 @@ protected: SummaryMap summaries[(int) SummaryType::NumItems]; void noteSummaryInfo(const char *name, SummaryType type, bool isOpt, bool isSigned) { - if (type != SummaryType::None) - { - SummaryMap &map = summaries[(int) type]; - SummaryFlags flags = SummaryFlags::None; - if (isOpt) - flags |= SummaryFlags::IsOpt; - if (isSigned) - flags |= SummaryFlags::IsSigned; - if (map.find(name) == map.end()) - map[name] = flags; - else - map[name] = map[name] & flags; - } + if (type == SummaryType::None) + return; + //Spill files are meaningless in roxie, and no current benefit in recording them for hthor/thor + if (type == SummaryType::SpillFile) + return; + + SummaryMap &map = summaries[(int) type]; + SummaryFlags
flags = SummaryFlags::None; + if (isOpt) + flags |= SummaryFlags::IsOpt; + if (isSigned) + flags |= SummaryFlags::IsSigned; + auto match = map.find(name); + if (match == map.end()) + map[name] = flags; + else + match->second &= flags; } }; From 9d2ef45da88d80a6bee63cdd7f7b50835bc15f7b Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 21 Jun 2024 08:06:16 +0100 Subject: [PATCH 24/31] HPCC-32158 Add SQL Driven OLAP engine for WU Metrics Signed-off-by: Gordon Smith --- esp/src/package-lock.json | 381 ++++++++++++------ esp/src/package.json | 7 +- esp/src/src-react/components/Metrics.tsx | 68 ++-- .../src-react/components/MetricsOptions.tsx | 19 +- esp/src/src-react/components/MetricsSQL.tsx | 174 ++++++++ esp/src/src-react/components/SourceEditor.tsx | 107 ++++- .../src-react/components/WorkunitDetails.tsx | 16 +- esp/src/src-react/hooks/duckdb.ts | 59 +++ esp/src/src-react/hooks/metrics.ts | 40 +- esp/src/src-react/layouts/DockPanel.tsx | 9 - esp/src/src/nls/hpcc.ts | 2 + esp/src/tsconfig.json | 14 +- esp/src/webpack.config.js | 5 + 13 files changed, 683 insertions(+), 218 deletions(-) create mode 100644 esp/src/src-react/components/MetricsSQL.tsx create mode 100644 esp/src/src-react/hooks/duckdb.ts diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index 1a1d9ac0fbf..06de90bbe29 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -16,11 +16,11 @@ "@fluentui/react-icons-mdl2": "1.3.59", "@fluentui/react-migration-v8-v9": "9.6.3", "@hpcc-js/chart": "2.83.3", - "@hpcc-js/codemirror": "2.61.4", + "@hpcc-js/codemirror": "2.62.0", "@hpcc-js/common": "2.71.17", "@hpcc-js/comms": "2.92.2", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.3", + "@hpcc-js/eclwatch": "2.74.5", "@hpcc-js/graph": "2.85.15", "@hpcc-js/html": "2.42.20", "@hpcc-js/layout": "2.49.22", @@ -30,6 +30,7 @@ "@hpcc-js/react": "2.53.16", "@hpcc-js/tree": "2.40.17", "@hpcc-js/util": "2.51.0", + "@hpcc-js/wasm": "2.17.1", "@kubernetes/client-node": "0.20.0", "clipboard": "2.0.11", "d3-dsv": "3.0.1", @@ -170,6 +171,7 @@ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, + "license": "MIT", "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", @@ -193,6 +195,7 @@ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", "dev": true, + "license": "MIT", "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } @@ -1824,9 +1827,10 @@ } }, "node_modules/@hpcc-js/codemirror": { - "version": "2.61.4", - "resolved": "https://registry.npmjs.org/@hpcc-js/codemirror/-/codemirror-2.61.4.tgz", - "integrity": "sha512-rscy1L5EcRhRtldjjwdurxC8RLWW8KY+B8EYj/XXH25blpvlt3P05Bdd6kotBIG18sV33sezaydhM7dqs+iltg==", + "version": "2.62.0", + "resolved": "https://registry.npmjs.org/@hpcc-js/codemirror/-/codemirror-2.62.0.tgz", + "integrity": "sha512-KgVvmPKVJWS6nG3pLsGxRApLRo259Tpf0EEIHQtbqFQHbFHQLr9r2T6aAMtoh4eehqvkqUedsorCCnmlfZCx7A==", + "license": "Apache-2.0", "dependencies": { "@hpcc-js/common": "^2.71.17" } @@ -1932,13 +1936,14 @@ } }, "node_modules/@hpcc-js/eclwatch": { - "version": "2.74.3", - "resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.3.tgz", - "integrity": "sha512-tsJfXAbREXNXAzui8Mc7Vb9J2xmc1A40I2+pTTOFnVeHPv8bzDvc5sGQXgRrkqqOkeMwzGsnlpbVmC7zTZ33UA==", + "version": "2.74.5", + "resolved": 
"https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.5.tgz", + "integrity": "sha512-KGpefRbFD0ZIOq7eV3kF6Of2uG7wFA8C2o/vUUUy5+E+eG46qZRGqo2G4jLYqXbbCQ1RO8XHVcnXfeWR1XB4AQ==", + "license": "Apache-2.0", "dependencies": { - "@hpcc-js/codemirror": "^2.61.4", + "@hpcc-js/codemirror": "^2.62.0", "@hpcc-js/common": "^2.71.17", - "@hpcc-js/comms": "^2.92.1", + "@hpcc-js/comms": "^2.92.2", "@hpcc-js/dgrid": "^2.32.20", "@hpcc-js/graph": "^2.85.15", "@hpcc-js/layout": "^2.49.22", @@ -2083,11 +2088,66 @@ "tslib": "2.6.2" } }, + "node_modules/@hpcc-js/wasm": { + "version": "2.17.1", + "resolved": "https://registry.npmjs.org/@hpcc-js/wasm/-/wasm-2.17.1.tgz", + "integrity": "sha512-IeQFVcRir9vRbJqG/Eje2S/sIHatw/cx7Mp62S+J5VKiglc56kNUe8CxuZIeJaIo6YEuhIio/KnE3XN9oPI1Pg==", + "license": "Apache-2.0", + "dependencies": { + "yargs": "17.7.2" + }, + "bin": { + "dot-wasm": "bin/dot-wasm.js" + } + }, + "node_modules/@hpcc-js/wasm/node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@hpcc-js/wasm/node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@hpcc-js/wasm/node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.14", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "deprecated": "Use @eslint/config-array instead", "dev": true, + "license": "Apache-2.0", "dependencies": { "@humanwhocodes/object-schema": "^2.0.2", "debug": "^4.3.1", @@ -2102,6 +2162,7 @@ "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, + "license": "Apache-2.0", "engines": { "node": ">=12.22" }, @@ -2111,10 +2172,12 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", - "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", - "dev": true + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema 
instead", + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@isaacs/cliui": { "version": "8.0.2", @@ -2213,14 +2276,15 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", "dev": true, + "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -2236,10 +2300,11 @@ } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "dev": true, + "license": "MIT", "engines": { "node": ">=6.0.0" } @@ -2261,10 +2326,11 @@ "dev": true }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", - "integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -2307,10 +2373,11 @@ } }, "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", - "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==", - "dev": true + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", + "dev": true, + "license": "MIT" }, "node_modules/@lumino/algorithm": { "version": "1.9.2", @@ -3168,7 +3235,8 @@ "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/scheduler": { "version": "0.16.2", @@ -3451,7 +3519,8 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/@webassemblyjs/ast": { "version": "1.12.1", @@ -3704,6 +3773,7 @@ "resolved": 
"https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", "dev": true, + "license": "MIT", "peerDependencies": { "acorn": "^8" } @@ -3713,6 +3783,7 @@ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, + "license": "MIT", "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } @@ -3832,7 +3903,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, "engines": { "node": ">=8" } @@ -3841,7 +3911,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "dependencies": { "color-convert": "^2.0.1" }, @@ -3863,6 +3932,7 @@ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, + "license": "ISC", "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -3885,12 +3955,6 @@ "node": ">=12.17" } }, - "node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", - "dev": true - }, "node_modules/array-union": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", @@ -3986,12 +4050,16 @@ } }, "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/body-parser": { @@ -4059,13 +4127,12 @@ } }, "node_modules/bonjour-service": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz", - "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.2.1.tgz", + "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", "dev": true, + "license": "MIT", "dependencies": { - "array-flatten": "^2.1.2", - "dns-equal": "^1.0.0", "fast-deep-equal": "^3.1.3", "multicast-dns": "^7.2.5" } @@ -4086,21 +4153,22 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": 
"https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, + "license": "MIT", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.22.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.2.tgz", - "integrity": "sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==", + "version": "4.23.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.1.tgz", + "integrity": "sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==", "dev": true, "funding": [ { @@ -4116,11 +4184,12 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001565", - "electron-to-chromium": "^1.4.601", + "caniuse-lite": "^1.0.30001629", + "electron-to-chromium": "^1.4.796", "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "update-browserslist-db": "^1.0.16" }, "bin": { "browserslist": "cli.js" @@ -4202,14 +4271,15 @@ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/caniuse-lite": { - "version": "1.0.30001579", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001579.tgz", - "integrity": "sha512-u5AUVkixruKHJjw/pj9wISlcMpgFWzSrczLZbrqBSxukQixmg0SJ5sZTpvaFvxU0HoQKd4yoyAogyrAz9pzJnA==", + "version": "1.0.30001637", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001637.tgz", + "integrity": "sha512-1x0qRI1mD1o9e+7mBI7XtzFAP4XszbHaVWsMiGbSPLYekKTJF7K+FNk6AsXH4sUpc+qrsI3pVgf1Jdl/uGkuSQ==", "dev": true, "funding": [ { @@ -4224,7 +4294,8 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ] + ], + "license": "CC-BY-4.0" }, "node_modules/caseless": { "version": "0.12.0", @@ -4248,16 +4319,11 @@ } }, "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], + "license": "MIT", "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -4270,6 +4336,9 @@ "engines": { "node": ">= 8.10.0" }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, "optionalDependencies": { "fsevents": "~2.3.2" } @@ -4279,6 +4348,7 @@ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, + "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, @@ -4372,7 +4442,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "dependencies": { 
"color-name": "~1.1.4" }, @@ -4383,8 +4452,7 @@ "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "node_modules/colorette": { "version": "2.0.16", @@ -5048,17 +5116,12 @@ "node": ">=8" } }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==", - "dev": true - }, "node_modules/dns-packet": { "version": "5.6.1", "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", "dev": true, + "license": "MIT", "dependencies": { "@leichtgewicht/ip-codec": "^2.0.1" }, @@ -5165,10 +5228,11 @@ "dev": true }, "node_modules/electron-to-chromium": { - "version": "1.4.643", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.643.tgz", - "integrity": "sha512-QHscvvS7gt155PtoRC0dR2ilhL8E9LHhfTQEq1uD5AL0524rBLAwpAREFH06f87/e45B9XkR6Ki5dbhbCsVEIg==", - "dev": true + "version": "1.4.812", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.812.tgz", + "integrity": "sha512-7L8fC2Ey/b6SePDFKR2zHAy4mbdp1/38Yk5TsARO66W3hC5KEaeKMMHoxwtuH+jcu2AYLSn9QX04i95t6Fl1Hg==", + "dev": true, + "license": "ISC" }, "node_modules/element-resize-detector": { "version": "1.2.4", @@ -5181,8 +5245,7 @@ "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, "node_modules/emojis-list": { "version": "3.0.0", @@ -5203,10 +5266,11 @@ } }, "node_modules/enhanced-resolve": { - "version": "5.16.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz", - "integrity": "sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA==", + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", + "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", "dev": true, + "license": "MIT", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -5299,10 +5363,10 @@ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true, + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "license": "MIT", "engines": { "node": ">=6" } @@ -5330,6 +5394,7 @@ "resolved": 
"https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, + "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -5389,6 +5454,7 @@ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -5451,6 +5517,7 @@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, + "license": "MIT", "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -5467,6 +5534,7 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, + "license": "MIT", "dependencies": { "p-locate": "^5.0.0" }, @@ -5482,6 +5550,7 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -5497,6 +5566,7 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, + "license": "MIT", "dependencies": { "p-limit": "^3.0.2" }, @@ -5512,6 +5582,7 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, + "license": "BSD-2-Clause", "dependencies": { "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", @@ -5870,10 +5941,11 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, + "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6130,10 +6202,11 @@ } }, "node_modules/fs-monkey": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz", - "integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==", - "dev": true + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==", + "dev": true, + "license": "Unlicense" }, "node_modules/fs.realpath": { "version": "1.0.0", @@ -6141,6 +6214,21 @@ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", "dev": true }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": 
true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, "node_modules/function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", @@ -6151,7 +6239,6 @@ "version": "2.0.5", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, "engines": { "node": "6.* || 8.* || >= 10.*" } @@ -6254,6 +6341,7 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, + "license": "MIT", "dependencies": { "type-fest": "^0.20.2" }, @@ -6644,6 +6732,7 @@ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", "dev": true, + "license": "MIT", "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -6779,6 +6868,7 @@ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dev": true, + "license": "MIT", "dependencies": { "binary-extensions": "^2.0.0" }, @@ -6869,7 +6959,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, "engines": { "node": ">=8" } @@ -6918,6 +7007,7 @@ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.12.0" } @@ -6950,6 +7040,7 @@ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -8010,6 +8101,7 @@ "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", "dev": true, + "license": "Unlicense", "dependencies": { "fs-monkey": "^1.0.4" }, @@ -8223,6 +8315,7 @@ "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", "dev": true, + "license": "MIT", "dependencies": { "dns-packet": "^5.2.2", "thunky": "^1.0.2" @@ -8366,6 +8459,7 @@ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -8781,6 +8875,7 @@ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/retry": "0.12.0", "retry": "^0.13.1" @@ -8803,6 
+8898,7 @@ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, + "license": "MIT", "dependencies": { "callsites": "^3.0.0" }, @@ -8919,10 +9015,11 @@ "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", - "dev": true + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "dev": true, + "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", @@ -9437,6 +9534,7 @@ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "dev": true, + "license": "MIT", "dependencies": { "picomatch": "^2.2.1" }, @@ -9526,7 +9624,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -9594,6 +9691,7 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } @@ -9652,6 +9750,7 @@ "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 4" } @@ -10540,7 +10639,6 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -10612,7 +10710,6 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, "dependencies": { "ansi-regex": "^5.0.1" }, @@ -10656,6 +10753,7 @@ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" }, @@ -10760,19 +10858,29 @@ } }, "node_modules/tar": { - "version": "6.1.11", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", - "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "license": "ISC", "dependencies": { "chownr": "^2.0.0", "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", + "minipass": "^5.0.0", "minizlib": "^2.1.1", "mkdirp": "^1.0.3", "yallist": "^4.0.0" }, "engines": { - "node": 
">= 10" + "node": ">=10" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "license": "ISC", + "engines": { + "node": ">=8" } }, "node_modules/terser": { @@ -10912,7 +11020,8 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/tiny-emitter": { "version": "2.1.0", @@ -10937,6 +11046,7 @@ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, + "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -11029,6 +11139,7 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -11143,9 +11254,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.16.tgz", + "integrity": "sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==", "dev": true, "funding": [ { @@ -11161,9 +11272,10 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.1.2", + "picocolors": "^1.0.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -11320,6 +11432,7 @@ "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.91.0.tgz", "integrity": "sha512-rzVwlLeBWHJbmgTC/8TvAcu5vpJNII+MelQpylD4jNERPwpBJOE2lEcko1zJX3QJeLjTTAnQxn/OJ8bjDzVQaw==", "dev": true, + "license": "MIT", "dependencies": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^1.0.5", @@ -11421,6 +11534,7 @@ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", "dev": true, + "license": "MIT", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -11440,15 +11554,16 @@ } }, "node_modules/webpack-dev-middleware/node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.16.0.tgz", + "integrity": "sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==", "dev": true, + "license": "MIT", "dependencies": { - "fast-deep-equal": "^3.1.1", + "fast-deep-equal": "^3.1.3", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" + "uri-js": "^4.4.1" }, "funding": { "type": "github", @@ -11460,6 +11575,7 @@ "resolved": 
"https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", "dev": true, + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -11471,13 +11587,15 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/webpack-dev-middleware/node_modules/schema-utils": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", "dev": true, + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.9.0", @@ -11497,6 +11615,7 @@ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", "dev": true, + "license": "MIT", "dependencies": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", @@ -11589,7 +11708,9 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, + "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -11736,7 +11857,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -11773,9 +11893,10 @@ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "node_modules/ws": { - "version": "8.14.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz", - "integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "license": "MIT", "engines": { "node": ">=10.0.0" }, @@ -11813,7 +11934,6 @@ "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, "engines": { "node": ">=10" } @@ -11864,6 +11984,7 @@ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, diff --git a/esp/src/package.json b/esp/src/package.json index b104c55aed4..9f825f692bf 100644 --- a/esp/src/package.json +++ b/esp/src/package.json @@ -42,11 +42,11 @@ "@fluentui/react-icons-mdl2": "1.3.59", "@fluentui/react-migration-v8-v9": "9.6.3", "@hpcc-js/chart": "2.83.3", - "@hpcc-js/codemirror": "2.61.4", + "@hpcc-js/codemirror": "2.62.0", "@hpcc-js/common": "2.71.17", "@hpcc-js/comms": "2.92.2", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.3", + 
"@hpcc-js/eclwatch": "2.74.5", "@hpcc-js/graph": "2.85.15", "@hpcc-js/html": "2.42.20", "@hpcc-js/layout": "2.49.22", @@ -56,6 +56,7 @@ "@hpcc-js/react": "2.53.16", "@hpcc-js/tree": "2.40.17", "@hpcc-js/util": "2.51.0", + "@hpcc-js/wasm": "2.17.1", "@kubernetes/client-node": "0.20.0", "clipboard": "2.0.11", "d3-dsv": "3.0.1", @@ -67,9 +68,9 @@ "es6-promise": "4.2.8", "font-awesome": "4.7.0", "formik": "2.4.5", + "octokit": "3.1.2", "put-selector": "0.3.6", "query-string": "7.1.3", - "octokit": "3.1.2", "react": "17.0.2", "react-dom": "17.0.2", "react-hook-form": "7.51.2", diff --git a/esp/src/src-react/components/Metrics.tsx b/esp/src/src-react/components/Metrics.tsx index 5373bb6ab59..ee05e3055e3 100644 --- a/esp/src/src-react/components/Metrics.tsx +++ b/esp/src/src-react/components/Metrics.tsx @@ -23,6 +23,7 @@ import { ShortVerticalDivider } from "./Common"; import { MetricsOptions } from "./MetricsOptions"; import { BreadcrumbInfo, OverflowBreadcrumb } from "./controls/OverflowBreadcrumb"; import { MetricsPropertiesTables } from "./MetricsPropertiesTables"; +import { MetricsSQL } from "./MetricsSQL"; const logger = scopedLogger("src-react/components/Metrics.tsx"); @@ -77,29 +78,34 @@ class TableEx extends Table { _rawDataMap: { [id: number]: string } = {}; metrics(metrics: any[], options: MetricsOptionsT, timelineFilter: string, scopeFilter: string): this { - this.columns(["##", nlsHPCC.Type, nlsHPCC.Scope, ...options.properties]); - this.data(metrics.filter(m => this.scopeFilterFunc(m, scopeFilter)).filter(row => { - return (timelineFilter === "" || row.name?.indexOf(timelineFilter) === 0) && - (options.scopeTypes.indexOf(row.type) >= 0); - }).map((row, idx) => { - if (idx === 0) { - this._rawDataMap = { - 0: "##", 1: "type", 2: "name" - }; - options.properties.forEach((p, idx2) => { - this._rawDataMap[3 + idx2] = p; - }); - } - row.__hpcc_id = row.name; - return [idx, row.type, row.name, ...options.properties.map(p => { - return row.__groupedProps[p]?.Value ?? - row.__groupedProps[p]?.Max ?? - row.__groupedProps[p]?.Avg ?? - row.__formattedProps[p] ?? - row[p] ?? - ""; - }), row]; - })); + this + .columns(["##"]) // Reset hash to force recalculation of default widths + .columns(["##", nlsHPCC.Type, nlsHPCC.Scope, ...options.properties]) + .data(metrics + .filter(m => this.scopeFilterFunc(m, scopeFilter)) + .filter(row => { + return (timelineFilter === "" || row.name?.indexOf(timelineFilter) === 0) && + (options.scopeTypes.indexOf(row.type) >= 0); + }).map((row, idx) => { + if (idx === 0) { + this._rawDataMap = { + 0: "##", 1: "type", 2: "name" + }; + options.properties.forEach((p, idx2) => { + this._rawDataMap[3 + idx2] = p; + }); + } + row.__hpcc_id = row.name; + return [idx, row.type, row.name, ...options.properties.map(p => { + return row.__groupedProps[p]?.Value ?? + row.__groupedProps[p]?.Max ?? + row.__groupedProps[p]?.Avg ?? + row.__formattedProps[p] ?? + row[p] ?? 
+ ""; + }), row]; + })) + ; return this; } @@ -129,6 +135,8 @@ class TableEx extends Table { } } +type SelectedMetricsSource = "" | "scopesTable" | "scopesSqlTable" | "metricGraphWidget" | "hotspot" | "reset"; + interface MetricsProps { wuid: string; querySet?: string; @@ -146,7 +154,7 @@ export const Metrics: React.FunctionComponent = ({ }) => { const [_uiState, _setUIState] = React.useState({ ...defaultUIState }); const [timelineFilter, setTimelineFilter] = React.useState(""); - const [selectedMetricsSource, setSelectedMetricsSource] = React.useState<"" | "scopesTable" | "metricGraphWidget" | "hotspot" | "reset">(""); + const [selectedMetricsSource, setSelectedMetricsSource] = React.useState(""); const [selectedMetrics, setSelectedMetrics] = React.useState([]); const [selectedMetricsPtr, setSelectedMetricsPtr] = React.useState(-1); const [metrics, columns, _activities, _properties, _measures, _scopeTypes, fetchStatus, refresh] = useWUQueryMetrics(wuid, querySet, queryId); @@ -243,15 +251,18 @@ export const Metrics: React.FunctionComponent = ({ setScopeFilter(newValue || ""); }, []); + const scopesSelectionChanged = React.useCallback((source: SelectedMetricsSource, selection: IScope[]) => { + setSelectedMetricsSource(source); + pushUrl(`${parentUrl}/${selection.map(row => row.__lparam?.id ?? row.id).join(",")}`); + }, [parentUrl]); + const scopesTable = useConst(() => new TableEx() .multiSelect(true) .metrics([], options, timelineFilter, scopeFilter) .sortable(true) .on("click", debounce((row, col, sel) => { if (sel) { - const selection = scopesTable.selection(); - setSelectedMetricsSource("scopesTable"); - pushUrl(`${parentUrl}/${selection.map(row => row.__lparam.id).join(",")}`); + scopesSelectionChanged("scopesTable", scopesTable.selection()); } }, 100)) ); @@ -617,6 +628,9 @@ export const Metrics: React.FunctionComponent = ({ main={} /> + + scopesSelectionChanged("scopesSqlTable", selection)}> + diff --git a/esp/src/src-react/components/MetricsOptions.tsx b/esp/src/src-react/components/MetricsOptions.tsx index 549b644ae2b..db72c14579d 100644 --- a/esp/src/src-react/components/MetricsOptions.tsx +++ b/esp/src/src-react/components/MetricsOptions.tsx @@ -3,7 +3,7 @@ import { DefaultButton, PrimaryButton, Checkbox, Pivot, PivotItem, TextField } f import nlsHPCC from "src/nlsHPCC"; import { useMetricMeta, useMetricsOptions } from "../hooks/metrics"; import { MessageBox } from "../layouts/MessageBox"; -import { JSONSourceEditor } from "./SourceEditor"; +import { JSONSourceEditor, SourceEditor } from "./SourceEditor"; const width = 640; const innerHeight = 400; @@ -51,7 +51,7 @@ export const MetricsOptions: React.FunctionComponent = ({ /> } > - +
{ if (checked) { @@ -71,7 +71,7 @@ export const MetricsOptions: React.FunctionComponent = ({ })}
- +
{properties.map(p => { return = 0} onChange={(ev, checked) => { @@ -84,7 +84,14 @@ export const MetricsOptions: React.FunctionComponent = ({ })}
- + +
+ { + setOptions({ ...options, sql }); + }} /> +
+
+
{ setOptions({ ...options, ignoreGlobalStoreOutEdges: !!checked }); @@ -100,7 +107,7 @@ export const MetricsOptions: React.FunctionComponent = ({ }} />
- +
{ if (obj) { @@ -110,5 +117,5 @@ export const MetricsOptions: React.FunctionComponent = ({
- ; + ; }; \ No newline at end of file diff --git a/esp/src/src-react/components/MetricsSQL.tsx b/esp/src/src-react/components/MetricsSQL.tsx new file mode 100644 index 00000000000..76d202ac20e --- /dev/null +++ b/esp/src/src-react/components/MetricsSQL.tsx @@ -0,0 +1,174 @@ +import * as React from "react"; +import { CommandBarButton, Stack } from "@fluentui/react"; +import { useConst } from "@fluentui/react-hooks"; +import { IScope } from "@hpcc-js/comms"; +import { ICompletion } from "@hpcc-js/codemirror"; +import { Table } from "@hpcc-js/dgrid"; +import * as Utility from "src/Utility"; +import { useDuckDBConnection } from "../hooks/duckdb"; +import { HolyGrail } from "../layouts/HolyGrail"; +import { AutosizeHpccJSComponent } from "../layouts/HpccJSAdapter"; +import { debounce } from "../util/throttle"; +import { SQLSourceEditor } from "./SourceEditor"; +import nlsHPCC from "src/nlsHPCC"; + +const spaceRegex = new RegExp("\\s", "g"); + +interface MetricsDataProps { + defaultSql: string; + scopes: IScope[]; + onSelectionChanged: (selection: IScope[]) => void; +} + +export const MetricsSQL: React.FunctionComponent = ({ + defaultSql, + scopes, + onSelectionChanged +}) => { + + const cleanScopes = React.useMemo(() => { + return scopes.map(scope => { + const retVal = { ...scope }; + delete retVal.__children; + return retVal; + }); + }, [scopes]); + + const connection = useDuckDBConnection(cleanScopes, "metrics"); + const [schema, setSchema] = React.useState([]); + const [sql, setSql] = React.useState(defaultSql); + const [sqlError, setSqlError] = React.useState(); + const [dirtySql, setDirtySql] = React.useState(sql); + const [data, setData] = React.useState([]); + + // Grid --- + const columns = React.useMemo((): string[] => { + const retVal: string[] = []; + schema.forEach(col => { + retVal.push(col.column_name); + }); + return retVal; + }, [schema]); + + const scopesTable = useConst(() => new Table() + .multiSelect(true) + .sortable(true) + .noDataMessage(nlsHPCC.loadingMessage) + .on("click", debounce((row, col, sel) => { + if (sel) { + onSelectionChanged(scopesTable.selection()); + } + }, 100)) + ); + + React.useEffect(() => { + if (columns.length === 0 && data.length === 0 && sqlError) { + scopesTable + .columns(["Error"]) + .data(sqlError.message.split("\n").map(line => { + if (line.indexOf("LINE") === 0) { + } else if (line.includes("^")) { + line = line.replace(spaceRegex, " "); + } + return [line]; + })) + .lazyRender() + ; + } else { + scopesTable + .columns(["##"]) // Reset hash to force recalculation of default widths + .columns(["##", ...columns]) + .data(data.map((row, idx) => [idx + 1, ...row])) + .lazyRender() + ; + } + }, [columns, data, sqlError, scopesTable]); + + // Query --- + React.useEffect(() => { + if (cleanScopes.length === 0) { + setSchema([]); + setData([]); + } else if (connection) { + connection.query(`DESCRIBE ${sql}`).then(result => { + if (connection) { + setSchema(result.toArray().map((row) => row.toJSON())); + } + }).catch(e => { + setSchema([]); + }); + + setSqlError(undefined); + connection.query(sql).then(result => { + if (connection) { + setData(result.toArray().map((row) => { + return row.toArray(); + })); + } + }).catch(e => { + setSqlError(e); + setData([]); + }).finally(() => { + scopesTable.noDataMessage(nlsHPCC.noDataMessage); + }); + } + }, [cleanScopes.length, connection, scopesTable, sql]); + + // Selection --- + const onChange = React.useCallback((newSql: string) => { + setDirtySql(newSql); + }, []); + + const onFetchHints = 
React.useCallback((cm, option): Promise => { + const cursor = cm.getCursor(); + const lineStr = cm.getLine(cursor.line); + let lineEnd = cursor.ch; + let end = cm.indexFromPos({ line: cursor.line, ch: lineEnd }); + if (connection) { + return connection.query(`SELECT * FROM sql_auto_complete("${dirtySql.substring(0, end)}")`).then(result => { + if (connection) { + const hints = result.toArray().map((row) => row.toJSON()); + while (lineEnd < lineStr.length && /\w/.test(lineStr.charAt(lineEnd))) ++lineEnd; + end = cm.indexFromPos({ line: cursor.line, ch: lineEnd }); + const suggestion_start = hints.length ? hints[0].suggestion_start : end; + return { + list: hints.map(row => row.suggestion), + from: cm.posFromIndex(suggestion_start), + to: cm.posFromIndex(end) + }; + } + }).catch(e => { + return Promise.resolve(null); + }); + } + return Promise.resolve(null); + }, [connection, dirtySql]); + + const onSubmit = React.useCallback(() => { + setSql(dirtySql); + }, [dirtySql]); + + const onCopy = React.useCallback(() => { + const tsv = scopesTable.export("TSV"); + navigator?.clipboard?.writeText(tsv); + }, [scopesTable]); + + const onDownload = React.useCallback(() => { + const csv = scopesTable.export("CSV"); + Utility.downloadCSV(csv, "metrics.csv"); + }, [scopesTable]); + + return +
+ +
+ setSql(dirtySql)} /> + + + + } + main={} + />; +}; diff --git a/esp/src/src-react/components/SourceEditor.tsx b/esp/src/src-react/components/SourceEditor.tsx index 6a367c39c71..5e070c6f419 100644 --- a/esp/src/src-react/components/SourceEditor.tsx +++ b/esp/src/src-react/components/SourceEditor.tsx @@ -1,7 +1,7 @@ import * as React from "react"; import { CommandBar, ContextualMenuItemType, ICommandBarItemProps } from "@fluentui/react"; import { useConst, useOnEvent } from "@fluentui/react-hooks"; -import { Editor, ECLEditor, XMLEditor, JSONEditor } from "@hpcc-js/codemirror"; +import { Editor, ECLEditor, XMLEditor, JSONEditor, SQLEditor, ICompletion } from "@hpcc-js/codemirror"; import { Workunit } from "@hpcc-js/comms"; import nlsHPCC from "src/nlsHPCC"; import { HolyGrail } from "../layouts/HolyGrail"; @@ -12,7 +12,30 @@ import { ShortVerticalDivider } from "./Common"; import "eclwatch/css/cmDarcula.css"; -type ModeT = "ecl" | "xml" | "json" | "text"; +type ModeT = "ecl" | "xml" | "json" | "text" | "sql"; + +class SQLEditorEx extends SQLEditor { + + constructor() { + super(); + } + + enter(domNode, element) { + super.enter(domNode, element); + this.option("extraKeys", { + "Ctrl-Enter": cm => { + this.submit(); + }, + "Ctrl-S": cm => { + this.submit(); + } + + } as any); + } + + submit() { + } +} function newEditor(mode: ModeT) { switch (mode) { @@ -22,6 +45,8 @@ function newEditor(mode: ModeT) { return new XMLEditor(); case "json": return new JSONEditor(); + case "sql": + return new SQLEditorEx(); case "text": default: return new Editor(); @@ -32,14 +57,20 @@ interface SourceEditorProps { mode?: ModeT; text?: string; readonly?: boolean; - onChange?: (text: string) => void; + toolbar?: boolean; + onTextChange?: (text: string) => void; + onFetchHints?: (cm: any, option: any) => Promise; + onSubmit?: () => void; } export const SourceEditor: React.FunctionComponent = ({ mode = "text", text = "", readonly = false, - onChange = (text: string) => { } + toolbar = true, + onTextChange = (text: string) => { }, + onFetchHints, + onSubmit }) => { const { isDark } = useUserTheme(); @@ -55,23 +86,33 @@ export const SourceEditor: React.FunctionComponent = ({ { key: "divider_1", itemType: ContextualMenuItemType.Divider, onRender: () => }, ]; - const editor = useConst(() => newEditor(mode) - .on("changes", () => { - onChange(editor.text()); - }) - ); + const editor = useConst(() => newEditor(mode)); React.useEffect(() => { - editor.option("theme", isDark ? "darcula" : "default"); - if (editor.text() !== text) { - editor.text(text); - } + editor + .on("changes", onTextChange ? () => onTextChange(editor.text()) : undefined, true) + ; + }, [editor, onTextChange]); + React.useEffect(() => { editor - .readOnly(readonly) - .lazyRender() + .showHints(onFetchHints !== undefined) + .on("fetchHints", (cm, option) => { + if (onFetchHints) { + return onFetchHints(cm, option); + } + return Promise.resolve(null); + }, true) ; - }, [editor, text, readonly, isDark]); + }, [editor, onFetchHints]); + + React.useEffect(() => { + if (onSubmit) { + editor + .on("submit", onSubmit ? () => onSubmit() : undefined, true) + ; + } + }, [editor, onSubmit]); const handleThemeToggle = React.useCallback((evt) => { if (!editor) return; @@ -83,8 +124,20 @@ export const SourceEditor: React.FunctionComponent = ({ }, [editor]); useOnEvent(document, "eclwatch-theme-toggle", handleThemeToggle); + React.useEffect(() => { + editor.option("theme", isDark ? 
"darcula" : "default"); + if (editor.text() !== text) { + editor.text(text); + } + + editor + .readOnly(readonly) + .lazyRender() + ; + }, [editor, text, readonly, isDark]); + return } + header={toolbar && } main={ } @@ -144,7 +197,7 @@ export const JSONSourceEditor: React.FunctionComponent = } }, [onChange]); - return ; + return ; }; export interface WUXMLSourceEditorProps { @@ -243,3 +296,21 @@ export const FetchEditor: React.FunctionComponent = ({ return ; }; +interface SQLSourceEditorProps { + sql: string; + toolbar?: boolean; + onSqlChange?: (sql: string) => void; + onFetchHints?: (cm: any, option: any) => Promise; + onSubmit?: () => void; +} + +export const SQLSourceEditor: React.FunctionComponent = ({ + sql, + toolbar, + onSqlChange, + onFetchHints, + onSubmit +}) => { + return ; +}; + diff --git a/esp/src/src-react/components/WorkunitDetails.tsx b/esp/src/src-react/components/WorkunitDetails.tsx index 6d790b184a8..d7e84120d13 100644 --- a/esp/src/src-react/components/WorkunitDetails.tsx +++ b/esp/src/src-react/components/WorkunitDetails.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { Icon } from "@fluentui/react"; +import { Icon, Shimmer } from "@fluentui/react"; import { WsWorkunits, WorkunitsService } from "@hpcc-js/comms"; import { scopedLogger } from "@hpcc-js/util"; import { SizeMe } from "react-sizeme"; @@ -16,7 +16,6 @@ import { Helpers } from "./Helpers"; import { IFrame } from "./IFrame"; import { Logs } from "./Logs"; import { useNextPrev } from "./Menu"; -import { Metrics } from "./Metrics"; import { Queries } from "./Queries"; import { Resources } from "./Resources"; import { Result } from "./Result"; @@ -29,6 +28,8 @@ import { WorkunitSummary } from "./WorkunitSummary"; import { TabInfo, DelayLoadedPanel, OverflowTabList } from "./controls/TabbedPanes/index"; import { ECLArchive } from "./ECLArchive"; +const Metrics = React.lazy(() => import("./Metrics").then(mod => ({ default: mod.Metrics }))); + const logger = scopedLogger("src-react/components/WorkunitDetails.tsx"); const workunitService = new WorkunitsService({ baseUrl: "" }); @@ -197,7 +198,16 @@ export const WorkunitDetails: React.FunctionComponent = ({ - + + + + + + + }> + + diff --git a/esp/src/src-react/hooks/duckdb.ts b/esp/src/src-react/hooks/duckdb.ts new file mode 100644 index 00000000000..75bfa8dd5cd --- /dev/null +++ b/esp/src/src-react/hooks/duckdb.ts @@ -0,0 +1,59 @@ +import * as React from "react"; +import { DuckDB } from "@hpcc-js/wasm/dist/duckdb"; + +type AsyncDuckDB = any; +type AsyncDuckDBConnection = any; + +export function useDuckDB(): [AsyncDuckDB] { + + const [db, setDb] = React.useState(); + + React.useEffect(() => { + const duckdb = DuckDB.load().then(duckdb => { + setDb(duckdb.db); + return duckdb; + }); + + return () => { + duckdb?.db?.close(); + }; + }, []); + + return [db]; +} + +export function useDuckDBConnection(scopes: T, name: string): AsyncDuckDBConnection | undefined { + + const [db] = useDuckDB(); + const [connection, setConnection] = React.useState(undefined); + + React.useEffect(() => { + let c: AsyncDuckDBConnection | undefined; + if (db) { + db.connect().then(async connection => { + await db.registerFileText(`${name}.json`, JSON.stringify(scopes)); + await connection.insertJSONFromPath(`${name}.json`, { name }); + await connection.close(); + c = await db.connect(); + try { // TODO: Move to @hpcc-js/wasm + await c.query("LOAD autocomplete").catch(e => { + console.log(e.message); + }); + } catch (e) { + console.log(e.message); + } + setConnection(c); + }); + } + 
return () => { + try { + c?.query(`DROP TABLE ${name}`); + } finally { + c?.close(); + } + + }; + }, [db, name, scopes]); + + return connection; +} diff --git a/esp/src/src-react/hooks/metrics.ts b/esp/src/src-react/hooks/metrics.ts index d377997ac92..24d4a5341e4 100644 --- a/esp/src/src-react/hooks/metrics.ts +++ b/esp/src/src-react/hooks/metrics.ts @@ -7,19 +7,35 @@ import { useWorkunit } from "./workunit"; import { useQuery } from "./query"; import { useCounter } from "./util"; -const logger = scopedLogger("src-react\hooks\metrics.ts"); +const logger = scopedLogger("src-react/hooks/metrics.ts"); -const defaults = { - scopeTypes: ["graph", "subgraph", "activity", "edge"], +const MetricOptionsVersion = 2; + +export interface MetricsOptions { + scopeTypes: string[]; + properties: string[]; + ignoreGlobalStoreOutEdges: boolean; + subgraphTpl; + activityTpl; + edgeTpl; + sql: string; + layout?: object; + showTimeline: boolean; +} + +const defaults: MetricsOptions = { + scopeTypes: ["graph", "subgraph", "activity", "operation", "workflow"], properties: ["TimeElapsed"], ignoreGlobalStoreOutEdges: true, subgraphTpl: "%id% - %TimeElapsed%", activityTpl: "%Label%", edgeTpl: "%Label%\n%NumRowsProcessed%\n%SkewMinRowsProcessed% / %SkewMaxRowsProcessed%", - layout: undefined + sql: "SELECT type, name, TimeElapsed, id\n FROM metrics\n WHERE TimeElapsed IS NOT NULL", + layout: undefined, + showTimeline: true }; -const options = { ...defaults }; +const options: MetricsOptions = { ...defaults }; function checkLayout(options: MetricsOptions): boolean { if (options?.layout && !options?.layout?.["main"]) { @@ -28,16 +44,6 @@ function checkLayout(options: MetricsOptions): boolean { return !!options?.layout; } -export interface MetricsOptions { - scopeTypes: string[]; - properties: string[]; - ignoreGlobalStoreOutEdges: boolean; - subgraphTpl; - activityTpl; - edgeTpl; - layout?: object -} - export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) => void, () => void, (toDefaults?: boolean) => void] { const store = useConst(() => userKeyValStore()); @@ -52,7 +58,7 @@ export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) => const save = React.useCallback(() => { if (checkLayout(options)) { - store?.set("MetricOptions", JSON.stringify(options), true); + store?.set(`MetricOptions-${MetricOptionsVersion}`, JSON.stringify(options), true); } }, [store]); @@ -60,7 +66,7 @@ export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) => if (toDefaults) { setOptions({ ...defaults }); } else { - store?.get("MetricOptions").then(opts => { + store?.get(`MetricOptions-${MetricOptionsVersion}`).then(opts => { const options = JSON.parse(opts); checkLayout(options); setOptions({ ...defaults, ...options }); diff --git a/esp/src/src-react/layouts/DockPanel.tsx b/esp/src/src-react/layouts/DockPanel.tsx index af586ee5919..8ebea0e1c74 100644 --- a/esp/src/src-react/layouts/DockPanel.tsx +++ b/esp/src/src-react/layouts/DockPanel.tsx @@ -51,8 +51,6 @@ export class ReactWidget extends HTMLWidget { this._div = element.append("div"); } - private _prevWidth; - private _prevHeight; update(domNode, element) { super.update(domNode, element); this._div @@ -66,13 +64,6 @@ export class ReactWidget extends HTMLWidget { , this._div.node() ); - - // TODO: Hack to make command bar resize... 
- if (this._prevWidth !== this.width() || this._prevHeight !== this.height()) { - this._prevWidth = this.width(); - this._prevHeight = this.height(); - window.dispatchEvent(new Event("resize")); - } } exit(domNode, element) { diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index 8588cf18c54..d6b2c3aad70 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -547,6 +547,7 @@ export = { Methods: "Methods", Metrics: "Metrics", MetricsGraph: "Metrics/Graph", + MetricsSQL: "Metrics (SQL)", Min: "Min", Mine: "Mine", MinNode: "Min Node", @@ -884,6 +885,7 @@ export = { Spill: "Spill", SplitPrefix: "Split Prefix", Spray: "Spray", + SQL: "SQL", Start: "Start", Started: "Started", Starting: "Starting", diff --git a/esp/src/tsconfig.json b/esp/src/tsconfig.json index dc6b41ab0ae..55fd90ef44b 100644 --- a/esp/src/tsconfig.json +++ b/esp/src/tsconfig.json @@ -3,9 +3,9 @@ "baseUrl": ".", "outDir": "./lib", "declarationDir": "./types", - "target": "es5", - "module": "amd", - "moduleResolution": "node", + "target": "ES5", + "module": "AMD", + "moduleResolution": "Node", "allowSyntheticDefaultImports": true, "sourceMap": true, "declaration": true, @@ -22,12 +22,16 @@ "downlevelIteration": true, "jsx": "react", "lib": [ - "dom", - "es2019" + "DOM", + "ES2019" ], "typeRoots": [], "types": [], "paths": { + "@hpcc-js/wasm": [ + "./node_modules/@hpcc-js/wasm", + "../../../hpcc-js-wasm" + ], "@hpcc-js/*": [ "./node_modules/@hpcc-js/*", "../../../hpcc-js/packages/*", diff --git a/esp/src/webpack.config.js b/esp/src/webpack.config.js index 3d15d932e6a..9365d0d16e8 100644 --- a/esp/src/webpack.config.js +++ b/esp/src/webpack.config.js @@ -88,8 +88,12 @@ module.exports = function (env) { }, resolve: { alias: { + "@hpcc-js/wasm/dist/duckdb": path.resolve(__dirname, "node_modules/@hpcc-js/wasm/dist/duckdb.js"), }, fallback: { + "@hpcc-js/wasm": [ + path.resolve(__dirname, "../../../hpcc-js-wasm"), + ], "@hpcc-js": [ path.resolve(__dirname, "../../../hpcc-js/packages"), path.resolve(__dirname, "../../../Visualization/packages") @@ -101,6 +105,7 @@ module.exports = function (env) { modules: ["node_modules"] }, + target: "web", mode: isProduction ? "production" : "development", devtool: isProduction ? undefined : "cheap-module-source-map", From 5187b4edb6fb57e55872ea6e30c175fafb451672 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 20 Jun 2024 16:09:30 +0100 Subject: [PATCH 25/31] HPCC-30433 Add match statistics to join activities in thor Signed-off-by: Gavin Halliday --- system/jlib/jstatcodes.h | 4 + system/jlib/jstats.cpp | 7 ++ .../hashdistrib/thhashdistribslave.cpp | 2 + thorlcr/activities/join/thjoinslave.cpp | 2 + .../lookupjoin/thlookupjoinslave.cpp | 19 ++++- thorlcr/activities/msort/thsortu.cpp | 78 ++++++++++++++++++- thorlcr/activities/msort/thsortu.hpp | 41 ++++++++-- .../activities/selfjoin/thselfjoinslave.cpp | 9 ++- thorlcr/thorutil/thormisc.cpp | 6 +- thorlcr/thorutil/thormisc.hpp | 1 + 10 files changed, 155 insertions(+), 14 deletions(-) diff --git a/system/jlib/jstatcodes.h b/system/jlib/jstatcodes.h index 9995b70ddfd..aa133ad5ba2 100644 --- a/system/jlib/jstatcodes.h +++ b/system/jlib/jstatcodes.h @@ -307,6 +307,10 @@ enum StatisticKind StSizeRemoteWrite, StSizePeakTempDisk, StSizePeakEphemeralDisk, + StNumMatchLeftRowsMax, + StNumMatchRightRowsMax, + StNumMatchCandidates, + StNumMatchCandidatesMax, StMax, //For any quantity there is potentially the following variants. 
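The aggregation performed by the four new counters is implemented by JoinMatchStats::noteGroup (added to thsortu.hpp later in this patch); a small worked example, with group sizes assumed purely for illustration:

// Illustrative sketch only - assumed sample group sizes, not real workunit output.
// For join groups with (left, right) row counts of (3,4), (1,1) and (5,2):
//   StNumMatchLeftRowsMax   = max(3, 1, 5)    = 5
//   StNumMatchRightRowsMax  = max(4, 1, 2)    = 4
//   StNumMatchCandidates    = 3*4 + 1*1 + 5*2 = 23
//   StNumMatchCandidatesMax = max(12, 1, 10)  = 12
// The three *Max kinds only make sense per worker, which is why the jstats.cpp
// hunk below adds them to isWorthReportingMergedValue's suppression list.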
diff --git a/system/jlib/jstats.cpp b/system/jlib/jstats.cpp index 57733e6c259..92f02d1bdec 100644 --- a/system/jlib/jstats.cpp +++ b/system/jlib/jstats.cpp @@ -979,6 +979,10 @@ static const constexpr StatisticMeta statsMetaData[StMax] = { { SIZESTAT(RemoteWrite), "Size of data sent to remote workers"}, { PEAKSIZESTAT(PeakTempDisk), "High water mark for temporary files"}, { PEAKSIZESTAT(PeakEphemeralDisk), "High water mark for ephemeral storage use"}, + { NUMSTAT(MatchLeftRowsMax), "The largest number of left rows in a join group" }, + { NUMSTAT(MatchRightRowsMax), "The largest number of right rows in a join group" }, + { NUMSTAT(MatchCandidates), "The number of candidate combinations of left and right rows forming join groups" }, + { NUMSTAT(MatchCandidatesMax), "The largest number of candidate combinations of left and right rows in a single group" }, }; static MapStringTo statisticNameMap(true); @@ -3105,6 +3109,9 @@ static bool isWorthReportingMergedValue(StatisticKind kind) { case StSizePeakMemory: case StSizePeakRowMemory: + case StNumMatchLeftRowsMax: + case StNumMatchRightRowsMax: + case StNumMatchCandidatesMax: //These only make sense for individual nodes, the aggregated value is meaningless return false; } diff --git a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp index 0b814f94b63..61a49ced9a9 100644 --- a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp +++ b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp @@ -4042,6 +4042,7 @@ class HashJoinSlaveActivity : public CSlaveActivity, implements IStopInput strmR.clear(); { CriticalBlock b(joinHelperCrit); + joinhelper->gatherStats(inactiveStats); joinhelper.clear(); } PARENT::stop(); @@ -4087,6 +4088,7 @@ class HashJoinSlaveActivity : public CSlaveActivity, implements IStopInput } else { + joinhelper->gatherStats(activeStats); activeStats.setStatistic(StNumLeftRows, joinhelper->getLhsProgress()); activeStats.setStatistic(StNumRightRows, joinhelper->getRhsProgress()); } diff --git a/thorlcr/activities/join/thjoinslave.cpp b/thorlcr/activities/join/thjoinslave.cpp index f7a07ee9f69..f2262256ab7 100644 --- a/thorlcr/activities/join/thjoinslave.cpp +++ b/thorlcr/activities/join/thjoinslave.cpp @@ -378,6 +378,7 @@ class JoinSlaveActivity : public CSlaveActivity, implements ILookAheadStopNotify rhsProgressCount = joinhelper->getRhsProgress(); { CriticalBlock b(joinHelperCrit); + joinhelper->gatherStats(inactiveStats); joinhelper.clear(); } ActPrintLog("SortJoinSlaveActivity::stop"); @@ -627,6 +628,7 @@ class JoinSlaveActivity : public CSlaveActivity, implements ILookAheadStopNotify } else { + joinhelper->gatherStats(activeStats); activeStats.setStatistic(StNumLeftRows, joinhelper->getLhsProgress()); if (!isSelfJoin) activeStats.setStatistic(StNumRightRows, joinhelper->getRhsProgress()); diff --git a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp index 09662742d09..3a0da40d7f3 100644 --- a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp +++ b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp @@ -806,6 +806,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { typedef CSlaveActivity PARENT; + JoinMatchStats matchStats; Owned leftexception; bool eos, eog, someSinceEog; @@ -949,6 +950,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, unsigned keepLimit; unsigned joined; unsigned joinCounter; + unsigned candidateCounter; OwnedConstThorRow defaultLeft; 
bool leftMatch, grouped; @@ -1165,10 +1167,12 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, inline const void *denormalizeNextRow() { ConstPointerArray filteredRhs; + unsigned candidates = 0; while (rhsNext) { if (abortSoon) return NULL; + candidates++; if (!fuzzyMatch || (HELPERBASE::match(leftRow, rhsNext))) { leftMatch = true; @@ -1187,6 +1191,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, } rhsNext = tableProxy->getNextRHS(currentHashEntry); // NB: currentHashEntry only used for Lookup,Many case } + matchStats.noteGroup(1, candidates); if (filteredRhs.ordinality() || (!leftMatch && 0!=(flags & JFleftouter))) { unsigned rcCount = 0; @@ -1238,6 +1243,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { leftRow.setown(left->nextRow()); joinCounter = 0; + candidateCounter = 0; if (leftRow) { eog = false; @@ -1273,6 +1279,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, RtlDynamicRowBuilder rowBuilder(allocator); while (rhsNext) { + candidateCounter++; if (!fuzzyMatch || HELPERBASE::match(leftRow, rhsNext)) { leftMatch = true; @@ -1289,12 +1296,15 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, rhsNext = NULL; else rhsNext = tableProxy->getNextRHS(currentHashEntry); // NB: currentHashEntry only used for Lookup,Many case + if (!rhsNext) + matchStats.noteGroup(1, candidateCounter); return row.getClear(); } } } rhsNext = tableProxy->getNextRHS(currentHashEntry); // NB: currentHashEntry used for Lookup,Many or All cases } + matchStats.noteGroup(1, candidateCounter); if (!leftMatch && NULL == rhsNext && 0!=(flags & JFleftouter)) { size32_t sz = HELPERBASE::joinTransform(rowBuilder, leftRow, defaultRight, 0, JTFmatchedleft); @@ -1330,6 +1340,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, joined = 0; joinCounter = 0; + candidateCounter = 0; leftMatch = false; returnMany = false; @@ -1472,6 +1483,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { joined = 0; joinCounter = 0; + candidateCounter = 0; leftMatch = false; rhsNext = NULL; @@ -1631,6 +1643,11 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { ActPrintLog("LHS input finished, %" RCPF "d rows read", count); } + virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const override + { + PARENT::gatherActiveStats(activeStats); + matchStats.gatherStats(activeStats); + } }; @@ -3359,7 +3376,7 @@ class CAllJoinSlaveActivity : public CInMemJoinBase } } public: - CAllJoinSlaveActivity(CGraphElementBase *_container) : PARENT(_container) + CAllJoinSlaveActivity(CGraphElementBase *_container) : PARENT(_container, allJoinActivityStatistics) { returnMany = true; } diff --git a/thorlcr/activities/msort/thsortu.cpp b/thorlcr/activities/msort/thsortu.cpp index 0caf778d494..0e2aafc0033 100644 --- a/thorlcr/activities/msort/thsortu.cpp +++ b/thorlcr/activities/msort/thsortu.cpp @@ -276,6 +276,7 @@ void swapRows(RtlDynamicRowBuilder &row1, RtlDynamicRowBuilder &row2) row1.swapWith(row2); } + class CJoinHelper : implements IJoinHelper, public CSimpleInterface { CActivityBase &activity; @@ -314,11 +315,13 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface OwnedConstThorRow defaultRight; Linked strmL; Linked strmR; + JoinMatchStats matchStats; bool abort = false; bool nextleftgot = false; bool nextrightgot = false; unsigned atmost = (unsigned)-1; rowcount_t lhsProgressCount = 0, rhsProgressCount 
= 0; unsigned keepmax = (unsigned)-1; unsigned abortlimit = (unsigned)-1; unsigned keepremaining = (unsigned)-1; @@ -819,8 +822,16 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface } } while (state == JSonfail); + //We have read a row that does not match, so decrement by 1 to get the count for the row that mismatched + { + //Nested scope to avoid problems with variable leaking into the following case + rowcount_t nextStartMatchLhsProgressCount = lhsProgressCount - 1; + matchStats.noteGroup(nextStartMatchLhsProgressCount - startMatchLhsProgressCount, 0); + startMatchLhsProgressCount = nextStartMatchLhsProgressCount; + } // fall through case JScompare: + //Need to create a new match group when the right has been completely processed if (getL()) { rightidx = 0; rightgroupmatched = NULL; @@ -896,14 +907,29 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface if (!hitatmost&&rightgroup.ordinality()) state = JSmatch; else if (cmp<0) + { + //Left row with no matching right row + matchStats.noteGroup(1, 0); // This will not spot large left groups + startMatchLhsProgressCount = lhsProgressCount; ret.setown(outrow(Onext,Oouter)); + } else + { + //Right row with no matching left rows. + //This will not spot large right groups since it processes a row at a time + matchStats.noteGroup(0, 1); ret.setown(outrow(Oouter,Onext)); + } } } - else if (getR()) + else if (getR()) + { + //We would miss tracking a very large trailing right group, but it is not worth + //the extra work to spot it + //FUTURE: if (!rightouter) we could return null and stop reading the rhs. ret.setown(outrow(Oouter,Onext)); + } else return NULL; break; @@ -920,6 +946,7 @@ nextL(); } mcoreintercept->addWork(&leftgroup,&rightgroup); + startMatchLhsProgressCount = (lhsProgressCount - 1); // Never used, but keep consistent with other cases state = JScompare; } else if (rightidxdocompare(nextleft,prevleft); - if (cmp>0) + if (cmp>0) + { + //Finished processing this group -> gather the stats for the number of join candidates. + //lhsProgressCount is one higher than the row count that follows the end of the group + rowcount_t numLeftRows = (lhsProgressCount - 1) - startMatchLhsProgressCount; + matchStats.noteGroup(numLeftRows, rightgroup.ordinality()); + startMatchLhsProgressCount = (lhsProgressCount - 1); state = JSrightgrouponly; + } else if (cmp<0) { activity.logRow("prev: ", *allocatorL->queryOutputMeta(), prevleft); @@ -942,10 +976,17 @@ } } else + { + //Finished processing this group -> gather the stats for the number of join candidates. + rowcount_t numLeftRows = lhsProgressCount - startMatchLhsProgressCount; + matchStats.noteGroup(numLeftRows, rightgroup.ordinality()); + startMatchLhsProgressCount = lhsProgressCount; state = JSrightgrouponly; + } } break; - case JSrightgrouponly: + case JSrightgrouponly: + //FUTURE: Avoid walking the right group if it is an inner/left only join. 
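+                //(At present the walk always happens so that unmatched right rows can be emitted for right/full outer joins.)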
// right group if (rightidx INITIAL_SELFJOIN_MATCH_WARNING_LEVEL) { Owned e = MakeActivityWarning(&activity, TE_SelfJoinMatchWarning, "Exceeded initial match limit"); e->queryData().append((unsigned)curgroup.ordinality()); @@ -1299,6 +1348,12 @@ virtual void stop() { abort = true; } virtual rowcount_t getLhsProgress() const { return progressCount; } virtual rowcount_t getRhsProgress() const { return progressCount; } + virtual void gatherStats(CRuntimeStatisticCollection & stats) const override + { + //Left and right progress could be added here. + matchStats.gatherStats(stats); + } + }; IJoinHelper *createDenormalizeHelper(CActivityBase &activity, IHThorDenormalizeArg *helper, IThorRowInterfaces *rowIf) @@ -1464,7 +1519,7 @@ class CMultiCoreJoinHelperBase: implements IJoinHelper, implements IMulticoreInt Owned exc; CriticalSection sect; bool eos, selfJoin; - + JoinMatchStats matchStats; void setException(IException *e,const char *title) { @@ -1561,6 +1616,18 @@ class CMultiCoreJoinHelperBase: implements IJoinHelper, implements IMulticoreInt } } + void noteGroupSizes(CThorExpandingRowArray *lgroup,CThorExpandingRowArray *rgroup) + { + rowidx_t numLeft = lgroup ? lgroup->ordinality() : 0; + rowidx_t numRight = rgroup ? rgroup->ordinality() : 0; + matchStats.noteGroup(numLeft, numRight); + } + + virtual void gatherStats(CRuntimeStatisticCollection & stats) const override + { + matchStats.gatherStats(stats); + } + CMultiCoreJoinHelperBase(CActivityBase &_activity, unsigned numthreads, bool _selfJoin, IJoinHelper *_jhelper, IHThorJoinArg *_helper, IThorRowInterfaces *_rowIf) : activity(_activity), rowIf(_rowIf) { @@ -1804,6 +1871,8 @@ class CMultiCoreJoinHelper: public CMultiCoreJoinHelperBase * The pull side also pulls from the workers in sequence * This ensures the output is returned in input order. */ + noteGroupSizes(lgroup, rgroup); + cWorker *worker = workers[curin]; worker->workready.wait(); workers[curin]->work.set(lgroup,rgroup); @@ -1987,6 +2056,12 @@ class CMultiCoreUnorderedJoinHelper: public CMultiCoreJoinHelperBase // IMulticoreIntercept impl. virtual void addWork(CThorExpandingRowArray *lgroup,CThorExpandingRowArray *rgroup) { + noteGroupSizes(lgroup, rgroup); cWorkItem *item = new cWorkItem(activity, lgroup, rgroup); workqueue.enqueue(item); } diff --git a/thorlcr/activities/msort/thsortu.hpp b/thorlcr/activities/msort/thsortu.hpp index eef6aa1c929..006b3f4c7b6 100644 --- a/thorlcr/activities/msort/thsortu.hpp +++ b/thorlcr/activities/msort/thsortu.hpp @@ -60,18 +60,49 @@ interface IJoinHelper: public IRowStream virtual rowcount_t getRhsProgress() const = 0; virtual const void *nextRow() = 0; virtual void stop() = 0; + virtual void gatherStats(CRuntimeStatisticCollection & stats) const = 0; }; IJoinHelper *createJoinHelper(CActivityBase &activity, IHThorJoinArg *helper, IThorRowInterfaces *rowIf, bool parallelmatch, bool unsortedoutput); IJoinHelper *createSelfJoinHelper(CActivityBase &activity, IHThorJoinArg *helper, IThorRowInterfaces *rowIf, bool parallelmatch, bool unsortedoutput); IJoinHelper *createDenormalizeHelper(CActivityBase &activity, IHThorDenormalizeArg *helper, IThorRowInterfaces *rowIf); - - ILimitedCompareHelper *createLimitedCompareHelper(); - - - +//Included here so this can be shared between join and lookup join. +class JoinMatchStats +{ +public: + void gatherStats(CRuntimeStatisticCollection & stats) const + { + //Left and right progress could be added here. 
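+        //Only non-zero values are recorded, so joins that never noted a group contribute no statistics here.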
+ if (maxLeftGroupSize) + stats.addStatistic(StNumMatchLeftRowsMax, maxLeftGroupSize); + if (maxRightGroupSize) + stats.addStatistic(StNumMatchRightRowsMax, maxRightGroupSize); + if (numMatchCandidates) + stats.addStatistic(StNumMatchCandidates, numMatchCandidates); + if (maxMatchCandidates) + stats.addStatistic(StNumMatchCandidatesMax, maxMatchCandidates); + } + + void noteGroup(rowcount_t numLeft, rowcount_t numRight) + { + rowcount_t numCandidates = numLeft * numRight; + if (numLeft > maxLeftGroupSize) + maxLeftGroupSize = numLeft; + if (numRight > maxRightGroupSize) + maxRightGroupSize = numRight; + numMatchCandidates += numCandidates; + if (numCandidates > maxMatchCandidates) + maxMatchCandidates = numCandidates; + } + +public: + stat_type maxLeftGroupSize = 0; + stat_type maxRightGroupSize = 0; + stat_type numMatchCandidates = 0; + stat_type maxMatchCandidates = 0; +}; #endif diff --git a/thorlcr/activities/selfjoin/thselfjoinslave.cpp b/thorlcr/activities/selfjoin/thselfjoinslave.cpp index 8a951a8fe06..5b76eba370a 100644 --- a/thorlcr/activities/selfjoin/thselfjoinslave.cpp +++ b/thorlcr/activities/selfjoin/thselfjoinslave.cpp @@ -195,6 +195,7 @@ class SelfJoinSlaveActivity : public CSlaveActivity } { CriticalBlock b(joinHelperCrit); + joinhelper->gatherStats(inactiveStats); joinhelper.clear(); } if (strm) @@ -231,8 +232,12 @@ class SelfJoinSlaveActivity : public CSlaveActivity { PARENT::gatherActiveStats(activeStats); CriticalBlock b(joinHelperCrit); - rowcount_t p = joinhelper?joinhelper->getLhsProgress():0; - activeStats.setStatistic(StNumLeftRows, p); + if (joinhelper) + { + joinhelper->gatherStats(activeStats); + rowcount_t p = joinhelper->getLhsProgress(); + activeStats.setStatistic(StNumLeftRows, p); + } mergeStats(activeStats, sorter, spillStatistics); // No danger of a race with reset() because that never replaces a valid sorter } }; diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index 48dc1231ee1..9bed2fdc36b 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -83,8 +83,10 @@ const StatisticsMapping indexReadActivityStatistics({StNumRowsProcessed}, indexR const StatisticsMapping indexWriteActivityStatistics({StPerReplicated, StNumLeafCacheAdds, StNumNodeCacheAdds, StNumBlobCacheAdds }, basicActivityStatistics, diskWriteRemoteStatistics); const StatisticsMapping keyedJoinActivityStatistics({ StNumIndexAccepted, StNumPreFiltered, StNumDiskSeeks, StNumDiskAccepted, StNumDiskRejected}, basicActivityStatistics, indexReadFileStatistics); const StatisticsMapping loopActivityStatistics({StNumIterations}, basicActivityStatistics); -const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, spillStatistics, basicActivityStatistics); -const StatisticsMapping joinActivityStatistics({StNumLeftRows, StNumRightRows}, basicActivityStatistics, spillStatistics); +const StatisticsMapping commonJoinActivityStatistics({StNumMatchLeftRowsMax, StNumMatchRightRowsMax, StNumMatchCandidates, StNumMatchCandidatesMax}, basicActivityStatistics); +const StatisticsMapping allJoinActivityStatistics({}, commonJoinActivityStatistics); +const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, spillStatistics, commonJoinActivityStatistics); +const StatisticsMapping joinActivityStatistics({StNumLeftRows, StNumRightRows}, commonJoinActivityStatistics, spillStatistics); const StatisticsMapping 
diskReadActivityStatistics({StNumDiskRowsRead, }, basicActivityStatistics, diskReadRemoteStatistics); const StatisticsMapping diskWriteActivityStatistics({StPerReplicated}, basicActivityStatistics, diskWriteRemoteStatistics); const StatisticsMapping sortActivityStatistics({}, basicActivityStatistics, spillStatistics); diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index d760f3d06da..51d3a11043e 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -152,6 +152,7 @@ extern graph_decl const StatisticsMapping indexReadActivityStatistics; extern graph_decl const StatisticsMapping indexWriteActivityStatistics; extern graph_decl const StatisticsMapping joinActivityStatistics; extern graph_decl const StatisticsMapping keyedJoinActivityStatistics; +extern graph_decl const StatisticsMapping allJoinActivityStatistics; extern graph_decl const StatisticsMapping lookupJoinActivityStatistics; extern graph_decl const StatisticsMapping loopActivityStatistics; extern graph_decl const StatisticsMapping diskReadActivityStatistics; From 88d27cbd564967c7ccc5ae46e602e0d5a3e294df Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 27 Jun 2024 14:49:43 +0100 Subject: [PATCH 26/31] HPCC-32169 Remove stale removePhysicalFiles code Signed-off-by: Jake Smith --- dali/base/dadfs.cpp | 91 +-------------------------------------------- dali/base/dadfs.hpp | 3 -- 2 files changed, 2 insertions(+), 92 deletions(-) diff --git a/dali/base/dadfs.cpp b/dali/base/dadfs.cpp index f64f40c9ca2..48ec2a1bb63 100644 --- a/dali/base/dadfs.cpp +++ b/dali/base/dadfs.cpp @@ -232,6 +232,8 @@ extern da_decl cost_type calcDiskWriteCost(const StringArray & clusters, stat_ty return writeCost; } +// JCSMORE - I suspect this function should be removed/deprecated. It does not deal with dirPerPart or striping. 
+// makePhysicalPartName supports both, but does not deal with groups/endpoints) RemoteFilename &constructPartFilename(IGroup *grp,unsigned partno,unsigned partmax,const char *name,const char *partmask,const char *partdir,unsigned copy,ClusterPartDiskMapSpec &mspec,RemoteFilename &rfn) { partno--; @@ -12008,95 +12010,6 @@ void CDistributedFileDirectory::setDefaultPreferredClusters(const char *clusters defprefclusters.set(clusters); } -bool removePhysicalFiles(IGroup *grp,const char *_filemask,unsigned short port,ClusterPartDiskMapSpec &mspec,IMultiException *mexcept) -{ - // TBD this won't remove repeated parts - - - PROGLOG("removePhysicalFiles(%s)",_filemask); - if (!isAbsolutePath(_filemask)) - throw MakeStringException(-1,"removePhysicalFiles: Filename %s must be complete path",_filemask); - - size32_t l = strlen(_filemask); - while (l&&isdigit(_filemask[l-1])) - l--; - unsigned width=0; - if (l&&(_filemask[l-1]=='_')) - width = atoi(_filemask+l); - if (!width) - width = grp->ordinality(); - - CriticalSection errcrit; - class casyncfor: public CAsyncFor - { - unsigned short port; - CriticalSection &errcrit; - IMultiException *mexcept; - unsigned width; - StringAttr filemask; - IGroup *grp; - ClusterPartDiskMapSpec &mspec; - public: - bool ok; - casyncfor(IGroup *_grp,const char *_filemask,unsigned _width,unsigned short _port,ClusterPartDiskMapSpec &_mspec,IMultiException *_mexcept,CriticalSection &_errcrit) - : mspec(_mspec),filemask(_filemask),errcrit(_errcrit) - { - grp = _grp; - port = _port; - ok = true; - mexcept = _mexcept; - width = _width; - } - void Do(unsigned i) - { - for (unsigned copy = 0; copy < 2; copy++) // ** TBD - { - RemoteFilename rfn; - constructPartFilename(grp,i+1,width,NULL,filemask,"",copy>0,mspec,rfn); - if (port) - rfn.setPort(port); // if daliservix - Owned partfile = createIFile(rfn); - StringBuffer eps; - try - { - unsigned start = msTick(); -#if 1 - if (partfile->remove()) { - PROGLOG("Removed '%s'",partfile->queryFilename()); - unsigned t = msTick()-start; - if (t>5*1000) - DBGLOG("Removing %s from %s took %ds", partfile->queryFilename(), rfn.queryEndpoint().getEndpointHostText(eps).str(), t/1000); - } - else - IWARNLOG("Failed to remove file part %s from %s", partfile->queryFilename(),rfn.queryEndpoint().getEndpointHostText(eps).str()); -#else - if (partfile->exists()) - PROGLOG("Would remove '%s'",partfile->queryFilename()); -#endif - - } - catch (IException *e) - { - CriticalBlock block(errcrit); - if (mexcept) - mexcept->append(*e); - else { - StringBuffer s("Failed to remove file part "); - s.append(partfile->queryFilename()).append(" from "); - rfn.queryEndpoint().getEndpointHostText(s); - EXCLOG(e, s.str()); - e->Release(); - } - ok = false; - } - } - } - } afor(grp,_filemask,width,port,mspec,mexcept,errcrit); - afor.For(width,10,false,true); - return afor.ok; -} - - IDaliServer *createDaliDFSServer(IPropertyTree *config) { assertex(!daliDFSServer); // initialization problem diff --git a/dali/base/dadfs.hpp b/dali/base/dadfs.hpp index 003fca751c9..6cde9f34875 100644 --- a/dali/base/dadfs.hpp +++ b/dali/base/dadfs.hpp @@ -828,9 +828,6 @@ extern da_decl IDFPartFilter *createPartFilter(const char *filter); | '-' */ -extern da_decl bool removePhysicalFiles(IGroup *grp,const char *_filemask,unsigned short port, ClusterPartDiskMapSpec &mspec,IMultiException *mexcept); -// for removing orphaned files - // for server use interface IDaliServer; extern da_decl IDaliServer *createDaliDFSServer(IPropertyTree *config); // called for coven members From 
626baa0ee89a06aa797e453bf029f0dcf8d2644d Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Thu, 27 Jun 2024 10:05:39 -0400 Subject: [PATCH 27/31] HPCC-32170 ECL Watch v9 fix ZAP dialog relative time param corrects the RelativeTimeRangeBuffer request param on the ZAP dialog Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/forms/ZAPDialog.tsx | 10 +++++----- esp/src/src/nls/hpcc.ts | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/esp/src/src-react/components/forms/ZAPDialog.tsx b/esp/src/src-react/components/forms/ZAPDialog.tsx index 750e997efba..d43147c8f3c 100644 --- a/esp/src/src-react/components/forms/ZAPDialog.tsx +++ b/esp/src/src-react/components/forms/ZAPDialog.tsx @@ -67,7 +67,7 @@ interface ZAPDialogValues { StartDate?: string; EndDate?: string; }; - RelativeLogTimeRangeBuffer?: string; + RelativeTimeRangeBuffer?: string; LineLimit?: string; LineStartFrom?: string; SelectColumnMode?: ColumnMode; @@ -103,7 +103,7 @@ const defaultValues: ZAPDialogValues = { StartDate: "", EndDate: "", }, - RelativeLogTimeRangeBuffer: "", + RelativeTimeRangeBuffer: "", LineLimit: "10000", LineStartFrom: "0", SelectColumnMode: ColumnMode.DEFAULT, @@ -464,7 +464,7 @@ export const ZAPDialog: React.FunctionComponent = ({ rules={{ validate: { hasValue: (value, formValues) => { - if (value === "" && formValues.LogFilter.RelativeLogTimeRangeBuffer === "") { + if (value === "" && formValues.LogFilter.RelativeTimeRangeBuffer === "") { return nlsHPCC.LogFilterTimeRequired; } return true; @@ -496,14 +496,14 @@ export const ZAPDialog: React.FunctionComponent = ({ } /> Date: Fri, 7 Jun 2024 14:38:56 +0100 Subject: [PATCH 28/31] HPCC-32000 Spill stats for nsplitter StSizePeakEphemeralDisk, StSizePeakTempDisk, StNumSpills, and StSizeSpillFile for nsplitter implemented. Signed-off-by: Shamser Ahmed --- .../activities/nsplitter/thnsplitterslave.cpp | 8 +- thorlcr/master/thactivitymaster.cpp | 4 +- thorlcr/thorutil/thbuf.cpp | 96 ++++++++++++++----- thorlcr/thorutil/thbuf.hpp | 1 + thorlcr/thorutil/thormisc.cpp | 4 +- thorlcr/thorutil/thormisc.hpp | 2 + 6 files changed, 89 insertions(+), 26 deletions(-) diff --git a/thorlcr/activities/nsplitter/thnsplitterslave.cpp b/thorlcr/activities/nsplitter/thnsplitterslave.cpp index de22da08908..ca9ccd0d6df 100644 --- a/thorlcr/activities/nsplitter/thnsplitterslave.cpp +++ b/thorlcr/activities/nsplitter/thnsplitterslave.cpp @@ -152,7 +152,7 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf } } public: - NSplitterSlaveActivity(CGraphElementBase *_container) : CSlaveActivity(_container), writer(*this) + NSplitterSlaveActivity(CGraphElementBase *_container) : CSlaveActivity(_container, nsplitterActivityStatistics), writer(*this) { numOutputs = container.getOutputs(); connectedOutputSet.setown(createBitSet()); @@ -401,6 +401,12 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf if (sharedRowStream) sharedRowStream->cancel(); } + virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const override + { + PARENT::gatherActiveStats(activeStats); + if (sharedRowStream) + ::mergeStats(activeStats, sharedRowStream); + } // ISharedSmartBufferCallback impl. 
virtual void paged() { pagedOut = true; } virtual void blocked() diff --git a/thorlcr/master/thactivitymaster.cpp b/thorlcr/master/thactivitymaster.cpp index 82ca2d647a9..f80ee0bd258 100644 --- a/thorlcr/master/thactivitymaster.cpp +++ b/thorlcr/master/thactivitymaster.cpp @@ -170,7 +170,6 @@ class CGenericMasterGraphElement : public CMasterGraphElement case TAKcase: case TAKchildcase: case TAKdegroup: - case TAKsplit: case TAKproject: case TAKprefetchproject: case TAKprefetchcountproject: @@ -210,6 +209,9 @@ class CGenericMasterGraphElement : public CMasterGraphElement case TAKemptyaction: ret = new CMasterActivity(this); break; + case TAKsplit: + ret = new CMasterActivity(this, nsplitterActivityStatistics); + break; case TAKsoap_rowdataset: case TAKsoap_rowaction: case TAKsoap_datasetdataset: diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp index b1377f6db00..5ab144e8ee9 100644 --- a/thorlcr/thorutil/thbuf.cpp +++ b/thorlcr/thorutil/thbuf.cpp @@ -1753,6 +1753,10 @@ class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBu queryCOutput(c).reset(); inMemRows->reset(0); } + virtual unsigned __int64 getStatistic(StatisticKind kind) const override + { + return 0; + } friend class COutput; friend class CRowSet; }; @@ -2145,6 +2149,24 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase tempFileIO->setSize(0); tempFileOwner->noteSize(0); } + virtual unsigned __int64 getStatistic(StatisticKind kind) const override + { + switch (kind) + { + case StSizeSpillFile: + return tempFileIO->getStatistic(StSizeDiskWrite); + case StCycleDiskWriteIOCycles: + case StTimeDiskWriteIO: + case StSizeDiskWrite: + return 0; + case StNumSpills: + return 1; + case StTimeSpillElapsed: + return tempFileIO->getStatistic(StCycleDiskWriteIOCycles); + default: + return tempFileIO->getStatistic(kind); + } + } }; ISharedSmartBuffer *createSharedSmartDiskBuffer(CActivityBase *activity, const char *spillname, unsigned outputs, IThorRowInterfaces *rowIf) @@ -2433,7 +2455,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf totalInputRowsRead = 0; // not used until spilling begins, represents count of all rows read rowcount_t inMemTotalRows = 0; // whilst in memory, represents count of all rows seen CriticalSection readAheadCS; // ensure single reader (leader), reads ahead (updates rows/totalInputRowsRead/inMemTotalRows) - Owned iFile; + Owned tempFileOwner; Owned iFileIO; Owned outputStream; Linked compressHandler; @@ -2442,6 +2464,9 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfflush(); + tempFileOwner->noteSize(iFileIO->getStatistic(StSizeDiskWrite)); + ::mergeStats(inactiveStats, iFileIO); + iFileIO.clear(); + outputStream.clear(); + } } void createOutputStream() { // NB: Called once, when spilling starts. - auto res = createSerialOutputStream(iFile, compressHandler, options, numOutputs + 1); + tempFileOwner.setown(activity.createOwnedTempFile(baseTmpFilename)); + auto res = createSerialOutputStream(&(tempFileOwner->queryIFile()), compressHandler, options, numOutputs + 1); outputStream.setown(std::get<0>(res)); iFileIO.setown(std::get<1>(res)); totalInputRowsRead = inMemTotalRows; @@ -2517,7 +2549,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfflush(); totalInputRowsRead.fetch_add(newRowsWritten); - + tempFileOwner->noteSize(iFileIO->getStatistic(StSizeDiskWrite)); // JCSMORE - could track size written, and start new file at this point (e.g. 
every 100MB), // and track their starting points (by row #) in a vector // We could then tell if/when the readers catch up, and remove consumed files as they do. @@ -2528,9 +2560,10 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf(row)); } public: - explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) - : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), - meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()) + explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *_baseTmpFilename, ICompressHandler *_compressHandler) + : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), baseTmpFilename(_baseTmpFilename), + meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()), + inactiveStats(spillingWriteAheadStatistics) { assertex(input); @@ -2541,15 +2574,10 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfremove(); - } + closeWriter(); freeRows(); } void outputStopped(unsigned output) @@ -2568,15 +2596,15 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfqueryFilename(), tracing.str()); + activity.ActPrintLog("CSharedFullSpillingWriteAhead::outputStopped closing tempfile writer: %s %s", tempFileOwner->queryIFile().queryFilename(), tracing.str()); closeWriter(); - iFile->remove(); + tempFileOwner.clear(); } } } std::tuple getReadStream() // also pass back IFileIO for stats purposes { - return createSerialInputStream(iFile, compressHandler, options, numOutputs + 1); // +1 for writer + return createSerialInputStream(&(tempFileOwner->queryIFile()), compressHandler, options, numOutputs + 1); // +1 for writer } bool checkWriteAhead(rowcount_t &outputRowsAvailable) { @@ -2623,8 +2651,8 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf= options.inMemMaxMem) // too much in memory, spill { // NB: this will reset rowMemUsage, however, each reader will continue to consume rows until they catch up (or stop) - ActPrintLog(&activity, "Spilling to temp storage [file = %s, outputRowsAvailable = %" I64F "u, start = %" I64F "u, end = %" I64F "u, count = %u]", iFile->queryFilename(), outputRowsAvailable, inMemTotalRows - rows.size(), inMemTotalRows, (unsigned)rows.size()); createOutputStream(); + ActPrintLog(&activity, "Spilling to temp storage [file = %s, outputRowsAvailable = %" I64F "u, start = %" I64F "u, end = %" I64F "u, count = %u]", tempFileOwner->queryIFile().queryFilename(), outputRowsAvailable, inMemTotalRows - rows.size(), inMemTotalRows, (unsigned)rows.size()); return false; } @@ -2686,11 +2714,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfremove(); - } + closeWriter(); for (auto &output: outputs) output->reset(); freeRows(); @@ -2701,6 +2725,32 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfgetStatistic(useKind); + v += inactiveStats.getStatisticValue(useKind); + return v; + } }; 
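
//Editorial sketch (assumed caller, not part of the patch): consumers read the combined figures
//through the interface accessor; getStatistic() folds the live iFileIO counters into the totals
//that closeWriter() has already merged into inactiveStats, e.g.:
//    unsigned __int64 spillSize = sharedRowStream->getStatistic(StSizeSpillFile);
//    unsigned __int64 numSpills = sharedRowStream->getStatistic(StNumSpills);
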
ISharedRowStreamReader *createSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &options, IThorRowInterfaces *_rowIf, const char *tempFileName, ICompressHandler *compressHandler)
diff --git a/thorlcr/thorutil/thbuf.hpp b/thorlcr/thorutil/thbuf.hpp
index 1750f63b007..fb5a66af8fa 100644
--- a/thorlcr/thorutil/thbuf.hpp
+++ b/thorlcr/thorutil/thbuf.hpp
@@ -87,6 +87,7 @@ interface ISharedRowStreamReader : extends IInterface
    virtual IRowStream *queryOutput(unsigned output) = 0;
    virtual void cancel()=0;
    virtual void reset() = 0;
+   virtual unsigned __int64 getStatistic(StatisticKind kind) const = 0;
};

diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp
index 48dc1231ee1..4d36f2e1686 100644
--- a/thorlcr/thorutil/thormisc.cpp
+++ b/thorlcr/thorutil/thormisc.cpp
@@ -92,8 +92,10 @@ const StatisticsMapping graphStatistics({StNumExecutions, StSizeSpillFile, StSiz
 const StatisticsMapping diskReadPartStatistics({StNumDiskRowsRead}, diskReadRemoteStatistics);
 const StatisticsMapping indexDistribActivityStatistics({}, basicActivityStatistics, jhtreeCacheStatistics);
 const StatisticsMapping soapcallActivityStatistics({}, basicActivityStatistics, soapcallStatistics);
-const StatisticsMapping hashDedupActivityStatistics({StNumSpills, StSizeSpillFile, StTimeSortElapsed, StSizePeakTempDisk}, diskWriteRemoteStatistics, basicActivityStatistics);
+const StatisticsMapping hashDedupActivityStatistics({}, spillStatistics, diskWriteRemoteStatistics, basicActivityStatistics);
 const StatisticsMapping hashDistribActivityStatistics({StNumLocalRows, StNumRemoteRows, StSizeRemoteWrite}, basicActivityStatistics);
+const StatisticsMapping nsplitterActivityStatistics({}, spillStatistics, basicActivityStatistics);
+const StatisticsMapping spillingWriteAheadStatistics({}, spillStatistics);

 MODULE_INIT(INIT_PRIORITY_STANDARD)
 {
diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp
index d760f3d06da..4ba1cc18664 100644
--- a/thorlcr/thorutil/thormisc.hpp
+++ b/thorlcr/thorutil/thormisc.hpp
@@ -166,6 +166,8 @@ extern graph_decl const StatisticsMapping soapcallActivityStatistics;
 extern graph_decl const StatisticsMapping indexReadFileStatistics;
 extern graph_decl const StatisticsMapping hashDedupActivityStatistics;
 extern graph_decl const StatisticsMapping hashDistribActivityStatistics;
+extern graph_decl const StatisticsMapping nsplitterActivityStatistics;
+extern graph_decl const StatisticsMapping spillingWriteAheadStatistics;

 class BooleanOnOff
 {

From 881d5e2cf955d5f44763fc895b9d0d1508946562 Mon Sep 17 00:00:00 2001
From: Rodrigo Pastrana
Date: Fri, 3 May 2024 17:28:52 -0400
Subject: [PATCH 29/31] HPCC-29546 Grafana/loki logaccess plugin

- Provides Grafana/loki curl based logaccess plugin
- Updates helm/managed/logging/loki-stack/README
- Provides mechanism to create grafana-hpcc logaccess secret
- Adds encodeCSV jstring logic
- Adds encodeCSV unittest
- Attempts to minimize StringBuffer resizes
- Adds sortby support
- Enables csv header reporting

Signed-off-by: Rodrigo Pastrana
---
 helm/managed/logging/loki-stack/README.md     |  63 +-
 .../create-grafana-logaccess-secret.sh        |  69 ++
 .../loki-stack/grafana-hpcc-logaccess.yaml    |  43 +
 .../loki-stack/secrets-templates/password     |   1 +
 .../loki-stack/secrets-templates/username     |   1 +
 system/jlib/jlog.cpp                          |  35 +-
 system/jlib/jstring.cpp                       |  36 +
 system/jlib/jstring.hpp                       |   5 +
 system/logaccess/CMakeLists.txt               |   1 +
 system/logaccess/Grafana/CMakeLists.txt       |  19 +
 .../Grafana/CurlClient/CMakeLists.txt         |  45 +
 .../Grafana/CurlClient/GrafanaCurlClient.cpp  | 867 ++++++++++++++++++
 .../Grafana/CurlClient/GrafanaCurlClient.hpp  | 108 +++
 testing/unittests/jlibtests.cpp               |  25 +-
 14 files changed, 1293 insertions(+), 25 deletions(-)
 create mode 100755 helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh
 create mode 100644 helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml
 create mode 100644 helm/managed/logging/loki-stack/secrets-templates/password
 create mode 100644 helm/managed/logging/loki-stack/secrets-templates/username
 create mode 100644 system/logaccess/Grafana/CMakeLists.txt
 create mode 100644 system/logaccess/Grafana/CurlClient/CMakeLists.txt
 create mode 100644 system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp
 create mode 100644 system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp

diff --git a/helm/managed/logging/loki-stack/README.md b/helm/managed/logging/loki-stack/README.md
index 266288393c1..6c4714e898c 100644
--- a/helm/managed/logging/loki-stack/README.md
+++ b/helm/managed/logging/loki-stack/README.md
@@ -78,4 +78,65 @@ The default Loki-Stack chart will not declare permanent storage and therefore lo
 loki:
   persistence:
     enabled: true
-``` \ No newline at end of file
+```
+
+## Configure HPCC logAccess
+The logAccess feature allows HPCC to query and package relevant logs for various features such as ZAP report, WorkUnit helper logs, ECLWatch log viewer, etc.
+
+### Provide target Grafana/Loki access information
+
+HPCC logAccess requires access to the Grafana username/password. Those values must be provided via a secure secret object.
+
+The secret is expected to be in the 'esp' category, and be named 'grafana-logaccess'. The following key-value pairs are required (key names must be spelled exactly as shown here):
+
+    username - This should contain the Grafana username
+    password - This should contain the Grafana password
+
+The included 'create-grafana-logaccess-secret.sh' helper can be used to create the necessary secret.
+
+Example scripted secret creation command (assuming ./secrets-templates contains files named exactly as the above keys):
+
+```
+ create-grafana-logaccess-secret.sh -d HPCC-Platform/helm/managed/logging/loki-stack/secrets-templates/ -n hpcc
+```
+
+Otherwise, users can create the secret manually.
+
+Example manual secret creation command (assuming ./secrets-templates contains files named exactly as the above keys):
+
+```
+ kubectl create secret generic grafana-logaccess --from-file=HPCC-Platform/helm/managed/logging/loki-stack/secrets-templates/ -n hpcc
+```
+
+### Configure HPCC logAccess
+
+The target HPCC deployment should be directed to use the desired Grafana endpoint with the Loki datasource, and the newly created secret by providing appropriate logAccess values (such as ./grafana-hpcc-logaccess.yaml).
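+
+The secret created above must be visible in the deployment's namespace before installing; a quick editorial sanity check (assumes the 'hpcc' namespace used in the examples):
+
+```
+ kubectl get secret grafana-logaccess -n hpcc
+```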
+
+Example use:
+
+```
+ helm install myhpcc hpcc/hpcc -f HPCC-Platform/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml
+```
+
+####
+
+The grafana hpcc logaccess values should provide Grafana connection information, such as the host and port; the Loki datasource where the logs reside; the k8s namespace under which the logs were created (non-default namespace highly recommended); and the hpcc component log format (table|json|xml)
+
+```
+Example use:
+ global:
+   logAccess:
+     name: "Grafana/loki stack log access"
+     type: "GrafanaCurl"
+     connection:
+       protocol: "http"
+       host: "myloki4hpcclogs-grafana.default.svc.cluster.local"
+       port: 3000
+     datasource:
+       id: "1"
+       name: "Loki"
+     namespace:
+       name: "hpcc"
+     logFormat:
+       type: "json"
+```
diff --git a/helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh b/helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh
new file mode 100755
index 00000000000..f4c7efbed09
--- /dev/null
+++ b/helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+WORK_DIR=$(dirname $0)
+source ${WORK_DIR}/env-loganalytics
+
+k8scommand="kubectl"
+secretname="grafana-logaccess"
+secretsdir="${WORK_DIR}/secrets-templates"
+namespace="default"
+
+usage()
+{
+    echo "Creates necessary k8s secret used by HPCC's logAccess to access Loki data source through Grafana"
+    echo "> create-grafana-logaccess-secret.sh [Options]"
+    echo ""
+    echo "Options:"
+    echo "-d       Specifies directory containing required secret values in self named files."
+    echo "         Defaults to ${secretsdir}"
+    echo "-h       Print Usage message"
+    echo "-n       Specifies namespace for secret"
+    echo ""
+    echo "Requires directory containing secret values in dedicated files."
+    echo "Defaults to ${secretsdir} if not specified via -d option."
+    echo ""
+    echo "Expected directory structure:"
+    echo "${secretsdir}/"
+    echo "   password - Should contain Grafana password"
+    echo "   username - Should contain Grafana user name"
+}
+
+while [ "$#" -gt 0 ]; do
+    arg=$1
+    case "${arg}" in
+        -h)
+            usage
+            exit
+            ;;
+        -d) shift
+            secretsdir=$1
+            ;;
+        -n) shift
+            namespace=$1
+            ;;
+    esac
+    shift
+done
+
+echo "Creating '${namespace}/${secretname}' secret."
+
+command -v ${k8scommand} >/dev/null 2>&1 || { echo >&2 "Aborting - '${k8scommand}' not found!"; exit 1; }
+
+errormessage=$(${k8scommand} get secret ${secretname} -n ${namespace} 2>&1)
+if [[ $? -eq 0 ]]
+then
+    echo "WARNING: Target secret '${namespace}/${secretname}' already exists! Delete it and re-run if secret update desired."
+    echo "${errormessage}"
+    exit 1
+fi
+
+errormessage=$(${k8scommand} create secret generic ${secretname} --from-file=${secretsdir} -n ${namespace} )
+if [[ $? -ne 0 ]]
+then
+    echo "Error creating: Target secret '${namespace}/${secretname}'!"
+    echo >&2
+    usage
+    exit 1
+else
+    echo "Target secret '${namespace}/${secretname}' successfully created!"
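+    # (editorial) display the stored secret as confirmation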
+ ${k8scommand} get secret ${secretname} -n ${namespace} +fi diff --git a/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml b/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml new file mode 100644 index 00000000000..70d09058960 --- /dev/null +++ b/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml @@ -0,0 +1,43 @@ +# Configures HPCC logAccess to target grafana/loki +global: + logAccess: + name: "Grafana/loki stack log access" + type: "GrafanaCurl" + connection: + protocol: "http" + host: "myloki4hpcclogs-grafana.default.svc.cluster.local" + port: 3000 + datasource: + id: "1" + name: "Loki" + namespace: + name: "hpcc" + logFormat: + type: "json" + logMaps: + - type: "global" + searchColumn: "log" + columnMode: "DEFAULT" + - type: "components" + storeName: "stream" + searchColumn: "component" + columnMode: "MIN" + columnType: "string" + - type: "timestamp" + storeName: "values" + searchColumn: "time" + columnMode: "ALL" + columnType: "datetime" + - type: "pod" + storeName: "stream" + searchColumn: "pod" + columnMode: "ALL" + columnType: "string" +secrets: + esp: + grafana-logaccess: "grafana-logaccess" +vaults: + esp: + - name: my-grafana-logaccess-vault + url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/esp/${secret} + kind: kv-v2 diff --git a/helm/managed/logging/loki-stack/secrets-templates/password b/helm/managed/logging/loki-stack/secrets-templates/password new file mode 100644 index 00000000000..6b3a9a39380 --- /dev/null +++ b/helm/managed/logging/loki-stack/secrets-templates/password @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/helm/managed/logging/loki-stack/secrets-templates/username b/helm/managed/logging/loki-stack/secrets-templates/username new file mode 100644 index 00000000000..f77b00407e0 --- /dev/null +++ b/helm/managed/logging/loki-stack/secrets-templates/username @@ -0,0 +1 @@ +admin \ No newline at end of file diff --git a/system/jlib/jlog.cpp b/system/jlib/jlog.cpp index 4b7fe5b3ec0..2d8bf718e3e 100644 --- a/system/jlib/jlog.cpp +++ b/system/jlib/jlog.cpp @@ -3213,32 +3213,21 @@ IRemoteLogAccess *queryRemoteLogAccessor() { const char * simulatedGlobalYaml = R"!!(global: logAccess: - name: "Azure LogAnalytics LogAccess" - type: "AzureLogAnalyticsCurl" + name: "Grafana/loki stack log access" + type: "GrafanaCurl" connection: #workspaceID: "ef060646-ef24-48a5-b88c-b1f3fbe40271" - workspaceID: "XYZ" #ID of the Azure LogAnalytics workspace to query logs from + #workspaceID: "XYZ" #ID of the Azure LogAnalytics workspace to query logs from #tenantID: "ABC" #The Tenant ID, required for KQL API access - clientID: "DEF" #ID of Azure Active Directory registered application with api.loganalytics.io access - logMaps: - - type: "global" - storeName: "ContainerLog" - searchColumn: "LogEntry" - timeStampColumn: "hpcc_log_timestamp" - - type: "workunits" - storeName: "ContainerLog" - searchColumn: "hpcc_log_jobid" - - type: "components" - searchColumn: "ContainerID" - - type: "audience" - searchColumn: "hpcc_log_audience" - - type: "class" - searchColumn: "hpcc_log_class" - - type: "instance" - storeName: "ContainerInventory" - searchColumn: "Name" - - type: "host" - searchColumn: "Computer" + #clientID: "DEF" #ID of Azure Active Directory registered application with api.loganalytics.io access + protocol: "http" + host: "localhost" + port: "3000" + datasource: + id: "1" + name: "Loki" + namespace: + name: "hpcc" )!!"; Owned testTree = createPTreeFromYAMLString(simulatedGlobalYaml, ipt_none, ptr_ignoreWhiteSpace, 
nullptr); logAccessPluginConfig.setown(testTree->getPropTree("global/logAccess")); diff --git a/system/jlib/jstring.cpp b/system/jlib/jstring.cpp index 5a1be75faf6..50951938ae5 100644 --- a/system/jlib/jstring.cpp +++ b/system/jlib/jstring.cpp @@ -2367,6 +2367,42 @@ StringBuffer &encodeJSON(StringBuffer &s, const char *value) return encodeJSON(s, strlen(value), value); } +inline StringBuffer & encodeCSVChar(StringBuffer & encodedCSV, char ch) +{ + byte next = ch; + switch (next) + { + case '\"': + encodedCSV.append("\""); + encodedCSV.append(next); + break; + //Any other character that needs to be escaped? + default: + encodedCSV.append(next); + break; + } + return encodedCSV; +} + +StringBuffer & encodeCSVColumn(StringBuffer & encodedCSV, unsigned size, const char *rawCSVCol) +{ + if (!rawCSVCol) + return encodedCSV; + encodedCSV.ensureCapacity(size+2); // Minimum size that will be written + encodedCSV.append("\""); + for (size32_t i = 0; i < size; i++) + encodeCSVChar(encodedCSV, rawCSVCol[i]); + encodedCSV.append("\""); + return encodedCSV; +} + +StringBuffer & encodeCSVColumn(StringBuffer & encodedCSV, const char *rawCSVCol) +{ + if (!rawCSVCol) + return encodedCSV; + return encodeCSVColumn(encodedCSV, strlen(rawCSVCol), rawCSVCol); +} + bool checkUnicodeLiteral(char const * str, unsigned length, unsigned & ep, StringBuffer & msg) { unsigned i; diff --git a/system/jlib/jstring.hpp b/system/jlib/jstring.hpp index 5a153555041..b3fe7651daf 100644 --- a/system/jlib/jstring.hpp +++ b/system/jlib/jstring.hpp @@ -479,6 +479,11 @@ inline StringBuffer &delimitJSON(StringBuffer &s, bool addNewline=false, bool es return s; } +/* +* Encodes a CSV column, not an entire CSV record +*/ +jlib_decl StringBuffer &encodeCSVColumn(StringBuffer &s, const char *value); + jlib_decl StringBuffer &encodeJSON(StringBuffer &s, const char *value); jlib_decl StringBuffer &encodeJSON(StringBuffer &s, unsigned len, const char *value); diff --git a/system/logaccess/CMakeLists.txt b/system/logaccess/CMakeLists.txt index 80ea08d0281..51c349ebf34 100644 --- a/system/logaccess/CMakeLists.txt +++ b/system/logaccess/CMakeLists.txt @@ -19,4 +19,5 @@ IF(NOT CLIENTTOOLS_ONLY) HPCC_ADD_SUBDIRECTORY (ElasticStack) ENDIF() HPCC_ADD_SUBDIRECTORY (Azure) + HPCC_ADD_SUBDIRECTORY (Grafana) ENDIF() diff --git a/system/logaccess/Grafana/CMakeLists.txt b/system/logaccess/Grafana/CMakeLists.txt new file mode 100644 index 00000000000..2a6ea152a52 --- /dev/null +++ b/system/logaccess/Grafana/CMakeLists.txt @@ -0,0 +1,19 @@ +############################################################################### +# HPCC SYSTEMS software Copyright (C) 2022 HPCC Systems®. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +IF(NOT CLIENTTOOLS_ONLY) + HPCC_ADD_SUBDIRECTORY (CurlClient) +ENDIF() diff --git a/system/logaccess/Grafana/CurlClient/CMakeLists.txt b/system/logaccess/Grafana/CurlClient/CMakeLists.txt new file mode 100644 index 00000000000..a749dacd715 --- /dev/null +++ b/system/logaccess/Grafana/CurlClient/CMakeLists.txt @@ -0,0 +1,45 @@ +############################################################################### +# HPCC SYSTEMS software Copyright (C) 2022 HPCC Systems®. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +project(GrafanaCurllogaccess) + +# Required installed libraries +find_package(CURL REQUIRED) + +set(srcs + ${CMAKE_CURRENT_SOURCE_DIR}/GrafanaCurlClient.cpp +) + +include_directories( + ${HPCC_SOURCE_DIR}/system/include + ${HPCC_SOURCE_DIR}/system/jlib + ${CURL_INCLUDE_DIR} +) + +add_definitions(-DGRAFANA_CURL_LOGACCESS_EXPORTS) + +HPCC_ADD_LIBRARY(${PROJECT_NAME} SHARED ${srcs}) + +target_link_libraries(${PROJECT_NAME} + PRIVATE jlib + PRIVATE ${CURL_LIBRARIES} +) + +install(TARGETS ${PROJECT_NAME} + RUNTIME DESTINATION ${EXEC_DIR} + LIBRARY DESTINATION ${LIB_DIR} + CALC_DEPS +) diff --git a/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp new file mode 100644 index 00000000000..5ada0237838 --- /dev/null +++ b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp @@ -0,0 +1,867 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+############################################################################## */
+
+#include "GrafanaCurlClient.hpp"
+
+#include "platform.h"
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#ifdef _CONTAINERIZED
+//In containerized world, most likely localhost is not the target grafana host
+static constexpr const char * DEFAULT_GRAFANA_HOST = "mycluster-grafana.default.svc.cluster.local";
+#else
+//In baremetal, localhost is as good a guess as any
+static constexpr const char * DEFAULT_GRAFANA_HOST = "localhost";
+#endif
+
+static constexpr const char * DEFAULT_GRAFANA_PROTOCOL = "http";
+static constexpr const char * DEFAULT_GRAFANA_PORT = "3000";
+static constexpr const char * DEFAULT_DATASOURCE_ID = "1";
+
+static constexpr const char * defaultNamespaceStream = "default";
+static constexpr const char * defaultExpectedLogFormat = "table"; //"json";
+
+static constexpr const char * logMapIndexPatternAtt = "@storeName";
+static constexpr const char * logMapSearchColAtt = "@searchColumn";
+static constexpr const char * logMapTimeStampColAtt = "@timeStampColumn";
+static constexpr const char * logMapKeyColAtt = "@keyColumn";
+static constexpr const char * logMapDisableJoinsAtt = "@disableJoins";
+
+static constexpr std::size_t defaultMaxRecordsPerFetch = 100;
+
+/*
+* To be used as a callback for curl_easy_setopt to capture the response from a curl request
+*/
+size_t stringCallback(char *contents, size_t size, size_t nmemb, void *userp)
+{
+    ((std::string*)userp)->append((char*)contents, size * nmemb);
+    return size * nmemb;
+}
+
+/*
+* Constructs a curl based client request based on the provided connection string and targetURI
+* The response is reported in the readBuffer
+* Uses stringCallback to handle successful curl requests
+*/
+void GrafanaLogAccessCurlClient::submitQuery(std::string & readBuffer, const char * targetURI)
+{
+    if (isEmptyString(m_grafanaConnectionStr.str()))
+        throw makeStringExceptionV(-1, "%s Cannot submit query, empty connection string detected!", COMPONENT_NAME);
+
+    if (isEmptyString(targetURI))
+        throw makeStringExceptionV(-1, "%s Cannot submit query, empty request URI detected!", COMPONENT_NAME);
+
+    OwnedPtrCustomFree curlHandle = curl_easy_init();
+    if (curlHandle)
+    {
+        CURLcode curlResponseCode;
+        OwnedPtrCustomFree headers = nullptr;
+        char curlErrBuffer[CURL_ERROR_SIZE];
+        curlErrBuffer[0] = '\0';
+
+        VStringBuffer requestURL("%s%s%s", m_grafanaConnectionStr.str(), m_dataSourcesAPIURI.str(), targetURI);
+
+        if (curl_easy_setopt(curlHandle, CURLOPT_URL, requestURL.str()) != CURLE_OK)
+            throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_URL' (%s)!", COMPONENT_NAME, requestURL.str());
+
+        int curloptretcode = curl_easy_setopt(curlHandle, CURLOPT_HTTPAUTH, (long)CURLAUTH_BASIC);
+        if (curloptretcode != CURLE_OK)
+        {
+            if (curloptretcode == CURLE_UNKNOWN_OPTION)
+                throw makeStringExceptionV(-1, "%s: Log query request: UNKNOWN option 'CURLOPT_HTTPAUTH'!", COMPONENT_NAME);
+            if (curloptretcode == CURLE_NOT_BUILT_IN)
+                throw makeStringExceptionV(-1, "%s: Log query request: bitmask specified not built-in! 'CURLOPT_HTTPAUTH'/'CURLAUTH_BASIC'!", COMPONENT_NAME);
+
+            throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_HTTPAUTH':'CURLAUTH_BASIC'!", COMPONENT_NAME);
+        }
+
+        //allow anonymous connections??
+        if (isEmptyString(m_grafanaUserName.str()))
+            throw makeStringExceptionV(-1, "%s: Log query request: Empty user name detected!", COMPONENT_NAME);
+
+        //allow non-secure connections??
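+        //Editorial note: CURLAUTH_BASIC sends the credentials with every request, so an
+        //'https' connection protocol is advisable for any non-test deployment.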
+ if (isEmptyString(m_grafanaPassword.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Empty password detected!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_USERNAME, m_grafanaUserName.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_USERNAME' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_PASSWORD, m_grafanaPassword.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_PASSWORD' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_POST, 0) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not disable 'CURLOPT_POST' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_HTTPGET, 1) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_HTTPGET' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_NOPROGRESS, 1) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_NOPROGRESS' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_WRITEFUNCTION, stringCallback) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_WRITEFUNCTION' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_WRITEDATA, &readBuffer) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_WRITEDATA' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_USERAGENT, "HPCC Systems LogAccess client") != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_USERAGENT' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_ERRORBUFFER, curlErrBuffer) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_ERRORBUFFER' option!", COMPONENT_NAME); + + //If we set CURLOPT_FAILONERROR, we'll miss the actual error message returned in the response + //(curl_easy_setopt(curlHandle, CURLOPT_FAILONERROR, 1L) != CURLE_OK) // non HTTP Success treated as error + + try + { + curlResponseCode = curl_easy_perform(curlHandle); + } + catch (...) + { + throw makeStringExceptionV(-1, "%s LogQL request: Unknown libcurl error", COMPONENT_NAME); + } + + long response_code; + curl_easy_getinfo(curlHandle, CURLINFO_RESPONSE_CODE, &response_code); + + if (curlResponseCode != CURLE_OK || response_code != 200) + { + throw makeStringExceptionV(-1,"%s Error (%d): '%s'", COMPONENT_NAME, curlResponseCode, (readBuffer.length() != 0 ? readBuffer.c_str() : curlErrBuffer[0] ? 
curlErrBuffer : "Unknown Error")); + } + else if (readBuffer.length() == 0) + throw makeStringExceptionV(-1, "%s LogQL request: Empty response!", COMPONENT_NAME); + } +} + +/* + * This method consumes a JSON formatted data source response from a successful Grafana Loki query + * It extracts the data source information and populates the m_targetDataSource structure and constructs + * the URI to access the Loki API + * + * If this operation fails, an exception is thrown + */ +void GrafanaLogAccessCurlClient::processDatasourceJsonResp(const std::string & retrievedDocument) +{ + Owned tree = createPTreeFromJSONString(retrievedDocument.c_str()); + if (!tree) + throw makeStringExceptionV(-1, "%s: Could not parse data source query response!", COMPONENT_NAME); + + if (tree->hasProp("uid")) + m_targetDataSource.uid.set(tree->queryProp("uid")); + if (tree->hasProp("name")) + m_targetDataSource.name.set(tree->queryProp("name")); + if (tree->hasProp("type")) + m_targetDataSource.type.set(tree->queryProp("type")); + if (tree->hasProp("id")) + m_targetDataSource.id.set(tree->queryProp("id")); + + //Other elements that could be extracted from the data source response: + //basicAuthPassword, version, basicAuthUser, access=proxy, isDefault, withCredentials, readOnly, database + //url=http://myloki4hpcclogs:3100, secureJsonFields, user, password, basicAuth, jsonData, typeLogoUrl + + if (isEmptyString(m_targetDataSource.id.get())) + throw makeStringExceptionV(-1, "%s: DataSource query response does not include 'id'", COMPONENT_NAME); + if (isEmptyString(m_targetDataSource.type.get())) + throw makeStringExceptionV(-1, "%s: DataSource query response does not include 'type'", COMPONENT_NAME); + + //This URI is used to access the Loki API, if not properly populated, nothing will work! 
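+    //Illustrative example (assumed values): with datasource id "1" and type "loki" this resolves to
+    //  /api/datasources/proxy/1/loki/api/v1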
+    m_dataSourcesAPIURI.setf("/api/datasources/proxy/%s/%s/api/v1" , m_targetDataSource.id.get(), m_targetDataSource.type.get());
+}
+
+/*
+ * This method consumes a logLine string from a successful Grafana Loki query
+ * The LogLine is wrapped in the desired output format
+ */
+void formatResultLine(StringBuffer & returnbuf, const char * resultLine, const char * resultLineName, LogAccessLogFormat format, bool & isFirstLine)
+{
+    switch (format)
+    {
+    case LOGACCESS_LOGFORMAT_xml:
+    {
+        returnbuf.appendf("<%s>", resultLineName);
+        encodeXML(resultLine, returnbuf);
+        returnbuf.appendf("</%s>", resultLineName);
+        isFirstLine = false;
+        break;
+    }
+    case LOGACCESS_LOGFORMAT_json:
+    {
+        if (!isFirstLine)
+            returnbuf.append(", ");
+
+        returnbuf.append("\"");
+        encodeJSON(returnbuf,resultLine);
+        returnbuf.append("\"");
+        isFirstLine = false;
+        break;
+    }
+    case LOGACCESS_LOGFORMAT_csv:
+    {
+        encodeCSVColumn(returnbuf, resultLine); //Currently treating entire log line as a single CSV column
+        returnbuf.newline();
+        isFirstLine = false;
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+/*
+ * This method consumes an Iterator of values elements from a successful Grafana Loki query
+ * It ignores the 1st child (ingest timestamp in ns), and formats the 2nd child (log line) into the desired format
+ */
+void processValues(StringBuffer & returnbuf, IPropertyTreeIterator * valuesIter, LogAccessLogFormat format, bool & isFirstLine)
+{
+    ForEach(*valuesIter)
+    {
+        IPropertyTree & values = valuesIter->query();
+        int numofvalues = values.getCount("values");
+        if (numofvalues == 2)
+        {
+            //const char * insertTimeStamp = values.queryProp("values[1]");
+            formatResultLine(returnbuf, values.queryProp("values[2]"), "line", format, isFirstLine);
+        }
+        else
+        {
+            throw makeStringExceptionV(-1, "%s: Detected unexpected Grafana/Loki values response format!: %s", COMPONENT_NAME, values.queryProp("."));
+        }
+    }
+}
+
+/*
+ * This starts the encapsulation of the logaccess response in the desired format
+ */
+inline void resultsWrapStart(StringBuffer & returnbuf, LogAccessLogFormat format, bool reportHeader)
+{
+    switch (format)
+    {
+    case LOGACCESS_LOGFORMAT_xml:
+    {
+        returnbuf.append("<lines>");
+        break;
+    }
+    case LOGACCESS_LOGFORMAT_json:
+    {
+        returnbuf.append("{\"lines\": [");
+        break;
+    }
+    case LOGACCESS_LOGFORMAT_csv:
+    {
+        if (reportHeader)
+        {
+            returnbuf.append("line"); // this is the entire header for CSV if we're only reporting the line
+            returnbuf.newline();
+        }
+        break;
+    }
+    default:
+        break;
+    }
+}
+
+/*
+ * This finishes the encapsulation of the logaccess response in the desired format
+ */
+inline void resultsWrapEnd(StringBuffer & returnbuf, LogAccessLogFormat format)
+{
+    switch (format)
+    {
+    case LOGACCESS_LOGFORMAT_xml:
+    {
+        returnbuf.append("</lines>");
+        break;
+    }
+    case LOGACCESS_LOGFORMAT_json:
+    {
+        returnbuf.append("]}");
+        break;
+    }
+    case LOGACCESS_LOGFORMAT_csv:
+        break;
+    default:
+        break;
+    }
+}
+
+/*
+ * This method consumes JSON formatted elements from a successful Grafana Loki query
+ * It extracts all values elements and processes them into the desired format
+ */
+void wrapResult(StringBuffer & returnbuf, IPropertyTree * result, LogAccessLogFormat format, bool & isFirstLine)
+{
+    Owned<IPropertyTreeIterator> logLineIter;
+
+    if (result->hasProp("values"))
+    {
+        logLineIter.setown(result->getElements("values"));
+    }
+
+    processValues(returnbuf, logLineIter, format, isFirstLine);
+}
+
+/*
+ * This method consumes the JSON response from a Grafana Loki query
+ * It attempts to unwrap the response, extract the log payload, and report it in the desired format
+ */
+void GrafanaLogAccessCurlClient::processQueryJsonResp(LogQueryResultDetails & resultDetails, const std::string & retrievedDocument, StringBuffer & returnbuf, LogAccessLogFormat format, bool reportHeader)
+{
+    resultDetails.totalReceived = 0;
+    resultDetails.totalAvailable = 0;
+
+    Owned<IPropertyTree> tree = createPTreeFromJSONString(retrievedDocument.c_str());
+    if (!tree)
+        throw makeStringExceptionV(-1, "%s: Could not parse log query response", COMPONENT_NAME);
+
+    if (!tree->hasProp("data"))
+        throw makeStringExceptionV(-1, "%s: Query response did not contain data element!", COMPONENT_NAME);
+
+    IPropertyTree * data = tree->queryPropTree("data");
+    if (!data)
+        throw makeStringExceptionV(-1, "%s: Could not parse data element!", COMPONENT_NAME);
+
+    //process stats first, in case reported entries returned can help preallocate return buffer?
+    if (data->hasProp("stats"))
+    {
+        if (data->hasProp("stats/summary/totalEntriesReturned"))
+        {
+            resultDetails.totalReceived = data->getPropInt64("stats/summary/totalEntriesReturned");
+        }
+    }
+    //should any of these query stats be reported?
+    /*"stats": {"summary": { "bytesProcessedPerSecond": 7187731, "linesProcessedPerSecond": 14201,
+       "totalBytesProcessed": 49601, "totalLinesProcessed": 98, "execTime": 0.006900786, "queueTime": 0.000045301,
+       "subqueries": 1, "totalEntriesReturned": 98},
+       "querier": { "store": { "totalChunksRef": 1, "totalChunksDownloaded": 1,
+       "chunksDownloadTime": 916811, "chunk": {"headChunkBytes": 0,
+       "headChunkLines": 0, "decompressedBytes": 49601,
+       "decompressedLines": 98, "compressedBytes": 6571,"totalDuplicates": 0 }}},
+       "ingester": {"totalReached": 0, "totalChunksMatched": 0, "totalBatches": 0, "totalLinesSent": 0,
+       "store": {"totalChunksRef": 0, "totalChunksDownloaded": 0, "chunksDownloadTime": 0,
+       "chunk": {"headChunkBytes": 0,"headChunkLines": 0,"decompressedBytes": 0,
+       "decompressedLines": 0,"compressedBytes": 0, "totalDuplicates": 0 }}}*/
+
+    if (data->hasProp("result")) //if no data, empty query response
+    {
+        returnbuf.ensureCapacity(retrievedDocument.length());// this is difficult to predict, at least the size of the response?
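+        //Editorial note: the raw JSON length is a safe upper bound on the extracted log text, so
+        //reserving it avoids repeated buffer growth at the cost of a transient over-allocation.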
+        //Adds the format prefix to the return buffer
+        resultsWrapStart(returnbuf, format, reportHeader);
+
+        bool isFirstLine = true;
+        Owned<IPropertyTreeIterator> resultIter = data->getElements("result");
+        //many result elements can be returned, each with a unique set of labels
+        ForEach(*resultIter)
+        {
+            IPropertyTree & result = resultIter->query();
+            wrapResult(returnbuf, &result, format, isFirstLine);
+        }
+
+        //Adds the format postfix to the return buffer
+        resultsWrapEnd(returnbuf, format);
+    }
+}
+
+/*
+ * This method constructs a query string for Grafana to provide all info for a given data source
+ * The method attempts to populate the m_targetDataSource structure with the data source information
+ */
+void GrafanaLogAccessCurlClient::fetchDatasourceByName(const char * targetDataSourceName)
+{
+    DBGLOG("%s: Fetching data source by name: '%s'", COMPONENT_NAME, targetDataSourceName);
+    if (isEmptyString(targetDataSourceName))
+        throw makeStringExceptionV(-1, "%s: fetchDatasourceByName: Empty data source name!", COMPONENT_NAME);
+
+    std::string readBuffer;
+    VStringBuffer targetURI("/api/datasources/name/%s", targetDataSourceName);
+    submitQuery(readBuffer, targetURI.str());
+    processDatasourceJsonResp(readBuffer);
+}
+
+/*
+* submits a Grafana Loki query to fetch all available datasources
+* The response is expected to be a JSON formatted list of datasources
+*/
+void GrafanaLogAccessCurlClient::fetchDatasources(std::string & readBuffer)
+{
+    submitQuery(readBuffer, "/");
+}
+
+/*
+* submits a Grafana Loki query to fetch all labels
+* The response is expected to be a JSON formatted list of labels
+*/
+void GrafanaLogAccessCurlClient::fetchLabels(std::string & readBuffer)
+{
+    submitQuery(readBuffer, "/label");
+}
+
+/*
+ * Creates query filter and stream selector strings for the LogQL query based on the filter options provided
+*/
+void GrafanaLogAccessCurlClient::populateQueryFilterAndStreamSelector(StringBuffer & queryString, StringBuffer & streamSelector, const ILogAccessFilter * filter)
+{
+    if (filter == nullptr)
+        throw makeStringExceptionV(-1, "%s: Null filter detected while creating LogQL query string", COMPONENT_NAME);
+
+    const char * queryOperator = " |~ ";
+    StringBuffer queryValue;
+    StringBuffer streamField;
+    StringBuffer queryField;
+
+    filter->toString(queryValue);
+    switch (filter->filterType())
+    {
+        case LOGACCESS_FILTER_jobid:
+        {
+            DBGLOG("%s: Searching log entries by jobid: '%s'...", COMPONENT_NAME, queryValue.str());
+            break;
+        }
+        case LOGACCESS_FILTER_class:
+        {
+            DBGLOG("%s: Searching log entries by class: '%s'...", COMPONENT_NAME, queryValue.str());
+            break;
+        }
+        case LOGACCESS_FILTER_audience:
+        {
+            DBGLOG("%s: Searching log entries by target audience: '%s'...", COMPONENT_NAME, queryValue.str());
+            break;
+        }
+        case LOGACCESS_FILTER_component:
+        {
+            if (m_componentsColumn.isStream)
+                streamField = m_componentsColumn.name;
+
+            DBGLOG("%s: Searching '%s' component log entries...", COMPONENT_NAME, queryValue.str());
+            break;
+        }
+        case LOGACCESS_FILTER_instance:
+        {
+            if (m_instanceColumn.isStream)
+                streamField = m_instanceColumn.name;
+
+            DBGLOG("%s: Searching log entries by HPCC component instance: '%s'", COMPONENT_NAME, queryValue.str() );
+            break;
+        }
+        case LOGACCESS_FILTER_wildcard:
+        {
+            if (queryValue.isEmpty())
+                throw makeStringExceptionV(-1, "%s: Wildcard filter cannot be empty!", COMPONENT_NAME);
+
+            DBGLOG("%s: Searching log entries by wildcard filter: '%s %s %s'...", COMPONENT_NAME, queryField.str(), queryOperator, queryValue.str());
+            break;
+        }
+        case LOGACCESS_FILTER_or:
+        case LOGACCESS_FILTER_and:
+        {
+            StringBuffer op(logAccessFilterTypeToString(filter->filterType()));
+            queryString.append(" ( ");
+            populateQueryFilterAndStreamSelector(queryString, streamSelector, filter->leftFilterClause());
+            queryString.append(" ");
+            queryString.append(op.toLowerCase()); //LogQL or | and
+            queryString.append(" ");
+            populateQueryFilterAndStreamSelector(queryString, streamSelector, filter->rightFilterClause());
+            queryString.append(" ) ");
+            return; // queryString populated, need to break out
+        }
+        case LOGACCESS_FILTER_pod:
+        {
+            if (m_podColumn.isStream)
+                streamField = m_podColumn.name;
+
+            DBGLOG("%s: Searching log entries by Pod: '%s'", COMPONENT_NAME, queryValue.str() );
+            break;
+        }
+        case LOGACCESS_FILTER_column:
+        {
+            if (filter->getFieldName() == nullptr)
+                throw makeStringExceptionV(-1, "%s: empty field name detected in filter by column!", COMPONENT_NAME);
+            break;
+        }
+        //case LOGACCESS_FILTER_trace:
+        //case LOGACCESS_FILTER_span:
+        default:
+            throw makeStringExceptionV(-1, "%s: Unknown query criteria type encountered: '%s'", COMPONENT_NAME, queryValue.str());
+    }
+
+    //We're constructing two clauses, the stream selector and the query filter
+    //the streamSelector is a comma separated list of key value pairs
+    if (!streamField.isEmpty())
+    {
+        if (!streamSelector.isEmpty())
+            streamSelector.append(", ");
+
+        streamSelector.appendf(" %s=\"%s\" ", streamField.str(), queryValue.str());
+    }
+    else
+    {
+        //the query filter is a sequence of expressions separated by a logical operator
+        queryString.append(" ").append(queryField.str()).append(queryOperator);
+        if (strcmp(m_expectedLogFormat, "table")==0)
+            queryString.append(" \"").append(queryValue.str()).append("\" ");
+        else
+            queryString.append("\"").append(queryValue.str()).append("\"");
+    }
+}
+
+/*
+Translates LogAccess defined SortBy direction enum value to
+the LogQL/Loki counterpart
+*/
+const char * sortByDirection(SortByDirection direction)
+{
+    switch (direction)
+    {
+    case SORTBY_DIRECTION_ascending:
+        return "FORWARD";
+    case SORTBY_DIRECTION_descending:
+    case SORTBY_DIRECTION_none:
+    default:
+        return "BACKWARD";
+    }
+}
+
+/*
+* Constructs LogQL query based on filter options, and sets Loki specific query parameters,
+  submits query, processes response and returns the log entries in the desired format
+*/
+bool GrafanaLogAccessCurlClient::fetchLog(LogQueryResultDetails & resultDetails, const LogAccessConditions & options, StringBuffer & returnbuf, LogAccessLogFormat format)
+{
+    try
+    {
+        resultDetails.totalReceived = 0;
+        resultDetails.totalAvailable = 0;
+
+        const LogAccessTimeRange & trange = options.getTimeRange();
+        if (trange.getStartt().isNull())
+            throw makeStringExceptionV(-1, "%s: start time must be provided!", COMPONENT_NAME);
+
+        StringBuffer fullQuery;
+        fullQuery.set("/query_range?");
+
+        if (options.getSortByConditions().length() > 0)
+        {
+            if (options.getSortByConditions().length() > 1)
+                UWARNLOG("%s: LogQL sorting is only supported by one field!", COMPONENT_NAME);
+
+            SortByCondition condition = options.getSortByConditions().item(0);
+            switch (condition.byKnownField)
+            {
+            case LOGACCESS_MAPPEDFIELD_timestamp:
+                break;
+            case LOGACCESS_MAPPEDFIELD_jobid:
+            case LOGACCESS_MAPPEDFIELD_component:
+            case LOGACCESS_MAPPEDFIELD_class:
+            case LOGACCESS_MAPPEDFIELD_audience:
+            case LOGACCESS_MAPPEDFIELD_instance:
+            case LOGACCESS_MAPPEDFIELD_host:
+            case LOGACCESS_MAPPEDFIELD_unmapped:
+            default:
+                throw makeStringExceptionV(-1, "%s: LogQL sorting is only supported by ingest timestamp!", COMPONENT_NAME);
+
+            const char * direction = sortByDirection(condition.direction);
+            if (!isEmptyString(direction))
+                fullQuery.appendf("direction=%s", direction);
+        }
+
+        fullQuery.append("&limit=").append(std::to_string(options.getLimit()).c_str());
+        fullQuery.append("&query=");
+        //At this point the log field appears as a detected field and is not formatted
+        // Detected fields
+        //if output is json:
+        // log "{ \"MSG\": \"QueryFilesInUse.unsubscribe() called\", \"MID\": \"104\", \"AUD\": \"USR\", \"CLS\": \"PRO\", \"DATE\": \"2024-06-06\", \"TIME\": \"22:03:00.229\", \"PID\": \"8\", \"TID\": \"8\", \"JOBID\": \"UNK\" }\n"
+        //if output is table:
+        // log "00000174 USR PRO 2024-06-19 19:20:58.089 8 160 UNK \"WUUpdate: W20240619-192058\"\n"
+        // stream "stderr"
+        // time "2024-06-06T22:03:00.230759942Z"
+        // ts 2024-06-06T22:03:00.382Z
+        // tsNs 1717711380382410602
+
+        StringBuffer logLineParser;
+        //from https://grafana.com/docs/loki/latest/query/log_queries/
+        //Adding | json to your pipeline will extract all json properties as labels if the log line is a valid json document. Nested properties are flattened into label keys using the _ separator.
+        logLineParser.set(" | json log"); //this parses the log entry and extracts the log field into a label
+        logLineParser.append(" | line_format \"{{.log}}\""); //formats the output line to contain only the log label
+        //This drops the stream, and the various ingest timestamps
+
+        //we're always going to get a stream container, and the log line...
+        //the stream container contains unnecessary and redundant entries;
+        //there is documentation of a 'drop' command which doesn't work in practice,
+        //and the recommended workaround is to clear those stream entries instead...
+        logLineParser.append(" | label_format log=\"\", filename=\"\", namespace=\"\", node_name=\"\", job=\"\"");// app=\"\", component=\"\", container=\"\", instance=\"\");
+
+        /* we're not going to attempt to parse the log line for now,
+           return the entire log line in raw format
+        if (strcmp(m_expectedLogFormat.get(), "json") == 0)
+        {
+            logLineParser.append( " | json ");
+            //at this point, the stream "log" looks like this:
+            // { "MSG": "ESP server started.", "MID": "89", "AUD": "PRG", "CLS": "INF", "DATE": "2024-06-19", "TIME": "14:56:36.648", "PID": "8", "TID": "8", "JOBID": "UNK" }
+            //no need to format "log" into json
+            logLineParser.append(" | line_format \"{{.log}}\"");
+        }
+        else
+        {
+            //parses the log line into individual fields as labels
+            logLineParser.append(" | pattern \" \"");
+            //the "pattern" parser is not reliable; it is sensitive to the number of spaces and the order of the fields
+
+            //do we want to manually format the return format at the server?
+            logLineParser.append(" | line_format \"{ \\\"MID\\\":\\\"{{.MID}}\\\", \\\"AUD\\\":\\\"{{.AUD}}\\\", \\\"MSG\\\":\\\"{{.MSG}}\\\" }\"");
+        }
+        */
+
+        //if we parsed the log line as above, we could control the individual fields returned
+        //HPCC_LOG_TYPE="CLS", HPCC_LOG_MESSAGE="MSG", HPCC_LOG_JOBID="JOBID" | HPCC_LOG_JOBID="UNK"
+
+        //"All LogQL queries contain a log stream selector." - https://grafana.com/docs/loki/latest/query/log_queries/
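+        //As a sketch (label and filter values hypothetical), the fully assembled query built
+        //below takes the shape:
+        //  {namespace="hpcc"} <query filter clause> | json log | line_format "{{.log}}" | label_format log="", filename="", namespace="", node_name="", job=""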
+        StringBuffer streamSelector;
+        StringBuffer queryFilter;
+        populateQueryFilterAndStreamSelector(queryFilter, streamSelector, options.queryFilter());
+        if (!streamSelector.isEmpty())
+            streamSelector.append(", ");
+
+        streamSelector.appendf("namespace=\"%s\"", m_targetNamespace.get());
+
+        fullQuery.append("{");
+        encodeURL(fullQuery, streamSelector.str());
+        fullQuery.append("}");
+        encodeURL(fullQuery, queryFilter.str());
+        encodeURL(fullQuery, logLineParser.str());
+
+        fullQuery.appendf("&start=%s000000000", std::to_string(trange.getStartt().getSimple()).c_str());
+        if (!trange.getEndt().isNull()) //i.e. the 'to' time has been provided
+        {
+            fullQuery.appendf("&end=%s000000000", std::to_string(trange.getEndt().getSimple()).c_str());
+        }
+
+        DBGLOG("FetchLog query: %s", fullQuery.str());
+
+        std::string readBuffer;
+        submitQuery(readBuffer, fullQuery.str());
+
+        processQueryJsonResp(resultDetails, readBuffer, returnbuf, format, true);
+        //DBGLOG("Query fetchLog result: %s", readBuffer.c_str());
+        return true; //response processed successfully
+    }
+    catch(IException * e)
+    {
+        StringBuffer description;
+        IERRLOG("%s: query exception: (%d) - %s", COMPONENT_NAME, e->errorCode(), e->errorMessage(description).str());
+        e->Release();
+    }
+    return false;
+}
+
+GrafanaLogAccessCurlClient::GrafanaLogAccessCurlClient(IPropertyTree & logAccessPluginConfig)
+{
+    m_pluginCfg.set(&logAccessPluginConfig);
+
+    const char * protocol = logAccessPluginConfig.queryProp("connection/@protocol");
+    const char * host = logAccessPluginConfig.queryProp("connection/@host");
+    const char * port = logAccessPluginConfig.queryProp("connection/@port");
+
+    m_grafanaConnectionStr = isEmptyString(protocol) ? DEFAULT_GRAFANA_PROTOCOL : protocol;
+    m_grafanaConnectionStr.append("://");
+    m_grafanaConnectionStr.append(isEmptyString(host) ? DEFAULT_GRAFANA_HOST : host);
+    m_grafanaConnectionStr.append(":").append(isEmptyString(port) ? DEFAULT_GRAFANA_PORT : port);
+
+    m_targetDataSource.id.set(logAccessPluginConfig.hasProp("datasource/@id") ? logAccessPluginConfig.queryProp("datasource/@id") : DEFAULT_DATASOURCE_ID);
+    m_targetDataSource.name.set(logAccessPluginConfig.hasProp("datasource/@name") ? logAccessPluginConfig.queryProp("datasource/@name") : DEFAULT_DATASOURCE_NAME);
+
+    if (logAccessPluginConfig.hasProp("namespace/@name"))
+    {
+        m_targetNamespace.set(logAccessPluginConfig.queryProp("namespace/@name"));
+    }
+
+    if (isEmptyString(m_targetNamespace.get()))
+    {
+        m_targetNamespace.set(defaultNamespaceStream);
+        OWARNLOG("%s: No namespace specified! Loki logaccess should target logs from a specific, non-default namespace!", COMPONENT_NAME);
+    }
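+
+    //For illustration only -- the element and attribute names below mirror the queryProp
+    //lookups above, but all values are hypothetical:
+    //  <connection protocol="http" host="mygrafana" port="3000"/>  -> "http://mygrafana:3000"
+    //  <datasource id="1" name="Loki"/>
+    //  <namespace name="hpcc"/>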
+
+    Owned<const IPropertyTree> secretTree = getSecret("esp", "grafana-logaccess");
+    if (secretTree)
+    {
+        DBGLOG("Grafana LogAccess: loading esp/grafana-logaccess secret");
+
+        getSecretKeyValue(m_grafanaUserName.clear(), secretTree, "username");
+        if (isEmptyString(m_grafanaUserName.str()))
+            throw makeStringExceptionV(-1, "%s: Empty Grafana user name detected!", COMPONENT_NAME);
+
+        getSecretKeyValue(m_grafanaPassword.clear(), secretTree, "password");
+        if (isEmptyString(m_grafanaPassword.str()))
+            throw makeStringExceptionV(-1, "%s: Empty Grafana password detected!", COMPONENT_NAME);
+    }
+    else
+    {
+        DBGLOG("%s: could not load esp/grafana-logaccess secret", COMPONENT_NAME);
+    }
+
+    if (isEmptyString(m_grafanaUserName.str()) || isEmptyString(m_grafanaPassword.str()))
+    {
+        OWARNLOG("%s: Grafana credentials not found in secret, falling back to the grafana logaccess configuration", COMPONENT_NAME);
+
+        if (logAccessPluginConfig.hasProp("connection/@username"))
+            m_grafanaUserName.set(logAccessPluginConfig.queryProp("connection/@username"));
+
+        if (logAccessPluginConfig.hasProp("connection/@password"))
+            m_grafanaPassword.set(logAccessPluginConfig.queryProp("connection/@password"));
+    }
+
+    //This lookup is essential; without it we cannot target the correct datasource
+    fetchDatasourceByName(m_targetDataSource.name.get());
+
+    std::string availableLabels;
+    fetchLabels(availableLabels);
+    DBGLOG("%s: Available labels on target loki/grafana: %s", COMPONENT_NAME, availableLabels.c_str());
+
+    m_expectedLogFormat = defaultExpectedLogFormat;
+    if (logAccessPluginConfig.hasProp("logFormat/@type"))
+    {
+        m_expectedLogFormat.set(logAccessPluginConfig.queryProp("logFormat/@type"));
+    }
+
+    Owned<IPropertyTreeIterator> logMapIter = m_pluginCfg->getElements("logMaps");
+    ForEach(*logMapIter)
+    {
+        IPropertyTree & logMap = logMapIter->query();
+        const char * logMapType = logMap.queryProp("@type");
+        if (streq(logMapType, "global"))
+        {
+            if (logMap.hasProp(logMapIndexPatternAtt))
+                if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0)
+                    m_globalSearchCol.isStream = true;
+
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_globalSearchCol.name = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "workunits"))
+        {
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_workunitsColumn = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "components"))
+        {
+            if (logMap.hasProp(logMapIndexPatternAtt))
+                if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0)
+                    m_componentsColumn.isStream = true;
+
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_componentsColumn.name = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "class"))
+        {
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_classColumn = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "audience"))
+        {
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_audienceColumn = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "instance"))
+        {
+            if (logMap.hasProp(logMapIndexPatternAtt))
+                if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0)
+                    m_instanceColumn.isStream = true;
+
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_instanceColumn.name = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "node"))
+        {
+            if (logMap.hasProp(logMapIndexPatternAtt))
+                if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0)
+                    m_nodeColumn.isStream = true;
+
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_nodeColumn.name = logMap.queryProp(logMapSearchColAtt);
+        }
+        else if (streq(logMapType, "host"))
+        {
+            OWARNLOG("%s: 'host' LogMap entry is NOT supported!", COMPONENT_NAME);
+        }
+        else if (streq(logMapType, "pod"))
+        {
+            if (logMap.hasProp(logMapIndexPatternAtt))
+                if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0)
+                    m_podColumn.isStream = true;
+
+            if (logMap.hasProp(logMapSearchColAtt))
+                m_podColumn.name = logMap.queryProp(logMapSearchColAtt);
+        }
+        else
+        {
+            ERRLOG("Encountered invalid LogAccess field map type: '%s'", logMapType);
+        }
+    }
+
+    DBGLOG("%s: targeting: '%s' - datasource API: '%s'", COMPONENT_NAME, m_grafanaConnectionStr.str(), m_dataSourcesAPIURI.str());
+}
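+
+//For illustration only: the ForEach loop above consumes logMaps entries along these lines,
+//assuming logMapIndexPatternAtt/logMapSearchColAtt resolve to @storeName/@searchColumn
+//(those constants are defined earlier in this file; the attribute names here are hypothetical):
+//  <logMaps type="workunits" searchColumn="JOBID"/>
+//  <logMaps type="pod" storeName="stream" searchColumn="pod"/>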
+
+class GrafanaLogaccessStream : public CInterfaceOf<IRemoteLogAccessStream>
+{
+public:
+    virtual bool readLogEntries(StringBuffer & record, unsigned & recsRead) override
+    {
+        DBGLOG("%s: GrafanaLogaccessStream readLogEntries called", COMPONENT_NAME);
+        LogQueryResultDetails resultDetails;
+        m_remoteLogAccessor->fetchLog(resultDetails, m_options, record, m_outputFormat);
+        recsRead = resultDetails.totalReceived;
+        DBGLOG("%s: GrafanaLogaccessStream readLogEntries returned %u records", COMPONENT_NAME, recsRead);
+
+        return false; //paging is not supported; all available results are returned in a single pass
+    }
+
+    GrafanaLogaccessStream(IRemoteLogAccess * grafanaQueryClient, const LogAccessConditions & options, LogAccessLogFormat format, unsigned int pageSize)
+    {
+        DBGLOG("%s: GrafanaLogaccessStream created", COMPONENT_NAME);
+        m_remoteLogAccessor.set(grafanaQueryClient);
+        m_outputFormat = format;
+        m_pageSize = pageSize;
+        m_options = options;
+    }
+
+private:
+    unsigned int m_pageSize;
+    bool m_hasBeenScrolled = false;
+    LogAccessLogFormat m_outputFormat;
+    LogAccessConditions m_options;
+    Owned<IRemoteLogAccess> m_remoteLogAccessor;
+};
+
+IRemoteLogAccessStream * GrafanaLogAccessCurlClient::getLogReader(const LogAccessConditions & options, LogAccessLogFormat format)
+{
+    return getLogReader(options, format, defaultMaxRecordsPerFetch);
+}
+
+IRemoteLogAccessStream * GrafanaLogAccessCurlClient::getLogReader(const LogAccessConditions & options, LogAccessLogFormat format, unsigned int pageSize)
+{
+    return new GrafanaLogaccessStream(this, options, format, pageSize);
+}
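+
+//A minimal usage sketch (not part of this plugin): the names follow the IRemoteLogAccess
+//interface used above; the LOGACCESS_LOGFORMAT_json enum value is assumed here
+//  Owned<IRemoteLogAccess> client = createInstance(pluginConfig);
+//  LogAccessConditions options;  //populate the filter and time range first
+//  Owned<IRemoteLogAccessStream> reader = client->getLogReader(options, LOGACCESS_LOGFORMAT_json);
+//  StringBuffer entries;
+//  unsigned recsRead = 0;
+//  reader->readLogEntries(entries, recsRead);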
+
+extern "C" IRemoteLogAccess * createInstance(IPropertyTree & logAccessPluginConfig)
+{
+    return new GrafanaLogAccessCurlClient(logAccessPluginConfig);
+}
\ No newline at end of file
diff --git a/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp
new file mode 100644
index 00000000000..fb6f71cff98
--- /dev/null
+++ b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp
@@ -0,0 +1,108 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+#pragma once
+
+#include "jlog.hpp"
+#include "jlog.ipp"
+#include "jptree.hpp"
+#include "jstring.hpp"
+#include <curl/curl.h>
+#include "jsecrets.hpp"
+
+#define COMPONENT_NAME "GrafanaLogAccessCurlClient"
+
+static constexpr const char * DEFAULT_DATASOURCE_NAME = "Loki";
+static constexpr const char * DEFAULT_DATASOURCE_TYPE = "loki";
+static constexpr const char * DEFAULT_DATASOURCE_INDEX = "1";
+
+struct GrafanaDataSource
+{
+    StringAttr type = DEFAULT_DATASOURCE_TYPE;
+    StringAttr name = DEFAULT_DATASOURCE_NAME;
+    StringAttr id = DEFAULT_DATASOURCE_INDEX;
+    StringAttr uid;
+    //Other Grafana datasource attributes:
+    //basicAuthPassword, version, basicAuthUser, access = proxy, isDefault
+    //withCredentials, url http://myloki4hpcclogs:3100, secureJsonFields
+    //user, password, basicAuth, jsonData, typeLogoUrl, readOnly, database
+};
+
+struct LogField
+{
+    StringAttr name;
+    bool isStream;
+    LogField(const char * name, bool isStream = false) : name(name), isStream(isStream) {}
+};
+
+static constexpr int defaultEntryLimit = 100;
+static constexpr int defaultEntryStart = 0;
+
+class GrafanaLogAccessCurlClient : public CInterfaceOf<IRemoteLogAccess>
+{
+private:
+    static constexpr const char * type = "grafanaloganalyticscurl";
+    Owned<IPropertyTree> m_pluginCfg;
+    StringBuffer m_grafanaConnectionStr;
+    GrafanaDataSource m_targetDataSource;
+
+    StringBuffer m_grafanaUserName;
+    StringBuffer m_grafanaPassword;
+    StringBuffer m_dataSourcesAPIURI;
+    StringAttr m_targetNamespace;
+
+    LogField m_globalSearchCol = LogField("log");
+    LogField m_workunitsColumn = LogField("JOBID");
+    LogField m_componentsColumn = LogField("component", true);
+    LogField m_audienceColumn = LogField("AUD");
+    LogField m_classColumn = LogField("CLS");
+    LogField m_instanceColumn = LogField("instance", true);
+    LogField m_podColumn = LogField("pod", true);
+    LogField m_containerColumn = LogField("container", true);
+    LogField m_messageColumn = LogField("MSG");
+    LogField m_nodeColumn = LogField("node_name", true);
+    LogField m_logTimestampColumn = LogField("TIME");
+    LogField m_logDatestampColumn = LogField("DATE");
+    LogField m_logSequenceColumn = LogField("MID");
+    LogField m_logProcIDColumn = LogField("PID");
+    LogField m_logThreadIDColumn = LogField("TID");
+    //LogField m_logTraceIDColumn = LogField("TRC");
+    //LogField m_logSpanIDColumn = LogField("SPN");
+
+    StringAttr m_expectedLogFormat; //json|table|xml
+
+public:
+    GrafanaLogAccessCurlClient(IPropertyTree & logAccessPluginConfig);
+    void processQueryJsonResp(LogQueryResultDetails & resultDetails, const std::string & retrievedDocument, StringBuffer & returnbuf, LogAccessLogFormat format, bool reportHeader);
+    void processDatasourceJsonResp(const std::string & retrievedDocument);
+    void fetchDatasourceByName(const char * targetDataSourceName);
+    void fetchDatasources(std::string & readBuffer);
+    void fetchLabels(std::string & readBuffer);
+    void submitQuery(std::string & readBuffer, const char * targetURI);
+
+    void populateQueryFilterAndStreamSelector(StringBuffer & queryString, StringBuffer & streamSelector, const ILogAccessFilter * filter);
+    static void timestampQueryRangeString(StringBuffer & range, std::time_t from, std::time_t to);
+
+    // IRemoteLogAccess methods
+    virtual bool fetchLog(LogQueryResultDetails & resultDetails, const LogAccessConditions & options, StringBuffer & returnbuf, LogAccessLogFormat format) override;
+    virtual const char * getRemoteLogAccessType() const override { return type; }
+    virtual IPropertyTree * queryLogMap() const override { return m_pluginCfg->queryPropTree(""); }
+    virtual const char * fetchConnectionStr() const override { return m_grafanaConnectionStr.str(); }
+    virtual IRemoteLogAccessStream * getLogReader(const LogAccessConditions & options, LogAccessLogFormat format) override;
+    virtual IRemoteLogAccessStream * getLogReader(const LogAccessConditions & options, LogAccessLogFormat format, unsigned int pageSize) override;
+    virtual bool supportsResultPaging() const override { return false; }
+};
\ No newline at end of file
diff --git a/testing/unittests/jlibtests.cpp b/testing/unittests/jlibtests.cpp
index d5cca3b2864..cc72e5adbd6 100644
--- a/testing/unittests/jlibtests.cpp
+++ b/testing/unittests/jlibtests.cpp
@@ -68,7 +68,6 @@ class JlibTraceTest : public CppUnit::TestFixture
     CPPUNIT_TEST(manualTestScopeEnd);
     CPPUNIT_TEST(testActiveSpans);
     CPPUNIT_TEST(testSpanFetchMethods);
-    //CPPUNIT_TEST(testJTraceJLOGExporterprintResources);
     //CPPUNIT_TEST(testJTraceJLOGExporterprintAttributes);
     CPPUNIT_TEST(manualTestsDeclaredSpanStartTime);
@@ -826,6 +825,30 @@
 CPPUNIT_TEST_SUITE_REGISTRATION( JlibTraceTest );
 CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( JlibTraceTest, "JlibTraceTest" );
 
+class JlibStringTest : public CppUnit::TestFixture
+{
+public:
+    CPPUNIT_TEST_SUITE(JlibStringTest);
+    CPPUNIT_TEST(testEncodeCSVColumn);
+    CPPUNIT_TEST_SUITE_END();
+
+protected:
+    void testEncodeCSVColumn()
+    {
+        //encodeCSVColumn should quote a column containing commas and double any embedded quotes
+        const char * csvCol1 = "hello,world";
+        StringBuffer encodedCSV;
+        encodeCSVColumn(encodedCSV, csvCol1);
+        CPPUNIT_ASSERT_EQUAL_STR("\"hello,world\"", encodedCSV.str());
+
+        const char * csvCol2 = "hello world, \"how are you?\"";
+        encodedCSV.clear();
+        encodeCSVColumn(encodedCSV, csvCol2);
+        CPPUNIT_ASSERT_EQUAL_STR("\"hello world, \"\"how are you?\"\"\"", encodedCSV.str());
+    }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION( JlibStringTest );
+CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( JlibStringTest, "JlibStringTest" );
 
 class JlibSemTest : public CppUnit::TestFixture
 {
 
From 9c1debad5b1c32d1ba08ababddbc90e215e68cc1 Mon Sep 17 00:00:00 2001
From: Jake Smith
Date: Thu, 27 Jun 2024 20:25:21 +0100
Subject: [PATCH 30/31] HPCC-32174 Incorrect epoll event added for non-ssl rowservice

Signed-off-by: Jake Smith
---
 fs/dafsserver/dafsserver.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/dafsserver/dafsserver.cpp b/fs/dafsserver/dafsserver.cpp
index 44ee5a96cf0..282559612fe 100644
--- a/fs/dafsserver/dafsserver.cpp
+++ b/fs/dafsserver/dafsserver.cpp
@@ -5627,7 +5627,7 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface
                 eps.getEndpointHostText(peerURL.clear());
                 PROGLOG("Server accepting row service socket from %s", peerURL.str());
 #endif
-                addClient(acceptedRSSock.getClear(), true, true);
+                addClient(acceptedRSSock.getClear(), rowServiceSSL, true);
             }
         }
         else

From ce48f75f6ce298bd86161fcfa39fba88c8b8cf58 Mon Sep 17 00:00:00 2001
From: Gordon Smith
Date: Fri, 28 Jun 2024 08:35:48 +0100
Subject: [PATCH 31/31] HPCC-32172 ECL Playground results flickering

Flipping between scroll and no scroll

Signed-off-by: Gordon Smith
---
 esp/src/src-react/components/ECLPlayground.tsx | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/esp/src/src-react/components/ECLPlayground.tsx b/esp/src/src-react/components/ECLPlayground.tsx
index 665e0c699ad..9fe22b20ec7 100644
--- a/esp/src/src-react/components/ECLPlayground.tsx
+++ b/esp/src/src-react/components/ECLPlayground.tsx
@@ -512,18 +512,18
@@ export const ECLPlayground: React.FunctionComponent = (props - - {outputMode === OutputMode.ERRORS ? ( - + +
+ {outputMode === OutputMode.ERRORS ? ( + - ) : outputMode === OutputMode.RESULTS ? ( - + ) : outputMode === OutputMode.RESULTS ? ( + - ) : outputMode === OutputMode.VIS ? ( -
+ ) : outputMode === OutputMode.VIS ? ( -
- ) : null} + ) : null} +
;