Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/candidate-9.8.x'
Browse files Browse the repository at this point in the history
Signed-off-by: Gavin Halliday <[email protected]>
  • Loading branch information
ghalliday committed Dec 3, 2024
2 parents 294a5fd + 60d96e6 commit 77ac6fe
Show file tree
Hide file tree
Showing 19 changed files with 232 additions and 33 deletions.
21 changes: 15 additions & 6 deletions .github/workflows/build-assets.yml
Original file line number Diff line number Diff line change
Expand Up @@ -91,15 +91,24 @@ jobs:
- os: ubuntu-22.04
name: k8s
container: true
build-docker-image: true
- os: ubuntu-22.04
name: docs
documentation: true
- os: ubuntu-20.04
- os: ubuntu-20.04
name: k8s
container: true
- os: rockylinux-8
- os: ubuntu-22.04
name: LN k8s
ln: true
container: true
build-docker-image: true
- os: ubuntu-20.04
name: LN k8s
ln: true
container: true
- os: ubuntu-22.04
name: LN
ln: true
Expand Down Expand Up @@ -272,7 +281,7 @@ jobs:
${{ needs.preamble.outputs.folder_build }}/*.md5sum
- name: Locate k8s deb file (community)
if: ${{ !matrix.ln && !matrix.ee && matrix.container && !matrix.documentation }}
if: ${{ !matrix.ln && !matrix.ee && matrix.container && !matrix.documentation && matrix.build-docker-image }}
id: container
run: |
k8s_pkg_path=$(ls -t ${{ needs.preamble.outputs.folder_build }}/*64_k8s.deb 2>/dev/null | head -1)
Expand All @@ -282,7 +291,7 @@ jobs:
- name: Create Docker Image (community)
uses: docker/build-push-action@v5
if: ${{ !matrix.ln && !matrix.ee && matrix.container && !matrix.documentation }}
if: ${{ !matrix.ln && !matrix.ee && matrix.container && !matrix.documentation && matrix.build-docker-image }}
with:
builder: ${{ steps.buildx.outputs.name }}
file: ${{ needs.preamble.outputs.folder_platform }}/dockerfiles/vcpkg/platform-core-${{ matrix.os }}/Dockerfile
Expand Down Expand Up @@ -345,7 +354,7 @@ jobs:
cmake --build /hpcc-dev/build --parallel $(nproc) --target package"
- name: Upload Assets to Jfrog (debian internal)
if: ${{ matrix.ln && !matrix.container && contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}}
if: ${{ matrix.ln && contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}}
shell: bash
run: |
cd ${{ needs.preamble.outputs.folder_build }}
Expand All @@ -368,7 +377,7 @@ jobs:
done
- name: Locate k8s deb file (internal)
if: ${{ matrix.ln && matrix.container && !matrix.documentation }}
if: ${{ matrix.ln && matrix.container && !matrix.documentation && matrix.build-docker-image }}
id: ln-container
run: |
k8s_pkg_path=$(ls -t ${{ needs.preamble.outputs.folder_build }}/*64_k8s.deb 2>/dev/null | head -1)
Expand All @@ -378,7 +387,7 @@ jobs:
- name: Create Docker Image (internal)
uses: docker/build-push-action@v5
if: ${{ matrix.ln && matrix.container && !matrix.documentation }}
if: ${{ matrix.ln && matrix.container && !matrix.documentation && matrix.build-docker-image }}
with:
builder: ${{ steps.buildx.outputs.name }}
file: ${{ needs.preamble.outputs.folder_platform }}/dockerfiles/vcpkg/platform-core-${{ matrix.os }}/Dockerfile
Expand All @@ -393,7 +402,7 @@ jobs:
type=registry,ref=hpccsystems/platform-core-${{ matrix.os }}:${{ needs.preamble.outputs.candidate_base_branch }}
- name: JFrog Docker Push and Publish
if: ${{ matrix.ln && matrix.container && !matrix.documentation && github.repository_owner == 'hpcc-systems' }}
if: ${{ matrix.ln && matrix.container && !matrix.documentation && matrix.build-docker-image && github.repository_owner == 'hpcc-systems' }}
run: |
jf docker push ${{ secrets.JFROG_REGISTRY || 'dummy.io' }}/hpccpl-docker-local/platform-core-ln:${{ needs.preamble.outputs.hpcc_version }} --build-name=platform-core-ln --build-number=${{ needs.preamble.outputs.hpcc_version }} --project=hpccpl
jf rt bp platform-core-ln ${{ needs.preamble.outputs.hpcc_version }} --project=hpccpl
Expand Down
5 changes: 3 additions & 2 deletions .github/workflows/bundleTest-thor.md
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ The steps in the workflow run on the specified operating system, with Ubuntu-22.
This step enables us to download the ready-to-install HPCC Platform's artifact built on the latest commit.
```yaml
- name: Download Package
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: ${{ inputs.asset-name }}
path: ${{ inputs.asset-name }}
Expand Down Expand Up @@ -457,4 +457,5 @@ If any logs, ZAP reports, or .trace files are generated, they are uploaded as ar
/home/runner/HPCCSystems-regression/log/*
/home/runner/HPCCSystems-regression/zap/*
if-no-files-found: ignore
```
```

2 changes: 1 addition & 1 deletion .github/workflows/bundleTest-thor.yml
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ jobs:
sudo rm -rf /usr/local/lib/android
- name: Download Package
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: ${{ inputs.asset-name }}
path: ${{ inputs.asset-name }}
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test-ui-gh_runner.yml
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ jobs:
path: ${{ inputs.asset-name }}

- name: Download Support Files
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: ${{ inputs.asset-name }}-support-files
path: ${{ inputs.asset-name }}-support-files
Expand Down
4 changes: 2 additions & 2 deletions cmake_modules/commonSetup.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -1049,16 +1049,16 @@ IF ("${COMMONSETUP_DONE}" STREQUAL "")
list(INSERT ARGS 3 ${ARGV1}_deps)
if (WIN32)
install(RUNTIME_DEPENDENCY_SET ${ARGV1}_deps
DESTINATION ${EXEC_DIR}
PRE_EXCLUDE_REGEXES "api-ms-win-.*\.dll"
POST_INCLUDE_REGEXES "^${VCPKG_FILES_DIR}.*"
POST_EXCLUDE_REGEXES ".*"
DESTINATION ${EXEC_DIR}
)
else()
install(RUNTIME_DEPENDENCY_SET ${ARGV1}_deps
DESTINATION ${LIB_DIR}
POST_INCLUDE_REGEXES "^${VCPKG_FILES_DIR}\/vcpkg_installed\/.*"
POST_EXCLUDE_REGEXES ".*"
DESTINATION ${LIB_DIR}
)
endif()
endif()
Expand Down
42 changes: 42 additions & 0 deletions dali/base/dadfs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3779,6 +3779,47 @@ protected: friend class CDistributedFilePart;
clusters.kill();
}

    //Ensure that enough time has passed from when the file was last modified for reads to be consistent
    //Important for blob storage or remote, geographically synchronized storage
    // Walks every Cluster element of the file's metadata; for each cluster with a
    // non-zero write-sync margin, sleeps until at least marginMs has elapsed since
    // the file's recorded modification time.  Both the modification time and the
    // current time are fetched lazily (only when some cluster actually has a margin)
    // and cached across iterations.
    void checkWriteSync()
    {
        time_t modifiedTime = 0;    // cached file modification time (0 = not yet fetched)
        time_t now = 0;             // cached current time (0 = not yet fetched / needs refresh)

        Owned<IPropertyTreeIterator> iter = root->getElements("Cluster");
        ForEach(*iter)
        {
            const char * name = iter->query().queryProp("@name");
            unsigned marginMs = getWriteSyncMarginMs(name);
            if (marginMs)
            {
                if (0 == modifiedTime)
                {
                    CDateTime modified;
                    // No recorded modification time => nothing to compare against; assume safe
                    if (!getModificationTime(modified))
                        return;
                    modifiedTime = modified.getSimple();
                }

                if (0 == now)
                    now = time(&now);

                //Round the elapsed time down - so that a change on the last ms of one time period does not count as a whole second of elapsed time
                //This could be avoided if the modified time was more granular
                // NOTE(review): assumes now >= modifiedTime; a clock skewed behind the
                // modification time would make elapsedMs wrap to a huge value and skip
                // the delay - confirm acceptable
                unsigned __int64 elapsedMs = (now - modifiedTime) * 1000;
                if (elapsedMs >= 1000)
                    elapsedMs -= 999;

                if (unlikely(elapsedMs < marginMs))
                {
                    LOG(MCuserProgress, "Delaying access to %s on %s for %ums to ensure write sync", queryLogicalName(), name, (unsigned)(marginMs - elapsedMs));
                    MilliSleep(marginMs - elapsedMs);
                    now = 0; // re-evaluate now - unlikely to actually happen
                }
            }
        }
    }

bool hasDirPerPart() const
{
return FileDescriptorFlags::none != (fileFlags & FileDescriptorFlags::dirperpart);
Expand Down Expand Up @@ -8299,6 +8340,7 @@ IDistributedFile *CDistributedFileDirectory::dolookup(CDfsLogicalFileName &_logi
}
CDistributedFile *ret = new CDistributedFile(this,fcl.detach(),*logicalname,accessMode,user); // found
ret->setSuperOwnerLock(superOwnerLock.detach());
ret->checkWriteSync();
return ret;
}
// now super file
Expand Down
72 changes: 72 additions & 0 deletions dali/base/dafdesc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -665,6 +665,10 @@ class CFileDescriptorBase: public CInterface
virtual IFileDescriptor &querySelf() = 0;
virtual unsigned copyClusterNum(unsigned partidx, unsigned copy,unsigned *replicate=NULL) = 0;

    // Return the file-level attribute tree (non-owning reference; caller must not release).
    IPropertyTree & queryAttributes() const
    {
        return *attr;
    }
};

class CPartDescriptor : implements IPartDescriptor
Expand Down Expand Up @@ -819,6 +823,74 @@ protected: friend class CFileDescriptor;
return ismulti;
}

    // Return this part's attribute tree, e.g. @size/@compressedSize (non-owning reference).
    IPropertyTree & queryAttributes() const
    {
        return *props;
    }

offset_t getFileSize(bool allowphysical,bool forcephysical)
{
offset_t ret = (offset_t)((forcephysical&&allowphysical)?-1:queryAttributes().getPropInt64("@size", -1));
if (allowphysical&&(ret==(offset_t)-1))
ret = getSize(true);
return ret;
}

offset_t getDiskSize(bool allowphysical,bool forcephysical)
{
if (!::isCompressed(parent.queryAttributes()))
return getFileSize(allowphysical, forcephysical);

if (forcephysical && allowphysical)
return getSize(false); // i.e. only if force, because all compressed should have @compressedSize attribute

// NB: compressSize is disk size
return queryAttributes().getPropInt64("@compressedSize", -1);
}

offset_t getSize(bool checkCompressed)
{
offset_t ret = (offset_t)-1;
StringBuffer firstname;
bool compressed = ::isCompressed(parent.queryAttributes());
unsigned nc=parent.numCopies(partIndex);
for (unsigned copy=0;copy<nc;copy++)
{
RemoteFilename rfn;
try
{
Owned<IFile> partfile = createIFile(getFilename(copy, rfn));
if (checkCompressed && compressed)
{
Owned<IFileIO> partFileIO = partfile->open(IFOread);
if (partFileIO)
{
Owned<ICompressedFileIO> compressedIO = createCompressedFileReader(partFileIO);
if (compressedIO)
ret = compressedIO->size();
else
throw makeStringExceptionV(DFSERR_PhysicalCompressedPartInvalid, "Compressed part is not in the valid format: %s", partfile->queryFilename());
}
}
else
ret = partfile->size();
if (ret!=(offset_t)-1)
return ret;
}
catch (IException *e)
{
StringBuffer s("CDistributedFilePart::getSize ");
rfn.getRemotePath(s);
EXCLOG(e, s.str());
e->Release();
}
if (copy==0)
rfn.getRemotePath(firstname);
}
throw makeStringExceptionV(DFSERR_CannotFindPartFileSize, "Cannot find physical file size for %s", firstname.str());;
}


RemoteMultiFilename &getMultiFilename(unsigned copy, RemoteMultiFilename &rmfn)
{
if (ismulti) {
Expand Down
3 changes: 3 additions & 0 deletions dali/base/dafdesc.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,9 @@ interface IPartDescriptor: extends IInterface
virtual const char *queryOverrideName() = 0; // for non-standard files
virtual unsigned copyClusterNum(unsigned copy,unsigned *replicate=NULL)=0; // map copy number to cluster (and optionally replicate number)
virtual IReplicatedFile *getReplicatedFile()=0;

virtual offset_t getFileSize(bool allowphysical,bool forcephysical)=0; // gets the part filesize (NB this will be the *expanded* size)
virtual offset_t getDiskSize(bool allowphysical,bool forcephysical)=0; // gets the part size on disk (NB this will be the compressed size)
};
typedef IArrayOf<IPartDescriptor> CPartDescriptorArray;
typedef IIteratorOf<IPartDescriptor> IPartDescriptorIterator;
Expand Down
2 changes: 1 addition & 1 deletion dockerfiles/vcpkg/platform-core-ubuntu-22.04.dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.29.7/b
chmod +x ./kubectl && \
mv ./kubectl /usr/local/bin

RUN curl -LO https://packagecloud.io/github/git-lfs/packages/ubuntu/jammy/git-lfs_3.5.1_amd64.deb/download && \
RUN curl -LO https://packagecloud.io/github/git-lfs/packages/ubuntu/jammy/git-lfs_3.6.0_amd64.deb/download && \
dpkg -i download && \
rm download

Expand Down
2 changes: 1 addition & 1 deletion dockerfiles/vcpkg/platform-core-ubuntu-22.04/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.29.7/b
chmod +x ./kubectl && \
mv ./kubectl /usr/local/bin

RUN curl -LO https://packagecloud.io/github/git-lfs/packages/ubuntu/jammy/git-lfs_3.5.1_amd64.deb/download && \
RUN curl -LO https://packagecloud.io/github/git-lfs/packages/ubuntu/jammy/git-lfs_3.6.0_amd64.deb/download && \
dpkg -i download && \
rm download

Expand Down
11 changes: 11 additions & 0 deletions ecl/hthor/hthor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8735,6 +8735,17 @@ bool CHThorDiskReadBaseActivity::openNext()
{
inputfile.setown(createIFile(rfilename));

if (curPart)
{
offset_t expectedSize = curPart->getDiskSize(false, false);
if (expectedSize != unknownFileSize)
{
offset_t actualSize = inputfile->size();
if(actualSize != expectedSize)
throw MakeStringException(0, "File size mismatch: file %s was supposed to be %" I64F "d bytes but appears to be %" I64F "d bytes", inputfile->queryFilename(), expectedSize, actualSize);
}
}

if (compressed)
{
Owned<IExpander> eexp;
Expand Down
17 changes: 13 additions & 4 deletions esp/services/ws_dfu/ws_dfuXRefService.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,10 @@ void CWsDfuXRefEx::init(IPropertyTree *cfg, const char *process, const char *ser
throw MakeStringException(-1, "No Dali Connection Active. Please Specify a Dali to connect to in you configuration file");
}

#ifndef _CONTAINERIZED
initBareMetalRoxieTargets(roxieConnMap);
#endif

XRefNodeManager.setown(CreateXRefNodeFactory());

//Start out builder thread......
Expand Down Expand Up @@ -750,19 +754,24 @@ void CWsDfuXRefEx::findUnusedFilesWithDetailsInDFS(IEspContext &context, const c
void CWsDfuXRefEx::getRoxieFiles(const char *process, bool checkPackageMaps, MapStringTo<bool> &usedFileMap)
{
SocketEndpointArray servers;
Owned<IPropertyTree> controlXrefInfo;
#ifdef _CONTAINERIZED
StringBuffer epStr;
getService(epStr, process, true);
SocketEndpoint ep(epStr);
servers.append(ep);
#else
getRoxieProcessServers(process, servers);
if (!servers.length())
throw MakeStringExceptionDirect(ECLWATCH_INVALID_CLUSTER_INFO, "process cluster, not found.");
Owned<ISocket> sock = ISocket::connect_timeout(servers.item(0), ROXIECONNECTIONTIMEOUT);
controlXrefInfo.setown(sendRoxieControlQuery(sock, "<control:getQueryXrefInfo/>", ROXIECONTROLXREFTIMEOUT));
#else
ISmartSocketFactory *conn = roxieConnMap.getValue(process);
if (!conn)
throw makeStringExceptionV(ECLWATCH_CANNOT_GET_ENV_INFO, "Connection info for '%s' process cluster not found.", process ? process : "(null)");

controlXrefInfo.setown(sendRoxieControlQuery(conn, "<control:getQueryXrefInfo/>", ROXIECONTROLXREFTIMEOUT, ROXIECONNECTIONTIMEOUT));
#endif

Owned<ISocket> sock = ISocket::connect_timeout(servers.item(0), ROXIECONNECTIONTIMEOUT);
Owned<IPropertyTree> controlXrefInfo = sendRoxieControlQuery(sock, "<control:getQueryXrefInfo/>", ROXIECONTROLXREFTIMEOUT);
if (!controlXrefInfo)
throw MakeStringExceptionDirect(ECLWATCH_INTERNAL_ERROR, "roxie cluster, not responding.");
Owned<IPropertyTreeIterator> roxieFiles = controlXrefInfo->getElements("//File");
Expand Down
1 change: 1 addition & 0 deletions esp/services/ws_dfu/ws_dfuXRefService.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,7 @@ class CWsDfuXRefEx : public CWsDFUXRef
{
Owned<IXRefNodeManager> XRefNodeManager;
Owned<CXRefExBuilderThread> m_XRefbuilder;
MapStringToMyClass<ISmartSocketFactory> roxieConnMap;

IXRefFilesNode* getFileNodeInterface(IXRefNode& XRefNode,const char* nodeType);
void addXRefNode(const char* name, IPropertyTree* pXRefNodeTree);
Expand Down
5 changes: 5 additions & 0 deletions helm/hpcc/values.schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -603,6 +603,11 @@
"eclwatchVisible": {
"type": "boolean"
},
"writeSyncMarginMs": {
"description": "Time that is required to elapse between writing a file and all read copies to be consistently updated",
"type": "integer",
"default": 0
},
"components": {},
"prefix": {},
"subPath": {},
Expand Down
2 changes: 2 additions & 0 deletions roxie/ccd/ccdfile.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3909,6 +3909,8 @@ class CcdFileTest : public CppUnit::TestFixture
virtual const char *queryOverrideName() { UNIMPLEMENTED; }
virtual unsigned copyClusterNum(unsigned copy,unsigned *replicate=NULL) { UNIMPLEMENTED; }
virtual IReplicatedFile *getReplicatedFile() { UNIMPLEMENTED; }
virtual offset_t getFileSize(bool allowphysical,bool forcephysical) { UNIMPLEMENTED; }
virtual offset_t getDiskSize(bool allowphysical,bool forcephysical) { UNIMPLEMENTED; }
};

void testCopy()
Expand Down
Loading

0 comments on commit 77ac6fe

Please sign in to comment.