From 45c1b8a56ed8165cb469c3a108e1c8a1467501fd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:35:44 +0200 Subject: [PATCH 001/726] Port the same fix we have in the Run3 code --- Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx index f66454ebad52e..652bef585c98c 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx @@ -189,7 +189,6 @@ void TrackerDPL::run(ProcessingContext& pc) LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - std::vector tracks; auto& allClusIdx = pc.outputs().make>(Output{"IT3", "TRACKCLSID", 0, Lifetime::Timeframe}); std::vector trackLabels; std::vector verticesLabels; @@ -281,7 +280,7 @@ void TrackerDPL::run(ProcessingContext& pc) for (unsigned int iROF{0}; iROF < rofs.size(); ++iROF) { auto& rof{rofs[iROF]}; - tracks = timeFrame->getTracks(iROF); + auto &tracks = timeFrame->getTracks(iROF); trackLabels = timeFrame->getTracksLabel(iROF); auto number{tracks.size()}; auto first{allTracks.size()}; From ef510cc22bb818c0bcd95e942fb926c801e44af4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:45:57 +0200 Subject: [PATCH 002/726] Drop obsolete flag -undefined error is the default now. --- dependencies/O2CompileFlags.cmake | 1 - 1 file changed, 1 deletion(-) diff --git a/dependencies/O2CompileFlags.cmake b/dependencies/O2CompileFlags.cmake index e52260ed74455..7fe9019701efe 100644 --- a/dependencies/O2CompileFlags.cmake +++ b/dependencies/O2CompileFlags.cmake @@ -126,7 +126,6 @@ set(CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}} set(CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE}} ${CMAKE_C_WARNINGS}") if(APPLE) - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-undefined,error") # avoid undefined in our libs elseif(UNIX) set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined") # avoid undefined in our libs endif() From a367067ddaa6c5da2bc85b4ed48def2165fb729e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 19 Oct 2023 15:54:20 +0200 Subject: [PATCH 003/726] DPL: change InjectorFunction to return a bool The result is now wether or not something was send out of band --- .../CTP/workflowScalers/src/ctp-proxy.cxx | 3 ++- .../CTP/workflowScalers/src/ctp-qc-proxy.cxx | 5 ++-- .../DCS/testWorkflow/src/DCStoDPLconverter.h | 7 +++-- .../DCS/testWorkflow/src/dcs-config-proxy.cxx | 11 ++++---- .../Framework/ExternalFairMQDeviceProxy.h | 11 +++++++- .../Core/src/ExternalFairMQDeviceProxy.cxx | 6 +++-- .../benchmark_ExternalFairMQDeviceProxies.cxx | 11 ++++---- .../test_ExternalFairMQDeviceWorkflow.cxx | 26 +++++++++---------- .../src/DataSamplingReadoutAdapter.cxx | 1 + run/o2sim_mctracks_proxy.cxx | 8 ++++-- 10 files changed, 56 insertions(+), 33 deletions(-) diff --git a/Detectors/CTP/workflowScalers/src/ctp-proxy.cxx b/Detectors/CTP/workflowScalers/src/ctp-proxy.cxx index 449cd0903c882..3634df6d30bda 100644 --- a/Detectors/CTP/workflowScalers/src/ctp-proxy.cxx +++ b/Detectors/CTP/workflowScalers/src/ctp-proxy.cxx @@ -52,7 +52,7 @@ InjectorFunction dcs2dpl(std::string& ccdbhost) auto runMgr = std::make_shared(); 
runMgr->setCCDBHost(ccdbhost); runMgr->init(); - return [runMgr](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) { + return [runMgr](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) -> bool { // FIXME: Why isn't this function using the timeslice index? // make sure just 2 messages received // if (parts.Size() != 2) { @@ -64,6 +64,7 @@ InjectorFunction dcs2dpl(std::string& ccdbhost) std::string messageData{static_cast(parts.At(1)->GetData()), parts.At(1)->GetSize()}; LOG(info) << "received message " << messageHeader << " of size " << dataSize << " # parts:" << parts.Size(); // << " Payload:" << messageData; runMgr->processMessage(messageHeader, messageData); + return true; }; } diff --git a/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx b/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx index 0502bbae86493..2372ba07a28f7 100644 --- a/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx +++ b/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx @@ -46,7 +46,7 @@ using DetID = o2::detectors::DetID; InjectorFunction dcs2dpl() // InjectorFunction dcs2dpl() { - return [](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) { + return [](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) -> bool { auto *device = services.get().device(); std::string messageHeader{static_cast(parts.At(0)->GetData()), parts.At(0)->GetSize()}; size_t dataSize = parts.At(1)->GetSize(); @@ -57,7 +57,7 @@ InjectorFunction dcs2dpl() auto channel = channelRetriever(outsp, newTimesliceId); if (channel.empty()) { LOG(error) << "No output channel found for OutputSpec " << outsp; - return; + return false; } hdrF.tfCounter = newTimesliceId; // this also hdrF.payloadSerializationMethod = o2::header::gSerializationMethodNone; @@ -91,6 +91,7 @@ InjectorFunction dcs2dpl() outParts.AddPart(std::move(plMessageF)); sendOnChannel(*device, outParts, channel, newTimesliceId); LOG(info) << "Sent CTP counters DPL message" << std::flush; + return true; }; } diff --git a/Detectors/DCS/testWorkflow/src/DCStoDPLconverter.h b/Detectors/DCS/testWorkflow/src/DCStoDPLconverter.h index dab2fd730b360..4384628d01f4c 100644 --- a/Detectors/DCS/testWorkflow/src/DCStoDPLconverter.h +++ b/Detectors/DCS/testWorkflow/src/DCStoDPLconverter.h @@ -53,7 +53,7 @@ using DPCOM = o2::dcs::DataPointCompositeObject; o2f::InjectorFunction dcs2dpl(std::unordered_map& dpid2group, bool fbiFirst, bool verbose = false, int FBIPerInterval = 1) { - return [dpid2group, fbiFirst, verbose, FBIPerInterval](o2::framework::TimingInfo& tinfo, framework::ServiceRegistryRef const& services, fair::mq::Parts& parts, o2f::ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) { + return [dpid2group, fbiFirst, verbose, FBIPerInterval](o2::framework::TimingInfo& tinfo, framework::ServiceRegistryRef const& services, fair::mq::Parts& parts, o2f::ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) -> bool { auto *device = services.get().device(); static std::unordered_map cache; // will keep only the latest measurement in the 1-second wide window for each DPID static std::unordered_map sentToChannel; @@ -69,7 +69,7 @@ o2f::InjectorFunction dcs2dpl(std::unordered_map& dp // check if we got FBI (Master) 
or delta (MasterDelta) if (!parts.Size()) { LOGP(warn, "Empty input recieved at timeslice {}", tinfo.timeslice); - return; + return false; } std::string firstName = std::string((char*)&(reinterpret_cast(parts.At(0)->GetData()))->id); @@ -124,6 +124,7 @@ o2f::InjectorFunction dcs2dpl(std::unordered_map& dp } std::chrono::duration> duration = timerNow - timer; + bool didSendMessages = false; if (duration.count() > 1 && (seenFBI || !fbiFirst)) { // did we accumulate for 1 sec and have we seen FBI if it was requested? std::unordered_map, std::hash> outputs; // in the cache we have the final values of the DPs that we should put in the output @@ -183,6 +184,7 @@ o2f::InjectorFunction dcs2dpl(std::unordered_map& dp } o2f::sendOnChannel(*device, *msgIt.second.get(), msgIt.first, tinfo.timeslice); sentToChannel[msgIt.first]++; + didSendMessages |= msgIt.second->Size() > 0; } timer = timerNow; cache.clear(); @@ -200,6 +202,7 @@ o2f::InjectorFunction dcs2dpl(std::unordered_map& dp } LOGP(info, "{} inputs ({} bytes) of which {} FBI ({} bytes) seen in {:.3f} s | {}", nInp, fmt::group_digits(szInp), nInpFBI, fmt::group_digits(szInpFBI), runtime, sent); } + return didSendMessages; }; } diff --git a/Detectors/DCS/testWorkflow/src/dcs-config-proxy.cxx b/Detectors/DCS/testWorkflow/src/dcs-config-proxy.cxx index 936ceebf1e0f2..9b697da428ad2 100644 --- a/Detectors/DCS/testWorkflow/src/dcs-config-proxy.cxx +++ b/Detectors/DCS/testWorkflow/src/dcs-config-proxy.cxx @@ -67,17 +67,17 @@ auto getDataOriginFromFilename(const std::string& filename) InjectorFunction dcs2dpl(const std::string& acknowledge) { - return [acknowledge](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) { + return [acknowledge](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) -> bool { auto *device = services.get().device(); if (parts.Size() == 0) { // received at ^c, ignore LOG(info) << "ignoring empty message"; - return; + return false; } // make sure just 2 messages received if (parts.Size() != 2) { LOG(error) << "received " << parts.Size() << " instead of 2 expected"; sendAnswer("error0: wrong number of messages", acknowledge, *device); - return; + return false; } std::string filename{static_cast(parts.At(0)->GetData()), parts.At(0)->GetSize()}; size_t filesize = parts.At(1)->GetSize(); @@ -86,7 +86,7 @@ InjectorFunction dcs2dpl(const std::string& acknowledge) if (dataOrigin == o2::header::gDataOriginInvalid) { LOG(error) << "unknown detector for " << filename; sendAnswer(fmt::format("{}:error1: unrecognized filename", filename), acknowledge, *device); - return; + return false; } o2::header::DataHeader hdrF("DCS_CONFIG_FILE", dataOrigin, 0); @@ -96,7 +96,7 @@ InjectorFunction dcs2dpl(const std::string& acknowledge) if (channel.empty()) { LOG(error) << "No output channel found for OutputSpec " << outsp; sendAnswer(fmt::format("{}:error2: no channel to send", filename), acknowledge, *device); - return; + return false; } hdrF.tfCounter = newTimesliceId; @@ -140,6 +140,7 @@ InjectorFunction dcs2dpl(const std::string& acknowledge) sendAnswer(fmt::format("{}:ok", filename), acknowledge, *device); LOG(info) << "Sent DPL message and acknowledgment for file " << filename; + return true; }; } diff --git a/Framework/Core/include/Framework/ExternalFairMQDeviceProxy.h b/Framework/Core/include/Framework/ExternalFairMQDeviceProxy.h index cfa05cd07d1d2..3ff77ae703b9b 100644 --- 
a/Framework/Core/include/Framework/ExternalFairMQDeviceProxy.h +++ b/Framework/Core/include/Framework/ExternalFairMQDeviceProxy.h @@ -23,7 +23,16 @@ namespace o2::framework /// A callback function to retrieve the fair::mq::Channel name to be used for sending /// messages of the specified OutputSpec using ChannelRetriever = std::function; -using InjectorFunction = std::function; +/// The callback which actually does the heavy lifting of converting the input data into +/// DPL messages. The callback is invoked with the following parameters: +/// @param timingInfo is the timing information of the current timeslice +/// @param services is the service registry +/// @param inputs is the list of input messages +/// @param channelRetriever is a callback to retrieve the fair::mq::Channel name to be used for +/// sending the messages +/// @param newTimesliceId is the timeslice ID of the current timeslice +/// @return true if any message were sent, false otherwise +using InjectorFunction = std::function; using ChannelSelector = std::function>& channels)>; struct InputChannelSpec; diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index d1a45a95c6d98..44a9b15d684b3 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -203,7 +203,7 @@ void appendForSending(fair::mq::Device& device, o2::header::Stack&& headerStack, InjectorFunction o2DataModelAdaptor(OutputSpec const& spec, uint64_t startTime, uint64_t /*step*/) { - return [spec](TimingInfo&, ServiceRegistryRef const& ref, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) { + return [spec](TimingInfo&, ServiceRegistryRef const& ref, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) -> bool { auto* device = ref.get().device(); for (int i = 0; i < parts.Size() / 2; ++i) { auto dh = o2::header::get(parts.At(i * 2)->GetData()); @@ -212,6 +212,7 @@ InjectorFunction o2DataModelAdaptor(OutputSpec const& spec, uint64_t startTime, o2::header::Stack headerStack{*dh, dph}; sendOnChannel(*device, std::move(headerStack), std::move(parts.At(i * 2 + 1)), spec, channelRetriever); } + return parts.Size() > 0; }; } @@ -589,7 +590,7 @@ InjectorFunction dplModelAdaptor(std::vector const& filterSpecs, DPL } } } - return; + return didSendParts; }; } @@ -621,6 +622,7 @@ InjectorFunction incrementalConverter(OutputSpec const& spec, o2::header::Serial sendOnChannel(*device, std::move(headerStack), std::move(parts.At(i)), spec, channelRetriever); } + return parts.Size(); }; } diff --git a/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx b/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx index 55e0b4cec89db..bdb4deb443021 100644 --- a/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx +++ b/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx @@ -469,22 +469,22 @@ std::vector defineDataProcessing(ConfigContext const& config) // reads the messages from the output proxy via the out-of-band channel // converter callback for the external FairMQ device proxy ProcessorSpec generator - auto converter = [](TimingInfo&, ServiceRegistryRef const& ref, fair::mq::Parts& inputs, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) { + InjectorFunction converter = [](TimingInfo&, ServiceRegistryRef const& ref, fair::mq::Parts& inputs, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) -> bool { auto* device = 
ref.get().device(); ASSERT_ERROR(inputs.Size() >= 2); if (inputs.Size() < 2) { - return; + return false; } int msgidx = 0; auto dh = o2::header::get(inputs.At(msgidx)->GetData()); if (!dh) { LOG(error) << "data on input " << msgidx << " does not follow the O2 data model, DataHeader missing"; - return; + return false; } auto dph = o2::header::get(inputs.At(msgidx)->GetData()); if (!dph) { LOG(error) << "data on input " << msgidx << " does not follow the O2 data model, DataProcessingHeader missing"; - return; + return false; } // Note: we want to run both the output and input proxy in the same workflow and thus we need // different data identifiers and change the data origin in the forwarding @@ -497,7 +497,7 @@ std::vector defineDataProcessing(ConfigContext const& config) ASSERT_ERROR(!isData || !channelName.empty()); LOG(debug) << "using channel '" << channelName << "' for " << DataSpecUtils::describe(OutputSpec{dh->dataOrigin, dh->dataDescription, dh->subSpecification}); if (channelName.empty()) { - return; + return false; } // make a copy of the header message, get the data header and change origin auto outHeaderMessage = device->NewMessageFor(channelName, 0, inputs.At(msgidx)->GetSize()); @@ -511,6 +511,7 @@ std::vector defineDataProcessing(ConfigContext const& config) output.AddPart(std::move(inputs.At(msgidx + 1))); LOG(debug) << "sending " << DataSpecUtils::describe(OutputSpec{odh->dataOrigin, odh->dataDescription, odh->subSpecification}); o2::framework::sendOnChannel(*device, output, channelName, (size_t)-1); + return output.Size() > 0; }; // we use the same spec to build the configuration string, ideally we would have some helpers diff --git a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx index 98f3139fe7b86..c105a9d0f662c 100644 --- a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx +++ b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx @@ -176,7 +176,7 @@ std::vector defineDataProcessing(ConfigContext const& config) // the compute callback of the producer auto producerCallback = [nRolls, channelName, proxyMode, counter = std::make_shared()](DataAllocator& outputs, ControlService& control, RawDeviceService& rds) { int data = *counter; - //outputs.make(OutputRef{"data", 0}) = data; + // outputs.make(OutputRef{"data", 0}) = data; fair::mq::Device& device = *(rds.device()); auto transport = device.GetChannel(*channelName, 0).Transport(); @@ -327,14 +327,14 @@ std::vector defineDataProcessing(ConfigContext const& config) Inputs checkerInputs; if (proxyMode != ProxyMode::All) { checkerInputs.emplace_back(InputSpec{"datain", ConcreteDataTypeMatcher{"TST", "DATA"}, Lifetime::Timeframe}); - //for (unsigned int i = 0; i < pState->nChannels; i++) { - // checkerInputs.emplace_back(InputSpec{{"datain"}, "TST", "DATA", i, Lifetime::Timeframe}); - //} + // for (unsigned int i = 0; i < pState->nChannels; i++) { + // checkerInputs.emplace_back(InputSpec{{"datain"}, "TST", "DATA", i, Lifetime::Timeframe}); + // } } else { checkerInputs.emplace_back(InputSpec{"datain", ConcreteDataTypeMatcher{"PRX", "DATA"}, Lifetime::Timeframe}); - //for (unsigned int i = 0; i < pState->nChannels; i++) { - // checkerInputs.emplace_back(InputSpec{{"datain"}, "PRX", "DATA", i, Lifetime::Timeframe}); - //} + // for (unsigned int i = 0; i < pState->nChannels; i++) { + // checkerInputs.emplace_back(InputSpec{{"datain"}, "PRX", "DATA", i, Lifetime::Timeframe}); + // } } if (proxyMode != ProxyMode::OnlyOutput) { // the checker is 
not added if the input proxy is skipped @@ -349,22 +349,22 @@ std::vector defineDataProcessing(ConfigContext const& config) // reads the messages from the output proxy via the out-of-band channel // converter callback for the external FairMQ device proxy ProcessorSpec generator - auto converter = [](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& inputs, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) { + InjectorFunction converter = [](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& inputs, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) -> bool { auto* device = services.get().device(); ASSERT_ERROR(inputs.Size() >= 2); if (inputs.Size() < 2) { - return; + return false; } int msgidx = 0; auto dh = o2::header::get(inputs.At(msgidx)->GetData()); if (!dh) { LOG(error) << "data on input " << msgidx << " does not follow the O2 data model, DataHeader missing"; - return; + return false; } auto dph = o2::header::get(inputs.At(msgidx)->GetData()); if (!dph) { LOG(error) << "data on input " << msgidx << " does not follow the O2 data model, DataProcessingHeader missing"; - return; + return false; } // Note: we want to run both the output and input proxy in the same workflow and thus we need // different data identifiers and change the data origin in the forwarding @@ -377,7 +377,7 @@ std::vector defineDataProcessing(ConfigContext const& config) ASSERT_ERROR(!isData || !channelName.empty()); LOG(debug) << "using channel '" << channelName << "' for " << DataSpecUtils::describe(OutputSpec{dh->dataOrigin, dh->dataDescription, dh->subSpecification}); if (channelName.empty()) { - return; + return false; } fair::mq::Parts output; for (; msgidx < inputs.Size(); ++msgidx) { @@ -402,7 +402,7 @@ std::vector defineDataProcessing(ConfigContext const& config) } } o2::framework::sendOnChannel(*device, output, channelName, (size_t)-1); - return; + return output.Size() != 0; }; // we use the same spec to build the configuration string, ideally we would have some helpers diff --git a/Utilities/DataSampling/src/DataSamplingReadoutAdapter.cxx b/Utilities/DataSampling/src/DataSamplingReadoutAdapter.cxx index 745c0cddbf835..66a872e265baf 100644 --- a/Utilities/DataSampling/src/DataSamplingReadoutAdapter.cxx +++ b/Utilities/DataSampling/src/DataSamplingReadoutAdapter.cxx @@ -41,6 +41,7 @@ InjectorFunction dataSamplingReadoutAdapter(OutputSpec const& spec) o2::header::Stack headerStack{dh, dph}; sendOnChannel(*device, std::move(headerStack), std::move(parts.At(i)), spec, channelRetriever); } + return parts.Size() != 0; }; } diff --git a/run/o2sim_mctracks_proxy.cxx b/run/o2sim_mctracks_proxy.cxx index cd032dd35c4fa..8b40c6a647cca 100644 --- a/run/o2sim_mctracks_proxy.cxx +++ b/run/o2sim_mctracks_proxy.cxx @@ -93,8 +93,9 @@ InjectorFunction o2simKinematicsConverter(std::vector const& specs, auto MCTracksMessageCache = std::make_shared(); auto Nparts = std::make_shared(nPerTF); - return [timesliceId, specs, step, nevents, nPerTF, totalEventCounter, eventCounter, TFcounter, Nparts, MCHeadersMessageCache = MCHeadersMessageCache, MCTracksMessageCache = MCTracksMessageCache](TimingInfo& ti, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) mutable { + return [timesliceId, specs, step, nevents, nPerTF, totalEventCounter, eventCounter, TFcounter, Nparts, MCHeadersMessageCache = MCHeadersMessageCache, MCTracksMessageCache = MCTracksMessageCache](TimingInfo& ti, ServiceRegistryRef 
const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool& stop) mutable -> bool { auto*device = services.get().device(); + bool didSendData = false; if (nPerTF < 0) { // if no aggregation requested, forward each message with the DPL header if (*timesliceId != newTimesliceId) { @@ -108,6 +109,7 @@ InjectorFunction o2simKinematicsConverter(std::vector const& specs, // we have to move the incoming data o2::header::Stack headerStack{dh, dph}; sendOnChannel(*device, std::move(headerStack), std::move(parts.At(i)), specs[i], channelRetriever); + didSendData |= parts.At(i)->GetSize() > 0; } *timesliceId += step; } else { @@ -141,6 +143,8 @@ InjectorFunction o2simKinematicsConverter(std::vector const& specs, *eventCounter = 0; sendOnChannel(*device, *MCHeadersMessageCache.get(), channelRetriever(specs[0], *TFcounter), *TFcounter); sendOnChannel(*device, *MCTracksMessageCache.get(), channelRetriever(specs[1], *TFcounter), *TFcounter); + didSendData |= MCHeadersMessageCache->Size() > 0; + didSendData |= MCTracksMessageCache->Size() > 0; ++(*TFcounter); MCHeadersMessageCache->Clear(); MCTracksMessageCache->Clear(); @@ -150,7 +154,7 @@ InjectorFunction o2simKinematicsConverter(std::vector const& specs, // I am done (I don't expect more events to convert); so tell the proxy device to shut-down stop = true; } - return; + return didSendData; }; } From e2f54d002112d3ec4520efd867fcdbbc15f60baa Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 19 Oct 2023 15:55:37 +0200 Subject: [PATCH 004/726] DPL: notify DPL about out of band data being sent by input proxy This is needed to avoid skipping some of the callbacks, in particular the CCDB one. --- .../Core/src/ExternalFairMQDeviceProxy.cxx | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 44a9b15d684b3..7e09bfb408ff1 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -561,13 +561,6 @@ InjectorFunction dplModelAdaptor(std::vector const& filterSpecs, DPL didSendParts = true; sendOnChannel(*device, channelParts, channelName, newTimesliceId); } - // In case we did not send any part at all, we need to rewind by one - // to avoid creating extra timeslices at the end of the run. - auto& decongestion = services.get(); - decongestion.nextEnumerationTimesliceRewinded = !didSendParts; - if (didSendParts == false) { - decongestion.nextEnumerationTimeslice -= 1; - } if (not unmatchedDescriptions.empty()) { if (throwOnUnmatchedInputs) { std::string descriptions; @@ -769,12 +762,13 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, return count; }; - auto dataHandler = [ref = ctx.services(), converter, doInjectMissingData, doPrintSizes, + // Data handler for incoming data. Must return true if it sent any data. 
+ auto dataHandler = [converter, doInjectMissingData, doPrintSizes, outputRoutes = std::move(outputRoutes), control = &ctx.services().get(), deviceState = &ctx.services().get(), timesliceIndex = &ctx.services().get(), - outputChannels = std::move(outputChannels)](TimingInfo& timingInfo, fair::mq::Parts& inputs, int, size_t ci, bool newRun) { + outputChannels = std::move(outputChannels)](ServiceRegistryRef ref, TimingInfo& timingInfo, fair::mq::Parts& inputs, int, size_t ci, bool newRun) -> bool { auto* device = ref.get().device(); // pass a copy of the outputRoutes auto channelRetriever = [&outputRoutes](OutputSpec const& query, DataProcessingHeader::StartTime timeslice) -> std::string { @@ -803,7 +797,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, if (doInjectMissingData) { injectMissingData(*device, inputs, outputRoutes, doInjectMissingData, doPrintSizes); } - converter(timingInfo, ref, inputs, channelRetriever, timesliceIndex->getOldestPossibleOutput().timeslice.value, shouldstop); + bool didSendParts = converter(timingInfo, ref, inputs, channelRetriever, timesliceIndex->getOldestPossibleOutput().timeslice.value, shouldstop); // If we have enough EoS messages, we can stop the device // Notice that this has a number of failure modes: @@ -832,6 +826,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, std::fill(eosPeersCount.begin(), eosPeersCount.end(), 0); control->endOfStream(); } + return didSendParts; }; auto runHandler = [dataHandler, minSHM, sendTFcounter](ProcessingContext& ctx) { @@ -844,6 +839,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, inStopTransition = true; } + bool didSendParts = false; for (size_t ci = 0; ci < channels.size(); ++ci) { std::string const& channel = channels[ci]; int waitTime = channels.size() == 1 ? -1 : 1; @@ -875,7 +871,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, timingInfo.creation = dph->creation; } if (!inStopTransition) { - dataHandler(timingInfo, parts, 0, ci, newRun); + didSendParts |= dataHandler(ctx.services(), timingInfo, parts, 0, ci, newRun); } if (sendTFcounter) { ctx.services().get().send(o2::monitoring::Metric{(uint64_t)timingInfo.tfCounter, "df-sent"}.addTag(o2::monitoring::tags::Key::Subsystem, o2::monitoring::tags::Value::DPL)); @@ -887,6 +883,15 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, waitTime = 0; } } + // In case we did not send any part at all, we need to rewind by one + // to avoid creating extra timeslices. + auto& decongestion = ref.get(); + decongestion.nextEnumerationTimesliceRewinded = !didSendParts; + if (didSendParts) { + ref.get().fakeDispatch(); + } else { + decongestion.nextEnumerationTimeslice -= 1; + } }; return runHandler; From e7ae95ae76c74e8ce0f455b5ce0f6b5be2d5c101 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 19 Oct 2023 16:15:34 +0200 Subject: [PATCH 005/726] DPL: notify DPL about out of band data being sent by input proxy This is needed to avoid skipping some of the callbacks, in particular the CCDB one. 
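For illustration, with the new InjectorFunction signature a converter only has to report whether it actually pushed something out of band, so that the proxy can decide between rewinding the timeslice and faking a dispatch. A minimal sketch of such a converter follows; it is not part of this change, forwardAsIs and spec are illustrative names, and the rest reuses RawDeviceService, ChannelRetriever and sendOnChannel exactly as in the proxies touched above:

    InjectorFunction forwardAsIs(OutputSpec const& spec)
    {
      return [spec](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts,
                    ChannelRetriever channelRetriever, size_t newTimesliceId, bool& /*stop*/) -> bool {
        auto* device = services.get<RawDeviceService>().device();
        if (parts.Size() == 0) {
          return false; // nothing received, nothing sent out of band
        }
        auto channel = channelRetriever(spec, newTimesliceId);
        if (channel.empty()) {
          return false; // no route for this OutputSpec, nothing sent
        }
        sendOnChannel(*device, parts, channel, newTimesliceId);
        return true; // something did go out of band
      };
    }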
--- .../Core/src/ExternalFairMQDeviceProxy.cxx | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 44a9b15d684b3..6a10a7e8a0111 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -561,13 +561,6 @@ InjectorFunction dplModelAdaptor(std::vector const& filterSpecs, DPL didSendParts = true; sendOnChannel(*device, channelParts, channelName, newTimesliceId); } - // In case we did not send any part at all, we need to rewind by one - // to avoid creating extra timeslices at the end of the run. - auto& decongestion = services.get(); - decongestion.nextEnumerationTimesliceRewinded = !didSendParts; - if (didSendParts == false) { - decongestion.nextEnumerationTimeslice -= 1; - } if (not unmatchedDescriptions.empty()) { if (throwOnUnmatchedInputs) { std::string descriptions; @@ -769,12 +762,13 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, return count; }; - auto dataHandler = [ref = ctx.services(), converter, doInjectMissingData, doPrintSizes, + // Data handler for incoming data. Must return true if it sent any data. + auto dataHandler = [converter, doInjectMissingData, doPrintSizes, outputRoutes = std::move(outputRoutes), control = &ctx.services().get(), deviceState = &ctx.services().get(), timesliceIndex = &ctx.services().get(), - outputChannels = std::move(outputChannels)](TimingInfo& timingInfo, fair::mq::Parts& inputs, int, size_t ci, bool newRun) { + outputChannels = std::move(outputChannels)](ServiceRegistryRef ref, TimingInfo& timingInfo, fair::mq::Parts& inputs, int, size_t ci, bool newRun) -> bool { auto* device = ref.get().device(); // pass a copy of the outputRoutes auto channelRetriever = [&outputRoutes](OutputSpec const& query, DataProcessingHeader::StartTime timeslice) -> std::string { @@ -803,7 +797,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, if (doInjectMissingData) { injectMissingData(*device, inputs, outputRoutes, doInjectMissingData, doPrintSizes); } - converter(timingInfo, ref, inputs, channelRetriever, timesliceIndex->getOldestPossibleOutput().timeslice.value, shouldstop); + bool didSendParts = converter(timingInfo, ref, inputs, channelRetriever, timesliceIndex->getOldestPossibleOutput().timeslice.value, shouldstop); // If we have enough EoS messages, we can stop the device // Notice that this has a number of failure modes: @@ -832,6 +826,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, std::fill(eosPeersCount.begin(), eosPeersCount.end(), 0); control->endOfStream(); } + return didSendParts; }; auto runHandler = [dataHandler, minSHM, sendTFcounter](ProcessingContext& ctx) { @@ -844,6 +839,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, inStopTransition = true; } + bool didSendParts = false; for (size_t ci = 0; ci < channels.size(); ++ci) { std::string const& channel = channels[ci]; int waitTime = channels.size() == 1 ? 
-1 : 1; @@ -875,7 +871,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, timingInfo.creation = dph->creation; } if (!inStopTransition) { - dataHandler(timingInfo, parts, 0, ci, newRun); + didSendParts |= dataHandler(ctx.services(), timingInfo, parts, 0, ci, newRun); } if (sendTFcounter) { ctx.services().get().send(o2::monitoring::Metric{(uint64_t)timingInfo.tfCounter, "df-sent"}.addTag(o2::monitoring::tags::Key::Subsystem, o2::monitoring::tags::Value::DPL)); @@ -887,6 +883,15 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, waitTime = 0; } } + // In case we did not send any part at all, we need to rewind by one + // to avoid creating extra timeslices. + auto& decongestion = ctx.services().get(); + decongestion.nextEnumerationTimesliceRewinded = !didSendParts; + if (didSendParts) { + ctx.services().get().fakeDispatch(); + } else { + decongestion.nextEnumerationTimeslice -= 1; + } }; return runHandler; From 7e04743709df64b14d9a510985a252088b61d266 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 20 Oct 2023 01:30:44 +0200 Subject: [PATCH 006/726] Avoid temporary objects --- Detectors/Raw/TFReaderDD/src/TFReaderSpec.cxx | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Detectors/Raw/TFReaderDD/src/TFReaderSpec.cxx b/Detectors/Raw/TFReaderDD/src/TFReaderSpec.cxx index a75254c490c6a..4c916fc2e70d4 100644 --- a/Detectors/Raw/TFReaderDD/src/TFReaderSpec.cxx +++ b/Detectors/Raw/TFReaderDD/src/TFReaderSpec.cxx @@ -465,28 +465,28 @@ o2f::DataProcessorSpec o2::rawdd::getTFReaderSpec(o2::rawdd::TFReaderInp& rinp) rinp.hdVec.emplace_back(o2h::DataHeader{"CELLS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent rinp.hdVec.emplace_back(o2h::DataHeader{"CELLTRIGREC", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent } else if (id == DetID::CPV) { - spec.outputs.emplace_back(o2f::OutputSpec{DetID::getDataOrigin(id), "DIGITS", 0}); - spec.outputs.emplace_back(o2f::OutputSpec{DetID::getDataOrigin(id), "DIGITTRIGREC", 0}); - spec.outputs.emplace_back(o2f::OutputSpec{DetID::getDataOrigin(id), "RAWHWERRORS", 0}); - rinp.hdVec.emplace_back(o2h::DataHeader{"DIGITS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"DIGITTRIGREC", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"RAWHWERRORS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent + spec.outputs.emplace_back(DetID::getDataOrigin(id), "DIGITS", 0); + spec.outputs.emplace_back(DetID::getDataOrigin(id), "DIGITTRIGREC", 0); + spec.outputs.emplace_back(DetID::getDataOrigin(id), "RAWHWERRORS", 0); + rinp.hdVec.emplace_back("DIGITS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("DIGITTRIGREC", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("RAWHWERRORS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent } else if (id == DetID::EMC) { spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "CELLS"}}); spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "CELLSTRGR"}}); spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "DECODERERR"}}); 
- rinp.hdVec.emplace_back(o2h::DataHeader{"CELLS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"CELLSTRGR", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"DECODERERR", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("CELLS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("CELLSTRGR", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("DECODERERR", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent } else if (id == DetID::FOC) { spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "PADLAYERS"}}); spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "PIXELHITS"}}); spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "PIXELCHIPS"}}); spec.outputs.emplace_back(o2f::OutputSpec{o2f::ConcreteDataTypeMatcher{DetID::getDataOrigin(id), "TRIGGERS"}}); - rinp.hdVec.emplace_back(o2h::DataHeader{"PADLAYERS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"PIXELHITS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"PIXELCHIPS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent - rinp.hdVec.emplace_back(o2h::DataHeader{"TRIGGERS", DetID::getDataOrigin(id), 0, 0}); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("PADLAYERS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("PIXELHITS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("PIXELCHIPS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent + rinp.hdVec.emplace_back("TRIGGERS", DetID::getDataOrigin(id), 0, 0); // in abcence of real data this will be sent } } } From 375aab351c6acbab32fe566385bcf646297a8b56 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:34:18 +0200 Subject: [PATCH 007/726] DPL: do not add extra DataHeader to EoS Historically we had to have a DataHeader for EoS in any case. This is not needed anymore and it might explain some weird behavior we see online. 
--- .../Core/src/ExternalFairMQDeviceProxy.cxx | 23 ++++--------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 4d9d4c11df49f..d9f3a3b3e89ac 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -165,7 +165,8 @@ void sendOnChannel(fair::mq::Device& device, fair::mq::MessagePtr&& headerMessag { // const auto* dph = o2::header::get( *reinterpret_cast(headerMessage->GetData()) ); const auto* dph = o2::header::get(headerMessage->GetData()); - if (!dph) { + const auto* sih = o2::header::get(headerMessage->GetData()); + if (!dph && !sih) { LOG(error) << "Header does not follow the O2 data model, DataProcessingHeader missing"; return; } @@ -963,19 +964,11 @@ DataProcessorSpec specifyFairMQDeviceOutputProxy(char const* name, if (channelName != outputChannelName) { continue; } - DataHeader dh; - dh.dataOrigin = "DPL"; - dh.dataDescription = "EOS"; - dh.subSpecification = 0; - dh.payloadSize = 0; - dh.payloadSerializationMethod = o2::header::gSerializationMethodNone; - dh.tfCounter = 0; - dh.firstTForbit = 0; SourceInfoHeader sih; sih.state = InputChannelState::Completed; // allocate the header message using the underlying transport of the channel auto channelAlloc = o2::pmr::getTransportAllocator(channelInfo.second[0].Transport()); - auto headerMessage = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, *lastDataProcessingHeader, sih}); + auto headerMessage = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); fair::mq::Parts out; out.AddPart(std::move(headerMessage)); // add empty payload message @@ -1071,19 +1064,11 @@ DataProcessorSpec specifyFairMQDeviceMultiOutputProxy(char const* name, if (!checkChannel(channelName)) { continue; } - DataHeader dh; - dh.dataOrigin = "DPL"; - dh.dataDescription = "EOS"; - dh.subSpecification = 0; - dh.payloadSize = 0; - dh.payloadSerializationMethod = o2::header::gSerializationMethodNone; - dh.tfCounter = 0; - dh.firstTForbit = 0; SourceInfoHeader sih; sih.state = InputChannelState::Completed; // allocate the header message using the underlying transport of the channel auto channelAlloc = o2::pmr::getTransportAllocator(channelInfo.second[0].Transport()); - auto headerMessage = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, *lastDataProcessingHeader, sih}); + auto headerMessage = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); fair::mq::Parts out; out.AddPart(std::move(headerMessage)); // add empty payload message From 091f9d18c8b66d4fcaf8a9971a7ea8c869f6b948 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 20 Oct 2023 15:37:59 +0200 Subject: [PATCH 008/726] Drop need to include ptree in ConfigurableParam.h --- Common/SimConfig/test/testSimCutParam.cxx | 1 + .../include/CommonUtils/ConfigurableParam.h | 59 +++++++------------ Common/Utils/src/ConfigurableParam.cxx | 35 +++++++++-- .../MUON/MCH/Raw/test/testClosureCoDec.cxx | 1 + .../MCH/Simulation/test/testDigitizer.cxx | 1 + .../MCH/Triggering/test/testEventFinder.cxx | 1 + Detectors/TPC/base/test/testTPCParameters.cxx | 7 +-- .../include/TPCCalibration/IDCAverageGroup.h | 1 + .../include/TPCCalibration/IDCFactorization.h | 1 + .../include/TPCCalibration/SACFactorization.h | 1 + .../calibration/src/IDCGroupingParameter.cxx | 2 + .../TPC/reconstruction/macro/makeIonTail.C | 1 + Detectors/TPC/simulation/macro/toyCluster.C | 1 + 
.../TPC/workflow/src/ApplyCCDBCalibSpec.cxx | 1 + macro/o2sim.C | 1 + 15 files changed, 66 insertions(+), 48 deletions(-) diff --git a/Common/SimConfig/test/testSimCutParam.cxx b/Common/SimConfig/test/testSimCutParam.cxx index 7a17ae9d1f2cf..468d76429cbba 100644 --- a/Common/SimConfig/test/testSimCutParam.cxx +++ b/Common/SimConfig/test/testSimCutParam.cxx @@ -15,6 +15,7 @@ #include #include "SimConfig/SimParams.h" #include "CommonUtils/ConfigurableParam.h" +#include using namespace o2::conf; diff --git a/Common/Utils/include/CommonUtils/ConfigurableParam.h b/Common/Utils/include/CommonUtils/ConfigurableParam.h index 08356dc462de8..4314308e9f93a 100644 --- a/Common/Utils/include/CommonUtils/ConfigurableParam.h +++ b/Common/Utils/include/CommonUtils/ConfigurableParam.h @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -181,9 +181,6 @@ class ConfigurableParam static void setInputDir(const std::string& d) { sInputDir = d; } static void setOutputDir(const std::string& d) { sOutputDir = d; } - static boost::property_tree::ptree readINI(std::string const& filepath); - static boost::property_tree::ptree readJSON(std::string const& filepath); - static boost::property_tree::ptree readConfigFile(std::string const& filepath); static bool configFileExists(std::string const& filepath); // writes a human readable JSON file of all parameters @@ -195,10 +192,12 @@ class ConfigurableParam template static T getValueAs(std::string key) { - if (!sIsFullyInitialized) { - initialize(); - } - return sPtree->get(key); + return [](auto* tree, const std::string& key) -> T { + if (!sIsFullyInitialized) { + initialize(); + } + return tree->template get(key); + }(sPtree, key); } template @@ -207,19 +206,21 @@ class ConfigurableParam if (!sIsFullyInitialized) { initialize(); } - assert(sPtree); - try { - auto key = mainkey + "." + subkey; - if (sPtree->get_optional(key).is_initialized()) { - sPtree->put(key, x); - auto changed = updateThroughStorageMap(mainkey, subkey, typeid(T), (void*)&x); - if (changed != EParamUpdateStatus::Failed) { - sValueProvenanceMap->find(key)->second = kRT; // set to runtime + return [&subkey, &x, &mainkey](auto* tree) -> void { + assert(tree); + try { + auto key = mainkey + "." 
+ subkey; + if (tree->template get_optional(key).is_initialized()) { + tree->put(key, x); + auto changed = updateThroughStorageMap(mainkey, subkey, typeid(T), (void*)&x); + if (changed != EParamUpdateStatus::Failed) { + sValueProvenanceMap->find(key)->second = kRT; // set to runtime + } } + } catch (std::exception const& e) { + std::cerr << "Error in setValue (T) " << e.what() << "\n"; } - } catch (std::exception const& e) { - std::cerr << "Error in setValue (T) " << e.what() << "\n"; - } + }(sPtree); } static void setProvenance(std::string const& mainkey, std::string const& subkey, EParamProvenance p) @@ -241,25 +242,7 @@ class ConfigurableParam // specialized for std::string // which means that the type will be converted internally - static void setValue(std::string const& key, std::string const& valuestring) - { - if (!sIsFullyInitialized) { - initialize(); - } - assert(sPtree); - try { - if (sPtree->get_optional(key).is_initialized()) { - sPtree->put(key, valuestring); - auto changed = updateThroughStorageMapWithConversion(key, valuestring); - if (changed != EParamUpdateStatus::Failed) { - sValueProvenanceMap->find(key)->second = kRT; // set to runtime - } - } - } catch (std::exception const& e) { - std::cerr << "Error in setValue (string) " << e.what() << "\n"; - } - } - + static void setValue(std::string const& key, std::string const& valuestring); static void setEnumValue(const std::string&, const std::string&); static void setArrayValue(const std::string&, const std::string&); diff --git a/Common/Utils/src/ConfigurableParam.cxx b/Common/Utils/src/ConfigurableParam.cxx index e581970fc81fb..29e1803096f33 100644 --- a/Common/Utils/src/ConfigurableParam.cxx +++ b/Common/Utils/src/ConfigurableParam.cxx @@ -43,6 +43,10 @@ namespace o2 { namespace conf { +boost::property_tree::ptree readINI(std::string const& filepath); +boost::property_tree::ptree readJSON(std::string const& filepath); +boost::property_tree::ptree readConfigFile(std::string const& filepath); + std::vector* ConfigurableParam::sRegisteredParamClasses = nullptr; boost::property_tree::ptree* ConfigurableParam::sPtree = nullptr; std::map>* ConfigurableParam::sKeyToStorageMap = nullptr; @@ -199,9 +203,9 @@ bool ConfigurableParam::configFileExists(std::string const& filepath) // ------------------------------------------------------------------ -boost::property_tree::ptree ConfigurableParam::readConfigFile(std::string const& filepath) +boost::property_tree::ptree readConfigFile(std::string const& inputDir, std::string const& filepath) { - auto inpfilename = o2::utils::Str::concat_string(sInputDir, filepath); + auto inpfilename = o2::utils::Str::concat_string(inputDir, filepath); if (!std::filesystem::exists(inpfilename)) { LOG(fatal) << inpfilename << " : config file does not exist!"; } @@ -219,9 +223,28 @@ boost::property_tree::ptree ConfigurableParam::readConfigFile(std::string const& return pt; } +void ConfigurableParam::setValue(std::string const& key, std::string const& valuestring) +{ + if (!sIsFullyInitialized) { + initialize(); + } + assert(sPtree); + try { + if (sPtree->get_optional(key).is_initialized()) { + sPtree->put(key, valuestring); + auto changed = updateThroughStorageMapWithConversion(key, valuestring); + if (changed != EParamUpdateStatus::Failed) { + sValueProvenanceMap->find(key)->second = kRT; // set to runtime + } + } + } catch (std::exception const& e) { + std::cerr << "Error in setValue (string) " << e.what() << "\n"; + } +} + // ------------------------------------------------------------------ 
-boost::property_tree::ptree ConfigurableParam::readINI(std::string const& filepath) +boost::property_tree::ptree readINI(std::string const& filepath) { boost::property_tree::ptree pt; try { @@ -237,7 +260,7 @@ boost::property_tree::ptree ConfigurableParam::readINI(std::string const& filepa // ------------------------------------------------------------------ -boost::property_tree::ptree ConfigurableParam::readJSON(std::string const& filepath) +boost::property_tree::ptree readJSON(std::string const& filepath) { boost::property_tree::ptree pt; @@ -258,7 +281,7 @@ void ConfigurableParam::writeJSON(std::string const& filename, std::string const LOG(info) << "ignoring writing of json file " << filename; return; } - initPropertyTree(); // update the boost tree before writing + initPropertyTree(); // update the boost tree before writing auto outfilename = o2::utils::Str::concat_string(sOutputDir, filename); if (!keyOnly.empty()) { // write ini for selected key only try { @@ -409,7 +432,7 @@ void ConfigurableParam::updateFromFile(std::string const& configFile, std::strin return; } - boost::property_tree::ptree pt = readConfigFile(cfgfile); + boost::property_tree::ptree pt = readConfigFile(ConfigurableParam::sInputDir, cfgfile); std::vector> keyValPairs; auto request = o2::utils::Str::tokenize(paramsList, ',', true); diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx index e545c15025d8e..060d3b816cc8e 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx @@ -16,6 +16,7 @@ #define BOOST_TEST_DYN_LINK #include +#include #include "DetectorsRaw/HBFUtils.h" #include "DetectorsRaw/RawFileWriter.h" #include "Framework/Logger.h" diff --git a/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx b/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx index 791ab8e7a3909..2a550220bb137 100644 --- a/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx +++ b/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx @@ -31,6 +31,7 @@ #include "TGeoManager.h" #include "boost/format.hpp" #include +#include #include #include diff --git a/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx b/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx index eb786fc1bb068..60275f2cad2f4 100644 --- a/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx +++ b/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx @@ -27,6 +27,7 @@ #include #include +#include #include "CommonUtils/ConfigurableParam.h" #include "SimulationDataFormat/MCCompLabel.h" diff --git a/Detectors/TPC/base/test/testTPCParameters.cxx b/Detectors/TPC/base/test/testTPCParameters.cxx index 14b84da2fbf62..98713e6f5ba57 100644 --- a/Detectors/TPC/base/test/testTPCParameters.cxx +++ b/Detectors/TPC/base/test/testTPCParameters.cxx @@ -23,9 +23,9 @@ #include "TPCBase/ParameterGas.h" #include #include -namespace o2 -{ -namespace tpc +#include + +namespace o2::tpc { constexpr float NominalTimeBin = 8 * o2::constants::lhc::LHCBunchSpacingNS * 1e-3; @@ -367,5 +367,4 @@ BOOST_AUTO_TEST_CASE(ParameterGEM_test2) BOOST_CHECK_CLOSE(o2::conf::ConfigurableParam::getValueAs("TPCGEMParam.KappaStack"), 32, 1e-12); BOOST_CHECK_CLOSE(o2::conf::ConfigurableParam::getValueAs("TPCGEMParam.EfficiencyStack"), 33, 1e-3); } -} // namespace tpc } // namespace o2 diff --git a/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h b/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h index fecbe9f14f18b..dd9a5fc257935 100644 --- 
a/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h @@ -20,6 +20,7 @@ #include "TPCCalibration/IDCAverageGroupBase.h" #include "TPCBase/Sector.h" #include "TPCBase/CalDet.h" +#include #include #include "Rtypes.h" diff --git a/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h b/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h index 4e9d3ab542ff3..37f4e7c17aa8a 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h @@ -24,6 +24,7 @@ #include "TPCCalibration/IDCContainer.h" #include "TPCCalibration/IDCGroupHelperSector.h" #include "DataFormatsTPC/Defs.h" +#include namespace o2::tpc { diff --git a/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h b/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h index b1b4430c11a00..7141750b8ce37 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h @@ -21,6 +21,7 @@ #include "Rtypes.h" #include "TPCCalibration/IDCContainer.h" #include "DataFormatsTPC/Defs.h" +#include namespace o2::tpc { diff --git a/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx b/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx index bbecb4df8f8fd..895b6957d8a01 100644 --- a/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx +++ b/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx @@ -12,6 +12,8 @@ #include "TPCCalibration/IDCGroupingParameter.h" #include "Framework/Logger.h" #include "Algorithm/RangeTokenizer.h" +#include +#include #include using namespace o2::tpc; diff --git a/Detectors/TPC/reconstruction/macro/makeIonTail.C b/Detectors/TPC/reconstruction/macro/makeIonTail.C index 98db6540d20f7..d8d1be6830626 100644 --- a/Detectors/TPC/reconstruction/macro/makeIonTail.C +++ b/Detectors/TPC/reconstruction/macro/makeIonTail.C @@ -32,6 +32,7 @@ #include "TPCBase/CRUCalibHelpers.h" #include "TPCBase/CRU.h" #include "CommonUtils/TreeStreamRedirector.h" +#include using namespace o2::tpc; size_t digitsInSaturateion(std::vector& digits, bool correctCharge = false, CalPad* pedestals = nullptr, o2::utils::TreeStreamRedirector* stream = nullptr); diff --git a/Detectors/TPC/simulation/macro/toyCluster.C b/Detectors/TPC/simulation/macro/toyCluster.C index 257adaf5f79fc..2087a7773fe22 100644 --- a/Detectors/TPC/simulation/macro/toyCluster.C +++ b/Detectors/TPC/simulation/macro/toyCluster.C @@ -58,6 +58,7 @@ #include "TPCReconstruction/HwClusterer.h" #include "TPCSimulation/GEMAmplification.h" #endif +#include using namespace o2::tpc; void fillTPCHits(const float theta, const float phi, const float dedx, std::vector& hitGroupSector, std::pair& trackInfo); diff --git a/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx b/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx index 37429a04c4b34..9c588ff50a45c 100644 --- a/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx +++ b/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx @@ -19,6 +19,7 @@ #include "TPCBase/ParameterGas.h" #include "DataFormatsTPC/LtrCalibData.h" #include "TPCWorkflow/ApplyCCDBCalibSpec.h" +#include using namespace o2::framework; diff --git a/macro/o2sim.C b/macro/o2sim.C index 5257a225d3ed9..f610be730eeb7 100644 --- a/macro/o2sim.C +++ b/macro/o2sim.C @@ -39,6 +39,7 @@ #include #endif #include "migrateSimFiles.C" +#include void check_notransport() { From 
63a61b8a97fde7d52856b07bdff6de05940748ab Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 10:26:44 +0200 Subject: [PATCH 009/726] Modernize Headers/TimeStamp.h --- DataFormats/Headers/include/Headers/TimeStamp.h | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/DataFormats/Headers/include/Headers/TimeStamp.h b/DataFormats/Headers/include/Headers/TimeStamp.h index bd4bd8c8b94e9..5867cf6bb4949 100644 --- a/DataFormats/Headers/include/Headers/TimeStamp.h +++ b/DataFormats/Headers/include/Headers/TimeStamp.h @@ -23,9 +23,7 @@ #include #include // for std::integral_constant -namespace o2 -{ -namespace header +namespace o2::header { // https://lhc-machine-outreach.web.cern.ch/lhc-machine-outreach/collisions.htm @@ -159,7 +157,7 @@ class TimeStamp } // TODO: implement transformation from one unit to the other - //void transform(const TimeUnitID& unit) { + // void transform(const TimeUnitID& unit) { // if (mUnit == unit) return; // ... //} @@ -182,7 +180,6 @@ class TimeStamp }; }; }; -} //namespace header -} //namespace o2 +} // namespace o2::header #endif From cfe60f1bf2604d872a706a3d600ef8a93038c54c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 10:26:45 +0200 Subject: [PATCH 010/726] Fix type comparison for C++20 The two are effectively different types and there is nothing which allows implicitly casting between them. Also add some protection making sure we notice if we change one of the two sides inadvertedly. --- DataFormats/Headers/include/Headers/TimeStamp.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/DataFormats/Headers/include/Headers/TimeStamp.h b/DataFormats/Headers/include/Headers/TimeStamp.h index 5867cf6bb4949..b6d2b26ea4822 100644 --- a/DataFormats/Headers/include/Headers/TimeStamp.h +++ b/DataFormats/Headers/include/Headers/TimeStamp.h @@ -138,12 +138,14 @@ class TimeStamp static_assert(std::is_same::value && std::is_same::value, "only clock and duration types defining the rep and period member types are allowed"); using duration = std::chrono::duration; - if (mUnit == sClockLHC) { + static_assert(sizeof(mUnit) == sizeof(sClockLHC), "size mismatch of mUnit and sClockLHC"); + if (memcmp(&mUnit, &sClockLHC, sizeof(sClockLHC)) == 0) { // cast each part individually, if the precision of the return type // is smaller the values are simply truncated return std::chrono::duration_cast(LHCOrbitClock::duration(mPeriod) + LHCBunchClock::duration(mBCNumber)); } - if (mUnit == sMicroSeconds) { + static_assert(sizeof(mUnit) == sizeof(sMicroSeconds), "size mismatch of mUnit and sMicroSeconds"); + if (memcmp(&mUnit, &sMicroSeconds, sizeof(sMicroSeconds)) == 0) { // TODO: is there a better way to mark the subticks invalid for the // micro seconds representation? 
First step is probably to remove/rename the // variable From 60e7d3b6641f1044eb208cba10abff3318770631 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 11:27:52 +0200 Subject: [PATCH 011/726] Adhere to coding conventions --- Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx b/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx index 2372ba07a28f7..a1f9b385fd81a 100644 --- a/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx +++ b/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx @@ -47,7 +47,7 @@ InjectorFunction dcs2dpl() // InjectorFunction dcs2dpl() { return [](TimingInfo&, ServiceRegistryRef const& services, fair::mq::Parts& parts, ChannelRetriever channelRetriever, size_t newTimesliceId, bool&) -> bool { - auto *device = services.get().device(); + auto* device = services.get().device(); std::string messageHeader{static_cast(parts.At(0)->GetData()), parts.At(0)->GetSize()}; size_t dataSize = parts.At(1)->GetSize(); std::string messageData{static_cast(parts.At(1)->GetData()), parts.At(1)->GetSize()}; From b1074f9135dd69ce6f088e6e323880567222a84b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 11:27:52 +0200 Subject: [PATCH 012/726] Notify DPL about dispatched out-of-band data --- Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx b/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx index a1f9b385fd81a..73615bc5c47b4 100644 --- a/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx +++ b/Detectors/CTP/workflowScalers/src/ctp-qc-proxy.cxx @@ -91,6 +91,7 @@ InjectorFunction dcs2dpl() outParts.AddPart(std::move(plMessageF)); sendOnChannel(*device, outParts, channel, newTimesliceId); LOG(info) << "Sent CTP counters DPL message" << std::flush; + services.get().fakeDispatch(); return true; }; } From 12d0af3ffb6bf3b1f49f61d39126f93a459ab95d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 11:35:23 +0200 Subject: [PATCH 013/726] Ignore checking for FairMQ version * Version is way too obsolete in any case. * Anything which is not the version in alidist is unsupported in any case. * It breaks whenever git is not able to determine a valid version. --- dependencies/O2Dependencies.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dependencies/O2Dependencies.cmake b/dependencies/O2Dependencies.cmake index 4d90d59c9f2be..f4d591e07ab65 100644 --- a/dependencies/O2Dependencies.cmake +++ b/dependencies/O2Dependencies.cmake @@ -88,7 +88,7 @@ set_package_properties(Microsoft.GSL TYPE REQUIRED PURPOSE "Mainly for its span") -find_package(FairMQ 1.4.41 CONFIG) +find_package(FairMQ CONFIG) set_package_properties(FairMQ PROPERTIES TYPE REQUIRED) # find_package(protobuf CONFIG) From d9e40a2194fc468bdaa5d0902e642688423fe8e4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 12:28:27 +0200 Subject: [PATCH 014/726] DPL: propaedeutic for better GUI shutdown This will allow in the GUI to know wether the children have all shutdown and will keep displaying the debug gui, even when clicking on the close button, until that is the case. 
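The intended use on the GUI side is roughly the following render loop; only the GuiCallbackContext members visible in the hunk below (guiQuitRequested, allChildrenGone, callback) are real, the loop itself is a sketch of the assumed usage:

    void guiLoop(GuiCallbackContext& gui)
    {
      bool closeRequested = false;
      while (true) {
        closeRequested = closeRequested || *gui.guiQuitRequested;
        // keep drawing the debug GUI after a close request until every child has exited
        if (closeRequested && *gui.allChildrenGone) {
          break;
        }
        gui.callback(); // draw one frame
      }
    }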
--- Framework/Core/include/Framework/GuiCallbackContext.h | 1 + Framework/Core/src/runDataProcessing.cxx | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Framework/Core/include/Framework/GuiCallbackContext.h b/Framework/Core/include/Framework/GuiCallbackContext.h index d1efd5717a4de..9c0f21f3aec9c 100644 --- a/Framework/Core/include/Framework/GuiCallbackContext.h +++ b/Framework/Core/include/Framework/GuiCallbackContext.h @@ -43,6 +43,7 @@ struct GuiCallbackContext { DebugGUI* plugin = nullptr; void* window = nullptr; bool* guiQuitRequested = nullptr; + bool* allChildrenGone = nullptr; std::function callback; std::set renderers; }; diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index c3d6dc1a7d4b7..0082a79906c61 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1424,6 +1424,8 @@ int runStateMachine(DataProcessorSpecs const& workflow, uv_timer_t metricDumpTimer; metricDumpTimer.data = &serverContext; + bool allChildrenGone = false; + guiContext.allChildrenGone = &allChildrenGone; while (true) { // If control forced some transition on us, we push it to the queue. @@ -2093,7 +2095,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, driverInfo.sigchldRequested = false; processChildrenOutput(driverInfo, infos, runningWorkflow.devices, controls); hasError = processSigChild(infos, runningWorkflow.devices); - bool allChildrenGone = areAllChildrenGone(infos); + allChildrenGone = areAllChildrenGone(infos); bool canExit = checkIfCanExit(infos); bool supposedToQuit = (guiQuitRequested || canExit || graceful_exit); From 25910215071a38917b1f394d259517510d961434 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:00:41 +0200 Subject: [PATCH 015/726] DPL: dump walltime for last iteration This will dump the elapsed_time_ms metric in the performance metric providing every 5s the value of the last completed iteration. --- Framework/Core/src/runDataProcessing.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index c3d6dc1a7d4b7..ffdb0173abbff 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1152,6 +1152,7 @@ std::vector getDumpableMetrics() dumpableMetrics.emplace_back("^table-bytes-.*"); dumpableMetrics.emplace_back("^total-timeframes.*"); dumpableMetrics.emplace_back("^device_state.*"); + dumpableMetrics.emplace_back("^elapsed_time_ms$"); return dumpableMetrics; } From 188e115115681e0d2266baa747acf1adbe643b0d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:46:32 +0200 Subject: [PATCH 016/726] DPL: dump walltime for last iteration This will dump the elapsed_time_ms metric in the performance metric providing every 5s the value of the last completed iteration. 
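To make the unit handling explicit (this sketch is not part of the patch series): uv_hrtime() reports nanoseconds, so the diffs below derive the wall time in milliseconds by dividing the difference by 1,000,000 before publishing it as elapsed_time_ms and accumulating it into the total. A minimal standalone sketch of just that conversion, with a hypothetical helper name (elapsedMs):

#include <cstdint>
#include <cstdio>

// uv_hrtime()-style timestamps are nanoseconds; convert a difference to milliseconds.
int64_t elapsedMs(uint64_t tStartNs, uint64_t tEndNs)
{
  return static_cast<int64_t>((tEndNs - tStartNs) / 1000000);
}

int main()
{
  // 3.5e9 ns - 1.0e9 ns = 2.5e9 ns elapsed -> 2500 ms
  std::printf("%lld ms\n", static_cast<long long>(elapsedMs(1000000000ULL, 3500000000ULL)));
}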
--- Framework/Core/include/Framework/DataProcessingStats.h | 1 + Framework/Core/src/CommonServices.cxx | 4 ++++ Framework/Core/src/DataProcessingDevice.cxx | 8 ++++++-- Framework/Core/src/runDataProcessing.cxx | 1 + 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingStats.h b/Framework/Core/include/Framework/DataProcessingStats.h index 6d9bceeeafa6f..ae8ac29d89e15 100644 --- a/Framework/Core/include/Framework/DataProcessingStats.h +++ b/Framework/Core/include/Framework/DataProcessingStats.h @@ -30,6 +30,7 @@ enum struct ProcessingStatsId : short { INCOMPLETE_INPUTS, TOTAL_INPUTS, LAST_ELAPSED_TIME_MS, + TOTAL_WALL_TIME_MS, LAST_PROCESSED_SIZE, TOTAL_PROCESSED_SIZE, TOTAL_SIGUSR1, diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 460702d654213..44d63cd2ab1f5 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -748,6 +748,10 @@ o2::framework::ServiceSpec CommonServices::dataProcessingStats() .metricId = (int)ProcessingStatsId::LAST_ELAPSED_TIME_MS, .kind = Kind::UInt64, .minPublishInterval = quickUpdateInterval}, + MetricSpec{.name = "total_wall_time_ms", + .metricId = (int)ProcessingStatsId::TOTAL_WALL_TIME_MS, + .kind = Kind::UInt64, + .minPublishInterval = quickUpdateInterval}, MetricSpec{.name = "last_processed_input_size_byte", .metricId = (int)ProcessingStatsId::LAST_PROCESSED_SIZE, .kind = Kind::UInt64, diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 23bdc9e75806b..b39607366adec 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2179,9 +2179,13 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v buffer[record.size()] = 0; states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), (int)(record.size() + buffer - relayerSlotState), relayerSlotState}); uint64_t tEnd = uv_hrtime(); - stats.updateStats({(int)ProcessingStatsId::LAST_ELAPSED_TIME_MS, DataProcessingStats::Op::Set, (int64_t)(tEnd - tStart)}); + // tEnd and tStart are in nanoseconds according to https://docs.libuv.org/en/v1.x/misc.html#c.uv_hrtime + int64_t wallTimeMs = (tEnd - tStart) / 1000000; + stats.updateStats({(int)ProcessingStatsId::LAST_ELAPSED_TIME_MS, DataProcessingStats::Op::Set, wallTimeMs}); + // Sum up the total wall time, in milliseconds. + stats.updateStats({(int)ProcessingStatsId::TOTAL_WALL_TIME_MS, DataProcessingStats::Op::Add, wallTimeMs}); // The time interval is in seconds while tEnd - tStart is in nanoseconds, so we divide by 1000000 to get the fraction in ms/s. 
- stats.updateStats({(short)ProcessingStatsId::CPU_USAGE_FRACTION, DataProcessingStats::Op::CumulativeRate, (int64_t)(tEnd - tStart) / 1000000}); + stats.updateStats({(short)ProcessingStatsId::CPU_USAGE_FRACTION, DataProcessingStats::Op::CumulativeRate, wallTimeMs}); stats.updateStats({(int)ProcessingStatsId::LAST_PROCESSED_SIZE, DataProcessingStats::Op::Set, calculateTotalInputRecordSize(record)}); stats.updateStats({(int)ProcessingStatsId::TOTAL_PROCESSED_SIZE, DataProcessingStats::Op::Add, calculateTotalInputRecordSize(record)}); auto latency = calculateInputRecordLatency(record, tStartMilli); diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index c3d6dc1a7d4b7..9216ea0463868 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1152,6 +1152,7 @@ std::vector getDumpableMetrics() dumpableMetrics.emplace_back("^table-bytes-.*"); dumpableMetrics.emplace_back("^total-timeframes.*"); dumpableMetrics.emplace_back("^device_state.*"); + dumpableMetrics.emplace_back("^total_wall_time_ms$"); return dumpableMetrics; } From 8cd4b7af7e6901b5a16caf171e1c6fc647bcf564 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:53:41 +0200 Subject: [PATCH 017/726] DPL: dump total wall time per device --- Framework/Core/include/Framework/DataProcessingStats.h | 1 + Framework/Core/src/CommonServices.cxx | 4 ++++ Framework/Core/src/DataProcessingDevice.cxx | 8 ++++++-- Framework/Core/src/runDataProcessing.cxx | 1 + 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingStats.h b/Framework/Core/include/Framework/DataProcessingStats.h index 6d9bceeeafa6f..ae8ac29d89e15 100644 --- a/Framework/Core/include/Framework/DataProcessingStats.h +++ b/Framework/Core/include/Framework/DataProcessingStats.h @@ -30,6 +30,7 @@ enum struct ProcessingStatsId : short { INCOMPLETE_INPUTS, TOTAL_INPUTS, LAST_ELAPSED_TIME_MS, + TOTAL_WALL_TIME_MS, LAST_PROCESSED_SIZE, TOTAL_PROCESSED_SIZE, TOTAL_SIGUSR1, diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 460702d654213..44d63cd2ab1f5 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -748,6 +748,10 @@ o2::framework::ServiceSpec CommonServices::dataProcessingStats() .metricId = (int)ProcessingStatsId::LAST_ELAPSED_TIME_MS, .kind = Kind::UInt64, .minPublishInterval = quickUpdateInterval}, + MetricSpec{.name = "total_wall_time_ms", + .metricId = (int)ProcessingStatsId::TOTAL_WALL_TIME_MS, + .kind = Kind::UInt64, + .minPublishInterval = quickUpdateInterval}, MetricSpec{.name = "last_processed_input_size_byte", .metricId = (int)ProcessingStatsId::LAST_PROCESSED_SIZE, .kind = Kind::UInt64, diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 23bdc9e75806b..b39607366adec 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2179,9 +2179,13 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v buffer[record.size()] = 0; states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), (int)(record.size() + buffer - relayerSlotState), relayerSlotState}); uint64_t tEnd = uv_hrtime(); - stats.updateStats({(int)ProcessingStatsId::LAST_ELAPSED_TIME_MS, DataProcessingStats::Op::Set, (int64_t)(tEnd - tStart)}); + // tEnd and tStart 
are in nanoseconds according to https://docs.libuv.org/en/v1.x/misc.html#c.uv_hrtime + int64_t wallTimeMs = (tEnd - tStart) / 1000000; + stats.updateStats({(int)ProcessingStatsId::LAST_ELAPSED_TIME_MS, DataProcessingStats::Op::Set, wallTimeMs}); + // Sum up the total wall time, in milliseconds. + stats.updateStats({(int)ProcessingStatsId::TOTAL_WALL_TIME_MS, DataProcessingStats::Op::Add, wallTimeMs}); // The time interval is in seconds while tEnd - tStart is in nanoseconds, so we divide by 1000000 to get the fraction in ms/s. - stats.updateStats({(short)ProcessingStatsId::CPU_USAGE_FRACTION, DataProcessingStats::Op::CumulativeRate, (int64_t)(tEnd - tStart) / 1000000}); + stats.updateStats({(short)ProcessingStatsId::CPU_USAGE_FRACTION, DataProcessingStats::Op::CumulativeRate, wallTimeMs}); stats.updateStats({(int)ProcessingStatsId::LAST_PROCESSED_SIZE, DataProcessingStats::Op::Set, calculateTotalInputRecordSize(record)}); stats.updateStats({(int)ProcessingStatsId::TOTAL_PROCESSED_SIZE, DataProcessingStats::Op::Add, calculateTotalInputRecordSize(record)}); auto latency = calculateInputRecordLatency(record, tStartMilli); diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index c3d6dc1a7d4b7..9216ea0463868 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1152,6 +1152,7 @@ std::vector getDumpableMetrics() dumpableMetrics.emplace_back("^table-bytes-.*"); dumpableMetrics.emplace_back("^total-timeframes.*"); dumpableMetrics.emplace_back("^device_state.*"); + dumpableMetrics.emplace_back("^total_wall_time_ms$"); return dumpableMetrics; } From 9125ce1bef7599253ab51db3b11acd7bad72c567 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 15:05:10 +0200 Subject: [PATCH 018/726] Fix deprecation error / warning --- Utilities/Tools/cpulimit/cpulimit.c | 7 +----- Utilities/Tools/cpulimit/memrchr.c | 38 ----------------------------- 2 files changed, 1 insertion(+), 44 deletions(-) delete mode 100644 Utilities/Tools/cpulimit/memrchr.c diff --git a/Utilities/Tools/cpulimit/cpulimit.c b/Utilities/Tools/cpulimit/cpulimit.c index e7c801496ba3d..e35a1565253e7 100644 --- a/Utilities/Tools/cpulimit/cpulimit.c +++ b/Utilities/Tools/cpulimit/cpulimit.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -56,10 +55,6 @@ #include #endif -#ifdef __APPLE__ -#include "memrchr.c" -#endif - //some useful macro #ifndef MIN #define MIN(a,b) (((a)<(b))?(a):(b)) @@ -329,7 +324,7 @@ int main(int argc, char **argv) { int include_children = 0; //get program name - char *p = (char*)memrchr(argv[0], (unsigned int)'/', strlen(argv[0])); + char *p = (char*)strrchr(argv[0], '/'); program_name = p==NULL ? argv[0] : (p+1); //get current pid cpulimit_pid = getpid(); diff --git a/Utilities/Tools/cpulimit/memrchr.c b/Utilities/Tools/cpulimit/memrchr.c deleted file mode 100644 index 1f3787020afad..0000000000000 --- a/Utilities/Tools/cpulimit/memrchr.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2007 Todd C. Miller - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include - -/* - * Reverse memchr() - * Find the last occurrence of 'c' in the buffer 's' of size 'n'. - */ -void * -memrchr(s, c, n) - const void *s; - int c; - size_t n; -{ - if (n != 0) { - const unsigned char *cp; - cp = (unsigned char *)s + n; - do { - if (*(--cp) == (unsigned char)c) - return((void *)cp); - } while (--n != 0); - } - return((void *)0); -} From 0dc062532c403ca98d0d5f7d9b17fb488bdcacc6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:17:54 +0200 Subject: [PATCH 019/726] DPL Analysis: drop unused API Since we use bulk reading, this is not really needed anymore. --- .../AnalysisSupport/src/DataInputDirector.cxx | 21 ------------------- .../AnalysisSupport/src/DataInputDirector.h | 1 - 2 files changed, 22 deletions(-) diff --git a/Framework/AnalysisSupport/src/DataInputDirector.cxx b/Framework/AnalysisSupport/src/DataInputDirector.cxx index c7f1a0c7d5de1..d8df1dab09feb 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.cxx +++ b/Framework/AnalysisSupport/src/DataInputDirector.cxx @@ -710,27 +710,6 @@ DataInputDescriptor* DataInputDirector::getDataInputDescriptor(header::DataHeade return result; } -std::unique_ptr DataInputDirector::getTreeReader(header::DataHeader dh, int counter, int numTF, std::string treename) -{ - std::unique_ptr reader = nullptr; - auto didesc = getDataInputDescriptor(dh); - // if NOT match then use defaultDataInputDescriptor - if (!didesc) { - didesc = mdefaultDataInputDescriptor; - } - - auto fileAndFolder = didesc->getFileFolder(counter, numTF); - if (fileAndFolder.file) { - treename = fileAndFolder.folderName + "/" + treename; - reader = std::make_unique(treename.c_str(), fileAndFolder.file); - if (!reader) { - throw std::runtime_error(fmt::format(R"(Couldn't create TTreeReader for tree "{}" in file "{}")", treename, fileAndFolder.file->GetName())); - } - } - - return reader; -} - FileAndFolder DataInputDirector::getFileFolder(header::DataHeader dh, int counter, int numTF) { auto didesc = getDataInputDescriptor(dh); diff --git a/Framework/AnalysisSupport/src/DataInputDirector.h b/Framework/AnalysisSupport/src/DataInputDirector.h index e9b483bbc729a..ec3f99779a5b1 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.h +++ b/Framework/AnalysisSupport/src/DataInputDirector.h @@ -138,7 +138,6 @@ class DataInputDirector DataInputDescriptor* getDataInputDescriptor(header::DataHeader dh); int getNumberInputDescriptors() { return mdataInputDescriptors.size(); } - std::unique_ptr getTreeReader(header::DataHeader dh, int counter, int numTF, std::string treeName); bool readTree(DataAllocator& outputs, header::DataHeader dh, int counter, int numTF, size_t& totalSizeCompressed, size_t& totalSizeUncompressed); uint64_t getTimeFrameNumber(header::DataHeader dh, int counter, int numTF); FileAndFolder getFileFolder(header::DataHeader dh, int counter, int numTF); From 25adbd8a3d5d72424fa1d83b84dc934a78059bc2 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:29:20 +0200 Subject: [PATCH 020/726] DPL Analysis: drop unused API Since we use bulk reading, TTreeReader usage is not really needed 
anymore. --- .../AnalysisSupport/src/DataInputDirector.cxx | 21 ------------------- .../AnalysisSupport/src/DataInputDirector.h | 2 -- 2 files changed, 23 deletions(-) diff --git a/Framework/AnalysisSupport/src/DataInputDirector.cxx b/Framework/AnalysisSupport/src/DataInputDirector.cxx index c7f1a0c7d5de1..d8df1dab09feb 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.cxx +++ b/Framework/AnalysisSupport/src/DataInputDirector.cxx @@ -710,27 +710,6 @@ DataInputDescriptor* DataInputDirector::getDataInputDescriptor(header::DataHeade return result; } -std::unique_ptr DataInputDirector::getTreeReader(header::DataHeader dh, int counter, int numTF, std::string treename) -{ - std::unique_ptr reader = nullptr; - auto didesc = getDataInputDescriptor(dh); - // if NOT match then use defaultDataInputDescriptor - if (!didesc) { - didesc = mdefaultDataInputDescriptor; - } - - auto fileAndFolder = didesc->getFileFolder(counter, numTF); - if (fileAndFolder.file) { - treename = fileAndFolder.folderName + "/" + treename; - reader = std::make_unique(treename.c_str(), fileAndFolder.file); - if (!reader) { - throw std::runtime_error(fmt::format(R"(Couldn't create TTreeReader for tree "{}" in file "{}")", treename, fileAndFolder.file->GetName())); - } - } - - return reader; -} - FileAndFolder DataInputDirector::getFileFolder(header::DataHeader dh, int counter, int numTF) { auto didesc = getDataInputDescriptor(dh); diff --git a/Framework/AnalysisSupport/src/DataInputDirector.h b/Framework/AnalysisSupport/src/DataInputDirector.h index e9b483bbc729a..ea54d6b3c7958 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.h +++ b/Framework/AnalysisSupport/src/DataInputDirector.h @@ -12,7 +12,6 @@ #define o2_framework_DataInputDirector_H_INCLUDED #include "TFile.h" -#include "TTreeReader.h" #include "Framework/DataDescriptorMatcher.h" #include "Framework/DataAllocator.h" @@ -138,7 +137,6 @@ class DataInputDirector DataInputDescriptor* getDataInputDescriptor(header::DataHeader dh); int getNumberInputDescriptors() { return mdataInputDescriptors.size(); } - std::unique_ptr getTreeReader(header::DataHeader dh, int counter, int numTF, std::string treeName); bool readTree(DataAllocator& outputs, header::DataHeader dh, int counter, int numTF, size_t& totalSizeCompressed, size_t& totalSizeUncompressed); uint64_t getTimeFrameNumber(header::DataHeader dh, int counter, int numTF); FileAndFolder getFileFolder(header::DataHeader dh, int counter, int numTF); From 89058ee6be5412e600e8d4bbf7ec69c7d6829a37 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:27:44 +0200 Subject: [PATCH 021/726] DPL: forward declare Monitoring --- Framework/AnalysisSupport/src/DataInputDirector.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Framework/AnalysisSupport/src/DataInputDirector.h b/Framework/AnalysisSupport/src/DataInputDirector.h index ceb43c2fec6f9..eca0ef195d111 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.h +++ b/Framework/AnalysisSupport/src/DataInputDirector.h @@ -15,11 +15,15 @@ #include "Framework/DataDescriptorMatcher.h" #include "Framework/DataAllocator.h" -#include "Monitoring/Monitoring.h" #include #include "rapidjson/fwd.h" +namespace o2::monitoring +{ +class Monitoring; +} + namespace o2::framework { From a947355c437ea57dae0f4dfe6511e0fbcc741b55 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:27:38 +0200 Subject: [PATCH 022/726] DPL: follow coding 
conventions --- Framework/AnalysisSupport/src/DataInputDirector.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Framework/AnalysisSupport/src/DataInputDirector.h b/Framework/AnalysisSupport/src/DataInputDirector.h index ea54d6b3c7958..ceb43c2fec6f9 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.h +++ b/Framework/AnalysisSupport/src/DataInputDirector.h @@ -8,8 +8,8 @@ // In applying this license CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -#ifndef o2_framework_DataInputDirector_H_INCLUDED -#define o2_framework_DataInputDirector_H_INCLUDED +#ifndef O2_FRAMEWORK_DATAINPUTDIRECTOR_H_ +#define O2_FRAMEWORK_DATAINPUTDIRECTOR_H_ #include "TFile.h" @@ -167,4 +167,4 @@ class DataInputDirector } // namespace o2::framework -#endif // o2_framework_DataInputDirector_H_INCLUDED +#endif // O2_FRAMEWORK_DATAINPUTDIRECTOR_H_ From 20e7dea9b76df3d1763ed322ef58832d53db6fca Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 28 Oct 2023 11:04:09 +0200 Subject: [PATCH 023/726] DPL: forward declare Monitoring --- Framework/AnalysisSupport/src/DataInputDirector.cxx | 2 ++ Framework/AnalysisSupport/src/DataInputDirector.h | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/Framework/AnalysisSupport/src/DataInputDirector.cxx b/Framework/AnalysisSupport/src/DataInputDirector.cxx index d8df1dab09feb..6e072d47e523f 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.cxx +++ b/Framework/AnalysisSupport/src/DataInputDirector.cxx @@ -16,6 +16,8 @@ #include "Headers/DataHeader.h" #include "Framework/TableTreeHelpers.h" #include "Monitoring/Tags.h" +#include "Monitoring/Metric.h" +#include "Monitoring/Monitoring.h" #include "rapidjson/document.h" #include "rapidjson/prettywriter.h" diff --git a/Framework/AnalysisSupport/src/DataInputDirector.h b/Framework/AnalysisSupport/src/DataInputDirector.h index ceb43c2fec6f9..eca0ef195d111 100644 --- a/Framework/AnalysisSupport/src/DataInputDirector.h +++ b/Framework/AnalysisSupport/src/DataInputDirector.h @@ -15,11 +15,15 @@ #include "Framework/DataDescriptorMatcher.h" #include "Framework/DataAllocator.h" -#include "Monitoring/Monitoring.h" #include #include "rapidjson/fwd.h" +namespace o2::monitoring +{ +class Monitoring; +} + namespace o2::framework { From 1afdb318ae37de7cff7ac68992b0b3c34d8bf91f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 28 Oct 2023 16:03:15 +0200 Subject: [PATCH 024/726] DPL: use Catch2 for ROOT tests --- Framework/Core/CMakeLists.txt | 14 ++-- Framework/Core/test/test_Root2ArrowTable.cxx | 68 +++++++++----------- 2 files changed, 41 insertions(+), 41 deletions(-) diff --git a/Framework/Core/CMakeLists.txt b/Framework/Core/CMakeLists.txt index 898fb3f6441fd..e58a6414e06ac 100644 --- a/Framework/Core/CMakeLists.txt +++ b/Framework/Core/CMakeLists.txt @@ -275,11 +275,15 @@ o2_add_test(O2DatabasePDG NAME test_Framework_test_O2DatabasePDG LABELS framework PUBLIC_LINK_LIBRARIES O2::Framework O2::FrameworkPhysicsSupport) -o2_add_test(Root2ArrowTable NAME test_Framework_test_Root2ArrowTable - SOURCES test/test_Root2ArrowTable.cxx - COMPONENT_NAME Framework - LABELS framework - PUBLIC_LINK_LIBRARIES O2::Framework ROOT::ROOTDataFrame) +# All the tests which require ROOT to work +add_executable(o2-test-framework-root + test/test_Root2ArrowTable.cxx + ) 
+target_link_libraries(o2-test-framework-root PRIVATE O2::Framework) +target_link_libraries(o2-test-framework-root PRIVATE O2::Catch2) +target_link_libraries(o2-test-framework-root PRIVATE ROOT::ROOTDataFrame) +set_property(TARGET o2-test-framework-root PROPERTY RUNTIME_OUTPUT_DIRECTORY ${outdir}) +add_test(NAME framework:root COMMAND o2-test-framework-root --skip-benchmarks) o2_add_test(InfoLogger NAME test_Framework_test_InfoLogger SOURCES test/test_InfoLogger.cxx diff --git a/Framework/Core/test/test_Root2ArrowTable.cxx b/Framework/Core/test/test_Root2ArrowTable.cxx index 43876b3230bae..b22432d8565a5 100644 --- a/Framework/Core/test/test_Root2ArrowTable.cxx +++ b/Framework/Core/test/test_Root2ArrowTable.cxx @@ -9,11 +9,7 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -#define BOOST_TEST_MODULE Test Framework TableBuilder -#define BOOST_TEST_MAIN -#define BOOST_TEST_DYN_LINK - -#include +#include #include "Framework/TableBuilder.h" #include "Framework/RootTableBuilderHelpers.h" @@ -32,7 +28,7 @@ using namespace o2::framework; -BOOST_AUTO_TEST_CASE(RootTree2Table) +TEST_CASE("RootTree2Table") { using namespace o2::framework; /// Create a simple TTree @@ -78,43 +74,43 @@ BOOST_AUTO_TEST_CASE(RootTree2Table) RootTableBuilderHelpers::convertTTree(builder, reader, std::move(xyzReader), std::move(ijkReader), std::move(pxReader), std::move(pyReader), std::move(pzReader), std::move(randomReader), std::move(evReader)); auto table = builder.finalize(); - BOOST_REQUIRE_EQUAL(table->num_rows(), 1000); - BOOST_REQUIRE_EQUAL(table->num_columns(), 7); - BOOST_REQUIRE_EQUAL(table->schema()->field(0)->type()->id(), arrow::fixed_size_list(arrow::float32(), 3)->id()); - BOOST_REQUIRE_EQUAL(table->schema()->field(1)->type()->id(), arrow::fixed_size_list(arrow::int32(), 2)->id()); - BOOST_REQUIRE_EQUAL(table->schema()->field(2)->type()->id(), arrow::float32()->id()); - BOOST_REQUIRE_EQUAL(table->schema()->field(3)->type()->id(), arrow::float32()->id()); - BOOST_REQUIRE_EQUAL(table->schema()->field(4)->type()->id(), arrow::float32()->id()); - BOOST_REQUIRE_EQUAL(table->schema()->field(5)->type()->id(), arrow::float64()->id()); - BOOST_REQUIRE_EQUAL(table->schema()->field(6)->type()->id(), arrow::int32()->id()); + REQUIRE(table->num_rows() == 1000); + REQUIRE(table->num_columns() == 7); + REQUIRE(table->schema()->field(0)->type()->id() == arrow::fixed_size_list(arrow::float32(), 3)->id()); + REQUIRE(table->schema()->field(1)->type()->id() == arrow::fixed_size_list(arrow::int32(), 2)->id()); + REQUIRE(table->schema()->field(2)->type()->id() == arrow::float32()->id()); + REQUIRE(table->schema()->field(3)->type()->id() == arrow::float32()->id()); + REQUIRE(table->schema()->field(4)->type()->id() == arrow::float32()->id()); + REQUIRE(table->schema()->field(5)->type()->id() == arrow::float64()->id()); + REQUIRE(table->schema()->field(6)->type()->id() == arrow::int32()->id()); { auto chunkToUse = table->column(0)->chunk(0); chunkToUse = std::dynamic_pointer_cast(chunkToUse)->values(); auto array = std::static_pointer_cast(chunkToUse); // array of 3 floats, time 1000. 
- BOOST_REQUIRE_EQUAL(array->length(), 3000); + REQUIRE(array->length() == 3000); const float* c = reinterpret_cast(array->values()->data()); //auto array = std::static_pointer_cast(table->column(0)->chunk(0)); - //BOOST_CHECK_EQUAL(array->byte_width(), sizeof(float[3])); + //CHECK_EQUAL(array->byte_width(), sizeof(float[3])); //const float* c = reinterpret_cast(array->Value(0)); - BOOST_CHECK_EQUAL(c[0], 1); - BOOST_CHECK_EQUAL(c[1], 2); - BOOST_CHECK_EQUAL(c[2], 1); + CHECK(c[0] == 1); + CHECK(c[1] == 2); + CHECK(c[2] == 1); } { //auto values = std::static_pointer_cast(table->column(1)->chunk(0)); auto chunkToUse = table->column(1)->chunk(0); chunkToUse = std::dynamic_pointer_cast(chunkToUse)->values(); auto array = std::static_pointer_cast(chunkToUse); - BOOST_REQUIRE_EQUAL(array->length(), 2000); + REQUIRE(array->length() == 2000); const int* ptr = reinterpret_cast(array->values()->data()); for (size_t i = 0; i < 1000; i++) { - BOOST_CHECK_EQUAL(ptr[2 * i + 0], i); - BOOST_CHECK_EQUAL(ptr[2 * i + 1], i + 1); + CHECK(ptr[2 * i + 0] == i); + CHECK(ptr[2 * i + 1] == i + 1); } } } @@ -138,7 +134,7 @@ DECLARE_SOA_TABLE(Test, "AOD", "ETAPHI", test::Random, test::Ev); } // namespace o2::aod -BOOST_AUTO_TEST_CASE(RootTree2TableViaASoA) +TEST_CASE("RootTree2TableViaASoA") { using namespace o2::framework; /// Create a simple TTree @@ -171,23 +167,23 @@ BOOST_AUTO_TEST_CASE(RootTree2TableViaASoA) // Create an arrow table from this. TableBuilder builder; TTreeReader reader(&t2); - BOOST_REQUIRE_EQUAL(t2.GetEntries(), 1000); + REQUIRE(t2.GetEntries() == 1000); RootTableBuilderHelpers::convertASoA(builder, reader); auto table = builder.finalize(); - BOOST_REQUIRE_EQUAL(table->num_rows(), 1000); - BOOST_REQUIRE_EQUAL(table->num_columns(), 7); - BOOST_REQUIRE_EQUAL(table->column(0)->type()->id(), arrow::float32()->id()); - BOOST_REQUIRE_EQUAL(table->column(1)->type()->id(), arrow::float32()->id()); - BOOST_REQUIRE_EQUAL(table->column(2)->type()->id(), arrow::float32()->id()); - BOOST_REQUIRE_EQUAL(table->column(3)->type()->id(), arrow::fixed_size_list(arrow::float32(), 3)->id()); - BOOST_REQUIRE_EQUAL(table->column(4)->type()->id(), arrow::fixed_size_list(arrow::int32(), 2)->id()); - BOOST_REQUIRE_EQUAL(table->column(5)->type()->id(), arrow::float64()->id()); - BOOST_REQUIRE_EQUAL(table->column(6)->type()->id(), arrow::int32()->id()); + REQUIRE(table->num_rows() == 1000); + REQUIRE(table->num_columns() == 7); + REQUIRE(table->column(0)->type()->id() == arrow::float32()->id()); + REQUIRE(table->column(1)->type()->id() == arrow::float32()->id()); + REQUIRE(table->column(2)->type()->id() == arrow::float32()->id()); + REQUIRE(table->column(3)->type()->id() == arrow::fixed_size_list(arrow::float32(), 3)->id()); + REQUIRE(table->column(4)->type()->id() == arrow::fixed_size_list(arrow::int32(), 2)->id()); + REQUIRE(table->column(5)->type()->id() == arrow::float64()->id()); + REQUIRE(table->column(6)->type()->id() == arrow::int32()->id()); o2::aod::Test testTable{table}; for (auto& row : testTable) { - BOOST_REQUIRE_EQUAL(row.ij()[0], row.ij()[1] - 1); - BOOST_REQUIRE_EQUAL(row.ij()[1], row.ev()); + REQUIRE(row.ij()[0] == row.ij()[1] - 1); + REQUIRE(row.ij()[1] == row.ev()); } } From 5f96d885dd967acb49f4d762841bad9369e87f9a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:01:07 +0100 Subject: [PATCH 025/726] Drop old TOF compressor --- Detectors/TOF/compression/CMakeLists.txt | 1 - .../TOFCompression/CompressorTaskOld.h | 50 ---- 
.../TOF/compression/src/CompressorTaskOld.cxx | 216 ------------------ .../TOF/compression/src/tof-compressor.cxx | 27 +-- 4 files changed, 4 insertions(+), 290 deletions(-) delete mode 100644 Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h delete mode 100644 Detectors/TOF/compression/src/CompressorTaskOld.cxx diff --git a/Detectors/TOF/compression/CMakeLists.txt b/Detectors/TOF/compression/CMakeLists.txt index 00b29e267eb92..8ea77eb8028ba 100644 --- a/Detectors/TOF/compression/CMakeLists.txt +++ b/Detectors/TOF/compression/CMakeLists.txt @@ -12,7 +12,6 @@ o2_add_library(TOFCompression SOURCES src/Compressor.cxx src/CompressorTask.cxx - src/CompressorTaskOld.cxx PUBLIC_LINK_LIBRARIES O2::TOFBase O2::Framework O2::Headers O2::DataFormatsTOF O2::DetectorsRaw ) diff --git a/Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h b/Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h deleted file mode 100644 index ed78dd88caa64..0000000000000 --- a/Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -/// @file CompressorTask.h -/// @author Roberto Preghenella -/// @since 2019-12-18 -/// @brief TOF raw data compressor task - -#ifndef O2_TOF_COMPRESSORTASK_OLD -#define O2_TOF_COMPRESSORTASK_OLD - -#include "Framework/Task.h" -#include "Framework/DataProcessorSpec.h" -#include "TOFCompression/Compressor.h" -#include - -using namespace o2::framework; - -namespace o2 -{ -namespace tof -{ - -template -class CompressorTaskOld : public Task -{ - public: - CompressorTaskOld(long payloadLim = -1) : mPayloadLimit(payloadLim) {} - ~CompressorTaskOld() override = default; - void init(InitContext& ic) final; - void run(ProcessingContext& pc) final; - - private: - Compressor mCompressor; - int mOutputBufferSize; - long mPayloadLimit = -1; -}; - -} // namespace tof -} // namespace o2 - -#endif /** O2_TOF_COMPRESSORTASK_OLD **/ diff --git a/Detectors/TOF/compression/src/CompressorTaskOld.cxx b/Detectors/TOF/compression/src/CompressorTaskOld.cxx deleted file mode 100644 index 1a4e28393a8c3..0000000000000 --- a/Detectors/TOF/compression/src/CompressorTaskOld.cxx +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -/// @file CompressorTaskOld.cxx -/// @author Roberto Preghenella -/// @since 2019-12-18 -/// @brief TOF raw data compressor task - -#include "TOFCompression/CompressorTaskOld.h" -#include "Framework/ControlService.h" -#include "Framework/ConfigParamRegistry.h" -#include "Framework/RawDeviceService.h" -#include "Framework/DeviceSpec.h" -#include "Framework/DataSpecUtils.h" -#include "Framework/InputRecordWalker.h" -#include "CommonUtils/VerbosityConfig.h" - -#include -#include - -using namespace o2::framework; - -namespace o2 -{ -namespace tof -{ - -template -void CompressorTaskOld::init(InitContext& ic) -{ - if (mPayloadLimit < 0) { - LOG(info) << "Compressor init"; - } else { - LOG(info) << "Compressor init with Payload limit at " << mPayloadLimit; - } - - auto decoderCONET = ic.options().get("tof-compressor-conet-mode"); - auto decoderVerbose = ic.options().get("tof-compressor-decoder-verbose"); - auto encoderVerbose = ic.options().get("tof-compressor-encoder-verbose"); - auto checkerVerbose = ic.options().get("tof-compressor-checker-verbose"); - mOutputBufferSize = ic.options().get("tof-compressor-output-buffer-size"); - - mCompressor.setDecoderCONET(decoderCONET); - mCompressor.setDecoderVerbose(decoderVerbose); - mCompressor.setEncoderVerbose(encoderVerbose); - mCompressor.setCheckerVerbose(checkerVerbose); - - auto finishFunction = [this]() { - mCompressor.checkSummary(); - }; - - ic.services().get().set(finishFunction); -} - -template -void CompressorTaskOld::run(ProcessingContext& pc) -{ - LOG(debug) << "Compressor run"; - - auto device = pc.services().get().device(); - auto outputRoutes = pc.services().get().spec().outputs; - if (outputRoutes.size() != 1) { - LOG(error) << "Compressor output routes size != 1"; - return; - } - auto fairMQChannel = outputRoutes.at(0).channel; - fair::mq::Parts partsOut; - - /** to store data sorted by subspec id **/ - std::map> subspecPartMap; - std::map subspecBufferSize; - - // if we see requested data type input with 0xDEADBEEF subspec and 0 payload this means that the "delayed message" - // mechanism created it in absence of real data from upstream. Processor should send empty output to not block the workflow - { - auto& inputs = pc.inputs(); - static size_t contDeadBeef = 0; // number of times 0xDEADBEEF was seen continuously - std::vector dummy{InputSpec{"dummy", ConcreteDataMatcher{"TOF", "RAWDATA", 0xDEADBEEF}}}; - for (const auto& ref : InputRecordWalker(inputs, dummy)) { - const auto* dh = o2::framework::DataRefUtils::getHeader(ref); - auto payloadSize = DataRefUtils::getPayloadSize(ref); - if (payloadSize == 0) { - auto maxWarn = o2::conf::VerbosityConfig::Instance().maxWarnDeadBeef; - if (++contDeadBeef <= maxWarn) { - LOGP(alarm, "Found input [{}/{}/{:#x}] TF#{} 1st_orbit:{} Payload {} : assuming no payload for all links in this TF{}", - dh->dataOrigin.str, dh->dataDescription.str, dh->subSpecification, dh->tfCounter, dh->firstTForbit, payloadSize, - contDeadBeef == maxWarn ? fmt::format(". 
{} such inputs in row received, stopping reporting", contDeadBeef) : ""); - } - // send empty message with DEADBEEF subspec - const auto* dph = o2::framework::DataRefUtils::getHeader(ref); - o2::header::DataHeader emptyDH("CRAWDATA", "TOF", 0xdeadbeef, 0, 0, 1); - emptyDH.runNumber = dh->runNumber; - emptyDH.payloadSerializationMethod = o2::header::gSerializationMethodNone; - emptyDH.firstTForbit = dh->firstTForbit; - emptyDH.tfCounter = dh->tfCounter; - - o2::header::Stack emptyStack{emptyDH, o2::framework::DataProcessingHeader{dph->startTime, dph->duration, dph->creation}}; - - auto headerMessage = device->NewMessage(emptyStack.size()); - auto payloadMessage = device->NewMessage(0); - std::memcpy(headerMessage->GetData(), emptyStack.data(), emptyStack.size()); - partsOut.AddPart(std::move(headerMessage)); - partsOut.AddPart(std::move(payloadMessage)); - device->Send(partsOut, fairMQChannel); - return; - } - } - contDeadBeef = 0; // if good data, reset the counter - } - - /** loop over inputs routes **/ - std::vector sel{InputSpec{"filter", ConcreteDataTypeMatcher{"TOF", "RAWDATA"}}}; - for (const auto& ref : InputRecordWalker(pc.inputs(), sel)) { - // for (auto iit = pc.inputs().begin(), iend = pc.inputs().end(); iit != iend; ++iit) { - // if (!iit.isValid()) { - // continue; - // } - - /** loop over input parts **/ - // for (auto const& ref : iit) { - - /** store parts in map **/ - auto headerIn = DataRefUtils::getHeader(ref); - auto payloadInSize = DataRefUtils::getPayloadSize(ref); - auto subspec = headerIn->subSpecification; - subspecPartMap[subspec].push_back(ref); - - /** increase subspec buffer size **/ - if (!subspecBufferSize.count(subspec)) { - subspecBufferSize[subspec] = 0; - } - subspecBufferSize[subspec] += payloadInSize; - // } - } - - /** loop over subspecs **/ - for (auto& subspecPartEntry : subspecPartMap) { - - auto subspec = subspecPartEntry.first; - auto parts = subspecPartEntry.second; - auto& firstPart = parts.at(0); - - /** use the first part to define output headers **/ - auto headerOut = *DataRefUtils::getHeader(firstPart); - auto dataProcessingHeaderOut = *DataRefUtils::getHeader(firstPart); - headerOut.dataDescription = "CRAWDATA"; - headerOut.payloadSize = 0; - headerOut.splitPayloadParts = 1; - - /** initialise output message **/ - auto bufferSize = mOutputBufferSize >= 0 ? 
mOutputBufferSize + subspecBufferSize[subspec] : std::abs(mOutputBufferSize); - auto bufferSizeDouble = bufferSize * 2; - auto payloadMessage = device->NewMessage(bufferSizeDouble); // start with double size since later resized (but resize cannot resize more than original one) - auto bufferPointer = (char*)payloadMessage->GetData(); - - /** loop over subspec parts **/ - for (const auto& ref : parts) { - - /** input **/ - auto headerIn = DataRefUtils::getHeader(ref); - auto dataProcessingHeaderIn = DataRefUtils::getHeader(ref); - auto payloadIn = ref.payload; - auto payloadInSize = DataRefUtils::getPayloadSize(ref); - - if (mPayloadLimit > -1 && payloadInSize > mPayloadLimit) { - LOG(error) << "Payload larger than limit (" << mPayloadLimit << "), payload = " << payloadInSize; - continue; - } - - /** prepare compressor **/ - mCompressor.setDecoderBuffer(payloadIn); - mCompressor.setDecoderBufferSize(payloadInSize); - mCompressor.setEncoderBuffer(bufferPointer); - mCompressor.setEncoderBufferSize(bufferSize); - - /** run **/ - mCompressor.run(); - auto payloadOutSize = mCompressor.getEncoderByteCounter(); - bufferPointer += payloadOutSize; - bufferSize -= payloadOutSize; - headerOut.payloadSize += payloadOutSize; - } - - /** finalise output message **/ - if (headerOut.payloadSize > bufferSizeDouble) { - headerOut.payloadSize = 0; // put payload to zero, otherwise it will trigger a crash - } - payloadMessage->SetUsedSize(headerOut.payloadSize); - o2::header::Stack headerStack{headerOut, dataProcessingHeaderOut}; - auto headerMessage = device->NewMessage(headerStack.size()); - std::memcpy(headerMessage->GetData(), headerStack.data(), headerStack.size()); - - /** add parts **/ - partsOut.AddPart(std::move(headerMessage)); - partsOut.AddPart(std::move(payloadMessage)); - } - - /** send message **/ - device->Send(partsOut, fairMQChannel); -} - -template class CompressorTaskOld; -template class CompressorTaskOld; -template class CompressorTaskOld; -template class CompressorTaskOld; - -} // namespace tof -} // namespace o2 diff --git a/Detectors/TOF/compression/src/tof-compressor.cxx b/Detectors/TOF/compression/src/tof-compressor.cxx index 4358ceae84bdb..29286911957d3 100644 --- a/Detectors/TOF/compression/src/tof-compressor.cxx +++ b/Detectors/TOF/compression/src/tof-compressor.cxx @@ -15,7 +15,6 @@ /// @brief Basic DPL workflow for TOF raw data compression #include "TOFCompression/CompressorTask.h" -#include "TOFCompression/CompressorTaskOld.h" #include "Framework/WorkflowSpec.h" #include "Framework/ConfigParamSpec.h" #include "Framework/ConcreteDataMatcher.h" @@ -45,7 +44,6 @@ void customize(std::vector& workflowOptions) workflowOptions.push_back(paranoid); workflowOptions.push_back(ignoreStf); workflowOptions.push_back(payloadlim); - workflowOptions.emplace_back(ConfigParamSpec{"old", VariantType::Bool, false, {"use the non-DPL version of the compressor"}}); workflowOptions.push_back(ConfigParamSpec{"configKeyValues", VariantType::String, "", {"Semicolon separated key=value strings"}}); } @@ -61,7 +59,6 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) auto verbose = cfgc.options().get("tof-compressor-verbose"); auto paranoid = cfgc.options().get("tof-compressor-paranoid"); auto ignoreStf = cfgc.options().get("ignore-dist-stf"); - auto old = cfgc.options().get("old"); auto payloadLim = cfgc.options().get("payload-limit"); std::vector outputs; @@ -70,32 +67,16 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) AlgorithmSpec algoSpec; if (rdhVersion == 
o2::raw::RDHUtils::getVersion()) { if (!verbose && !paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } if (!verbose && paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } if (verbose && !paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } if (verbose && paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } } From 357a86db4004fc07427d1504a003d0255767ea75 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:53:46 +0100 Subject: [PATCH 026/726] Drop old TOF compressor --- Detectors/TOF/compression/CMakeLists.txt | 1 - .../TOFCompression/CompressorTaskOld.h | 50 ---- .../TOF/compression/src/CompressorTaskOld.cxx | 216 ------------------ .../TOF/compression/src/tof-compressor.cxx | 28 +-- 4 files changed, 7 insertions(+), 288 deletions(-) delete mode 100644 Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h delete mode 100644 Detectors/TOF/compression/src/CompressorTaskOld.cxx diff --git a/Detectors/TOF/compression/CMakeLists.txt b/Detectors/TOF/compression/CMakeLists.txt index 00b29e267eb92..8ea77eb8028ba 100644 --- a/Detectors/TOF/compression/CMakeLists.txt +++ b/Detectors/TOF/compression/CMakeLists.txt @@ -12,7 +12,6 @@ o2_add_library(TOFCompression SOURCES src/Compressor.cxx src/CompressorTask.cxx - src/CompressorTaskOld.cxx PUBLIC_LINK_LIBRARIES O2::TOFBase O2::Framework O2::Headers O2::DataFormatsTOF O2::DetectorsRaw ) diff --git a/Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h b/Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h deleted file mode 100644 index ed78dd88caa64..0000000000000 --- a/Detectors/TOF/compression/include/TOFCompression/CompressorTaskOld.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -/// @file CompressorTask.h -/// @author Roberto Preghenella -/// @since 2019-12-18 -/// @brief TOF raw data compressor task - -#ifndef O2_TOF_COMPRESSORTASK_OLD -#define O2_TOF_COMPRESSORTASK_OLD - -#include "Framework/Task.h" -#include "Framework/DataProcessorSpec.h" -#include "TOFCompression/Compressor.h" -#include - -using namespace o2::framework; - -namespace o2 -{ -namespace tof -{ - -template -class CompressorTaskOld : public Task -{ - public: - CompressorTaskOld(long payloadLim = -1) : mPayloadLimit(payloadLim) {} - ~CompressorTaskOld() override = default; - void init(InitContext& ic) final; - void run(ProcessingContext& pc) final; - - private: - Compressor mCompressor; - int mOutputBufferSize; - long mPayloadLimit = -1; -}; - -} // namespace tof -} // namespace o2 - -#endif /** O2_TOF_COMPRESSORTASK_OLD **/ diff --git a/Detectors/TOF/compression/src/CompressorTaskOld.cxx b/Detectors/TOF/compression/src/CompressorTaskOld.cxx deleted file mode 100644 index 1a4e28393a8c3..0000000000000 --- a/Detectors/TOF/compression/src/CompressorTaskOld.cxx +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -/// @file CompressorTaskOld.cxx -/// @author Roberto Preghenella -/// @since 2019-12-18 -/// @brief TOF raw data compressor task - -#include "TOFCompression/CompressorTaskOld.h" -#include "Framework/ControlService.h" -#include "Framework/ConfigParamRegistry.h" -#include "Framework/RawDeviceService.h" -#include "Framework/DeviceSpec.h" -#include "Framework/DataSpecUtils.h" -#include "Framework/InputRecordWalker.h" -#include "CommonUtils/VerbosityConfig.h" - -#include -#include - -using namespace o2::framework; - -namespace o2 -{ -namespace tof -{ - -template -void CompressorTaskOld::init(InitContext& ic) -{ - if (mPayloadLimit < 0) { - LOG(info) << "Compressor init"; - } else { - LOG(info) << "Compressor init with Payload limit at " << mPayloadLimit; - } - - auto decoderCONET = ic.options().get("tof-compressor-conet-mode"); - auto decoderVerbose = ic.options().get("tof-compressor-decoder-verbose"); - auto encoderVerbose = ic.options().get("tof-compressor-encoder-verbose"); - auto checkerVerbose = ic.options().get("tof-compressor-checker-verbose"); - mOutputBufferSize = ic.options().get("tof-compressor-output-buffer-size"); - - mCompressor.setDecoderCONET(decoderCONET); - mCompressor.setDecoderVerbose(decoderVerbose); - mCompressor.setEncoderVerbose(encoderVerbose); - mCompressor.setCheckerVerbose(checkerVerbose); - - auto finishFunction = [this]() { - mCompressor.checkSummary(); - }; - - ic.services().get().set(finishFunction); -} - -template -void CompressorTaskOld::run(ProcessingContext& pc) -{ - LOG(debug) << "Compressor run"; - - auto device = pc.services().get().device(); - auto outputRoutes = pc.services().get().spec().outputs; - if (outputRoutes.size() != 1) { - LOG(error) << "Compressor output routes size != 1"; - return; - } - auto fairMQChannel = outputRoutes.at(0).channel; - fair::mq::Parts partsOut; - - /** to store data sorted by subspec id **/ - 
std::map> subspecPartMap; - std::map subspecBufferSize; - - // if we see requested data type input with 0xDEADBEEF subspec and 0 payload this means that the "delayed message" - // mechanism created it in absence of real data from upstream. Processor should send empty output to not block the workflow - { - auto& inputs = pc.inputs(); - static size_t contDeadBeef = 0; // number of times 0xDEADBEEF was seen continuously - std::vector dummy{InputSpec{"dummy", ConcreteDataMatcher{"TOF", "RAWDATA", 0xDEADBEEF}}}; - for (const auto& ref : InputRecordWalker(inputs, dummy)) { - const auto* dh = o2::framework::DataRefUtils::getHeader(ref); - auto payloadSize = DataRefUtils::getPayloadSize(ref); - if (payloadSize == 0) { - auto maxWarn = o2::conf::VerbosityConfig::Instance().maxWarnDeadBeef; - if (++contDeadBeef <= maxWarn) { - LOGP(alarm, "Found input [{}/{}/{:#x}] TF#{} 1st_orbit:{} Payload {} : assuming no payload for all links in this TF{}", - dh->dataOrigin.str, dh->dataDescription.str, dh->subSpecification, dh->tfCounter, dh->firstTForbit, payloadSize, - contDeadBeef == maxWarn ? fmt::format(". {} such inputs in row received, stopping reporting", contDeadBeef) : ""); - } - // send empty message with DEADBEEF subspec - const auto* dph = o2::framework::DataRefUtils::getHeader(ref); - o2::header::DataHeader emptyDH("CRAWDATA", "TOF", 0xdeadbeef, 0, 0, 1); - emptyDH.runNumber = dh->runNumber; - emptyDH.payloadSerializationMethod = o2::header::gSerializationMethodNone; - emptyDH.firstTForbit = dh->firstTForbit; - emptyDH.tfCounter = dh->tfCounter; - - o2::header::Stack emptyStack{emptyDH, o2::framework::DataProcessingHeader{dph->startTime, dph->duration, dph->creation}}; - - auto headerMessage = device->NewMessage(emptyStack.size()); - auto payloadMessage = device->NewMessage(0); - std::memcpy(headerMessage->GetData(), emptyStack.data(), emptyStack.size()); - partsOut.AddPart(std::move(headerMessage)); - partsOut.AddPart(std::move(payloadMessage)); - device->Send(partsOut, fairMQChannel); - return; - } - } - contDeadBeef = 0; // if good data, reset the counter - } - - /** loop over inputs routes **/ - std::vector sel{InputSpec{"filter", ConcreteDataTypeMatcher{"TOF", "RAWDATA"}}}; - for (const auto& ref : InputRecordWalker(pc.inputs(), sel)) { - // for (auto iit = pc.inputs().begin(), iend = pc.inputs().end(); iit != iend; ++iit) { - // if (!iit.isValid()) { - // continue; - // } - - /** loop over input parts **/ - // for (auto const& ref : iit) { - - /** store parts in map **/ - auto headerIn = DataRefUtils::getHeader(ref); - auto payloadInSize = DataRefUtils::getPayloadSize(ref); - auto subspec = headerIn->subSpecification; - subspecPartMap[subspec].push_back(ref); - - /** increase subspec buffer size **/ - if (!subspecBufferSize.count(subspec)) { - subspecBufferSize[subspec] = 0; - } - subspecBufferSize[subspec] += payloadInSize; - // } - } - - /** loop over subspecs **/ - for (auto& subspecPartEntry : subspecPartMap) { - - auto subspec = subspecPartEntry.first; - auto parts = subspecPartEntry.second; - auto& firstPart = parts.at(0); - - /** use the first part to define output headers **/ - auto headerOut = *DataRefUtils::getHeader(firstPart); - auto dataProcessingHeaderOut = *DataRefUtils::getHeader(firstPart); - headerOut.dataDescription = "CRAWDATA"; - headerOut.payloadSize = 0; - headerOut.splitPayloadParts = 1; - - /** initialise output message **/ - auto bufferSize = mOutputBufferSize >= 0 ? 
mOutputBufferSize + subspecBufferSize[subspec] : std::abs(mOutputBufferSize); - auto bufferSizeDouble = bufferSize * 2; - auto payloadMessage = device->NewMessage(bufferSizeDouble); // start with double size since later resized (but resize cannot resize more than original one) - auto bufferPointer = (char*)payloadMessage->GetData(); - - /** loop over subspec parts **/ - for (const auto& ref : parts) { - - /** input **/ - auto headerIn = DataRefUtils::getHeader(ref); - auto dataProcessingHeaderIn = DataRefUtils::getHeader(ref); - auto payloadIn = ref.payload; - auto payloadInSize = DataRefUtils::getPayloadSize(ref); - - if (mPayloadLimit > -1 && payloadInSize > mPayloadLimit) { - LOG(error) << "Payload larger than limit (" << mPayloadLimit << "), payload = " << payloadInSize; - continue; - } - - /** prepare compressor **/ - mCompressor.setDecoderBuffer(payloadIn); - mCompressor.setDecoderBufferSize(payloadInSize); - mCompressor.setEncoderBuffer(bufferPointer); - mCompressor.setEncoderBufferSize(bufferSize); - - /** run **/ - mCompressor.run(); - auto payloadOutSize = mCompressor.getEncoderByteCounter(); - bufferPointer += payloadOutSize; - bufferSize -= payloadOutSize; - headerOut.payloadSize += payloadOutSize; - } - - /** finalise output message **/ - if (headerOut.payloadSize > bufferSizeDouble) { - headerOut.payloadSize = 0; // put payload to zero, otherwise it will trigger a crash - } - payloadMessage->SetUsedSize(headerOut.payloadSize); - o2::header::Stack headerStack{headerOut, dataProcessingHeaderOut}; - auto headerMessage = device->NewMessage(headerStack.size()); - std::memcpy(headerMessage->GetData(), headerStack.data(), headerStack.size()); - - /** add parts **/ - partsOut.AddPart(std::move(headerMessage)); - partsOut.AddPart(std::move(payloadMessage)); - } - - /** send message **/ - device->Send(partsOut, fairMQChannel); -} - -template class CompressorTaskOld; -template class CompressorTaskOld; -template class CompressorTaskOld; -template class CompressorTaskOld; - -} // namespace tof -} // namespace o2 diff --git a/Detectors/TOF/compression/src/tof-compressor.cxx b/Detectors/TOF/compression/src/tof-compressor.cxx index 4358ceae84bdb..e305911ee152f 100644 --- a/Detectors/TOF/compression/src/tof-compressor.cxx +++ b/Detectors/TOF/compression/src/tof-compressor.cxx @@ -15,7 +15,6 @@ /// @brief Basic DPL workflow for TOF raw data compression #include "TOFCompression/CompressorTask.h" -#include "TOFCompression/CompressorTaskOld.h" #include "Framework/WorkflowSpec.h" #include "Framework/ConfigParamSpec.h" #include "Framework/ConcreteDataMatcher.h" @@ -62,6 +61,9 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) auto paranoid = cfgc.options().get("tof-compressor-paranoid"); auto ignoreStf = cfgc.options().get("ignore-dist-stf"); auto old = cfgc.options().get("old"); + if (old) { + LOGP(error, "--old option is not actually supported, using the new version"); + } auto payloadLim = cfgc.options().get("payload-limit"); std::vector outputs; @@ -70,32 +72,16 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) AlgorithmSpec algoSpec; if (rdhVersion == o2::raw::RDHUtils::getVersion()) { if (!verbose && !paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } if (!verbose && paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = 
AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } if (verbose && !paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } if (verbose && paranoid) { - if (old) { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } else { - algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; - } + algoSpec = AlgorithmSpec{adaptFromTask>(payloadLim)}; } } From 30e0c6b21269450915194a7a66139ae5af40d29f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:49:15 +0100 Subject: [PATCH 027/726] Avoid duplicate dependency --- Framework/Core/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/Framework/Core/CMakeLists.txt b/Framework/Core/CMakeLists.txt index e58a6414e06ac..25f2e60f402c4 100644 --- a/Framework/Core/CMakeLists.txt +++ b/Framework/Core/CMakeLists.txt @@ -158,7 +158,6 @@ o2_add_library(Framework O2::Headers O2::MemoryResources O2::PCG - ROOT::Tree RapidJSON::RapidJSON Arrow::arrow_shared Microsoft.GSL::GSL From 221bf8caf6ef5fea05734bbc0e91402a4f395a93 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 30 Oct 2023 16:27:01 +0100 Subject: [PATCH 028/726] DPL: improve homegeneous_apply_ref * Return something indexable for the case N == 0. * Improve tests. --- Framework/Foundation/include/Framework/StructToTuple.h | 2 +- Framework/Foundation/test/test_StructToTuple.cxx | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index 94655f605c9a3..4ea93e113249d 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -382,7 +382,7 @@ auto homogeneous_apply_refs(L l, T&& object) else if DPL_HOMOGENEOUS_APPLY_ENTRY_LOW (3) else if DPL_HOMOGENEOUS_APPLY_ENTRY_LOW (2) else if DPL_HOMOGENEOUS_APPLY_ENTRY_LOW (1) - else { return false; } + else { return std::array(); } // clang-format on } diff --git a/Framework/Foundation/test/test_StructToTuple.cxx b/Framework/Foundation/test/test_StructToTuple.cxx index 342d51a23bf80..06cf4b47a9f7c 100644 --- a/Framework/Foundation/test/test_StructToTuple.cxx +++ b/Framework/Foundation/test/test_StructToTuple.cxx @@ -135,4 +135,6 @@ TEST_CASE("TestStructToTuple") } }, nestedFoo); + REQUIRE(t6.size() == 3); + REQUIRE(t6[0] == true); } From bdb112db4e8ba43fc9e3ef5b1c6ba616a3b24126 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 30 Oct 2023 16:43:05 +0100 Subject: [PATCH 029/726] DPL: flush messages immediately when notifying state change --- Framework/Core/src/ControlService.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/src/ControlService.cxx b/Framework/Core/src/ControlService.cxx index 65619baba1037..13940867d61e3 100644 --- a/Framework/Core/src/ControlService.cxx +++ b/Framework/Core/src/ControlService.cxx @@ -86,6 +86,7 @@ void ControlService::notifyDeviceState(std::string currentState) { std::scoped_lock lock(mMutex); mDriverClient.tell(fmt::format("CONTROL_ACTION: NOTIFY_DEVICE_STATE {}", currentState)); + mDriverClient.flushPending(mRegistry); } } // namespace o2::framework From d1371dc2c4e69d22b8962cb7ff0629b94ad9e5e8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse 
<10544+ktf@users.noreply.github.com> Date: Mon, 30 Oct 2023 22:13:17 +0100 Subject: [PATCH 030/726] DPL: have common baseclass to abstract tree writing This is the the first step towards writing something else, e.g. a RNTuple. --- .../Core/include/Framework/TableTreeHelpers.h | 28 +++++++++--- Framework/Core/src/CommonDataProcessors.cxx | 10 ++--- Framework/Core/src/TableTreeHelpers.cxx | 44 +++++++++++-------- Framework/Core/test/benchmark_TableToTree.cxx | 2 +- Framework/Core/test/benchmark_TreeToTable.cxx | 2 +- Framework/Core/test/test_TreeToTable.cxx | 9 ++-- 6 files changed, 59 insertions(+), 36 deletions(-) diff --git a/Framework/Core/include/Framework/TableTreeHelpers.h b/Framework/Core/include/Framework/TableTreeHelpers.h index e12f4a38a973b..78bf0bba11389 100644 --- a/Framework/Core/include/Framework/TableTreeHelpers.h +++ b/Framework/Core/include/Framework/TableTreeHelpers.h @@ -105,18 +105,34 @@ class ColumnToBranch int mFieldSize = 0; }; -class TableToTree +class TableToRoot { public: - TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + TableToRoot(std::shared_ptr const& table, TFile* file, const char* name); + ~TableToRoot() = default; - std::shared_ptr process(); - void addBranch(std::shared_ptr const& column, std::shared_ptr const& field); - void addAllBranches(); + virtual void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) = 0; + virtual void process() = 0; + // Helper function which marks all columns in the table for writing + void addAllColumns(); - private: + protected: arrow::Table* mTable; + TFile* mFile; + std::string mName; int64_t mRows = 0; +}; + +class TableToTree : public TableToRoot +{ + public: + TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + + void process() final; + void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) final; + std::shared_ptr tree() { return mTree; } + + private: std::shared_ptr mTree; std::vector> mColumnReaders; }; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 08283b5e49bb5..c9d8aa835d734 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -417,9 +417,7 @@ DataProcessorSpec for (auto d : ds) { auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile); auto treename = fileAndFolder.folderName + "/" + d->treename; - TableToTree ta2tr(table, - fileAndFolder.file, - treename.c_str()); + std::unique_ptr ta2r = std::make_unique(table, fileAndFolder.file, treename.c_str()); // update metadata if (fileAndFolder.file->FindObjectAny("metaData")) { @@ -438,13 +436,13 @@ DataProcessorSpec auto col = table->column(idx); auto field = table->schema()->field(idx); if (idx != -1) { - ta2tr.addBranch(col, field); + ta2r->addColumn(col, field); } } } else { - ta2tr.addAllBranches(); + ta2r->addAllColumns(); } - ta2tr.process(); + ta2r->process(); } } }; diff --git a/Framework/Core/src/TableTreeHelpers.cxx b/Framework/Core/src/TableTreeHelpers.cxx index e6e6a1349e619..4db7a8630ef3a 100644 --- a/Framework/Core/src/TableTreeHelpers.cxx +++ b/Framework/Core/src/TableTreeHelpers.cxx @@ -407,14 +407,33 @@ void ColumnToBranch::nextChunk() accessChunk(); } -TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* treename) +TableToRoot::TableToRoot(std::shared_ptr const& table, TFile* file, const char* name) + : mTable{table.get()}, + mFile{file}, + mName{name} { - mTable = table.get(); - 
mTree.reset(static_cast(file->Get(treename))); +} + +void TableToRoot::addAllColumns() +{ + mRows = mTable->num_rows(); + auto columns = mTable->columns(); + auto fields = mTable->schema()->fields(); + assert(columns.size() == fields.size()); + for (auto i = 0u; i < columns.size(); ++i) { + addColumn(columns[i], fields[i]); + } +} + + +TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* name) + : TableToRoot{table, file, name} +{ + mTree.reset(static_cast(mFile->Get(mName.c_str()))); if (mTree) { return; } - std::string treeName(treename); + std::string treeName(mName); auto pos = treeName.find_first_of('/'); if (pos != std::string::npos) { file->cd(treeName.substr(0, pos).c_str()); @@ -423,18 +442,8 @@ TableToTree::TableToTree(std::shared_ptr const& table, TFile* file mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addAllBranches() -{ - mRows = mTable->num_rows(); - auto columns = mTable->columns(); - auto fields = mTable->schema()->fields(); - assert(columns.size() == fields.size()); - for (auto i = 0u; i < columns.size(); ++i) { - addBranch(columns[i], fields[i]); - } -} -void TableToTree::addBranch(std::shared_ptr const& column, std::shared_ptr const& field) +void TableToTree::addColumn(std::shared_ptr const& column, std::shared_ptr const& field) { if (mRows == 0) { mRows = column->length(); @@ -444,13 +453,13 @@ void TableToTree::addBranch(std::shared_ptr const& column, mColumnReaders.emplace_back(new ColumnToBranch{mTree.get(), column, field}); } -std::shared_ptr TableToTree::process() +void TableToTree::process() { int64_t row = 0; if (mTree->GetNbranches() == 0 || mRows == 0) { mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; + return; } for (auto& reader : mColumnReaders) { @@ -468,7 +477,6 @@ std::shared_ptr TableToTree::process() } mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; } TreeToTable::TreeToTable(arrow::MemoryPool* pool) diff --git a/Framework/Core/test/benchmark_TableToTree.cxx b/Framework/Core/test/benchmark_TableToTree.cxx index 0a7963518f00b..0ca4eb81112b8 100644 --- a/Framework/Core/test/benchmark_TableToTree.cxx +++ b/Framework/Core/test/benchmark_TableToTree.cxx @@ -62,7 +62,7 @@ static void BM_TableToTree(benchmark::State& state) // benchmark TableToTree TableToTree ta2tr(table, &fout, "table2tree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); // clean up diff --git a/Framework/Core/test/benchmark_TreeToTable.cxx b/Framework/Core/test/benchmark_TreeToTable.cxx index 6eca853a90ce4..476e653acaf83 100644 --- a/Framework/Core/test/benchmark_TreeToTable.cxx +++ b/Framework/Core/test/benchmark_TreeToTable.cxx @@ -58,7 +58,7 @@ static void BM_TreeToTable(benchmark::State& state) // now convert the table to a tree TFile fout("tree2table.root", "RECREATE"); TableToTree ta2tr(table, &fout, "tree2table"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); fout.Close(); diff --git a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..32575a8caf3e0 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -143,9 +143,10 @@ TEST_CASE("TreeToTableConversion") // save table as tree TFile* f2 = TFile::Open("table2tree.root", "RECREATE"); TableToTree ta2tr(table, f2, "mytree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); - auto t2 = ta2tr.process(); + ta2tr.process(); + auto t2 = ta2tr.tree(); auto br 
= (TBranch*)t2->GetBranch("ok"); REQUIRE(t2->GetEntries() == ndp); REQUIRE(br->GetEntries() == ndp); @@ -201,8 +202,8 @@ TEST_CASE("VariableLists") auto* f = TFile::Open("variable_lists.root", "RECREATE"); TableToTree ta2tr(table, f, "lists"); - ta2tr.addAllBranches(); - auto tree = ta2tr.process(); + ta2tr.addAllColumns(); + ta2tr.process(); f->Close(); auto* f2 = TFile::Open("variable_lists.root", "READ"); From 83003d35172e4abe0ecf8e2a3fd4efe42000439b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 15:37:09 +0100 Subject: [PATCH 031/726] DPL: have common baseclass to abstract tree writing This is the the first step towards writing something else, e.g. a RNTuple. --- .../Core/include/Framework/TableTreeHelpers.h | 28 +++++++++--- Framework/Core/src/CommonDataProcessors.cxx | 10 ++--- Framework/Core/src/TableTreeHelpers.cxx | 43 +++++++++++-------- Framework/Core/test/benchmark_TableToTree.cxx | 2 +- Framework/Core/test/benchmark_TreeToTable.cxx | 2 +- Framework/Core/test/test_TreeToTable.cxx | 9 ++-- 6 files changed, 58 insertions(+), 36 deletions(-) diff --git a/Framework/Core/include/Framework/TableTreeHelpers.h b/Framework/Core/include/Framework/TableTreeHelpers.h index e12f4a38a973b..78bf0bba11389 100644 --- a/Framework/Core/include/Framework/TableTreeHelpers.h +++ b/Framework/Core/include/Framework/TableTreeHelpers.h @@ -105,18 +105,34 @@ class ColumnToBranch int mFieldSize = 0; }; -class TableToTree +class TableToRoot { public: - TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + TableToRoot(std::shared_ptr const& table, TFile* file, const char* name); + ~TableToRoot() = default; - std::shared_ptr process(); - void addBranch(std::shared_ptr const& column, std::shared_ptr const& field); - void addAllBranches(); + virtual void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) = 0; + virtual void process() = 0; + // Helper function which marks all columns in the table for writing + void addAllColumns(); - private: + protected: arrow::Table* mTable; + TFile* mFile; + std::string mName; int64_t mRows = 0; +}; + +class TableToTree : public TableToRoot +{ + public: + TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + + void process() final; + void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) final; + std::shared_ptr tree() { return mTree; } + + private: std::shared_ptr mTree; std::vector> mColumnReaders; }; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 08283b5e49bb5..c9d8aa835d734 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -417,9 +417,7 @@ DataProcessorSpec for (auto d : ds) { auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile); auto treename = fileAndFolder.folderName + "/" + d->treename; - TableToTree ta2tr(table, - fileAndFolder.file, - treename.c_str()); + std::unique_ptr ta2r = std::make_unique(table, fileAndFolder.file, treename.c_str()); // update metadata if (fileAndFolder.file->FindObjectAny("metaData")) { @@ -438,13 +436,13 @@ DataProcessorSpec auto col = table->column(idx); auto field = table->schema()->field(idx); if (idx != -1) { - ta2tr.addBranch(col, field); + ta2r->addColumn(col, field); } } } else { - ta2tr.addAllBranches(); + ta2r->addAllColumns(); } - ta2tr.process(); + ta2r->process(); } } }; diff --git a/Framework/Core/src/TableTreeHelpers.cxx 
b/Framework/Core/src/TableTreeHelpers.cxx index e6e6a1349e619..015bff26a0606 100644 --- a/Framework/Core/src/TableTreeHelpers.cxx +++ b/Framework/Core/src/TableTreeHelpers.cxx @@ -407,14 +407,32 @@ void ColumnToBranch::nextChunk() accessChunk(); } -TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* treename) +TableToRoot::TableToRoot(std::shared_ptr const& table, TFile* file, const char* name) + : mTable{table.get()}, + mFile{file}, + mName{name} { - mTable = table.get(); - mTree.reset(static_cast(file->Get(treename))); +} + +void TableToRoot::addAllColumns() +{ + mRows = mTable->num_rows(); + auto columns = mTable->columns(); + auto fields = mTable->schema()->fields(); + assert(columns.size() == fields.size()); + for (auto i = 0u; i < columns.size(); ++i) { + addColumn(columns[i], fields[i]); + } +} + +TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* name) + : TableToRoot{table, file, name} +{ + mTree.reset(static_cast(mFile->Get(mName.c_str()))); if (mTree) { return; } - std::string treeName(treename); + std::string treeName(mName); auto pos = treeName.find_first_of('/'); if (pos != std::string::npos) { file->cd(treeName.substr(0, pos).c_str()); @@ -423,18 +441,8 @@ TableToTree::TableToTree(std::shared_ptr const& table, TFile* file mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addAllBranches() -{ - mRows = mTable->num_rows(); - auto columns = mTable->columns(); - auto fields = mTable->schema()->fields(); - assert(columns.size() == fields.size()); - for (auto i = 0u; i < columns.size(); ++i) { - addBranch(columns[i], fields[i]); - } -} -void TableToTree::addBranch(std::shared_ptr const& column, std::shared_ptr const& field) +void TableToTree::addColumn(std::shared_ptr const& column, std::shared_ptr const& field) { if (mRows == 0) { mRows = column->length(); @@ -444,13 +452,13 @@ void TableToTree::addBranch(std::shared_ptr const& column, mColumnReaders.emplace_back(new ColumnToBranch{mTree.get(), column, field}); } -std::shared_ptr TableToTree::process() +void TableToTree::process() { int64_t row = 0; if (mTree->GetNbranches() == 0 || mRows == 0) { mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; + return; } for (auto& reader : mColumnReaders) { @@ -468,7 +476,6 @@ std::shared_ptr TableToTree::process() } mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; } TreeToTable::TreeToTable(arrow::MemoryPool* pool) diff --git a/Framework/Core/test/benchmark_TableToTree.cxx b/Framework/Core/test/benchmark_TableToTree.cxx index 0a7963518f00b..0ca4eb81112b8 100644 --- a/Framework/Core/test/benchmark_TableToTree.cxx +++ b/Framework/Core/test/benchmark_TableToTree.cxx @@ -62,7 +62,7 @@ static void BM_TableToTree(benchmark::State& state) // benchmark TableToTree TableToTree ta2tr(table, &fout, "table2tree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); // clean up diff --git a/Framework/Core/test/benchmark_TreeToTable.cxx b/Framework/Core/test/benchmark_TreeToTable.cxx index 6eca853a90ce4..476e653acaf83 100644 --- a/Framework/Core/test/benchmark_TreeToTable.cxx +++ b/Framework/Core/test/benchmark_TreeToTable.cxx @@ -58,7 +58,7 @@ static void BM_TreeToTable(benchmark::State& state) // now convert the table to a tree TFile fout("tree2table.root", "RECREATE"); TableToTree ta2tr(table, &fout, "tree2table"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); fout.Close(); diff --git 
a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..32575a8caf3e0 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -143,9 +143,10 @@ TEST_CASE("TreeToTableConversion") // save table as tree TFile* f2 = TFile::Open("table2tree.root", "RECREATE"); TableToTree ta2tr(table, f2, "mytree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); - auto t2 = ta2tr.process(); + ta2tr.process(); + auto t2 = ta2tr.tree(); auto br = (TBranch*)t2->GetBranch("ok"); REQUIRE(t2->GetEntries() == ndp); REQUIRE(br->GetEntries() == ndp); @@ -201,8 +202,8 @@ TEST_CASE("VariableLists") auto* f = TFile::Open("variable_lists.root", "RECREATE"); TableToTree ta2tr(table, f, "lists"); - ta2tr.addAllBranches(); - auto tree = ta2tr.process(); + ta2tr.addAllColumns(); + ta2tr.process(); f->Close(); auto* f2 = TFile::Open("variable_lists.root", "READ"); From 695d9f1810e8f6f294915cf187a16126c94823c3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 15:40:29 +0100 Subject: [PATCH 032/726] DPL: have common baseclass to abstract tree writing This is the the first step towards writing something else, e.g. a RNTuple. --- .../Core/include/Framework/TableTreeHelpers.h | 28 +++++++++--- Framework/Core/src/CommonDataProcessors.cxx | 10 ++--- Framework/Core/src/TableTreeHelpers.cxx | 44 +++++++++++-------- Framework/Core/test/benchmark_TableToTree.cxx | 2 +- Framework/Core/test/benchmark_TreeToTable.cxx | 2 +- Framework/Core/test/test_TreeToTable.cxx | 9 ++-- 6 files changed, 58 insertions(+), 37 deletions(-) diff --git a/Framework/Core/include/Framework/TableTreeHelpers.h b/Framework/Core/include/Framework/TableTreeHelpers.h index e12f4a38a973b..78bf0bba11389 100644 --- a/Framework/Core/include/Framework/TableTreeHelpers.h +++ b/Framework/Core/include/Framework/TableTreeHelpers.h @@ -105,18 +105,34 @@ class ColumnToBranch int mFieldSize = 0; }; -class TableToTree +class TableToRoot { public: - TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + TableToRoot(std::shared_ptr const& table, TFile* file, const char* name); + ~TableToRoot() = default; - std::shared_ptr process(); - void addBranch(std::shared_ptr const& column, std::shared_ptr const& field); - void addAllBranches(); + virtual void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) = 0; + virtual void process() = 0; + // Helper function which marks all columns in the table for writing + void addAllColumns(); - private: + protected: arrow::Table* mTable; + TFile* mFile; + std::string mName; int64_t mRows = 0; +}; + +class TableToTree : public TableToRoot +{ + public: + TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + + void process() final; + void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) final; + std::shared_ptr tree() { return mTree; } + + private: std::shared_ptr mTree; std::vector> mColumnReaders; }; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 08283b5e49bb5..c9d8aa835d734 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -417,9 +417,7 @@ DataProcessorSpec for (auto d : ds) { auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile); auto treename = fileAndFolder.folderName + "/" + d->treename; - TableToTree ta2tr(table, - fileAndFolder.file, - treename.c_str()); + 
std::unique_ptr ta2r = std::make_unique(table, fileAndFolder.file, treename.c_str()); // update metadata if (fileAndFolder.file->FindObjectAny("metaData")) { @@ -438,13 +436,13 @@ DataProcessorSpec auto col = table->column(idx); auto field = table->schema()->field(idx); if (idx != -1) { - ta2tr.addBranch(col, field); + ta2r->addColumn(col, field); } } } else { - ta2tr.addAllBranches(); + ta2r->addAllColumns(); } - ta2tr.process(); + ta2r->process(); } } }; diff --git a/Framework/Core/src/TableTreeHelpers.cxx b/Framework/Core/src/TableTreeHelpers.cxx index e6e6a1349e619..1c96157637433 100644 --- a/Framework/Core/src/TableTreeHelpers.cxx +++ b/Framework/Core/src/TableTreeHelpers.cxx @@ -407,34 +407,41 @@ void ColumnToBranch::nextChunk() accessChunk(); } -TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* treename) +TableToRoot::TableToRoot(std::shared_ptr const& table, TFile* file, const char* name) + : mTable{table.get()}, + mFile{file}, + mName{name} { - mTable = table.get(); - mTree.reset(static_cast(file->Get(treename))); - if (mTree) { - return; - } - std::string treeName(treename); - auto pos = treeName.find_first_of('/'); - if (pos != std::string::npos) { - file->cd(treeName.substr(0, pos).c_str()); - treeName = treeName.substr(pos + 1, std::string::npos); - } - mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addAllBranches() +void TableToRoot::addAllColumns() { mRows = mTable->num_rows(); auto columns = mTable->columns(); auto fields = mTable->schema()->fields(); assert(columns.size() == fields.size()); for (auto i = 0u; i < columns.size(); ++i) { - addBranch(columns[i], fields[i]); + addColumn(columns[i], fields[i]); + } +} + +TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* name) + : TableToRoot{table, file, name} +{ + mTree.reset(static_cast(mFile->Get(mName.c_str()))); + if (mTree) { + return; } + std::string treeName(mName); + auto pos = treeName.find_first_of('/'); + if (pos != std::string::npos) { + file->cd(treeName.substr(0, pos).c_str()); + treeName = treeName.substr(pos + 1, std::string::npos); + } + mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addBranch(std::shared_ptr const& column, std::shared_ptr const& field) +void TableToTree::addColumn(std::shared_ptr const& column, std::shared_ptr const& field) { if (mRows == 0) { mRows = column->length(); @@ -444,13 +451,13 @@ void TableToTree::addBranch(std::shared_ptr const& column, mColumnReaders.emplace_back(new ColumnToBranch{mTree.get(), column, field}); } -std::shared_ptr TableToTree::process() +void TableToTree::process() { int64_t row = 0; if (mTree->GetNbranches() == 0 || mRows == 0) { mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; + return; } for (auto& reader : mColumnReaders) { @@ -468,7 +475,6 @@ std::shared_ptr TableToTree::process() } mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; } TreeToTable::TreeToTable(arrow::MemoryPool* pool) diff --git a/Framework/Core/test/benchmark_TableToTree.cxx b/Framework/Core/test/benchmark_TableToTree.cxx index 0a7963518f00b..0ca4eb81112b8 100644 --- a/Framework/Core/test/benchmark_TableToTree.cxx +++ b/Framework/Core/test/benchmark_TableToTree.cxx @@ -62,7 +62,7 @@ static void BM_TableToTree(benchmark::State& state) // benchmark TableToTree TableToTree ta2tr(table, &fout, "table2tree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); // clean up diff --git 
a/Framework/Core/test/benchmark_TreeToTable.cxx b/Framework/Core/test/benchmark_TreeToTable.cxx index 6eca853a90ce4..476e653acaf83 100644 --- a/Framework/Core/test/benchmark_TreeToTable.cxx +++ b/Framework/Core/test/benchmark_TreeToTable.cxx @@ -58,7 +58,7 @@ static void BM_TreeToTable(benchmark::State& state) // now convert the table to a tree TFile fout("tree2table.root", "RECREATE"); TableToTree ta2tr(table, &fout, "tree2table"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); fout.Close(); diff --git a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..32575a8caf3e0 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -143,9 +143,10 @@ TEST_CASE("TreeToTableConversion") // save table as tree TFile* f2 = TFile::Open("table2tree.root", "RECREATE"); TableToTree ta2tr(table, f2, "mytree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); - auto t2 = ta2tr.process(); + ta2tr.process(); + auto t2 = ta2tr.tree(); auto br = (TBranch*)t2->GetBranch("ok"); REQUIRE(t2->GetEntries() == ndp); REQUIRE(br->GetEntries() == ndp); @@ -201,8 +202,8 @@ TEST_CASE("VariableLists") auto* f = TFile::Open("variable_lists.root", "RECREATE"); TableToTree ta2tr(table, f, "lists"); - ta2tr.addAllBranches(); - auto tree = ta2tr.process(); + ta2tr.addAllColumns(); + ta2tr.process(); f->Close(); auto* f2 = TFile::Open("variable_lists.root", "READ"); From bdc4e1f2b9c45cb3a9e319596683ec96e2aafd2a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 15:52:46 +0100 Subject: [PATCH 033/726] Fix warning on unused result --- Detectors/TPC/base/src/Painter.cxx | 2 -- 1 file changed, 2 deletions(-) diff --git a/Detectors/TPC/base/src/Painter.cxx b/Detectors/TPC/base/src/Painter.cxx index 7339a11e4c8e0..0f7b89b71e07a 100644 --- a/Detectors/TPC/base/src/Painter.cxx +++ b/Detectors/TPC/base/src/Painter.cxx @@ -927,9 +927,7 @@ TH3F painter::convertCalDetToTH3(const std::vector>& calDet, const const int binRBottomStart = std::clamp(histConvSum.GetYaxis()->FindBin(radiusPadCoord[0]) - 1, 1, nRBins); const int binRTopEnd = std::clamp(histConvSum.GetYaxis()->FindBin(radiusPadCoord[1]) + 1, 1, nRBins); int binPhiStart = std::min(histConvSum.GetXaxis()->FindBin(phiPadCoord[0]), histConvSum.GetXaxis()->FindBin(phiPadCoord[1])); - std::clamp(binPhiStart - 1, 1, nPhiBins); int binPhiEnd = std::max(histConvSum.GetXaxis()->FindBin(phiPadCoord[2]), histConvSum.GetXaxis()->FindBin(phiPadCoord[3])); - std::clamp(binPhiEnd + 1, 1, nPhiBins); // define boost geoemtry object polygon geoPad; From 5ff18e2b36b14c28d2a983b52f1c846420ad9602 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 22:08:44 +0100 Subject: [PATCH 034/726] Make sure all initializer clauses are designated Not doing so is actually an error and breaks C++20. --- Framework/Core/src/DataProcessingDevice.cxx | 4 +++- Framework/Core/src/DataRelayer.cxx | 16 +++++++++------- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index b39607366adec..0bb0d6bf543ac 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2177,7 +2177,9 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v buffer[ai] = record.isValid(ai) ? 
'3' : '0'; } buffer[record.size()] = 0; - states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), (int)(record.size() + buffer - relayerSlotState), relayerSlotState}); + states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), + .size = (int)(record.size() + buffer - relayerSlotState), + .data = relayerSlotState}); uint64_t tEnd = uv_hrtime(); // tEnd and tStart are in nanoseconds according to https://docs.libuv.org/en/v1.x/misc.html#c.uv_hrtime int64_t wallTimeMs = (tEnd - tStart) / 1000000; diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index 7dc6d28a1a11f..94711ac5d954b 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -271,7 +271,9 @@ void sendVariableContextMetrics(VariableContext& context, TimesliceSlot slot, Da } state += ";"; } - states.updateState({.id = short((int)ProcessingStateId::CONTEXT_VARIABLES_BASE + slot.index), (int)state.size(), state.data()}); + states.updateState({.id = short((int)ProcessingStateId::CONTEXT_VARIABLES_BASE + slot.index), + .size = (int)state.size(), + .data = state.data()}); }, &states, slot); } @@ -516,7 +518,7 @@ DataRelayer::RelayChoice index.publishSlot(slot); index.markAsDirty(slot, true); stats.updateStats({static_cast(ProcessingStatsId::RELAYED_MESSAGES), DataProcessingStats::Op::Add, (int)1}); - return RelayChoice{RelayChoice::Type::WillRelay, timeslice}; + return RelayChoice{.type = RelayChoice::Type::WillRelay, .timeslice = timeslice}; } /// If not, we find which timeslice we really were looking at @@ -543,7 +545,7 @@ DataRelayer::RelayChoice for (size_t pi = 0; pi < nMessages; ++pi) { messages[pi].reset(nullptr); } - return RelayChoice{.type = RelayChoice::Type::Invalid, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Invalid, .timeslice = timeslice}; } if (TimesliceId::isValid(timeslice) == false) { @@ -553,7 +555,7 @@ DataRelayer::RelayChoice for (size_t pi = 0; pi < nMessages; ++pi) { messages[pi].reset(nullptr); } - return RelayChoice{.type = RelayChoice::Type::Invalid, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Invalid, .timeslice = timeslice}; } TimesliceIndex::ActionTaken action; @@ -563,7 +565,7 @@ DataRelayer::RelayChoice switch (action) { case TimesliceIndex::ActionTaken::Wait: - return RelayChoice{.type = RelayChoice::Type::Backpressured, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Backpressured, .timeslice = timeslice}; case TimesliceIndex::ActionTaken::DropObsolete: static std::atomic obsoleteCount = 0; static std::atomic mult = 1; @@ -573,7 +575,7 @@ DataRelayer::RelayChoice mult = mult * 10; } } - return RelayChoice{.type = RelayChoice::Type::Dropped, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Dropped, .timeslice = timeslice}; case TimesliceIndex::ActionTaken::DropInvalid: LOG(warning) << "Incoming data is invalid, not relaying."; stats.updateStats({static_cast(ProcessingStatsId::MALFORMED_INPUTS), DataProcessingStats::Op::Add, (int)1}); @@ -581,7 +583,7 @@ DataRelayer::RelayChoice for (size_t pi = 0; pi < nMessages; ++pi) { messages[pi].reset(nullptr); } - return RelayChoice{.type = RelayChoice::Type::Invalid, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Invalid, .timeslice = timeslice}; case TimesliceIndex::ActionTaken::ReplaceUnused: case TimesliceIndex::ActionTaken::ReplaceObsolete: // At this point the variables match the new input but the From b4f312d7191d3e997e64a3e95413cdbee3fe90d4 Mon 
Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 22:08:44 +0100 Subject: [PATCH 035/726] DPL: fix bad API usage --- Framework/Core/src/DataSender.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/DataSender.cxx b/Framework/Core/src/DataSender.cxx index 8b63a2c45ec32..f0753102e2d40 100644 --- a/Framework/Core/src/DataSender.cxx +++ b/Framework/Core/src/DataSender.cxx @@ -119,7 +119,7 @@ void DataSender::verifyMissingSporadic() const { for (auto present : mPresent) { if (!present) { - LOGP(debug, O2DataModelHelpers::describeMissingOutputs(mOutputs, mPresent).c_str()); + LOGP(debug, "{}", O2DataModelHelpers::describeMissingOutputs(mOutputs, mPresent).c_str()); return; } } From b9ddf10d376877b581ae1c2d3df514bba7477b11 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 22:18:03 +0100 Subject: [PATCH 036/726] Make sure all initializer clauses are designated Not doing so is actually an error and breaks C++20. --- Framework/Core/src/DataProcessingDevice.cxx | 4 +++- Framework/Core/src/DataRelayer.cxx | 16 +++++++++------- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index b39607366adec..9a575a95b078a 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2177,7 +2177,9 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v buffer[ai] = record.isValid(ai) ? '3' : '0'; } buffer[record.size()] = 0; - states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), (int)(record.size() + buffer - relayerSlotState), relayerSlotState}); + states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), + .size = (int)(record.size() + buffer - relayerSlotState), + .data = relayerSlotState}); uint64_t tEnd = uv_hrtime(); // tEnd and tStart are in nanoseconds according to https://docs.libuv.org/en/v1.x/misc.html#c.uv_hrtime int64_t wallTimeMs = (tEnd - tStart) / 1000000; diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index 7dc6d28a1a11f..c507d6eb308e3 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -271,7 +271,9 @@ void sendVariableContextMetrics(VariableContext& context, TimesliceSlot slot, Da } state += ";"; } - states.updateState({.id = short((int)ProcessingStateId::CONTEXT_VARIABLES_BASE + slot.index), (int)state.size(), state.data()}); + states.updateState({.id = short((int)ProcessingStateId::CONTEXT_VARIABLES_BASE + slot.index), + .size = (int)state.size(), + .data = state.data()}); }, &states, slot); } @@ -516,7 +518,7 @@ DataRelayer::RelayChoice index.publishSlot(slot); index.markAsDirty(slot, true); stats.updateStats({static_cast(ProcessingStatsId::RELAYED_MESSAGES), DataProcessingStats::Op::Add, (int)1}); - return RelayChoice{RelayChoice::Type::WillRelay, timeslice}; + return RelayChoice{.type = RelayChoice::Type::WillRelay, .timeslice = timeslice}; } /// If not, we find which timeslice we really were looking at @@ -543,7 +545,7 @@ DataRelayer::RelayChoice for (size_t pi = 0; pi < nMessages; ++pi) { messages[pi].reset(nullptr); } - return RelayChoice{.type = RelayChoice::Type::Invalid, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Invalid, .timeslice = timeslice}; } if (TimesliceId::isValid(timeslice) == 
false) { @@ -553,7 +555,7 @@ DataRelayer::RelayChoice for (size_t pi = 0; pi < nMessages; ++pi) { messages[pi].reset(nullptr); } - return RelayChoice{.type = RelayChoice::Type::Invalid, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Invalid, .timeslice = timeslice}; } TimesliceIndex::ActionTaken action; @@ -563,7 +565,7 @@ DataRelayer::RelayChoice switch (action) { case TimesliceIndex::ActionTaken::Wait: - return RelayChoice{.type = RelayChoice::Type::Backpressured, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Backpressured, .timeslice = timeslice}; case TimesliceIndex::ActionTaken::DropObsolete: static std::atomic obsoleteCount = 0; static std::atomic mult = 1; @@ -573,7 +575,7 @@ DataRelayer::RelayChoice mult = mult * 10; } } - return RelayChoice{.type = RelayChoice::Type::Dropped, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Dropped, .timeslice = timeslice}; case TimesliceIndex::ActionTaken::DropInvalid: LOG(warning) << "Incoming data is invalid, not relaying."; stats.updateStats({static_cast(ProcessingStatsId::MALFORMED_INPUTS), DataProcessingStats::Op::Add, (int)1}); @@ -581,7 +583,7 @@ DataRelayer::RelayChoice for (size_t pi = 0; pi < nMessages; ++pi) { messages[pi].reset(nullptr); } - return RelayChoice{.type = RelayChoice::Type::Invalid, timeslice}; + return RelayChoice{.type = RelayChoice::Type::Invalid, .timeslice = timeslice}; case TimesliceIndex::ActionTaken::ReplaceUnused: case TimesliceIndex::ActionTaken::ReplaceObsolete: // At this point the variables match the new input but the From 63f4e3579ea1dad836f5b5614037bda350892d68 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 31 Oct 2023 22:18:03 +0100 Subject: [PATCH 037/726] DPL: fix bad API usage --- Framework/Core/src/DataSender.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/DataSender.cxx b/Framework/Core/src/DataSender.cxx index 8b63a2c45ec32..f0753102e2d40 100644 --- a/Framework/Core/src/DataSender.cxx +++ b/Framework/Core/src/DataSender.cxx @@ -119,7 +119,7 @@ void DataSender::verifyMissingSporadic() const { for (auto present : mPresent) { if (!present) { - LOGP(debug, O2DataModelHelpers::describeMissingOutputs(mOutputs, mPresent).c_str()); + LOGP(debug, "{}", O2DataModelHelpers::describeMissingOutputs(mOutputs, mPresent).c_str()); return; } } From 3214f289f6402cedd443d5d6b1691928048ad879 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 2 Nov 2023 14:26:05 +0100 Subject: [PATCH 038/726] DPL Analysis: add version to the table size --- Framework/Core/src/DataProcessor.cxx | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/DataProcessor.cxx b/Framework/Core/src/DataProcessor.cxx index bdfd740a998b3..e9938a304b8a6 100644 --- a/Framework/Core/src/DataProcessor.cxx +++ b/Framework/Core/src/DataProcessor.cxx @@ -107,12 +107,14 @@ void DataProcessor::doSend(DataSender& sender, ArrowContext& context, ServiceReg auto origin = std::regex_replace(dh->dataOrigin.as(), invalid_metric, "_"); auto description = std::regex_replace(dh->dataDescription.as(), invalid_metric, "_"); + uint64_t version = dh->subSpecification; monitoring.send(Metric{(uint64_t)payload->GetSize(), - fmt::format("table-bytes-{}-{}-created", + fmt::format("table-bytes-{}-{}-{}-created", origin, - description)} + description, + version)} .addTag(Key::Subsystem, Value::DPL)); - LOGP(detail, "Creating {}MB for table {}/{}.", payload->GetSize() / 1000000., 
dh->dataOrigin, dh->dataDescription); + LOGP(detail, "Creating {}MB for table {}/{}/{}.", payload->GetSize() / 1000000., dh->dataOrigin, dh->dataDescription, version); context.updateBytesSent(payload->GetSize()); context.updateMessagesSent(1); parts.AddPart(std::move(messageRef.header)); From e6be56b71f47ea24b8248dfa66b1853031120916 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 3 Nov 2023 11:02:04 +0100 Subject: [PATCH 039/726] DPL: only listen to SIGUSR1 when the task is alive --- Framework/Core/include/Framework/DeviceContext.h | 2 ++ Framework/Core/src/DataProcessingDevice.cxx | 15 +++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Framework/Core/include/Framework/DeviceContext.h b/Framework/Core/include/Framework/DeviceContext.h index d34192ec6ad3d..3777e7f608b75 100644 --- a/Framework/Core/include/Framework/DeviceContext.h +++ b/Framework/Core/include/Framework/DeviceContext.h @@ -16,6 +16,7 @@ #define O2_FRAMEWORK_DEVICECONTEXT_H_ typedef struct uv_timer_s uv_timer_t; +typedef struct uv_signal_s uv_signal_t; namespace o2::framework { @@ -27,6 +28,7 @@ struct ComputingQuotaStats; struct DeviceContext { ComputingQuotaStats* quotaStats = nullptr; uv_timer_t* gracePeriodTimer = nullptr; + uv_signal_t* sigusr1Handle = nullptr; int expectedRegionCallbacks = 0; int exitTransitionTimeout = 0; }; diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 9a575a95b078a..0bfeb7df2fdc5 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -972,10 +972,12 @@ void DataProcessingDevice::InitTask() // an event from the outside, making sure that the event loop can // be unblocked (e.g. by a quitting DPL driver) even when there // is no data pending to be processed. 
- auto* sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t)); - uv_signal_init(state.loop, sigusr1Handle); - sigusr1Handle->data = &mServiceRegistry; - uv_signal_start(sigusr1Handle, on_signal_callback, SIGUSR1); + if (deviceContext.sigusr1Handle == nullptr) { + deviceContext.sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t)); + deviceContext.sigusr1Handle->data = &mServiceRegistry; + uv_signal_init(state.loop, deviceContext.sigusr1Handle); + } + uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1); /// Initialise the pollers DataProcessingDevice::initPollers(); @@ -1675,6 +1677,11 @@ void DataProcessingDevice::ResetTask() { ServiceRegistryRef ref{mServiceRegistry}; ref.get().clear(); + auto &deviceContext = ref.get(); + // Stop the signal handler + if (deviceContext.sigusr1Handle) { + uv_signal_stop(deviceContext.sigusr1Handle); + } } struct WaitBackpressurePolicy { From 484aff8091d90b5e9b1d6da59b9feb3f18aa30c1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 3 Nov 2023 11:37:46 +0100 Subject: [PATCH 040/726] DPL: only listen to SIGUSR1 when the task is alive --- Framework/Core/include/Framework/DeviceContext.h | 2 ++ Framework/Core/src/DataProcessingDevice.cxx | 15 +++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Framework/Core/include/Framework/DeviceContext.h b/Framework/Core/include/Framework/DeviceContext.h index d34192ec6ad3d..3777e7f608b75 100644 --- a/Framework/Core/include/Framework/DeviceContext.h +++ b/Framework/Core/include/Framework/DeviceContext.h @@ -16,6 +16,7 @@ #define O2_FRAMEWORK_DEVICECONTEXT_H_ typedef struct uv_timer_s uv_timer_t; +typedef struct uv_signal_s uv_signal_t; namespace o2::framework { @@ -27,6 +28,7 @@ struct ComputingQuotaStats; struct DeviceContext { ComputingQuotaStats* quotaStats = nullptr; uv_timer_t* gracePeriodTimer = nullptr; + uv_signal_t* sigusr1Handle = nullptr; int expectedRegionCallbacks = 0; int exitTransitionTimeout = 0; }; diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 9a575a95b078a..1a4ec7ae1b04f 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -972,10 +972,12 @@ void DataProcessingDevice::InitTask() // an event from the outside, making sure that the event loop can // be unblocked (e.g. by a quitting DPL driver) even when there // is no data pending to be processed. 
- auto* sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t)); - uv_signal_init(state.loop, sigusr1Handle); - sigusr1Handle->data = &mServiceRegistry; - uv_signal_start(sigusr1Handle, on_signal_callback, SIGUSR1); + if (deviceContext.sigusr1Handle == nullptr) { + deviceContext.sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t)); + deviceContext.sigusr1Handle->data = &mServiceRegistry; + uv_signal_init(state.loop, deviceContext.sigusr1Handle); + } + uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1); /// Initialise the pollers DataProcessingDevice::initPollers(); @@ -1675,6 +1677,11 @@ void DataProcessingDevice::ResetTask() { ServiceRegistryRef ref{mServiceRegistry}; ref.get().clear(); + auto& deviceContext = ref.get(); + // Stop the signal handler + if (deviceContext.sigusr1Handle) { + uv_signal_stop(deviceContext.sigusr1Handle); + } } struct WaitBackpressurePolicy { From 96f12c21eab217649188f26bde5bd93355252ccb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 3 Nov 2023 13:29:36 +0100 Subject: [PATCH 041/726] DPL: only listen to SIGUSR1 when the task is alive --- .../Core/include/Framework/DeviceContext.h | 2 ++ Framework/Core/src/DataProcessingDevice.cxx | 24 +++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/Framework/Core/include/Framework/DeviceContext.h b/Framework/Core/include/Framework/DeviceContext.h index d34192ec6ad3d..3777e7f608b75 100644 --- a/Framework/Core/include/Framework/DeviceContext.h +++ b/Framework/Core/include/Framework/DeviceContext.h @@ -16,6 +16,7 @@ #define O2_FRAMEWORK_DEVICECONTEXT_H_ typedef struct uv_timer_s uv_timer_t; +typedef struct uv_signal_s uv_signal_t; namespace o2::framework { @@ -27,6 +28,7 @@ struct ComputingQuotaStats; struct DeviceContext { ComputingQuotaStats* quotaStats = nullptr; uv_timer_t* gracePeriodTimer = nullptr; + uv_signal_t* sigusr1Handle = nullptr; int expectedRegionCallbacks = 0; int exitTransitionTimeout = 0; }; diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 9a575a95b078a..5542be850fb70 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -493,6 +493,10 @@ void on_signal_callback(uv_signal_t* handle, int signum) ZoneScopedN("Signal callaback"); LOG(debug) << "Signal " << signum << " received."; auto* registry = (ServiceRegistry*)handle->data; + if (!registry) { + LOG(debug) << "No registry active. Ignoring signal"; + return; + } ServiceRegistryRef ref{*registry}; auto& state = ref.get(); auto& quotaEvaluator = ref.get(); @@ -972,10 +976,14 @@ void DataProcessingDevice::InitTask() // an event from the outside, making sure that the event loop can // be unblocked (e.g. by a quitting DPL driver) even when there // is no data pending to be processed. 
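Taken together, PATCH 039-041 converge on the SIGUSR1 handler lifecycle condensed below. This is a sketch assembled from the three diffs rather than a literal excerpt (the ResetTask part appears in the remainder of PATCH 041 just below):

// Condensed sketch of the handler lifecycle after PATCH 041; not a literal excerpt.
// InitTask(): allocate and start the handler only once, then always (re)attach the registry.
if (deviceContext.sigusr1Handle == nullptr) {
  deviceContext.sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t));
  deviceContext.sigusr1Handle->data = &mServiceRegistry;
  uv_signal_init(state.loop, deviceContext.sigusr1Handle);
  uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1);
}
deviceContext.sigusr1Handle->data = &mServiceRegistry;

// ResetTask(): hide the registry so a signal arriving after reset cannot touch a stale object.
if (deviceContext.sigusr1Handle) {
  deviceContext.sigusr1Handle->data = nullptr;
}

// on_signal_callback(): bail out when no registry is attached.
auto* registry = (ServiceRegistry*)handle->data;
if (!registry) {
  LOG(debug) << "No registry active. Ignoring signal";
  return;
}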
- auto* sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t)); - uv_signal_init(state.loop, sigusr1Handle); - sigusr1Handle->data = &mServiceRegistry; - uv_signal_start(sigusr1Handle, on_signal_callback, SIGUSR1); + if (deviceContext.sigusr1Handle == nullptr) { + deviceContext.sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t)); + deviceContext.sigusr1Handle->data = &mServiceRegistry; + uv_signal_init(state.loop, deviceContext.sigusr1Handle); + uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1); + } + // When we start, we must make sure that we do listen to the signal + deviceContext.sigusr1Handle->data = &mServiceRegistry; /// Initialise the pollers DataProcessingDevice::initPollers(); @@ -1675,6 +1683,14 @@ void DataProcessingDevice::ResetTask() { ServiceRegistryRef ref{mServiceRegistry}; ref.get().clear(); + auto& deviceContext = ref.get(); + // If the signal handler is there, we should + // hide the registry from it, so that we do not + // end up calling the signal handler on something + // which might not be there anymore. + if (deviceContext.sigusr1Handle) { + deviceContext.sigusr1Handle->data = nullptr; + } } struct WaitBackpressurePolicy { From 0d7959dc6e6921d18fe48832db0a1c77d01d472c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 4 Nov 2023 10:02:21 +0100 Subject: [PATCH 042/726] DPL: have common baseclass to abstract tree writing This is the the first step towards writing something else, e.g. a RNTuple. --- Detectors/AOD/src/StandaloneAODProducer.cxx | 6 +-- .../Core/include/Framework/TableTreeHelpers.h | 28 +++++++++--- Framework/Core/src/CommonDataProcessors.cxx | 10 ++--- Framework/Core/src/TableTreeHelpers.cxx | 44 +++++++++++-------- Framework/Core/test/benchmark_TableToTree.cxx | 2 +- Framework/Core/test/benchmark_TreeToTable.cxx | 2 +- Framework/Core/test/test_TreeToTable.cxx | 9 ++-- 7 files changed, 61 insertions(+), 40 deletions(-) diff --git a/Detectors/AOD/src/StandaloneAODProducer.cxx b/Detectors/AOD/src/StandaloneAODProducer.cxx index 151038465c1d1..e37a877cf05b1 100644 --- a/Detectors/AOD/src/StandaloneAODProducer.cxx +++ b/Detectors/AOD/src/StandaloneAODProducer.cxx @@ -95,7 +95,7 @@ void fillMCollisionTable(o2::steer::MCKinematicsReader const& mcreader) TFile outfile("aod.root", "UPDATE"); { TableToTree t2t(mccoltable, &outfile, aod::MetadataTrait::metadata::tableLabel()); - t2t.addAllBranches(); + t2t.addAllColumns(); t2t.process(); } } @@ -201,12 +201,12 @@ void fillCollisionAndTrackTable() TFile outfile("aod.root", "RECREATE"); { TableToTree t2t(colltable, &outfile, aod::MetadataTrait::metadata::tableLabel()); - t2t.addAllBranches(); + t2t.addAllColumns(); t2t.process(); } { TableToTree t2t(tracktable, &outfile, "Tracks" /* aod::MetadataTrait::metadata::tableLabel() */); - t2t.addAllBranches(); + t2t.addAllColumns(); t2t.process(); } } diff --git a/Framework/Core/include/Framework/TableTreeHelpers.h b/Framework/Core/include/Framework/TableTreeHelpers.h index e12f4a38a973b..78bf0bba11389 100644 --- a/Framework/Core/include/Framework/TableTreeHelpers.h +++ b/Framework/Core/include/Framework/TableTreeHelpers.h @@ -105,18 +105,34 @@ class ColumnToBranch int mFieldSize = 0; }; -class TableToTree +class TableToRoot { public: - TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + TableToRoot(std::shared_ptr const& table, TFile* file, const char* name); + ~TableToRoot() = default; - std::shared_ptr process(); - void addBranch(std::shared_ptr const& column, 
std::shared_ptr const& field); - void addAllBranches(); + virtual void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) = 0; + virtual void process() = 0; + // Helper function which marks all columns in the table for writing + void addAllColumns(); - private: + protected: arrow::Table* mTable; + TFile* mFile; + std::string mName; int64_t mRows = 0; +}; + +class TableToTree : public TableToRoot +{ + public: + TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + + void process() final; + void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) final; + std::shared_ptr tree() { return mTree; } + + private: std::shared_ptr mTree; std::vector> mColumnReaders; }; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 08283b5e49bb5..c9d8aa835d734 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -417,9 +417,7 @@ DataProcessorSpec for (auto d : ds) { auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile); auto treename = fileAndFolder.folderName + "/" + d->treename; - TableToTree ta2tr(table, - fileAndFolder.file, - treename.c_str()); + std::unique_ptr ta2r = std::make_unique(table, fileAndFolder.file, treename.c_str()); // update metadata if (fileAndFolder.file->FindObjectAny("metaData")) { @@ -438,13 +436,13 @@ DataProcessorSpec auto col = table->column(idx); auto field = table->schema()->field(idx); if (idx != -1) { - ta2tr.addBranch(col, field); + ta2r->addColumn(col, field); } } } else { - ta2tr.addAllBranches(); + ta2r->addAllColumns(); } - ta2tr.process(); + ta2r->process(); } } }; diff --git a/Framework/Core/src/TableTreeHelpers.cxx b/Framework/Core/src/TableTreeHelpers.cxx index e6e6a1349e619..1c96157637433 100644 --- a/Framework/Core/src/TableTreeHelpers.cxx +++ b/Framework/Core/src/TableTreeHelpers.cxx @@ -407,34 +407,41 @@ void ColumnToBranch::nextChunk() accessChunk(); } -TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* treename) +TableToRoot::TableToRoot(std::shared_ptr const& table, TFile* file, const char* name) + : mTable{table.get()}, + mFile{file}, + mName{name} { - mTable = table.get(); - mTree.reset(static_cast(file->Get(treename))); - if (mTree) { - return; - } - std::string treeName(treename); - auto pos = treeName.find_first_of('/'); - if (pos != std::string::npos) { - file->cd(treeName.substr(0, pos).c_str()); - treeName = treeName.substr(pos + 1, std::string::npos); - } - mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addAllBranches() +void TableToRoot::addAllColumns() { mRows = mTable->num_rows(); auto columns = mTable->columns(); auto fields = mTable->schema()->fields(); assert(columns.size() == fields.size()); for (auto i = 0u; i < columns.size(); ++i) { - addBranch(columns[i], fields[i]); + addColumn(columns[i], fields[i]); + } +} + +TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* name) + : TableToRoot{table, file, name} +{ + mTree.reset(static_cast(mFile->Get(mName.c_str()))); + if (mTree) { + return; } + std::string treeName(mName); + auto pos = treeName.find_first_of('/'); + if (pos != std::string::npos) { + file->cd(treeName.substr(0, pos).c_str()); + treeName = treeName.substr(pos + 1, std::string::npos); + } + mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addBranch(std::shared_ptr const& column, std::shared_ptr const& field) +void 
TableToTree::addColumn(std::shared_ptr const& column, std::shared_ptr const& field) { if (mRows == 0) { mRows = column->length(); @@ -444,13 +451,13 @@ void TableToTree::addBranch(std::shared_ptr const& column, mColumnReaders.emplace_back(new ColumnToBranch{mTree.get(), column, field}); } -std::shared_ptr TableToTree::process() +void TableToTree::process() { int64_t row = 0; if (mTree->GetNbranches() == 0 || mRows == 0) { mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; + return; } for (auto& reader : mColumnReaders) { @@ -468,7 +475,6 @@ std::shared_ptr TableToTree::process() } mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; } TreeToTable::TreeToTable(arrow::MemoryPool* pool) diff --git a/Framework/Core/test/benchmark_TableToTree.cxx b/Framework/Core/test/benchmark_TableToTree.cxx index 0a7963518f00b..0ca4eb81112b8 100644 --- a/Framework/Core/test/benchmark_TableToTree.cxx +++ b/Framework/Core/test/benchmark_TableToTree.cxx @@ -62,7 +62,7 @@ static void BM_TableToTree(benchmark::State& state) // benchmark TableToTree TableToTree ta2tr(table, &fout, "table2tree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); // clean up diff --git a/Framework/Core/test/benchmark_TreeToTable.cxx b/Framework/Core/test/benchmark_TreeToTable.cxx index 6eca853a90ce4..476e653acaf83 100644 --- a/Framework/Core/test/benchmark_TreeToTable.cxx +++ b/Framework/Core/test/benchmark_TreeToTable.cxx @@ -58,7 +58,7 @@ static void BM_TreeToTable(benchmark::State& state) // now convert the table to a tree TFile fout("tree2table.root", "RECREATE"); TableToTree ta2tr(table, &fout, "tree2table"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); fout.Close(); diff --git a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..32575a8caf3e0 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -143,9 +143,10 @@ TEST_CASE("TreeToTableConversion") // save table as tree TFile* f2 = TFile::Open("table2tree.root", "RECREATE"); TableToTree ta2tr(table, f2, "mytree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); - auto t2 = ta2tr.process(); + ta2tr.process(); + auto t2 = ta2tr.tree(); auto br = (TBranch*)t2->GetBranch("ok"); REQUIRE(t2->GetEntries() == ndp); REQUIRE(br->GetEntries() == ndp); @@ -201,8 +202,8 @@ TEST_CASE("VariableLists") auto* f = TFile::Open("variable_lists.root", "RECREATE"); TableToTree ta2tr(table, f, "lists"); - ta2tr.addAllBranches(); - auto tree = ta2tr.process(); + ta2tr.addAllColumns(); + ta2tr.process(); f->Close(); auto* f2 = TFile::Open("variable_lists.root", "READ"); From 7ca1f03bbd7f825e45511460a3dfa24acb31457d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:11:40 +0100 Subject: [PATCH 043/726] DPL: have common baseclass to abstract tree writing This is the the first step towards writing something else, e.g. a RNTuple. 
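The point of splitting TableToRoot out of TableToTree is that a second backend can later sit next to the TTree one without touching the writer call sites. A minimal sketch of such a sibling follows; the class name TableToRNTuple and everything in its body are illustrative assumptions only, not something introduced by this patch series, which deliberately stops at the abstract interface.

// Hypothetical sketch: a future RNTuple-backed writer reusing the TableToRoot interface.
// Neither this class nor any RNTuple call exists in these patches.
class TableToRNTuple : public TableToRoot
{
 public:
  TableToRNTuple(std::shared_ptr<arrow::Table> const& table, TFile* file, const char* name)
    : TableToRoot{table, file, name}
  {
    // Would create the RNTuple model/writer for mName inside mFile here.
  }

  void addColumn(std::shared_ptr<arrow::ChunkedArray> const& column,
                 std::shared_ptr<arrow::Field> const& field) final
  {
    if (mRows == 0) {
      mRows = column->length();
    }
    // Would register an RNTuple field matching the Arrow type of `field`.
  }

  void process() final
  {
    // Would walk the mRows rows of mTable and commit them to the RNTuple.
  }
};

Because column selection (addColumn / addAllColumns) lives in the base class, the writer in CommonDataProcessors.cxx only talks to the abstract interface through the ta2r smart pointer shown in the diffs and never needs to know which concrete backend it drives.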
--- Detectors/AOD/src/StandaloneAODProducer.cxx | 6 +-- .../Core/include/Framework/TableTreeHelpers.h | 28 +++++++++--- Framework/Core/src/CommonDataProcessors.cxx | 10 ++--- Framework/Core/src/TableTreeHelpers.cxx | 44 +++++++++++-------- Framework/Core/test/benchmark_TableToTree.cxx | 2 +- Framework/Core/test/benchmark_TreeToTable.cxx | 2 +- Framework/Core/test/test_TreeToTable.cxx | 9 ++-- 7 files changed, 61 insertions(+), 40 deletions(-) diff --git a/Detectors/AOD/src/StandaloneAODProducer.cxx b/Detectors/AOD/src/StandaloneAODProducer.cxx index 151038465c1d1..e37a877cf05b1 100644 --- a/Detectors/AOD/src/StandaloneAODProducer.cxx +++ b/Detectors/AOD/src/StandaloneAODProducer.cxx @@ -95,7 +95,7 @@ void fillMCollisionTable(o2::steer::MCKinematicsReader const& mcreader) TFile outfile("aod.root", "UPDATE"); { TableToTree t2t(mccoltable, &outfile, aod::MetadataTrait::metadata::tableLabel()); - t2t.addAllBranches(); + t2t.addAllColumns(); t2t.process(); } } @@ -201,12 +201,12 @@ void fillCollisionAndTrackTable() TFile outfile("aod.root", "RECREATE"); { TableToTree t2t(colltable, &outfile, aod::MetadataTrait::metadata::tableLabel()); - t2t.addAllBranches(); + t2t.addAllColumns(); t2t.process(); } { TableToTree t2t(tracktable, &outfile, "Tracks" /* aod::MetadataTrait::metadata::tableLabel() */); - t2t.addAllBranches(); + t2t.addAllColumns(); t2t.process(); } } diff --git a/Framework/Core/include/Framework/TableTreeHelpers.h b/Framework/Core/include/Framework/TableTreeHelpers.h index e12f4a38a973b..bcf68e57844ec 100644 --- a/Framework/Core/include/Framework/TableTreeHelpers.h +++ b/Framework/Core/include/Framework/TableTreeHelpers.h @@ -105,18 +105,34 @@ class ColumnToBranch int mFieldSize = 0; }; -class TableToTree +class TableToRoot { public: - TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + TableToRoot(std::shared_ptr const& table, TFile* file, const char* name); + virtual ~TableToRoot() = default; - std::shared_ptr process(); - void addBranch(std::shared_ptr const& column, std::shared_ptr const& field); - void addAllBranches(); + virtual void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) = 0; + virtual void process() = 0; + // Helper function which marks all columns in the table for writing + void addAllColumns(); - private: + protected: arrow::Table* mTable; + TFile* mFile; + std::string mName; int64_t mRows = 0; +}; + +class TableToTree : public TableToRoot +{ + public: + TableToTree(std::shared_ptr const& table, TFile* file, const char* treename); + + void process() final; + void addColumn(std::shared_ptr const& column, std::shared_ptr const& field) final; + std::shared_ptr tree() { return mTree; } + + private: std::shared_ptr mTree; std::vector> mColumnReaders; }; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 08283b5e49bb5..c9d8aa835d734 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -417,9 +417,7 @@ DataProcessorSpec for (auto d : ds) { auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile); auto treename = fileAndFolder.folderName + "/" + d->treename; - TableToTree ta2tr(table, - fileAndFolder.file, - treename.c_str()); + std::unique_ptr ta2r = std::make_unique(table, fileAndFolder.file, treename.c_str()); // update metadata if (fileAndFolder.file->FindObjectAny("metaData")) { @@ -438,13 +436,13 @@ DataProcessorSpec auto col = table->column(idx); auto field = table->schema()->field(idx); if 
(idx != -1) { - ta2tr.addBranch(col, field); + ta2r->addColumn(col, field); } } } else { - ta2tr.addAllBranches(); + ta2r->addAllColumns(); } - ta2tr.process(); + ta2r->process(); } } }; diff --git a/Framework/Core/src/TableTreeHelpers.cxx b/Framework/Core/src/TableTreeHelpers.cxx index e6e6a1349e619..1c96157637433 100644 --- a/Framework/Core/src/TableTreeHelpers.cxx +++ b/Framework/Core/src/TableTreeHelpers.cxx @@ -407,34 +407,41 @@ void ColumnToBranch::nextChunk() accessChunk(); } -TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* treename) +TableToRoot::TableToRoot(std::shared_ptr const& table, TFile* file, const char* name) + : mTable{table.get()}, + mFile{file}, + mName{name} { - mTable = table.get(); - mTree.reset(static_cast(file->Get(treename))); - if (mTree) { - return; - } - std::string treeName(treename); - auto pos = treeName.find_first_of('/'); - if (pos != std::string::npos) { - file->cd(treeName.substr(0, pos).c_str()); - treeName = treeName.substr(pos + 1, std::string::npos); - } - mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addAllBranches() +void TableToRoot::addAllColumns() { mRows = mTable->num_rows(); auto columns = mTable->columns(); auto fields = mTable->schema()->fields(); assert(columns.size() == fields.size()); for (auto i = 0u; i < columns.size(); ++i) { - addBranch(columns[i], fields[i]); + addColumn(columns[i], fields[i]); + } +} + +TableToTree::TableToTree(std::shared_ptr const& table, TFile* file, const char* name) + : TableToRoot{table, file, name} +{ + mTree.reset(static_cast(mFile->Get(mName.c_str()))); + if (mTree) { + return; } + std::string treeName(mName); + auto pos = treeName.find_first_of('/'); + if (pos != std::string::npos) { + file->cd(treeName.substr(0, pos).c_str()); + treeName = treeName.substr(pos + 1, std::string::npos); + } + mTree = std::make_shared(treeName.c_str(), treeName.c_str()); } -void TableToTree::addBranch(std::shared_ptr const& column, std::shared_ptr const& field) +void TableToTree::addColumn(std::shared_ptr const& column, std::shared_ptr const& field) { if (mRows == 0) { mRows = column->length(); @@ -444,13 +451,13 @@ void TableToTree::addBranch(std::shared_ptr const& column, mColumnReaders.emplace_back(new ColumnToBranch{mTree.get(), column, field}); } -std::shared_ptr TableToTree::process() +void TableToTree::process() { int64_t row = 0; if (mTree->GetNbranches() == 0 || mRows == 0) { mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; + return; } for (auto& reader : mColumnReaders) { @@ -468,7 +475,6 @@ std::shared_ptr TableToTree::process() } mTree->Write("", TObject::kOverwrite); mTree->SetDirectory(nullptr); - return mTree; } TreeToTable::TreeToTable(arrow::MemoryPool* pool) diff --git a/Framework/Core/test/benchmark_TableToTree.cxx b/Framework/Core/test/benchmark_TableToTree.cxx index 0a7963518f00b..0ca4eb81112b8 100644 --- a/Framework/Core/test/benchmark_TableToTree.cxx +++ b/Framework/Core/test/benchmark_TableToTree.cxx @@ -62,7 +62,7 @@ static void BM_TableToTree(benchmark::State& state) // benchmark TableToTree TableToTree ta2tr(table, &fout, "table2tree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); // clean up diff --git a/Framework/Core/test/benchmark_TreeToTable.cxx b/Framework/Core/test/benchmark_TreeToTable.cxx index 6eca853a90ce4..476e653acaf83 100644 --- a/Framework/Core/test/benchmark_TreeToTable.cxx +++ b/Framework/Core/test/benchmark_TreeToTable.cxx @@ -58,7 +58,7 @@ static void 
BM_TreeToTable(benchmark::State& state) // now convert the table to a tree TFile fout("tree2table.root", "RECREATE"); TableToTree ta2tr(table, &fout, "tree2table"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); ta2tr.process(); fout.Close(); diff --git a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..32575a8caf3e0 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -143,9 +143,10 @@ TEST_CASE("TreeToTableConversion") // save table as tree TFile* f2 = TFile::Open("table2tree.root", "RECREATE"); TableToTree ta2tr(table, f2, "mytree"); - ta2tr.addAllBranches(); + ta2tr.addAllColumns(); - auto t2 = ta2tr.process(); + ta2tr.process(); + auto t2 = ta2tr.tree(); auto br = (TBranch*)t2->GetBranch("ok"); REQUIRE(t2->GetEntries() == ndp); REQUIRE(br->GetEntries() == ndp); @@ -201,8 +202,8 @@ TEST_CASE("VariableLists") auto* f = TFile::Open("variable_lists.root", "RECREATE"); TableToTree ta2tr(table, f, "lists"); - ta2tr.addAllBranches(); - auto tree = ta2tr.process(); + ta2tr.addAllColumns(); + ta2tr.process(); f->Close(); auto* f2 = TFile::Open("variable_lists.root", "READ"); From 552fc3cdad54e8e376fdfc8ad4fd2f5734ba90c0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:33:09 +0100 Subject: [PATCH 044/726] C++20 needs vector to be well defined --- Framework/Core/include/Framework/AnalysisHelpers.h | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/include/Framework/AnalysisHelpers.h b/Framework/Core/include/Framework/AnalysisHelpers.h index b382d4f11b265..e5299c7a91b99 100644 --- a/Framework/Core/include/Framework/AnalysisHelpers.h +++ b/Framework/Core/include/Framework/AnalysisHelpers.h @@ -23,6 +23,7 @@ #include "Framework/Output.h" #include "Framework/IndexBuilderHelpers.h" #include "Framework/Plugins.h" +#include "Framework/ExpressionHelpers.h" #include namespace o2::framework From ea60f829ba6b7554dff7f92ce78fd0a513130f7b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:34:49 +0100 Subject: [PATCH 045/726] DPL: provide cast to string_view for header::Descriptor<16> This will make some code compile under C++20, since C++20 is more aggressive in doing implicit conversions where it currently uses the comparison operator. --- .../Headers/include/Headers/DataHeader.h | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/DataFormats/Headers/include/Headers/DataHeader.h b/DataFormats/Headers/include/Headers/DataHeader.h index eb640be82ed60..ed25d0d7f5082 100644 --- a/DataFormats/Headers/include/Headers/DataHeader.h +++ b/DataFormats/Headers/include/Headers/DataHeader.h @@ -224,14 +224,21 @@ struct Descriptor { // Note: don't need to define operator=(ItgType v) because the compiler // can use Descriptor(ItgType initializer) for conversion + using ImplicitConversion = std::conditional_t<(size <= 8), ItgType, std::string_view>; // type cast operator for simplified usage of the descriptor's integer member - // TODO: this is sort of a hack, takes the first element. 
- // we should rethink these implicit conversions - operator ItgType() const + // in case it does not fit into the descriptor, the string representation is returned + operator ImplicitConversion() const { - static_assert(arraySize == 1, "casting Descriptor to ItgType only allowed for N<=8"); - return itg[0]; + if constexpr (std::is_same_v) { + return itg[0]; + } else { + size_t len = size; + while (len > 1 && str[len - 1] == 0) { + --len; + } + return std::string_view(str, len); + } } /// constructor from a compile-time string From 51d7463e4fee5a546801740d60c850a8778e7465 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:02 +0100 Subject: [PATCH 046/726] C++20 disables aggregate initialisation if default constructors found --- DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h | 2 -- .../Detectors/CTP/include/DataFormatsCTP/LumiInfo.h | 1 - .../include/ITSMFTReconstruction/Clusterer.h | 1 - .../reconstruction/include/ITS3Reconstruction/Clusterer.h | 1 - Framework/Core/include/Framework/DeviceSpec.h | 8 +++----- Framework/Core/src/DeviceSpecHelpers.cxx | 1 + 6 files changed, 4 insertions(+), 10 deletions(-) diff --git a/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h b/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h index c05a59599e707..e1ef4de73f222 100644 --- a/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h +++ b/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h @@ -51,7 +51,6 @@ struct CTPDigit { o2::InteractionRecord intRecord; std::bitset CTPInputMask; std::bitset CTPClassMask; - CTPDigit() = default; void printStream(std::ostream& stream) const; void setInputMask(gbtword80_t mask); void setClassMask(gbtword80_t mask); @@ -72,7 +71,6 @@ struct CTPInputDigit { o2::InteractionRecord intRecord; std::bitset inputsMask; o2::detectors::DetID::ID detector; - CTPInputDigit() = default; ClassDefNV(CTPInputDigit, 1) }; } // namespace ctp diff --git a/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h b/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h index e9eb8ba497c17..c75fcc32ddaf4 100644 --- a/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h +++ b/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h @@ -22,7 +22,6 @@ namespace o2 namespace ctp { struct LumiInfo { - LumiInfo() = default; uint32_t orbit = 0; // orbit of TF when was updated uint32_t nHBFCounted = 0; // length of interval in HB uint32_t nHBFCountedFV0 = 0; diff --git a/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h b/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h index e1636c93d035a..d79eb4ac2b460 100644 --- a/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h +++ b/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h @@ -118,7 +118,6 @@ class Clusterer uint32_t firstPatt = 0; uint32_t nClus = 0; uint32_t nPatt = 0; - ThreadStat() = default; }; struct ClustererThread { diff --git a/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h b/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h index 6eda6e98f0b52..4008dfb215dd2 100644 --- a/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h +++ b/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h @@ -118,7 +118,6 @@ class Clusterer uint32_t firstPatt = 0; uint32_t nClus = 0; uint32_t nPatt = 0; - ThreadStat() = default; 
}; struct ClustererThread { diff --git a/Framework/Core/include/Framework/DeviceSpec.h b/Framework/Core/include/Framework/DeviceSpec.h index 6a11146283190..a22fc7eb1b7dc 100644 --- a/Framework/Core/include/Framework/DeviceSpec.h +++ b/Framework/Core/include/Framework/DeviceSpec.h @@ -46,13 +46,11 @@ namespace o2::framework /// /// auto &deviceSpec = ctx.services().get(); struct DeviceSpec { - DeviceSpec(const DeviceSpec&) = delete; - DeviceSpec(DeviceSpec&&) = default; /// The name of the associated DataProcessorSpec - std::string name; + std::string name = "unknown"; /// The id of the device, including time-pipelining and suffix - std::string id; - std::string channelPrefix; + std::string id = "unknown"; + std::string channelPrefix = ""; std::vector inputChannels; std::vector outputChannels; std::vector arguments; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index f11f60171e451..969a15fa40674 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -534,6 +534,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext .name = processor.name, .id = processor.maxInputTimeslices == 1 ? processor.name : processor.name + "_t" + std::to_string(edge.producerTimeIndex), .channelPrefix = channelPrefix, + .inputChannels = {}, .options = processor.options, .services = ServiceSpecHelpers::filterDisabled(processor.requiredServices, overrideServices), .algorithm = processor.algorithm, From d9c990eeda097de585154892ffa9a38467edaee3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:03 +0100 Subject: [PATCH 047/726] C++20 does not allow forward declaring classes which end up in a vector This is due to the fact the destructor of std::vector is now constexpr. --- Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h | 1 + 1 file changed, 1 insertion(+) diff --git a/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h b/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h index da47976b00a63..b9a66b07b2332 100644 --- a/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h +++ b/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h @@ -17,6 +17,7 @@ #ifndef O2_MCH_CLUSTERORIGINAL_H_ #define O2_MCH_CLUSTERORIGINAL_H_ +#include "PadOriginal.h" #include #include From d15e8d8fb842932b8e0457487c3abb74aef7b424 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:03 +0100 Subject: [PATCH 048/726] C++20 now considers equality to be commutable (A == B) === (B == A) This means that the operator must be const so that swapping is allowed. 
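For reference, a minimal standalone sketch (not part of this patch) of the rule: C++20 also evaluates a == b through the reversed candidate b.operator==(a), so the member operator has to be callable on a const left-hand side, hence the const qualifier added below.

  // Minimal sketch: the const qualifier lets both orders compile.
  struct Point {
    int x = 0;
    int y = 0;
    bool operator==(const Point& rhs) const // const: also usable for the reversed (swapped) candidate
    {
      return x == rhs.x && y == rhs.y;
    }
  };

  int main()
  {
    const Point a{1, 2};
    Point b{1, 2};
    return (a == b && b == a) ? 0 : 1; // both orders compile and agree
  }
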
--- Algorithm/test/pageparser.cxx | 2 +- Framework/Core/test/test_ASoA.cxx | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Algorithm/test/pageparser.cxx b/Algorithm/test/pageparser.cxx index 14b24c670cfd6..7551c32d9d864 100644 --- a/Algorithm/test/pageparser.cxx +++ b/Algorithm/test/pageparser.cxx @@ -50,7 +50,7 @@ struct ClusterData { { } - bool operator==(const ClusterData& rhs) + bool operator==(const ClusterData& rhs) const { return clusterid == rhs.clusterid && x == rhs.x && y == rhs.y && z == rhs.z && e == rhs.e; } diff --git a/Framework/Core/test/test_ASoA.cxx b/Framework/Core/test/test_ASoA.cxx index e6a537dab3dc9..467b15ec5bde0 100644 --- a/Framework/Core/test/test_ASoA.cxx +++ b/Framework/Core/test/test_ASoA.cxx @@ -147,10 +147,10 @@ TEST_CASE("TestTableIteration") b = tests2.begin(); REQUIRE(b != e); - REQUIRE((b + 1) == (b + 1)); - REQUIRE((b + 7) != b); - REQUIRE((b + 7) != e); - REQUIRE((b + 8) == e); + REQUIRE(((b + 1) == (b + 1))); + REQUIRE(((b + 7) != b)); + REQUIRE(((b + 7) != e)); + REQUIRE(((b + 8) == e)); for (auto& t : tests2) { REQUIRE(t.x() == value / 4); From c8992413849c8d284f6778ef7b1ecfc314b1bc80 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:04 +0100 Subject: [PATCH 049/726] Move to C++20 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ca431a8b95be2..5cbc12e72ade3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -37,7 +37,7 @@ set_property(GLOBAL PROPERTY JOB_POOLS analysis=${ANALYSIS_COMPILE_POOL}) include(O2BuildSanityChecks) o2_build_sanity_checks() -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) find_package(ONNXRuntime::ONNXRuntime CONFIG) From 6ff3738cffa1cd2919f22668df7c52993ee8a593 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:33:09 +0100 Subject: [PATCH 050/726] C++20 needs vector to be well defined --- Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h | 2 -- Framework/Core/include/Framework/AnalysisHelpers.h | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h b/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h index da47976b00a63..daca95e683365 100644 --- a/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h +++ b/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h @@ -27,8 +27,6 @@ namespace o2 namespace mch { -class PadOriginal; - /// cluster for internal use class ClusterOriginal { diff --git a/Framework/Core/include/Framework/AnalysisHelpers.h b/Framework/Core/include/Framework/AnalysisHelpers.h index b382d4f11b265..e5299c7a91b99 100644 --- a/Framework/Core/include/Framework/AnalysisHelpers.h +++ b/Framework/Core/include/Framework/AnalysisHelpers.h @@ -23,6 +23,7 @@ #include "Framework/Output.h" #include "Framework/IndexBuilderHelpers.h" #include "Framework/Plugins.h" +#include "Framework/ExpressionHelpers.h" #include namespace o2::framework From 07342adb9775bdaa31a5ddb73876d565925db4b6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:34:49 +0100 Subject: [PATCH 051/726] DPL: provide cast to string_view for header::Descriptor<16> This will make some code compile under C++20, since C++20 is more aggressive in doing implicit conversions where it currently uses the comparison operator. 
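As an illustration, a simplified standalone sketch of the idea (the MiniDescriptor name is hypothetical, not the actual Headers/DataHeader.h type): the conversion target is picked at compile time, so descriptors of up to 8 bytes still convert to their integer representation, while larger ones yield a std::string_view with the trailing NUL padding trimmed.

  #include <cstdint>
  #include <cstring>
  #include <iostream>
  #include <string_view>
  #include <type_traits>

  template <std::size_t N>
  struct MiniDescriptor {
    char str[N] = {};

    // Small descriptors convert to an integer, larger ones to a string view.
    using ImplicitConversion = std::conditional_t<(N <= 8), std::uint64_t, std::string_view>;

    operator ImplicitConversion() const
    {
      if constexpr (N <= 8) {
        std::uint64_t v = 0;
        std::memcpy(&v, str, N); // reinterpret the bytes as one integer
        return v;
      } else {
        std::size_t len = N;
        while (len > 1 && str[len - 1] == 0) {
          --len; // drop trailing NUL padding
        }
        return std::string_view(str, len);
      }
    }
  };

  int main()
  {
    MiniDescriptor<16> d{{'T', 'R', 'A', 'C', 'K', 'S'}};
    std::string_view v = d; // string_view conversion, since 16 > 8
    std::cout << v << '\n'; // prints TRACKS
  }
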
--- .../Headers/include/Headers/DataHeader.h | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/DataFormats/Headers/include/Headers/DataHeader.h b/DataFormats/Headers/include/Headers/DataHeader.h index eb640be82ed60..ed25d0d7f5082 100644 --- a/DataFormats/Headers/include/Headers/DataHeader.h +++ b/DataFormats/Headers/include/Headers/DataHeader.h @@ -224,14 +224,21 @@ struct Descriptor { // Note: don't need to define operator=(ItgType v) because the compiler // can use Descriptor(ItgType initializer) for conversion + using ImplicitConversion = std::conditional_t<(size <= 8), ItgType, std::string_view>; // type cast operator for simplified usage of the descriptor's integer member - // TODO: this is sort of a hack, takes the first element. - // we should rethink these implicit conversions - operator ItgType() const + // in case it does not fit into the descriptor, the string representation is returned + operator ImplicitConversion() const { - static_assert(arraySize == 1, "casting Descriptor to ItgType only allowed for N<=8"); - return itg[0]; + if constexpr (std::is_same_v) { + return itg[0]; + } else { + size_t len = size; + while (len > 1 && str[len - 1] == 0) { + --len; + } + return std::string_view(str, len); + } } /// constructor from a compile-time string From 31361e29c2957b10a82895f6a177517f0f0df99d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:03 +0100 Subject: [PATCH 052/726] C++20 now considers equality to be commutable (A == B) === (B == A) This means that the operator must be const so that swapping is allowed. --- Algorithm/test/pageparser.cxx | 2 +- Framework/Core/test/test_ASoA.cxx | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Algorithm/test/pageparser.cxx b/Algorithm/test/pageparser.cxx index 14b24c670cfd6..7551c32d9d864 100644 --- a/Algorithm/test/pageparser.cxx +++ b/Algorithm/test/pageparser.cxx @@ -50,7 +50,7 @@ struct ClusterData { { } - bool operator==(const ClusterData& rhs) + bool operator==(const ClusterData& rhs) const { return clusterid == rhs.clusterid && x == rhs.x && y == rhs.y && z == rhs.z && e == rhs.e; } diff --git a/Framework/Core/test/test_ASoA.cxx b/Framework/Core/test/test_ASoA.cxx index e6a537dab3dc9..467b15ec5bde0 100644 --- a/Framework/Core/test/test_ASoA.cxx +++ b/Framework/Core/test/test_ASoA.cxx @@ -147,10 +147,10 @@ TEST_CASE("TestTableIteration") b = tests2.begin(); REQUIRE(b != e); - REQUIRE((b + 1) == (b + 1)); - REQUIRE((b + 7) != b); - REQUIRE((b + 7) != e); - REQUIRE((b + 8) == e); + REQUIRE(((b + 1) == (b + 1))); + REQUIRE(((b + 7) != b)); + REQUIRE(((b + 7) != e)); + REQUIRE(((b + 8) == e)); for (auto& t : tests2) { REQUIRE(t.x() == value / 4); From ec28150efa735dfa1d176d39786b26c8ce881b91 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:02 +0100 Subject: [PATCH 053/726] C++20 disables aggregate initialisation if default constructors found --- DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h | 2 -- .../Detectors/CTP/include/DataFormatsCTP/LumiInfo.h | 1 - .../include/ITSMFTReconstruction/Clusterer.h | 1 - .../reconstruction/include/ITS3Reconstruction/Clusterer.h | 1 - Framework/Core/include/Framework/DeviceSpec.h | 8 +++----- Framework/Core/src/DeviceSpecHelpers.cxx | 1 + 6 files changed, 4 insertions(+), 10 deletions(-) diff --git a/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h 
b/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h index c05a59599e707..e1ef4de73f222 100644 --- a/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h +++ b/DataFormats/Detectors/CTP/include/DataFormatsCTP/Digits.h @@ -51,7 +51,6 @@ struct CTPDigit { o2::InteractionRecord intRecord; std::bitset CTPInputMask; std::bitset CTPClassMask; - CTPDigit() = default; void printStream(std::ostream& stream) const; void setInputMask(gbtword80_t mask); void setClassMask(gbtword80_t mask); @@ -72,7 +71,6 @@ struct CTPInputDigit { o2::InteractionRecord intRecord; std::bitset inputsMask; o2::detectors::DetID::ID detector; - CTPInputDigit() = default; ClassDefNV(CTPInputDigit, 1) }; } // namespace ctp diff --git a/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h b/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h index e9eb8ba497c17..c75fcc32ddaf4 100644 --- a/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h +++ b/DataFormats/Detectors/CTP/include/DataFormatsCTP/LumiInfo.h @@ -22,7 +22,6 @@ namespace o2 namespace ctp { struct LumiInfo { - LumiInfo() = default; uint32_t orbit = 0; // orbit of TF when was updated uint32_t nHBFCounted = 0; // length of interval in HB uint32_t nHBFCountedFV0 = 0; diff --git a/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h b/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h index e1636c93d035a..d79eb4ac2b460 100644 --- a/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h +++ b/Detectors/ITSMFT/common/reconstruction/include/ITSMFTReconstruction/Clusterer.h @@ -118,7 +118,6 @@ class Clusterer uint32_t firstPatt = 0; uint32_t nClus = 0; uint32_t nPatt = 0; - ThreadStat() = default; }; struct ClustererThread { diff --git a/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h b/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h index 6eda6e98f0b52..4008dfb215dd2 100644 --- a/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h +++ b/Detectors/Upgrades/ITS3/reconstruction/include/ITS3Reconstruction/Clusterer.h @@ -118,7 +118,6 @@ class Clusterer uint32_t firstPatt = 0; uint32_t nClus = 0; uint32_t nPatt = 0; - ThreadStat() = default; }; struct ClustererThread { diff --git a/Framework/Core/include/Framework/DeviceSpec.h b/Framework/Core/include/Framework/DeviceSpec.h index 6a11146283190..a22fc7eb1b7dc 100644 --- a/Framework/Core/include/Framework/DeviceSpec.h +++ b/Framework/Core/include/Framework/DeviceSpec.h @@ -46,13 +46,11 @@ namespace o2::framework /// /// auto &deviceSpec = ctx.services().get(); struct DeviceSpec { - DeviceSpec(const DeviceSpec&) = delete; - DeviceSpec(DeviceSpec&&) = default; /// The name of the associated DataProcessorSpec - std::string name; + std::string name = "unknown"; /// The id of the device, including time-pipelining and suffix - std::string id; - std::string channelPrefix; + std::string id = "unknown"; + std::string channelPrefix = ""; std::vector inputChannels; std::vector outputChannels; std::vector arguments; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index f11f60171e451..969a15fa40674 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -534,6 +534,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext .name = processor.name, .id = processor.maxInputTimeslices == 1 ? 
processor.name : processor.name + "_t" + std::to_string(edge.producerTimeIndex), .channelPrefix = channelPrefix, + .inputChannels = {}, .options = processor.options, .services = ServiceSpecHelpers::filterDisabled(processor.requiredServices, overrideServices), .algorithm = processor.algorithm, From 87d12c52eca4f237a47b3d925484c280518f2cc9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:04 +0100 Subject: [PATCH 054/726] Move to C++20 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ca431a8b95be2..5cbc12e72ade3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -37,7 +37,7 @@ set_property(GLOBAL PROPERTY JOB_POOLS analysis=${ANALYSIS_COMPILE_POOL}) include(O2BuildSanityChecks) o2_build_sanity_checks() -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) find_package(ONNXRuntime::ONNXRuntime CONFIG) From bf3ac496675aa4f3802a4f47343225e8e159f8ed Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:03 +0100 Subject: [PATCH 055/726] C++20 does not allow forward declaring classes which end up in a vector This is due to the fact the destructor of std::vector is now constexpr. --- Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h | 1 + 1 file changed, 1 insertion(+) diff --git a/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h b/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h index daca95e683365..730a116c942a9 100644 --- a/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h +++ b/Detectors/MUON/MCH/Clustering/src/ClusterOriginal.h @@ -17,6 +17,7 @@ #ifndef O2_MCH_CLUSTERORIGINAL_H_ #define O2_MCH_CLUSTERORIGINAL_H_ +#include "PadOriginal.h" #include #include From a31c76bc551d2f616a525e3b997a4af45ea37813 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:03 +0100 Subject: [PATCH 056/726] C++20 now considers equality to be commutable (A == B) === (B == A) This means that the operator must be const so that swapping is allowed. --- Algorithm/test/pageparser.cxx | 2 +- DataFormats/Headers/include/Headers/DataHeader.h | 2 ++ Framework/Core/test/test_ASoA.cxx | 8 ++++---- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/Algorithm/test/pageparser.cxx b/Algorithm/test/pageparser.cxx index 14b24c670cfd6..7551c32d9d864 100644 --- a/Algorithm/test/pageparser.cxx +++ b/Algorithm/test/pageparser.cxx @@ -50,7 +50,7 @@ struct ClusterData { { } - bool operator==(const ClusterData& rhs) + bool operator==(const ClusterData& rhs) const { return clusterid == rhs.clusterid && x == rhs.x && y == rhs.y && z == rhs.z && e == rhs.e; } diff --git a/DataFormats/Headers/include/Headers/DataHeader.h b/DataFormats/Headers/include/Headers/DataHeader.h index ed25d0d7f5082..628cb86dd7bc8 100644 --- a/DataFormats/Headers/include/Headers/DataHeader.h +++ b/DataFormats/Headers/include/Headers/DataHeader.h @@ -286,6 +286,8 @@ struct Descriptor { bool operator<(const Descriptor& other) const { return std::memcmp(this->str, other.str, N) < 0; } bool operator!=(const Descriptor& other) const { return not this->operator==(other); } + // Convesion operators for comparison with their implicitly convertible types + friend bool operator==(const Descriptor& lhs, ImplicitConversion rhs) { return static_cast(lhs)==rhs; } // explicitly forbid comparison with e.g. 
const char* strings // use: value == Descriptor("DESC") for the appropriate // template instantiation instead diff --git a/Framework/Core/test/test_ASoA.cxx b/Framework/Core/test/test_ASoA.cxx index e6a537dab3dc9..467b15ec5bde0 100644 --- a/Framework/Core/test/test_ASoA.cxx +++ b/Framework/Core/test/test_ASoA.cxx @@ -147,10 +147,10 @@ TEST_CASE("TestTableIteration") b = tests2.begin(); REQUIRE(b != e); - REQUIRE((b + 1) == (b + 1)); - REQUIRE((b + 7) != b); - REQUIRE((b + 7) != e); - REQUIRE((b + 8) == e); + REQUIRE(((b + 1) == (b + 1))); + REQUIRE(((b + 7) != b)); + REQUIRE(((b + 7) != e)); + REQUIRE(((b + 8) == e)); for (auto& t : tests2) { REQUIRE(t.x() == value / 4); From ec5406fd7b65e1ee90a5af46ea8ba7615598264c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:36:04 +0100 Subject: [PATCH 057/726] Move to C++20 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ca431a8b95be2..5cbc12e72ade3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -37,7 +37,7 @@ set_property(GLOBAL PROPERTY JOB_POOLS analysis=${ANALYSIS_COMPILE_POOL}) include(O2BuildSanityChecks) o2_build_sanity_checks() -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) find_package(ONNXRuntime::ONNXRuntime CONFIG) From 7eeaf87153d0f2ed6bb358292bc26b5b7e0eea86 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:12:45 +0100 Subject: [PATCH 058/726] C++20 now considers equality to be commutable (A == B) === (B == A) This means that the operator must be const so that swapping is allowed. --- Algorithm/test/pageparser.cxx | 2 +- DataFormats/Headers/include/Headers/DataHeader.h | 2 ++ Framework/Core/test/test_ASoA.cxx | 8 ++++---- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/Algorithm/test/pageparser.cxx b/Algorithm/test/pageparser.cxx index 14b24c670cfd6..7551c32d9d864 100644 --- a/Algorithm/test/pageparser.cxx +++ b/Algorithm/test/pageparser.cxx @@ -50,7 +50,7 @@ struct ClusterData { { } - bool operator==(const ClusterData& rhs) + bool operator==(const ClusterData& rhs) const { return clusterid == rhs.clusterid && x == rhs.x && y == rhs.y && z == rhs.z && e == rhs.e; } diff --git a/DataFormats/Headers/include/Headers/DataHeader.h b/DataFormats/Headers/include/Headers/DataHeader.h index ed25d0d7f5082..a4d3abdba65c7 100644 --- a/DataFormats/Headers/include/Headers/DataHeader.h +++ b/DataFormats/Headers/include/Headers/DataHeader.h @@ -286,6 +286,8 @@ struct Descriptor { bool operator<(const Descriptor& other) const { return std::memcmp(this->str, other.str, N) < 0; } bool operator!=(const Descriptor& other) const { return not this->operator==(other); } + // Convesion operators for comparison with their implicitly convertible types + friend bool operator==(const Descriptor& lhs, ImplicitConversion rhs) { return static_cast(lhs) == rhs; } // explicitly forbid comparison with e.g. 
const char* strings // use: value == Descriptor("DESC") for the appropriate // template instantiation instead diff --git a/Framework/Core/test/test_ASoA.cxx b/Framework/Core/test/test_ASoA.cxx index e6a537dab3dc9..467b15ec5bde0 100644 --- a/Framework/Core/test/test_ASoA.cxx +++ b/Framework/Core/test/test_ASoA.cxx @@ -147,10 +147,10 @@ TEST_CASE("TestTableIteration") b = tests2.begin(); REQUIRE(b != e); - REQUIRE((b + 1) == (b + 1)); - REQUIRE((b + 7) != b); - REQUIRE((b + 7) != e); - REQUIRE((b + 8) == e); + REQUIRE(((b + 1) == (b + 1))); + REQUIRE(((b + 7) != b)); + REQUIRE(((b + 7) != e)); + REQUIRE(((b + 8) == e)); for (auto& t : tests2) { REQUIRE(t.x() == value / 4); From d104b904a8281b65b5aa9f61f09b7524398ec2e1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:12:45 +0100 Subject: [PATCH 059/726] Move to C++20 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ca431a8b95be2..5cbc12e72ade3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -37,7 +37,7 @@ set_property(GLOBAL PROPERTY JOB_POOLS analysis=${ANALYSIS_COMPILE_POOL}) include(O2BuildSanityChecks) o2_build_sanity_checks() -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) find_package(ONNXRuntime::ONNXRuntime CONFIG) From 06e0d43ead6aab27ad3fa4379c21b6fb3d8766c6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:31:25 +0100 Subject: [PATCH 060/726] DPL: reset Oldest Possible Timeslice counters on new run Centralise resetting of the Oldest Possible Timeslice counters now that we can. --- Framework/Core/src/CommonServices.cxx | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 44d63cd2ab1f5..fb0787b02da70 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -503,6 +503,17 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } } decongestion->lastTimeslice = oldestPossibleOutput.timeslice.value; }, + .stop = [](ServiceRegistryRef services, void* service) { + auto* decongestion = (DecongestionService*)service; + decongestion->nextEnumerationTimeslice = 0; + decongestion->nextEnumerationTimesliceRewinded = false; + decongestion->lastTimeslice = 0; + decongestion->nextTimeslice = 0; + decongestion->oldestPossibleTimesliceTask = {0}; + auto &state = services.get(); + for (auto &channel : state.inputChannelInfos) { + channel.oldestForChannel = {0}; + } }, .domainInfoUpdated = [](ServiceRegistryRef services, size_t oldestPossibleTimeslice, ChannelIndex channel) { auto& decongestion = services.get(); auto& relayer = services.get(); From 80aa547753a2ca17ed9a28191e1ded1fb8cfb19c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:31:25 +0100 Subject: [PATCH 061/726] Fix coding convention --- Framework/Core/src/CommonServices.cxx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index fb0787b02da70..4caddb9b98c78 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -941,8 +941,7 @@ o2::framework::ServiceSpec CommonServices::dataProcessingStats() .configure = noConfiguration(), .preProcessing = [](ProcessingContext& context, void* service) { auto* stats = 
(DataProcessingStats*)service; - flushMetrics(context.services(), *stats); - }, + flushMetrics(context.services(), *stats); }, .postProcessing = [](ProcessingContext& context, void* service) { auto* stats = (DataProcessingStats*)service; stats->updateStats({(short)ProcessingStatsId::PERFORMED_COMPUTATIONS, DataProcessingStats::Op::Add, 1}); From d9c94b33ac55294c50ef62b76154e0d4fe30fa18 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:32:03 +0100 Subject: [PATCH 062/726] Fix coding convention --- Framework/Core/src/CommonServices.cxx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 44d63cd2ab1f5..dca98618895cd 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -930,8 +930,7 @@ o2::framework::ServiceSpec CommonServices::dataProcessingStats() .configure = noConfiguration(), .preProcessing = [](ProcessingContext& context, void* service) { auto* stats = (DataProcessingStats*)service; - flushMetrics(context.services(), *stats); - }, + flushMetrics(context.services(), *stats); }, .postProcessing = [](ProcessingContext& context, void* service) { auto* stats = (DataProcessingStats*)service; stats->updateStats({(short)ProcessingStatsId::PERFORMED_COMPUTATIONS, DataProcessingStats::Op::Add, 1}); From 57fbeb6546f329a62aab74a310de3f35f935b13c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:32:04 +0100 Subject: [PATCH 063/726] DPL: reset Oldest Possible Timeslice counters on new run Centralise resetting of the Oldest Possible Timeslice counters now that we can. --- Framework/Core/src/CommonServices.cxx | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index dca98618895cd..4caddb9b98c78 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -503,6 +503,17 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } } decongestion->lastTimeslice = oldestPossibleOutput.timeslice.value; }, + .stop = [](ServiceRegistryRef services, void* service) { + auto* decongestion = (DecongestionService*)service; + decongestion->nextEnumerationTimeslice = 0; + decongestion->nextEnumerationTimesliceRewinded = false; + decongestion->lastTimeslice = 0; + decongestion->nextTimeslice = 0; + decongestion->oldestPossibleTimesliceTask = {0}; + auto &state = services.get(); + for (auto &channel : state.inputChannelInfos) { + channel.oldestForChannel = {0}; + } }, .domainInfoUpdated = [](ServiceRegistryRef services, size_t oldestPossibleTimeslice, ChannelIndex channel) { auto& decongestion = services.get(); auto& relayer = services.get(); From ad0f5eab899fc340f89c5d8d4644d81d8598cd1c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 22:34:00 +0100 Subject: [PATCH 064/726] DPL GUI: do not hang if quit requested but some devices are stuck --- Framework/Core/src/runDataProcessing.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 61fdef070d874..49c5ebadb6460 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1073,7 +1073,7 @@ void gui_callback(uv_timer_s* ctx) if (gui->plugin->supportsDeferredClose()) { // For now, 
there is nothing for which we want to defer the close // so if the flag is set, we simply exit - if (*(gui->guiQuitRequested)) { + if (*(gui->guiQuitRequested) && *(gui->allChildrenGone)) { return; } void* draw_data = nullptr; From 397c3b7d379316c947c28a6e8088f2c2d87f1a46 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 22:34:00 +0100 Subject: [PATCH 065/726] DPL GUI: add GUI to list devices preventing quitting --- Framework/Core/include/Framework/DebugGUI.h | 4 +- Framework/Core/src/runDataProcessing.cxx | 6 +- .../GUISupport/src/FrameworkGUIDebugger.cxx | 110 ++++++++++++------ .../GUISupport/src/FrameworkGUIDebugger.h | 4 +- Framework/GUISupport/src/Plugin.cxx | 5 +- 5 files changed, 87 insertions(+), 42 deletions(-) diff --git a/Framework/Core/include/Framework/DebugGUI.h b/Framework/Core/include/Framework/DebugGUI.h index c7a611d50a1d4..109fd36e27511 100644 --- a/Framework/Core/include/Framework/DebugGUI.h +++ b/Framework/Core/include/Framework/DebugGUI.h @@ -26,6 +26,7 @@ namespace o2::framework { struct ServiceRegistry; +struct GuiCallbackContext; /// Plugin interface for DPL GUIs. struct DebugGUI { virtual std::function getGUIDebugger(std::vector const& infos, @@ -35,7 +36,8 @@ struct DebugGUI { std::vector const& metricsInfos, o2::framework::DriverInfo const& driverInfo, std::vector& controls, - o2::framework::DriverControl& driverControl) = 0; + o2::framework::DriverControl& driverControl, + GuiCallbackContext& context) = 0; virtual void updateMousePos(float x, float y) = 0; virtual void updateMouseButton(bool isClicked) = 0; virtual void updateMouseWheel(int direction) = 0; diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 49c5ebadb6460..44a5c6b4ee94a 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1302,7 +1302,8 @@ int runStateMachine(DataProcessorSpecs const& workflow, DebugGUI* debugGUI = nullptr; void* window = nullptr; - decltype(debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl)) debugGUICallback; + GuiCallbackContext guiContext; + decltype(debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl, guiContext)) debugGUICallback; // An empty frameworkId means this is the driver, so we initialise the GUI auto initDebugGUI = []() -> DebugGUI* { @@ -1389,7 +1390,6 @@ int runStateMachine(DataProcessorSpecs const& workflow, ServiceRegistryRef ref{serviceRegistry}; ref.registerService(ServiceRegistryHelpers::handleForService(devicesManager)); - GuiCallbackContext guiContext; guiContext.plugin = debugGUI; guiContext.frameLast = uv_hrtime(); guiContext.frameLatency = &driverInfo.frameLatency; @@ -1882,7 +1882,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, uv_timer_stop(gui_timer); } - auto callback = debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl); + auto callback = debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl, guiContext); guiContext.callback = [&serviceRegistry, &driverServices, &debugGUI, &infos, &runningWorkflow, &dataProcessorInfos, &metricsInfos, &driverInfo, &controls, &driverControl, callback]() { callback(); for (auto& service : driverServices) { diff 
--git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx index 35e0bc563a22e..375afef1007f9 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx @@ -16,6 +16,7 @@ #include "DebugGUI/imgui_extras.h" #include "Framework/DriverControl.h" #include "Framework/DriverInfo.h" +#include "Framework/GuiCallbackContext.h" #include "Framework/DeviceMetricsHelper.h" #include "Framework/DeviceMetricsInfo.h" #include "FrameworkGUIDeviceInspector.h" @@ -506,7 +507,7 @@ void displayDeviceMetrics(const char* label, case MetricsDisplayStyle::Lines: { auto xAxisFlags = ImPlotAxisFlags_None; auto yAxisFlags = ImPlotAxisFlags_LockMin; - //ImPlot::FitNextPlotAxes(true, true, true, true); + // ImPlot::FitNextPlotAxes(true, true, true, true); if (ImPlot::BeginPlot("##Some plot", {-1, -1}, axisFlags)) { ImPlot::SetupAxes("time", "value", xAxisFlags, yAxisFlags); ImPlot::SetupAxisFormat(ImAxis_Y1, formatSI, nullptr); @@ -1008,6 +1009,71 @@ void popWindowColorDueToStatus() ImGui::PopStyleVar(1); } +int debugPID(int pid) +{ + std::string pidStr = std::to_string(pid); + setenv("O2DEBUGGEDPID", pidStr.c_str(), 1); +#ifdef __APPLE__ + std::string defaultAppleDebugCommand = + "osascript -e 'tell application \"Terminal\"'" + " -e 'activate'" + " -e 'do script \"lldb -p \" & (system attribute \"O2DEBUGGEDPID\") & \"; exit\"'" + " -e 'end tell'"; + setenv("O2DPLDEBUG", defaultAppleDebugCommand.c_str(), 0); +#else + setenv("O2DPLDEBUG", "xterm -hold -e gdb attach $O2DEBUGGEDPID &", 0); +#endif + return system(getenv("O2DPLDEBUG")); +} + +int profilePID(int pid) +{ + std::string pidStr = std::to_string(pid); + setenv("O2PROFILEDPID", pidStr.c_str(), 1); +#ifdef __APPLE__ + auto defaultAppleProfileCommand = fmt::format( + "osascript -e 'tell application \"Terminal\"'" + " -e 'activate'" + " -e 'do script \"xcrun xctrace record --output dpl-profile-{0}.trace" + " --instrument os_signpost --time-limit 30s --template Time\\\\ Profiler --attach {0} " + " && open dpl-profile-{0}.trace && exit\"'" + " -e 'end tell'", + pid); + std::cout << defaultAppleProfileCommand << std::endl; + setenv("O2DPLPROFILE", defaultAppleProfileCommand.c_str(), 0); +#else + setenv("O2DPLPROFILE", "xterm -hold -e perf record -a -g -p $O2PROFILEDPID > perf-$O2PROFILEDPID.data &", 0); +#endif + return system(getenv("O2DPLPROFILE")); +} + +/// Show a window with the list of devices which have not yet exited +/// and provide a few controls to interact with them. +void displayShutdownDialog(std::vector const& specs, std::vector const& infos) +{ + for (size_t i = 0; i < infos.size(); ++i) { + auto& info = infos[i]; + if (info.active == false) { + continue; + } + auto& spec = specs[i]; + ImGui::Text("Unable to quit due to the following devices:"); + ImGui::Text("%s (%d).", spec.name.c_str(), info.pid); + ImGui::SameLine(); + if (ImGui::Button("Force quit")) { + kill(info.pid, SIGKILL); + } + ImGui::SameLine(); + if (ImGui::Button("Debug")) { + debugPID(info.pid); + } + ImGui::SameLine(); + if (ImGui::Button("Profile")) { + profilePID(info.pid); + } + } +} + /// Display information window about the driver /// and its state. 
void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverControl) @@ -1035,42 +1101,12 @@ void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverContro } ImGui::SameLine(); if (ImGui::Button("Debug driver")) { - std::string pidStr = std::to_string(pid); - setenv("O2DEBUGGEDPID", pidStr.c_str(), 1); -#ifdef __APPLE__ - std::string defaultAppleDebugCommand = - "osascript -e 'tell application \"Terminal\"'" - " -e 'activate'" - " -e 'do script \"lldb -p \" & (system attribute \"O2DEBUGGEDPID\") & \"; exit\"'" - " -e 'end tell'"; - setenv("O2DPLDEBUG", defaultAppleDebugCommand.c_str(), 0); -#else - setenv("O2DPLDEBUG", "xterm -hold -e gdb attach $O2DEBUGGEDPID &", 0); -#endif - int retVal = system(getenv("O2DPLDEBUG")); - (void)retVal; + debugPID(pid); } ImGui::SameLine(); if (ImGui::Button("Profile")) { - std::string pidStr = std::to_string(pid); - setenv("O2PROFILEDPID", pidStr.c_str(), 1); -#ifdef __APPLE__ - auto defaultAppleProfileCommand = fmt::format( - "osascript -e 'tell application \"Terminal\"'" - " -e 'activate'" - " -e 'do script \"xcrun xctrace record --output dpl-profile-{0}.trace" - " --instrument os_signpost --time-limit 30s --template Time\\\\ Profiler --attach {0} " - " && open dpl-profile-{0}.trace && exit\"'" - " -e 'end tell'", - pid); - std::cout << defaultAppleProfileCommand << std::endl; - setenv("O2DPLPROFILE", defaultAppleProfileCommand.c_str(), 0); -#else - setenv("O2DPLPROFILE", "xterm -hold -e perf record -a -g -p $O2PROFILEDPID > perf-$O2PROFILEDPID.data &", 0); -#endif - int retVal = system(getenv("O2DPLPROFILE")); - (void)retVal; + profilePID(pid); } // FIXME: this should really be a combo box @@ -1110,7 +1146,8 @@ std::function getGUIDebugger(std::vector const& infos, std::vector const& metricsInfos, DriverInfo const& driverInfo, std::vector& controls, - DriverControl& driverControl) + DriverControl& driverControl, + GuiCallbackContext& gui) { static gui::WorkspaceGUIState globalGUIState; gui::WorkspaceGUIState& guiState = globalGUIState; @@ -1141,7 +1178,7 @@ std::function getGUIDebugger(std::vector const& infos, std::vector driverNodesInfos; driverNodesInfos.push_back(TopologyNodeInfo{"driver"}); - return [&infos, &devices, &allStates, &metadata, &controls, &metricsInfos, &driverInfo, &driverControl, deviceNodesInfos, driverNodesInfos]() { + return [&infos, &devices, &allStates, &metadata, &controls, &metricsInfos, &driverInfo, &driverControl, &gui, deviceNodesInfos, driverNodesInfos]() { ImGuiStyle& style = ImGui::GetStyle(); style.FrameRounding = 0.; style.WindowRounding = 0.; @@ -1158,6 +1195,9 @@ std::function getGUIDebugger(std::vector const& infos, metricsStore.specs[DRIVER_METRICS] = gsl::span(driverNodesInfos); displayMetrics(guiState, driverInfo, infos, metadata, controls, metricsStore); displayDriverInfo(driverInfo, driverControl); + if (*(gui.guiQuitRequested) && !*(gui.allChildrenGone)) { + displayShutdownDialog(devices, infos); + } int windowPosStepping = (ImGui::GetIO().DisplaySize.y - 500) / guiState.devices.size(); diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.h b/Framework/GUISupport/src/FrameworkGUIDebugger.h index 5de5e350ddf8d..98bb50b07fd44 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.h +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.h @@ -26,6 +26,7 @@ namespace o2::framework class DriverInfo; class DriverControl; +class GuiCallbackContext; namespace gui { @@ -37,7 +38,8 @@ std::function getGUIDebugger(std::vector const& infos, std::vector const& metricsInfos, DriverInfo 
const& driverInfo, std::vector& controls, - DriverControl& driverControl); + DriverControl& driverControl, + GuiCallbackContext& context); void updateMousePos(float x, float y); void updateMouseButton(bool clicked); diff --git a/Framework/GUISupport/src/Plugin.cxx b/Framework/GUISupport/src/Plugin.cxx index 7a356f6e55d9b..022b74174f632 100644 --- a/Framework/GUISupport/src/Plugin.cxx +++ b/Framework/GUISupport/src/Plugin.cxx @@ -36,9 +36,10 @@ struct ImGUIDebugGUI : o2::framework::DebugGUI { std::vector const& metricsInfos, DriverInfo const& driverInfo, std::vector& controls, - DriverControl& driverControl) override + DriverControl& driverControl, + GuiCallbackContext& context) override { - return o2::framework::gui::getGUIDebugger(infos, devices, allStates, metadata, metricsInfos, driverInfo, controls, driverControl); + return o2::framework::gui::getGUIDebugger(infos, devices, allStates, metadata, metricsInfos, driverInfo, controls, driverControl, context); } void updateMousePos(float x, float y) override From b7d30656d4d75df957c321694a25533e96a4bf9c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 7 Nov 2023 22:34:00 +0100 Subject: [PATCH 066/726] DPL GUI: allow GUI to move between different states --- Framework/Core/src/DataProcessingDevice.cxx | 49 ++++++++++++++++++- Framework/Core/src/WSDriverClient.cxx | 24 +++++++++ Framework/Core/src/runDataProcessing.cxx | 13 ++++- .../GUISupport/src/FrameworkGUIDebugger.cxx | 2 +- .../src/FrameworkGUIDeviceInspector.cxx | 27 +++++++++- .../src/FrameworkGUIDeviceInspector.h | 5 +- .../src/FrameworkGUIDevicesGraph.cxx | 5 +- .../GUISupport/src/FrameworkGUIDevicesGraph.h | 8 ++- 8 files changed, 123 insertions(+), 10 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 5542be850fb70..3f78543bdc594 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -25,6 +25,7 @@ #include "Framework/DispatchPolicy.h" #include "Framework/DispatchControl.h" #include "Framework/DanglingContext.h" +#include "Framework/DriverInfo.h" #include "Framework/DomainInfoHeader.h" #include "Framework/DriverClient.h" #include "Framework/EndOfStreamContext.h" @@ -159,16 +160,60 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi std::function stateWatcher = [this, ®istry = mServiceRegistry](const fair::mq::State state) -> void { auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; + auto controlKind = this->GetConfig()->GetPropertyAsString("control"); auto& deviceState = ref.get(); auto& control = ref.get(); auto& callbacks = ref.get(); - control.notifyDeviceState(fair::mq::GetStateName(state)); + auto stateName = fair::mq::GetStateName(state); + control.notifyDeviceState(stateName); callbacks.call(ServiceRegistryRef{ref}, (int)state); + LOG(detail) << "In state watcher callback " << stateName; + + // If the termination policy is not to wait, we simply ignore all + // user imposed state changes and keep running until we are done. 
+ if (controlKind != "gui") { + return; + } + + static bool runningOnce = false; if (deviceState.nextFairMQState.empty() == false) { + LOG(detail) << "State change requested, changing state to " << deviceState.nextFairMQState.back(); auto state = deviceState.nextFairMQState.back(); - (void)this->ChangeState(state); + bool changed = this->ChangeState(state); + if (!changed) { + LOG(error) << "Failed to change state to " << state; + } deviceState.nextFairMQState.pop_back(); + } else if (state == fair::mq::State::Running && deviceState.nextFairMQState.empty()) { + LOGP(detail, "Device is running and no transition expected. We are done."); + deviceState.transitionHandling = TransitionHandlingState::NoTransition; + } else { + while (runningOnce && deviceState.nextFairMQState.empty() && this->NewStatePending() == false) { + LOG(detail) << "No state change requested, waiting for next state change " << this->NewStatePending(); + if (stateName == "EXITING") { + // Send ctrl c to ourselves. To bad FairMQ does not seem to exit when + // reaching the EXITING state. + kill(getpid(), SIGTERM); + return; + } + uv_run(deviceState.loop, UV_RUN_ONCE); + LOG(detail) << "Woke up from event loop"; + } + if (runningOnce && deviceState.nextFairMQState.empty() == false) { + LOG(detail) << "State change requested, changing state to " << deviceState.nextFairMQState.back(); + auto state = deviceState.nextFairMQState.back(); + bool changed = this->ChangeState(state); + if (!changed) { + LOG(error) << "Failed to change state to " << state; + } + deviceState.nextFairMQState.pop_back(); + } + LOG(detail) << "Exiting callback for state " << state; + } + if (runningOnce == false && state == fair::mq::State::Running) { + LOG(detail) << "First iteration, next time we start the event loop"; + runningOnce = true; } }; diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index d4ed77b9a004e..91f011e61cda3 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -15,6 +15,8 @@ #include "Framework/ServiceRegistry.h" #include "Framework/DeviceSpec.h" #include "DriverClientContext.h" +#include "Framework/RawDeviceService.h" +#include "Device.h" #include "DPLWebSocket.h" #include #include @@ -134,6 +136,28 @@ void on_connect(uv_connect_t* connection, int status) state.nextFairMQState.emplace_back("STOP"); }); + client->observe("/shutdown", [ref = context->ref](std::string_view) { + auto currentStateName = ref.get().device()->GetCurrentStateName(); + LOGP(info, "Received shutdown request while in {}", currentStateName); + + auto& state = ref.get(); + state.nextFairMQState.emplace_back("END"); + if (currentStateName == "IDLE") { + return; + } + state.nextFairMQState.emplace_back("AUTO"); + state.nextFairMQState.emplace_back("RESET DEVICE"); + if (currentStateName == "DEVICE READY") { + return; + } + state.nextFairMQState.emplace_back("AUTO"); + state.nextFairMQState.emplace_back("RESET TASK"); + if (currentStateName == "READY") { + return; + } + state.nextFairMQState.emplace_back("STOP"); + }); + client->observe("/trace", [ref = context->ref](std::string_view cmd) { auto& state = ref.get(); static constexpr int prefixSize = std::string_view{"/trace "}.size(); diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 44a5c6b4ee94a..c7a57acd83971 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -29,6 +29,7 @@ #include "Framework/DeviceMetricsInfo.h" #include 
"Framework/DeviceMetricsHelper.h" #include "Framework/DeviceConfigInfo.h" +#include "Framework/DeviceController.h" #include "Framework/DeviceSpec.h" #include "Framework/DeviceState.h" #include "Framework/DeviceConfig.h" @@ -2068,7 +2069,17 @@ int runStateMachine(DataProcessorSpecs const& workflow, // We send SIGCONT to make sure stopped children are resumed killChildren(infos, SIGCONT); // We send SIGTERM to make sure we do the STOP transition in FairMQ - killChildren(infos, SIGTERM); + if (driverInfo.processingPolicies.termination == TerminationPolicy::WAIT) { + for (size_t di = 0; di < infos.size(); ++di) { + auto& info = infos[di]; + auto& control = controls[di]; + if (info.active == true) { + control.controller->write("/shutdown", strlen("/shutdown")); + } + } + } else { + killChildren(infos, SIGTERM); + } // We have a timer to send SIGUSR1 to make sure we advance all devices // in a timely manner. force_step_timer.data = &infos; diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx index 375afef1007f9..cc5180093ec94 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx @@ -1185,7 +1185,7 @@ std::function getGUIDebugger(std::vector const& infos, style.Colors[ImGuiCol_WindowBg] = ImVec4(0x1b / 255.f, 0x1b / 255.f, 0x1b / 255.f, 1.00f); style.Colors[ImGuiCol_ScrollbarBg] = ImVec4(0x1b / 255.f, 0x1b / 255.f, 0x1b / 255.f, 1.00f); - showTopologyNodeGraph(guiState, infos, devices, allStates, metadata, controls, metricsInfos); + showTopologyNodeGraph(guiState, infos, devices, allStates, metadata, controls, metricsInfos, driverInfo.processingPolicies.termination); AllMetricsStore metricsStore; diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index a82753eb5af1f..9febb93858cb8 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -11,6 +11,7 @@ #include "FrameworkGUIDeviceInspector.h" #include "Framework/DataProcessorInfo.h" +#include "Framework/ProcessingPolicies.h" #include "Framework/DeviceControl.h" #include "Framework/DeviceSpec.h" @@ -251,7 +252,8 @@ void displayDeviceInspector(DeviceSpec const& spec, DataProcessingStates const& states, DeviceMetricsInfo const& metrics, DataProcessorInfo const& metadata, - DeviceControl& control) + DeviceControl& control, + TerminationPolicy terminationPolicy) { ImGui::Text("Name: %s", spec.name.c_str()); ImGui::Text("Executable: %s", metadata.executable.c_str()); @@ -340,12 +342,33 @@ void displayDeviceInspector(DeviceSpec const& spec, } if (control.requestedState > info.providedState) { - ImGui::Text(ICON_FA_CLOCK_O); + ImGui::TextUnformatted(ICON_FA_CLOCK_O "Requested transition in progress"); } else { + // We only allow navigation if the termination policy is "WAIT" + ImGui::BeginDisabled(terminationPolicy == TerminationPolicy::QUIT); if (ImGui::Button("Restart")) { control.requestedState = info.providedState + 1; control.controller->write("/restart", strlen("/restart")); } + if (info.deviceState == "RUNNING") { + ImGui::SameLine(); + if (ImGui::Button(ICON_FA_STOP)) { + control.requestedState = info.providedState + 1; + control.controller->write("/stop", strlen("/stop")); + } + } else if (info.deviceState == "READY") { + ImGui::SameLine(); + if (ImGui::Button(ICON_FA_PLAY)) { + control.requestedState = info.providedState + 1; + control.controller->write("/start", 
strlen("/start")); + } + ImGui::SameLine(); + if (ImGui::Button(ICON_FA_POWER_OFF)) { + control.requestedState = info.providedState + 1; + control.controller->write("/shutdown", strlen("/shutdown")); + } + } + ImGui::EndDisabled(); } } diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h index 8dac3204ea3ab..b69c6e9286215 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h @@ -18,12 +18,15 @@ struct DeviceInfo; struct DeviceMetricsInfo; struct DataProcessorInfo; struct DataProcessingStates; +enum struct TerminationPolicy; namespace gui { /// Helper to display information about a device -void displayDeviceInspector(DeviceSpec const& spec, DeviceInfo const& info, DataProcessingStates const& states, DeviceMetricsInfo const& metrics, DataProcessorInfo const& metadata, DeviceControl& control); +void displayDeviceInspector(DeviceSpec const& spec, DeviceInfo const& info, DataProcessingStates const& states, + DeviceMetricsInfo const& metrics, DataProcessorInfo const& metadata, DeviceControl& control, + TerminationPolicy TerminationPolicy); } // namespace gui } // namespace o2::framework diff --git a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx index 0b6ca07a9c86d..639122153e599 100644 --- a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx @@ -365,7 +365,8 @@ void showTopologyNodeGraph(WorkspaceGUIState& state, std::vector const& allStates, std::vector const& metadata, std::vector& controls, - std::vector const& metricsInfos) + std::vector const& metricsInfos, + enum TerminationPolicy terminationPolicy) { ImGui::SetNextWindowPos(ImVec2(0, 0), 0); if (state.bottomPaneVisible) { @@ -893,7 +894,7 @@ void showTopologyNodeGraph(WorkspaceGUIState& state, auto& metadatum = metadata[group.metadataId]; if (state.rightPaneVisible) { - gui::displayDeviceInspector(spec, info, states, metrics, metadatum, control); + gui::displayDeviceInspector(spec, info, states, metrics, metadatum, control, terminationPolicy); } } else { ImGui::TextWrapped("Select a node in the topology to display information about it"); diff --git a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h index 7950f1893ebba..d53cdf26dd5a8 100644 --- a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h +++ b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h @@ -19,6 +19,11 @@ #include +namespace o2::framework +{ +enum struct TerminationPolicy; +} // namespace o2::framework + namespace o2::framework::gui { @@ -30,7 +35,8 @@ void showTopologyNodeGraph(WorkspaceGUIState& state, std::vector const& allStates, std::vector const& metadata, std::vector& controls, - std::vector const& metricsInfos); + std::vector const& metricsInfos, + TerminationPolicy terminationPolicy); } // namespace o2::framework::gui From 5d0a6197278c283d4adcd4c310f33d8c081caaec Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 8 Nov 2023 15:07:49 +0100 Subject: [PATCH 067/726] Support new fmt. 
--- Framework/Core/src/DataRelayer.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index c507d6eb308e3..266f5b054add1 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -570,7 +570,7 @@ DataRelayer::RelayChoice static std::atomic obsoleteCount = 0; static std::atomic mult = 1; if ((obsoleteCount++ % (1 * mult)) == 0) { - LOGP(warning, "Over {} incoming messages are already obsolete, not relaying.", obsoleteCount); + LOGP(warning, "Over {} incoming messages are already obsolete, not relaying.", obsoleteCount.load()); if (obsoleteCount > mult * 10) { mult = mult * 10; } From 0c1507c1e76a484ff08d1fd46f99f1e7a057998f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:46:00 +0100 Subject: [PATCH 068/726] GPU: support fmt 10.1.1 --- GPU/GPUTracking/Base/GPUReconstruction.cxx | 2 +- GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx | 6 +++--- GPU/TPCFastTransformation/MultivariatePolynomial.h | 8 ++++---- .../MultivariatePolynomialHelper.cxx | 2 +- GPU/TPCFastTransformation/NDPiecewisePolynomials.h | 8 ++++---- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/GPU/GPUTracking/Base/GPUReconstruction.cxx b/GPU/GPUTracking/Base/GPUReconstruction.cxx index 676ab0f6cf004..bf5ba46b6ab34 100644 --- a/GPU/GPUTracking/Base/GPUReconstruction.cxx +++ b/GPU/GPUTracking/Base/GPUReconstruction.cxx @@ -1215,7 +1215,7 @@ bool GPUReconstruction::CheckInstanceAvailable(DeviceType type) } else if (type == DeviceType::OCL2) { return sLibOCL2->LoadLibrary() == 0; } else { - GPUError("Error: Invalid device type %u", type); + GPUError("Error: Invalid device type %u", (unsigned)type); return false; } } diff --git a/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx b/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx index 435d2e3f56e35..aa57c8f85083e 100644 --- a/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx +++ b/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx @@ -180,13 +180,13 @@ void CalibdEdxTrackTopologyPol::writeToFile(TFile& outf, const char* name) const void CalibdEdxTrackTopologyPol::setFromContainer(const CalibdEdxTrackTopologyPolContainer& container) { if (2 * FFits != container.mCalibPols.size()) { - LOGP(warning, fmt::format("wrong number of polynomials stored! this {} container {}", 2 * FFits, container.mCalibPols.size())); + LOGP(warning, "wrong number of polynomials stored! this {} container {}", 2 * FFits, container.mCalibPols.size()); return; } const auto nFacCont = container.mScalingFactorsqMax.size() + container.mScalingFactorsqTot.size(); if (2 * FFits != nFacCont) { - LOGP(warning, fmt::format("wrong number of scaling factors stored! this {} container {}", 2 * FFits, nFacCont)); + LOGP(warning, "wrong number of scaling factors stored! 
this {} container {}", 2 * FFits, nFacCont); return; } @@ -214,7 +214,7 @@ void CalibdEdxTrackTopologyPol::loadFromFile(const char* fileName, const char* n setFromContainer(*polTmp); delete polTmp; } else { - LOGP(info, fmt::format("couldnt load object {} from input file", name)); + LOGP(info, "couldnt load object {} from input file", name); } } diff --git a/GPU/TPCFastTransformation/MultivariatePolynomial.h b/GPU/TPCFastTransformation/MultivariatePolynomial.h index d2d80886a925c..bc980065f6263 100644 --- a/GPU/TPCFastTransformation/MultivariatePolynomial.h +++ b/GPU/TPCFastTransformation/MultivariatePolynomial.h @@ -178,7 +178,7 @@ void MultivariatePolynomial::loadFromFile(TFile& i delete polTmp; } else { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("couldnt load object {} from input file", name)); + LOGP(info, "couldnt load object {} from input file", name); #endif } } @@ -189,19 +189,19 @@ void MultivariatePolynomial::setFromContainer(cons if constexpr (Dim > 0 && Degree > 0) { if (this->getDim() != container.mDim) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of dimensions! this {} container {}", this->getDim(), container.mDim)); + LOGP(info, "wrong number of dimensions! this {} container {}", this->getDim(), container.mDim); #endif return; } if (this->getDegree() != container.mDegree) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of degrees! this {} container {}", this->getDegree(), container.mDegree)); + LOGP(info, "wrong number of degrees! this {} container {}", this->getDegree(), container.mDegree); #endif return; } if (this->isInteractionOnly() != container.mInteractionOnly) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("InteractionOnly is set for this object to {}, but stored as {} in the container", this->isInteractionOnly(), container.mInteractionOnly)); + LOGP(info, "InteractionOnly is set for this object to {}, but stored as {} in the container", this->isInteractionOnly(), container.mInteractionOnly); #endif return; } diff --git a/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx b/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx index 4d981293ca91f..1e40fcb51359e 100644 --- a/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx +++ b/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx @@ -26,7 +26,7 @@ using namespace GPUCA_NAMESPACE::gpu; void MultivariatePolynomialHelper<0, 0, false>::print() const { #ifndef GPUCA_NO_FMT - LOGP(info, getFormula().c_str()); + LOGP(info, fmt::runtime(getFormula().c_str())); #endif } diff --git a/GPU/TPCFastTransformation/NDPiecewisePolynomials.h b/GPU/TPCFastTransformation/NDPiecewisePolynomials.h index caff727c36975..13ce49a142470 100644 --- a/GPU/TPCFastTransformation/NDPiecewisePolynomials.h +++ b/GPU/TPCFastTransformation/NDPiecewisePolynomials.h @@ -317,7 +317,7 @@ void NDPiecewisePolynomials::loadFromFile(TFile& i delete gridTmp; } else { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("couldnt load object {} from input file", name)); + LOGP(info, "couldnt load object {} from input file", name); #endif } } @@ -326,19 +326,19 @@ void NDPiecewisePolynomials::setFromContainer(cons { if (Dim != container.mDim) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of dimensions! this {} container {}", Dim, container.mDim)); + LOGP(info, "wrong number of dimensions! this {} container {}", Dim, container.mDim); #endif return; } if (Degree != container.mDegree) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of degrees! 
this {} container {}", Degree, container.mDegree)); + LOGP(info, "wrong number of degrees! this {} container {}", Degree, container.mDegree); #endif return; } if (InteractionOnly != container.mInteractionOnly) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("InteractionOnly is set for this object to {}, but stored as {} in the container", InteractionOnly, container.mInteractionOnly)); + LOGP(info, "InteractionOnly is set for this object to {}, but stored as {} in the container", InteractionOnly, container.mInteractionOnly); #endif return; } From 99d75df2f37cc8df3647bac9d4df86e25563f87b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:46:01 +0100 Subject: [PATCH 069/726] Framework: make sure we support fmt 10.1.1 --- CCDB/src/CcdbApi.cxx | 2 +- Common/Field/src/MagneticField.cxx | 2 +- Detectors/AOD/src/AODProducerWorkflowSpec.cxx | 2 +- EventVisualisation/Workflow/src/FileProducer.cxx | 2 +- Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx | 4 ++-- Framework/Core/src/DPLWebSocket.cxx | 2 +- Framework/Core/src/DataOutputDirector.cxx | 2 +- Framework/Core/src/DataProcessingContext.cxx | 2 +- Framework/Core/src/HTTPParser.cxx | 4 ++-- Framework/Core/src/runDataProcessing.cxx | 2 +- Framework/Utils/include/DPLUtils/RawParser.h | 2 -- Framework/Utils/src/RawParser.cxx | 8 ++++---- 12 files changed, 16 insertions(+), 18 deletions(-) diff --git a/CCDB/src/CcdbApi.cxx b/CCDB/src/CcdbApi.cxx index aabc96cb779f8..904d979bfbd79 100644 --- a/CCDB/src/CcdbApi.cxx +++ b/CCDB/src/CcdbApi.cxx @@ -710,7 +710,7 @@ bool CcdbApi::retrieveBlob(std::string const& path, std::string const& targetdir try { o2::utils::createDirectoriesIfAbsent(fulltargetdir); } catch (std::exception e) { - LOGP(error, fmt::format("Could not create local snapshot cache directory {}, reason: {}", fulltargetdir, e.what())); + LOGP(error, "Could not create local snapshot cache directory {}, reason: {}", fulltargetdir, e.what()); return false; } diff --git a/Common/Field/src/MagneticField.cxx b/Common/Field/src/MagneticField.cxx index 2e6ca6d647b11..5df6bbc0b0d34 100644 --- a/Common/Field/src/MagneticField.cxx +++ b/Common/Field/src/MagneticField.cxx @@ -539,7 +539,7 @@ void MagneticField::rescaleField(float l3Cur, float diCur, bool uniform, int con float sclL3 = l3Cur, sclDip = diCur; MagFieldParam::BMap_t map = getFieldMapScale(sclL3, sclDip, uniform); if (map != mMapType) { - LOGP(fatal, "Provided L3current={} DipCurrent={} uniform={} leads to map type {}, incompatible with loaded {}", l3Cur, diCur, uniform, map, mMapType); + LOGP(fatal, "Provided L3current={} DipCurrent={} uniform={} leads to map type {}, incompatible with loaded {}", l3Cur, diCur, uniform, (int)map, (int)mMapType); } setFactorSolenoid(sclL3); setFactorDipole(sclDip); diff --git a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx index fa028535f379c..9b6151cb557b8 100644 --- a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx +++ b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx @@ -1221,7 +1221,7 @@ void AODProducerWorkflowDPL::fillSecondaryVertices(const o2::globaltracking::Rec if (item != mGIDToTableID.end()) { tableIdx[i] = item->second; } else { - LOG(warn) << fmt::format("Could not find a track index for prong ID {}", trIDs[i]); + LOG(warn) << fmt::format("Could not find a track index for prong ID {}", (int)trIDs[i]); missing = true; } } diff --git a/EventVisualisation/Workflow/src/FileProducer.cxx b/EventVisualisation/Workflow/src/FileProducer.cxx 
index 541afdefc4553..818ab546f3047 100644 --- a/EventVisualisation/Workflow/src/FileProducer.cxx +++ b/EventVisualisation/Workflow/src/FileProducer.cxx @@ -47,7 +47,7 @@ std::string FileProducer::newFileName() const gethostname(hostname, _POSIX_HOST_NAME_MAX); auto pid = getpid(); - auto result = fmt::format(this->mName, + auto result = fmt::format(fmt::runtime(this->mName), fmt::arg("hostname", hostname), fmt::arg("pid", pid), fmt::arg("timestamp", millisec_since_epoch), diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx index 1e20f46c2e464..092857d585a02 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx @@ -246,11 +246,11 @@ AlgorithmSpec AODJAlienReaderHelpers::rootFileReaderCallback() // get first folder of next file ntf = 0; if (!didir->readTree(outputs, dh, fcnt, ntf, totalSizeCompressed, totalSizeUncompressed)) { - LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin, fcnt, ntf); + LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin.as(), fcnt, ntf); throw std::runtime_error("Processing is stopped!"); } } else { - LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin, fcnt, ntf); + LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin.as(), fcnt, ntf); throw std::runtime_error("Processing is stopped!"); } } diff --git a/Framework/Core/src/DPLWebSocket.cxx b/Framework/Core/src/DPLWebSocket.cxx index 97755727cbbd9..146a13206b55a 100644 --- a/Framework/Core/src/DPLWebSocket.cxx +++ b/Framework/Core/src/DPLWebSocket.cxx @@ -375,7 +375,7 @@ void WSDPLHandler::write(std::vector& outputs) /// Helper to return an error void WSDPLHandler::error(int code, char const* message) { - static char const* errorFMT = "HTTP/1.1 {} {}\r\ncontent-type: text/plain\r\n\r\n{}: {}\r\n"; + static constexpr auto errorFMT = "HTTP/1.1 {} {}\r\ncontent-type: text/plain\r\n\r\n{}: {}\r\n"; std::string error = fmt::format(errorFMT, code, message, code, message); char* reply = strdup(error.data()); uv_buf_t bfr = uv_buf_init(reply, error.size()); diff --git a/Framework/Core/src/DataOutputDirector.cxx b/Framework/Core/src/DataOutputDirector.cxx index dcf7fe40b6214..078af891c9058 100644 --- a/Framework/Core/src/DataOutputDirector.cxx +++ b/Framework/Core/src/DataOutputDirector.cxx @@ -584,7 +584,7 @@ void DataOutputDirector::printOut() LOGP(info, " File name bases :"); for (auto const& fb : mfilenameBases) { - LOGP(info, fb); + LOGP(info, "{}", fb); } } diff --git a/Framework/Core/src/DataProcessingContext.cxx b/Framework/Core/src/DataProcessingContext.cxx index 365975c706722..9c023e5021a9b 100644 --- a/Framework/Core/src/DataProcessingContext.cxx +++ b/Framework/Core/src/DataProcessingContext.cxx @@ -52,7 +52,7 @@ void DataProcessorContext::preDanglingCallbacks(DanglingContext& danglingContext void DataProcessorContext::postDanglingCallbacks(DanglingContext& danglingContext) { for (auto& handle : postDanglingHandles) { - LOGP(debug, "Invoking postDanglingCallback for service {} {}", handle.spec.name); + LOGP(debug, "Invoking postDanglingCallback for service {}", handle.spec.name); handle.callback(danglingContext, handle.service); } } diff --git a/Framework/Core/src/HTTPParser.cxx b/Framework/Core/src/HTTPParser.cxx index 137706c5f07c9..04ca6e8fdce55 100644 --- 
a/Framework/Core/src/HTTPParser.cxx +++ b/Framework/Core/src/HTTPParser.cxx @@ -189,7 +189,7 @@ void decode_websocket(char* start, size_t size, WebSocketHandler& handler) std::string encode_websocket_handshake_request(const char* endpoint, const char* protocol, int version, char const* nonce, std::vector> headers) { - char const* res = + constexpr auto res = "GET {} HTTP/1.1\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" @@ -216,7 +216,7 @@ std::string HTTPParserHelpers::calculateAccept(const char* nonce) std::string encode_websocket_handshake_reply(char const* nonce) { - char const* res = + constexpr auto res = "HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 61fdef070d874..86cc15299e8f3 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -696,7 +696,7 @@ void spawnDevice(uv_loop_t* loop, close(childFds[ref.index].childstdout[1]); if (varmap.count("post-fork-command")) { auto templateCmd = varmap["post-fork-command"]; - auto cmd = fmt::format(templateCmd.as(), + auto cmd = fmt::format(fmt::runtime(templateCmd.as()), fmt::arg("pid", id), fmt::arg("id", spec.id), fmt::arg("cpu", parentCPU), diff --git a/Framework/Utils/include/DPLUtils/RawParser.h b/Framework/Utils/include/DPLUtils/RawParser.h index 2c35c008983f0..c1ba1ef4802b2 100644 --- a/Framework/Utils/include/DPLUtils/RawParser.h +++ b/Framework/Utils/include/DPLUtils/RawParser.h @@ -58,14 +58,12 @@ struct RDHFormatter { template <> struct RDHFormatter { using type = header::RAWDataHeaderV7; - static const char* sFormatString; static void apply(std::ostream&, type const&, FormatSpec, const char* = ""); }; template <> struct RDHFormatter { using type = header::RAWDataHeaderV6; - static const char* sFormatString; static void apply(std::ostream&, type const&, FormatSpec, const char* = ""); }; diff --git a/Framework/Utils/src/RawParser.cxx b/Framework/Utils/src/RawParser.cxx index 2ec48c7f93d36..c7cbbf3d28880 100644 --- a/Framework/Utils/src/RawParser.cxx +++ b/Framework/Utils/src/RawParser.cxx @@ -51,9 +51,9 @@ void RawParserHelper::warnDeadBeef(const o2::header::DataHeader* dh) } } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1} {:>2}"; void RDHFormatter::apply(std::ostream& os, V7 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1} {:>2}"; if (choice == FormatSpec::Info) { os << "RDH v7"; } else if (choice == FormatSpec::TableHeader) { @@ -73,9 +73,9 @@ void RDHFormatter::apply(std::ostream& os, V7 const& header, FormatSpec choi } os << delimiter; } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; void RDHFormatter::apply(std::ostream& os, V6 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; if (choice == FormatSpec::Info) { os << "RDH v6"; } else if (choice == FormatSpec::TableHeader) { @@ -95,9 +95,9 @@ void RDHFormatter::apply(std::ostream& os, V6 const& header, FormatSpec choi os << delimiter; } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; void RDHFormatter::apply(std::ostream& os, V5 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const 
char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; if (choice == FormatSpec::Info) { os << "RDH v5"; } else if (choice == FormatSpec::TableHeader) { @@ -116,9 +116,9 @@ void RDHFormatter::apply(std::ostream& os, V5 const& header, FormatSpec choi os << delimiter; } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>10} {:>5} {:>1}"; void RDHFormatter::apply(std::ostream& os, V4 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>10} {:>5} {:>1}"; if (choice == FormatSpec::Info) { os << "RDH v4"; } else if (choice == FormatSpec::TableHeader) { From dca52476b7b73168cd68570d8b35ccc4e5d6e287 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:46:02 +0100 Subject: [PATCH 070/726] Detectors: make sure fmt 10.1.1 is supported --- .../include/DetectorsCommonDataFormats/EncodedBlocks.h | 3 ++- .../RecoContainerCreateTracksVariadic.h | 4 ++-- DataFormats/Parameters/src/GRPTool.cxx | 2 +- Detectors/CTF/workflow/src/ctf-reader-workflow.cxx | 2 +- Detectors/DCS/src/AliasExpander.cxx | 4 ++-- Detectors/EMCAL/base/src/RCUTrailer.cxx | 4 ++-- Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx | 2 +- Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx | 2 +- Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx | 2 +- Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx | 2 +- Detectors/MUON/MCH/Calibration/src/PedestalData.cxx | 2 +- .../MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx | 2 +- Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx | 2 +- .../MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx | 4 ++-- .../MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx | 6 +++--- Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx | 2 +- Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx | 2 +- Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx | 2 +- Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx | 2 +- Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx | 2 +- .../MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx | 2 +- Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx | 2 +- Detectors/Raw/src/RawDumpSpec.cxx | 4 ++-- Detectors/Raw/src/rawfile-reader-workflow.cxx | 2 +- Detectors/TPC/base/include/TPCBase/CalDet.h | 2 +- Detectors/TPC/base/src/CDBInterface.cxx | 2 +- Detectors/TPC/calibration/src/CalibTreeDump.cxx | 2 +- Detectors/TPC/calibration/src/CalibdEdx.cxx | 3 ++- Detectors/TPC/calibration/src/VDriftHelper.cxx | 3 ++- Detectors/TPC/dcs/src/DCSConfigSpec.cxx | 2 +- Detectors/TPC/dcs/src/DCSProcessor.cxx | 6 +++--- Detectors/TPC/reconstruction/src/RawReaderCRU.cxx | 2 +- .../TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h | 2 +- .../include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h | 2 +- Detectors/TPC/workflow/src/ClusterQCSpec.cxx | 2 +- Detectors/TPC/workflow/src/IDCToVectorSpec.cxx | 4 ++-- Detectors/TRD/qc/src/Tracking.cxx | 2 +- 37 files changed, 50 insertions(+), 47 deletions(-) diff --git a/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h b/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h index a0853b8f14c73..6c024186b3085 100644 --- a/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h +++ b/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h @@ -402,7 +402,8 @@ class EncodedBlocks if (getANSHeader() == 
ANSVersionUnspecified) { rans::DenseHistogram histogram{block.getDict(), block.getDict() + block.getNDict(), metadata.min}; size_t renormingBits = rans::utils::sanitizeRenormingBitRange(metadata.probabilityBits); - LOG_IF(debug, renormingBits != metadata.probabilityBits) << fmt::format("While reading metadata from external dictionary, rANSV1 is rounding renorming precision from {} to {}"); + LOG_IF(debug, renormingBits != metadata.probabilityBits) + << fmt::format("While reading metadata from external dictionary, rANSV1 is rounding renorming precision from {} to {}", metadata.probabilityBits, renormingBits); return rans::renorm(std::move(histogram), renormingBits, rans::RenormingPolicy::ForceIncompressible); } else { // dictionary is elias-delta coded inside the block diff --git a/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h b/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h index 4bbe355a781c7..4cec730c620a5 100644 --- a/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h +++ b/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h @@ -285,7 +285,7 @@ void o2::globaltracking::RecoContainer::createTracksVariadic(T creator, GTrackID if (srcSel[currentSource]) { if (matchesTPCTOF.size() && !tracksTPCTOF.size()) { throw std::runtime_error(fmt::format("TPC-TOF matched tracks ({}) require TPCTOF matches ({}) and TPCTOF tracks ({})", - matchesTPCTOF.size(), tracksTPCTOF.size())); + -1, matchesTPCTOF.size(), tracksTPCTOF.size())); } for (unsigned i = 0; i < matchesTPCTOF.size(); i++) { const auto& match = matchesTPCTOF[i]; @@ -321,7 +321,7 @@ void o2::globaltracking::RecoContainer::createTracksVariadic(T creator, GTrackID if (srcSel[currentSource]) { if (matchesMCHMID.size() && !tracksMCH.size()) { throw std::runtime_error(fmt::format("MCH-MID matched tracks ({}) require MCHMID matches ({}) and MCH tracks ({})", - matchesMCHMID.size(), tracksMCH.size())); + -1, matchesMCHMID.size(), tracksMCH.size())); } for (unsigned i = 0; i < matchesMCHMID.size(); i++) { const auto& match = matchesMCHMID[i]; diff --git a/DataFormats/Parameters/src/GRPTool.cxx b/DataFormats/Parameters/src/GRPTool.cxx index 9b5078b60733c..413b08482d84a 100644 --- a/DataFormats/Parameters/src/GRPTool.cxx +++ b/DataFormats/Parameters/src/GRPTool.cxx @@ -205,7 +205,7 @@ bool publish(std::string const& filename, std::string const& path, std::string C try { o2::utils::createDirectoriesIfAbsent(targetdir); } catch (std::exception e) { - LOGP(error, fmt::format("Could not create local snapshot cache directory {}, reason: {}", targetdir, e.what())); + LOGP(error, "Could not create local snapshot cache directory {}, reason: {}", targetdir, e.what()); return false; } diff --git a/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx b/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx index 3fcb4f69d0b8f..ef3a0f8d3c2c4 100644 --- a/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx +++ b/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx @@ -134,7 +134,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - ctfInput.metricChannel = fmt::format(chanFmt, 
o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + ctfInput.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } specs.push_back(o2::ctf::getCTFReaderSpec(ctfInput)); diff --git a/Detectors/DCS/src/AliasExpander.cxx b/Detectors/DCS/src/AliasExpander.cxx index b197d2fb78d67..41aaec8351f72 100644 --- a/Detectors/DCS/src/AliasExpander.cxx +++ b/Detectors/DCS/src/AliasExpander.cxx @@ -82,7 +82,7 @@ std::vector extractRange(std::string range) std::vector result; for (auto i = a; i <= b; i++) { - auto substituted = fmt::format(intFormat, i); + auto substituted = fmt::format(fmt::runtime(intFormat), i); result.push_back(substituted); } return result; @@ -124,7 +124,7 @@ std::vector expandAlias(const std::string& pattern) std::vector result; for (auto r : range) { - auto substituted = fmt::format(newPattern, r); + auto substituted = fmt::format(fmt::runtime(newPattern), r); result.emplace_back(substituted); } diff --git a/Detectors/EMCAL/base/src/RCUTrailer.cxx b/Detectors/EMCAL/base/src/RCUTrailer.cxx index 6d10d0cb93c1d..24aac7b5bb25a 100644 --- a/Detectors/EMCAL/base/src/RCUTrailer.cxx +++ b/Detectors/EMCAL/base/src/RCUTrailer.cxx @@ -172,7 +172,7 @@ void RCUTrailer::setTimeSamplePhaseNS(uint64_t triggertime, uint64_t timesample) sample = 2; break; default: - throw Error(Error::ErrorType_t::SAMPLINGFREQ_INVALID, fmt::format("invalid time sample: {:f}", timesample).data()); + throw Error(Error::ErrorType_t::SAMPLINGFREQ_INVALID, fmt::format(fmt::runtime("invalid time sample: {:f}"), timesample).data()); }; mAltroConfig.mSampleTime = sample; // calculate L1 phase @@ -275,4 +275,4 @@ std::ostream& o2::emcal::operator<<(std::ostream& stream, const o2::emcal::RCUTr { trailer.printStream(stream); return stream; -} \ No newline at end of file +} diff --git a/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx b/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx index 55bfe8fc101e7..7b3a88bf27385 100644 --- a/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx +++ b/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx @@ -395,7 +395,7 @@ double HMPIDDCSProcessor::calculatePhotonEnergy(int i) if (dp.id.get_type() == DeliveryType::DPVAL_DOUBLE) { lambda = o2::dcs::getValue(dp); } else { - LOGP(warn, "DP type is {}", dp.id.get_type()); + LOGP(warn, "DP type is {}", (int)dp.id.get_type()); LOGP(warn, "Not correct datatype for HMP_TRANPLANT_MEASURE_{}_WAVELENGTH --> Default wavelength used for iteration procTrans{}", i, i); lambda = arrWaveLenDefault[i]; } diff --git a/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx b/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx index af445c14f09c1..6220341f20054 100644 --- a/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx +++ b/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx @@ -1384,7 +1384,7 @@ int MillePede2::SolveGlobalMatEq() if (fgIterSol == MinResSolve::kSolFGMRes) { res = slv->SolveFGMRES(sol, fgMinResCondType, fgMinResMaxIter, fgMinResTol, fgNKrylovV); } else { - LOGF(warning, "MillePede2 - Undefined Iteritive Solver ID=%d, only %d are defined", fgIterSol, MinResSolve::kNSolvers); + LOGF(warning, "MillePede2 - Undefined Iteritive Solver ID=%d, only %d are defined", fgIterSol, (int)MinResSolve::kNSolvers); } } diff --git a/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx b/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx index a643f6fc4af1d..41b487786e90e 100644 --- a/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx +++ 
b/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx @@ -48,7 +48,7 @@ std::string ChipPixelData::getErrorDetails(int pos) const std::string rbuf = ": 0x<"; int nc = getNBytesInRawBuff(); for (int i = 0; i < nc; i++) { - rbuf += fmt::format(i ? " {:02x}" : "{:02x}", (int)getRawErrBuff()[i]); + rbuf += fmt::format(fmt::runtime(i ? " {:02x}" : "{:02x}"), (int)getRawErrBuff()[i]); } rbuf += '>'; return rbuf; diff --git a/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx b/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx index 38c11d96ddd27..a05fd0504049d 100644 --- a/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx +++ b/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx @@ -35,5 +35,5 @@ std::string ChipInfo::asString() const void ChipInfo::print() const { - LOGP(info, asString()); + LOGP(info, fmt::runtime(asString())); } diff --git a/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx b/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx index 0f5b377a4c92d..661bab7913b8e 100644 --- a/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx +++ b/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx @@ -120,7 +120,7 @@ void PedestalData::merge(const PedestalData* prev) void PedestalData::print() const { for (const auto& p : const_cast(*this)) { - LOGP(info, p.asString()); + LOGP(info, fmt::runtime(p.asString())); } } diff --git a/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx b/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx index ea173bd1fc7cc..fe40659bc9265 100644 --- a/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx +++ b/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx @@ -88,7 +88,7 @@ class DigitFilteringTask if (!isOK(error)) { if (error.nofOutOfBounds > 0) { - LOGP(error, asString(error)); + LOGP(error, "{}", asString(error)); LOGP(error, "in a TF with {} rofs and {} digits", iRofs.size(), iDigits.size()); abort = true; } diff --git a/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx b/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx index cc9d504d92e50..651a764820a1c 100644 --- a/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx +++ b/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx @@ -102,7 +102,7 @@ int main(int argc, char** argv) int nbad{0}; for (auto de : deIds) { - std::string filepath(fmt::format(filePattern, de)); + std::string filepath(fmt::format(fmt::runtime(filePattern), de)); std::ifstream in(filepath); if (!in) { std::cout << "Cannot open " << filepath << "\n"; diff --git a/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx b/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx index 2989401b55628..408ffda2ca9dc 100644 --- a/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx +++ b/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx @@ -83,7 +83,7 @@ class PreClusterFinderTask LOG(info) << "deinitializing preclusterizer in: " << std::chrono::duration(tEnd - tStart).count() << " ms"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }; ic.services().get().set(stop); @@ -117,7 +117,7 @@ class PreClusterFinderTask if (!isOK(error)) { if (error.nofOutOfBounds > 0) { // FIXME: replace this error log with a counters' message ? 
- LOGP(error, asString(error)); + LOGP(error, "{}", asString(error)); LOGP(error, "in a TF with {} rofs and {} digits", digitROFs.size(), digits.size()); abort = true; } diff --git a/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx b/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx index 43aefe520cb6e..519e56244aba2 100644 --- a/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx +++ b/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx @@ -369,7 +369,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(SyncInTheMiddleChargeSumModeTwoChannels, V, testTy 5); std::string r2 = "S361-J6-DS2-ch-63-ts-345-q-123456-cs-789\n"; r2 += "S361-J6-DS2-ch-63-ts-346-q-789012-cs-345\n"; - r2 += fmt::format("S361-J6-DS2-chip-5-error-{}\n", ErrorUnexpectedSyncPacket); + r2 += fmt::format("S361-J6-DS2-chip-5-error-{}\n", (int)ErrorUnexpectedSyncPacket); BOOST_CHECK_EQUAL(r, r2); } @@ -387,7 +387,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadClusterSize, V, testTypes) gsl::span page = CruPageBadClusterSize; std::string r = testPayloadDecodeCruPages(page); BOOST_CHECK_EQUAL(r, - fmt::format("S81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", ErrorBadClusterSize)); + fmt::format("S81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", (int)ErrorBadClusterSize)); } BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadN10bitWords, V, testTypes) @@ -396,7 +396,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadN10bitWords, V, testTypes) std::string r = testPayloadDecodeCruPages(page); std::string expected = fmt::format("S81-J0-DS0-ch-42-ts-87-q-2-1-0-0-1-0-0-0\nS81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", - ErrorBadIncompleteWord); + (int)ErrorBadIncompleteWord); BOOST_CHECK_EQUAL(r, expected); } diff --git a/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx b/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx index 7b63ac2969e56..17f5f4463b16a 100644 --- a/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx +++ b/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx @@ -20,7 +20,7 @@ void AssertBranch(ROOT::Internal::TTreeReaderValueBase& value) { if (value.GetSetupStatus() < 0) { throw std::invalid_argument(fmt::format("Error {} setting up tree reader for branch {}", - value.GetSetupStatus(), value.GetBranchName())); + (int)value.GetSetupStatus(), value.GetBranchName())); } } diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx index e545c15025d8e..2407796773f57 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx @@ -104,7 +104,7 @@ std::vector paginate(gsl::span buffer, const std::st return pages; } -const char* sampaClusterFormat = "{}-CH{}-{}"; +constexpr const char* sampaClusterFormat = "{}-CH{}-{}"; // Create a vector of SampaCluster from a string d // where d is of the form ts-#-bc-#-cs-#-q-# or diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx index 661fd7741c954..aac72b144a80f 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx @@ -28,7 +28,7 @@ using namespace o2::mch::raw; -const char* sampaClusterFormat = "{}-CH{}-{}"; +constexpr const char* sampaClusterFormat = "{}-CH{}-{}"; const bool useDummyElecMap = true; struct DePadId { diff --git a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx index 
de920200bf4ff..c40df5d4b448e 100644 --- a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx +++ b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx @@ -105,7 +105,7 @@ class TrackFinderTask mTrackFinder.printTimers(); LOG(info) << "tracking duration = " << mElapsedTime.count() << " s"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }; ic.services().get().set(stop); diff --git a/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx b/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx index be6e415c74324..5158cc436b277 100644 --- a/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx @@ -173,7 +173,7 @@ class ClusterFinderGEMTask mGEMDump = nullptr; } mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }); auto stop = [this]() { diff --git a/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx b/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx index 2007f0634960f..8344d2837b814 100644 --- a/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx @@ -73,7 +73,7 @@ class ClusterFinderOriginalTask ic.services().get().set([this]() { LOG(info) << "cluster finder duration = " << mTimeClusterFinder.count() << " s"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, fmt::runtime(error.asString())); }); this->mClusterFinder.deinit(); }); diff --git a/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx b/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx index facb8a880ba66..c468d1660fcc7 100644 --- a/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx +++ b/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx @@ -86,7 +86,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - rinp.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + rinp.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } WorkflowSpec specs; diff --git a/Detectors/Raw/src/RawDumpSpec.cxx b/Detectors/Raw/src/RawDumpSpec.cxx index 345eba5cefdc0..1cb667febe2a3 100644 --- a/Detectors/Raw/src/RawDumpSpec.cxx +++ b/Detectors/Raw/src/RawDumpSpec.cxx @@ -273,7 +273,7 @@ void RawDump::endOfStream(EndOfStreamContext& ec) auto fnm = fmt::format("{}{}{}raw.cfg", mOutDir, mOutDir.back() == '/' ? 
"" : "/", DetID::getName(id)); auto fh = std::fopen(fnm.c_str(), "w"); if (!fh) { - LOGP(fatal, "Failed to create configuration file {}"); + LOGP(fatal, "Failed to create configuration file {}", fnm); } auto ws = std::fwrite(mConfigEntries[id].data(), 1, mConfigEntries[id].size(), fh); if (ws != mConfigEntries[id].size()) { @@ -655,7 +655,7 @@ std::string RawDump::getBaseFileNameTRD(const header::RDHAny* rdh) int link = supermodule * 4 + side * 2 + ep, cru = link / 2; if (link >= NLinks) { auto flpname = fmt::format("flp-unknown_cru{}_ep{}_feeid0x{:05x}", cru, int(RDHUtils::getEndPointID(rdh)), RDHUtils::getFEEID(rdh)); - LOGP(error, "Got wrong link {}, setting TRF file name to unrecognized flp {}", flpname); + LOGP(error, "Got wrong link {}, setting TRF file name to unrecognized flp {}", link, flpname); return flpname; } return fmt::format("alio2-cr1-flp{}_cru{}_{}", trdHWMap[cru].flpid, trdHWMap[cru].cruHWID, ep); diff --git a/Detectors/Raw/src/rawfile-reader-workflow.cxx b/Detectors/Raw/src/rawfile-reader-workflow.cxx index d8cc44fd5adbd..3a5440f80ed2d 100644 --- a/Detectors/Raw/src/rawfile-reader-workflow.cxx +++ b/Detectors/Raw/src/rawfile-reader-workflow.cxx @@ -90,7 +90,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - rinp.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + rinp.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } o2::conf::ConfigurableParam::updateFromString(configcontext.options().get("configKeyValues")); auto hbfini = configcontext.options().get("hbfutils-config"); diff --git a/Detectors/TPC/base/include/TPCBase/CalDet.h b/Detectors/TPC/base/include/TPCBase/CalDet.h index d6b6b70119e4b..835e3638642df 100644 --- a/Detectors/TPC/base/include/TPCBase/CalDet.h +++ b/Detectors/TPC/base/include/TPCBase/CalDet.h @@ -463,7 +463,7 @@ void CalDet::initData() if (!hasData) { mData.push_back(CalType(mPadSubset, i)); } - mData[i].setName(fmt::format(frmt, mName, i)); + mData[i].setName(fmt::format(fmt::runtime(frmt), mName, i)); } } diff --git a/Detectors/TPC/base/src/CDBInterface.cxx b/Detectors/TPC/base/src/CDBInterface.cxx index 1180fd1cc8b1d..b359a04b385cb 100644 --- a/Detectors/TPC/base/src/CDBInterface.cxx +++ b/Detectors/TPC/base/src/CDBInterface.cxx @@ -633,5 +633,5 @@ void CDBStorage::printObjectSummary(std::string_view name, CDBType const type, M message += fmt::format("{:>20} = {}\n", key, value); } - LOGP(info, message); + LOGP(info, "{}", message); } diff --git a/Detectors/TPC/calibration/src/CalibTreeDump.cxx b/Detectors/TPC/calibration/src/CalibTreeDump.cxx index 3423be676566d..0748f1be55da5 100644 --- a/Detectors/TPC/calibration/src/CalibTreeDump.cxx +++ b/Detectors/TPC/calibration/src/CalibTreeDump.cxx @@ -223,7 +223,7 @@ void CalibTreeDump::addCalDetObjects(TTree* tree) std::string name = calDet.getName(); if (name == "PadCalibrationObject" || name.size() == 0) { - name = fmt::format("calDet_{%02d}", iter); + name = fmt::format(fmt::runtime("calDet_{%02d}"), iter); } std::string meanName = fmt::format("{}_mean", name); diff --git a/Detectors/TPC/calibration/src/CalibdEdx.cxx b/Detectors/TPC/calibration/src/CalibdEdx.cxx index c012a4a2b3ec9..8c098cc04cf20 100644 --- 
a/Detectors/TPC/calibration/src/CalibdEdx.cxx +++ b/Detectors/TPC/calibration/src/CalibdEdx.cxx @@ -215,7 +215,8 @@ void fitHist(const Hist& hist, CalibdEdxCorrection& corr, TLinearFitter& fitter, corr.setChi2(id, charge, fitter.GetChisquare()); corr.setEntries(id, charge, entries); } - LOGP(debug, "Sector: {}, gemType: {}, charge: {}, Fit pass: {} with {} % outliers in {} entries. Fitter Points: {}, mean fit: {}", id.sector, int(id.type), charge, fitPass, (float)outliers / (float)entries * 100, entries, fitter.GetNpoints(), params[0]); + LOGP(debug, "Sector: {}, gemType: {}, charge: {}, Fit pass: {} with {} % outliers in {} entries. Fitter Points: {}, mean fit: {}", + id.sector, int(id.type), int(charge), fitPass, (float)outliers / (float)entries * 100, entries, fitter.GetNpoints(), params[0]); } } } diff --git a/Detectors/TPC/calibration/src/VDriftHelper.cxx b/Detectors/TPC/calibration/src/VDriftHelper.cxx index 034888998e567..b420c0392899b 100644 --- a/Detectors/TPC/calibration/src/VDriftHelper.cxx +++ b/Detectors/TPC/calibration/src/VDriftHelper.cxx @@ -43,7 +43,8 @@ VDriftHelper::VDriftHelper() if (o2::conf::ConfigurableParam::getProvenance("TPCDetParam.DriftTimeOffset") == o2::conf::ConfigurableParam::EParamProvenance::kRT) { // we stick to this value mVD.creationTime = std::numeric_limits::max(); mForceParamOffset = true; - LOGP(info, "TPC dridt time offset was set from command line to {} mus ({} TB}, will neglect update from CCDB", mVD.refTimeOffset, detpar.DriftTimeOffset); + LOGP(info, "TPC dridt time offset was set from command line to {} mus ({} TB), will neglect update from CCDB", + mVD.refTimeOffset, detpar.DriftTimeOffset); } mUpdated = true; diff --git a/Detectors/TPC/dcs/src/DCSConfigSpec.cxx b/Detectors/TPC/dcs/src/DCSConfigSpec.cxx index 967b825d65a1d..34270cbca528d 100644 --- a/Detectors/TPC/dcs/src/DCSConfigSpec.cxx +++ b/Detectors/TPC/dcs/src/DCSConfigSpec.cxx @@ -289,7 +289,7 @@ void DCSConfigDevice::fillCRUConfig(gsl::span configBuff, bool updat } if (!update && (nLines != CRU::MaxCRU)) { - LOGP(error, "Full FEEConfig expected, but only {} / {} lines read for CRUConfig", nLines, CRU::MaxCRU); + LOGP(error, "Full FEEConfig expected, but only {} / {} lines read for CRUConfig", nLines, (int)CRU::MaxCRU); } else { LOGP(info, "updating CRUConfig for {} crus", nLines); } diff --git a/Detectors/TPC/dcs/src/DCSProcessor.cxx b/Detectors/TPC/dcs/src/DCSProcessor.cxx index 3a1572a6e6834..a26bce43e5c2e 100644 --- a/Detectors/TPC/dcs/src/DCSProcessor.cxx +++ b/Detectors/TPC/dcs/src/DCSProcessor.cxx @@ -69,7 +69,7 @@ float DCSProcessor::getValueF(const DPCOM& dp) const } else if (dp.id.get_type() == DeliveryType::DPVAL_DOUBLE) { return static_cast(o2::dcs::getValue(dp)); } else { - LOGP(warning, "Unexpected delivery type for {}: {}", dp.id.get_alias(), dp.id.get_type()); + LOGP(warning, "Unexpected delivery type for {}: {}", dp.id.get_alias(), (int)dp.id.get_type()); } return 0.f; @@ -94,7 +94,7 @@ void DCSProcessor::fillHV(const DPCOM& dp) // TODO: Remove once type is clear static bool statTypePrinted = false; if (!statTypePrinted) { - LOGP(info, "Delivery type for STATUS ({}): {}", alias, type); + LOGP(info, "Delivery type for STATUS ({}): {}", alias, (int)type); statTypePrinted = true; } if (type == DeliveryType::DPVAL_UINT) { @@ -109,7 +109,7 @@ void DCSProcessor::fillHV(const DPCOM& dp) // TODO: Remove once type is clear static bool uiTypePrinted = false; if (!uiTypePrinted) { - LOGP(info, "Delivery type for current, voltage ({}): {}", alias, type); + LOGP(info, "Delivery 
type for current, voltage ({}): {}", alias, (int)type); uiTypePrinted = true; } const auto value = getValueF(dp); diff --git a/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx b/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx index 40063f23612ae..3a552ebcce82f 100644 --- a/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx +++ b/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx @@ -973,7 +973,7 @@ void RawReaderCRU::writeGBTDataPerLink(std::string_view outputDirectory, int max const int ep = iLink >= 12; const int link = iLink - (ep)*12; - auto outputFileName = fmt::format("{}/CRU_{:02}_EP_{}_Link_{:02}", outputDirectory.data(), mCRU, ep, link); + auto outputFileName = fmt::format("{}/CRU_{:02}_EP_{}_Link_{:02}", outputDirectory.data(), (int)mCRU, ep, link); std::ofstream outputFile(outputFileName, std::ios_base::binary | std::ios_base::app); for (auto packetNumber : linkInfo.PacketPositions) { diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h index 69f90b2e71764..a4107690c941a 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h @@ -311,7 +311,7 @@ class TPCFactorizeIDCSpec : public o2::framework::Task o2::ccdb::CcdbObjectInfo ccdbInfoIDCDelta(CDBTypeMap.at(sideA ? CDBType::CalIDCDeltaA : CDBType::CalIDCDeltaC), std::string{}, std::string{}, std::map{}, timeStampStartDelta, timeStampEndDelta); if (mDumpIDCDelta) { - mIDCGrouping.dumpToFile(fmt::format("{}DeltaAveraged_chunk{:02}_{:02}_side{}.root", getCurrentType(), iChunk, timeStampStartDelta, side).data()); + mIDCGrouping.dumpToFile(fmt::format("{}DeltaAveraged_chunk{:02}_{:02}_side{}.root", getCurrentType(), iChunk, timeStampStartDelta, (int)side).data()); } auto startCCDBIDCDelta = timer::now(); diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h index bb8334901b8d9..c24bbd1b245ce 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h @@ -125,7 +125,7 @@ class TPCFourierTransformAggregatorSpec : public o2::framework::Task if (mDumpFFT) { LOGP(info, "dumping FT to file"); - mIDCFourierTransform[side].dumpToFile(fmt::format("FourierAGG_{:02}_side{}.root", processing_helpers::getCurrentTF(pc), side).data()); + mIDCFourierTransform[side].dumpToFile(fmt::format("FourierAGG_{:02}_side{}.root", processing_helpers::getCurrentTF(pc), (int)side).data()); } if (mSendOutDebug) { diff --git a/Detectors/TPC/workflow/src/ClusterQCSpec.cxx b/Detectors/TPC/workflow/src/ClusterQCSpec.cxx index 284085005ec15..8c93d2755b090 100644 --- a/Detectors/TPC/workflow/src/ClusterQCSpec.cxx +++ b/Detectors/TPC/workflow/src/ClusterQCSpec.cxx @@ -126,7 +126,7 @@ void ClusterQCDevice::endInterval() LOGP(info, "End interval for run: {}, TFs: {} - {}, creation: {} - {}, processed TFs: {}", mRunNumber, mFirstTF, mLastTF, mFirstCreation, mLastCreation, mClusterQC.getProcessedTFs()); - const auto outputFileName = fmt::format(mOutputFileName, fmt::arg("run", mRunNumber), + const auto outputFileName = fmt::format(fmt::runtime(mOutputFileName), fmt::arg("run", mRunNumber), fmt::arg("firstTF", mFirstTF), fmt::arg("lastTF", mLastTF), fmt::arg("firstCreation", mFirstCreation), fmt::arg("lastCreation", mLastCreation)); std::unique_ptr 
f(TFile::Open(outputFileName.data(), "recreate")); diff --git a/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx b/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx index e9b26209a6fd1..a1b77b2774228 100644 --- a/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx +++ b/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx @@ -124,7 +124,7 @@ class IDCToVectorDevice : public o2::framework::Task // open files if necessary if ((mWriteDebug || mWriteDebugOnError) && !mDebugStream) { - const auto debugFileName = fmt::format(mDebugStreamFileName, fmt::arg("run", runNumber)); + const auto debugFileName = fmt::format(fmt::runtime(mDebugStreamFileName), fmt::arg("run", runNumber)); LOGP(info, "creating debug stream {}", debugFileName); mDebugStream = std::make_unique(debugFileName.data(), "recreate"); } @@ -134,7 +134,7 @@ class IDCToVectorDevice : public o2::framework::Task if (mRawDataType == 4) { rawType = "idc.raw"; } - const auto rawFileName = fmt::format(mRawOutputFileName, fmt::arg("run", runNumber), fmt::arg("raw_type", rawType)); + const auto rawFileName = fmt::format(fmt::runtime(mRawOutputFileName), fmt::arg("run", runNumber), fmt::arg("raw_type", rawType)); LOGP(info, "creating raw debug file {}", rawFileName); mRawOutputFile.open(rawFileName, std::ios::binary); } diff --git a/Detectors/TRD/qc/src/Tracking.cxx b/Detectors/TRD/qc/src/Tracking.cxx index 98131ced41961..a827c3f5a1081 100644 --- a/Detectors/TRD/qc/src/Tracking.cxx +++ b/Detectors/TRD/qc/src/Tracking.cxx @@ -57,7 +57,7 @@ void Tracking::checkTrack(const TrackTRD& trkTrd, bool isTPCTRD) qcStruct.refGlobalTrackId = id; qcStruct.trackTRD = trkTrd; - LOGF(debug, "Got track with %i tracklets and ID %i", trkTrd.getNtracklets(), id); + LOGF(debug, "Got track with %i tracklets and ID %i", trkTrd.getNtracklets(), (int)id); o2::track::TrackParCov trk = isTPCTRD ? 
mTracksTPC[id].getParamOut() : mTracksITSTPC[id].getParamOut(); qcStruct.trackSeed = trk; if (mPID) { From eb7857f037b9893f38c421f57232f48c54f9b240 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:46:02 +0100 Subject: [PATCH 071/726] Support ROOT v6.30.01 --- Common/MathUtils/include/MathUtils/fit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Common/MathUtils/include/MathUtils/fit.h b/Common/MathUtils/include/MathUtils/fit.h index 2b1064b13fecb..00c39486a4ba0 100644 --- a/Common/MathUtils/include/MathUtils/fit.h +++ b/Common/MathUtils/include/MathUtils/fit.h @@ -59,7 +59,7 @@ template TFitResultPtr fit(const size_t nBins, const T* arr, const T xMin, const T xMax, TF1& func, std::string_view option = "") { Foption_t fitOption; - ROOT::Fit::FitOptionsMake(ROOT::Fit::kHistogram, option.data(), fitOption); + ROOT::Fit::FitOptionsMake(ROOT::Fit::EFitObjectType::kHistogram, option.data(), fitOption); ROOT::Fit::DataRange range(xMin, xMax); ROOT::Fit::DataOptions opt; From 456d9b0c503dc3ca31f720c3fd8c0d7f1b9c298c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:46:03 +0100 Subject: [PATCH 072/726] C++20 does not allow aggregate initialisation if default ctor present --- Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h | 1 - .../Vertexing/include/DetectorsVertexing/PVertexerHelpers.h | 1 - 2 files changed, 2 deletions(-) diff --git a/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h b/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h index f38195dbeb6ec..6f26fca6f4d03 100644 --- a/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h +++ b/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h @@ -288,7 +288,6 @@ struct InteractionCandidate : public o2::InteractionRecord { int rofITS; // corresponding ITS ROF entry (in the ROFRecord vectors) uint32_t flag; // origin, etc. o2::dataformats::RangeReference seedsRef; // references to AB seeds - InteractionCandidate() = default; InteractionCandidate(const o2::InteractionRecord& ir, float t, float dt, int rof, uint32_t f = 0) : o2::InteractionRecord(ir), tBracket(t - dt, t + dt), rofITS(rof), flag(f) {} }; diff --git a/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h b/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h index ec11c6e289c71..34c2d7c15a011 100644 --- a/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h +++ b/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h @@ -277,7 +277,6 @@ struct InteractionCandidate : public o2::InteractionRecord { float time = 0; float amplitude = 0; uint32_t flag = 0; // origin, etc. 
- InteractionCandidate() = default; }; } // namespace vertexing From 87c1722f6df43536e995726ecc944daa6e1b4a1a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:46:04 +0100 Subject: [PATCH 073/726] DPL: adapt RCombinedDS to new ROOT API --- .../RootAnalysisHelpers/include/Framework/RCombinedDS.h | 8 ++++---- Framework/RootAnalysisHelpers/src/RCombinedDS.cxx | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h index b3b7979232467..c4dd7bce6a86f 100644 --- a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h +++ b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h @@ -341,14 +341,14 @@ class RCombinedDS final : public ROOT::RDF::RDataSource dummy += colName.data(); throw std::runtime_error(dummy); } - const std::vector& GetColumnNames() const override; + [[nodiscard]] const std::vector& GetColumnNames() const override; std::vector> GetEntryRanges() override; - std::string GetTypeName(std::string_view colName) const override; - bool HasColumn(std::string_view colName) const override; + [[nodiscard]] std::string GetTypeName(std::string_view colName) const override; + [[nodiscard]] bool HasColumn(std::string_view colName) const override; bool SetEntry(unsigned int slot, ULong64_t entry) override; void InitSlot(unsigned int slot, ULong64_t firstEntry) override; void SetNSlots(unsigned int nSlots) override; - void Initialise() override; + virtual void Initialise(); }; //////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx index f8ff71f8f6583..06958991cdf14 100644 --- a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx +++ b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx @@ -224,8 +224,8 @@ void RCombinedDS::Initialise() { fEntryRanges = fIndex->BuildIndex(fLeftDF, fRightDF); - fLeft->Initialise(); - fRight->Initialise(); + fLeft->Initialize(); + fRight->Initialize(); } /// Creates a RDataFrame using an arrow::Table as input. 
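
(Side note on [PATCH 072/726] above, not part of any commit: a minimal example of the C++20 rule the two deletions rely on. Under C++17 a constructor declared "= default" still left the struct an aggregate; C++20 tightened the definition to "no user-declared constructors", so keeping the defaulted constructor would break the brace initialisation used elsewhere for InteractionCandidate. The Candidate type here is a simplified stand-in, not the real struct.)

struct Candidate {            // aggregate in C++20 only because no constructor is declared
  int rofITS = 0;
  float time = 0.f;
  // Candidate() = default;   // with this line the type stops being an aggregate in C++20,
                              // and Candidate{3, 1.5f} below no longer compiles
};

int main()
{
  Candidate c{3, 1.5f};       // aggregate initialisation, as used by the callers
  return c.rofITS == 3 ? 0 : 1;
}
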
From cdfce85eb5c22acfdcd5342a7e21c58780377e4a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:55:58 +0100 Subject: [PATCH 074/726] Detectors: make sure fmt 10.1.1 is supported --- .../include/DetectorsCommonDataFormats/EncodedBlocks.h | 3 ++- .../RecoContainerCreateTracksVariadic.h | 4 ++-- DataFormats/Parameters/src/GRPTool.cxx | 2 +- Detectors/CTF/workflow/src/ctf-reader-workflow.cxx | 2 +- Detectors/DCS/src/AliasExpander.cxx | 4 ++-- Detectors/EMCAL/base/src/RCUTrailer.cxx | 4 ++-- Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx | 2 +- Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx | 2 +- Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx | 2 +- Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx | 2 +- Detectors/MUON/MCH/Calibration/src/PedestalData.cxx | 2 +- .../MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx | 2 +- Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx | 2 +- .../MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx | 4 ++-- .../MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx | 6 +++--- Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx | 2 +- Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx | 2 +- Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx | 2 +- Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx | 2 +- Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx | 2 +- .../MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx | 2 +- Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx | 2 +- Detectors/Raw/src/RawDumpSpec.cxx | 4 ++-- Detectors/Raw/src/rawfile-reader-workflow.cxx | 2 +- Detectors/TPC/base/include/TPCBase/CalDet.h | 2 +- Detectors/TPC/base/src/CDBInterface.cxx | 2 +- Detectors/TPC/calibration/src/CalibTreeDump.cxx | 2 +- Detectors/TPC/calibration/src/CalibdEdx.cxx | 3 ++- Detectors/TPC/calibration/src/VDriftHelper.cxx | 3 ++- Detectors/TPC/dcs/src/DCSConfigSpec.cxx | 2 +- Detectors/TPC/dcs/src/DCSProcessor.cxx | 6 +++--- Detectors/TPC/reconstruction/src/RawReaderCRU.cxx | 2 +- .../TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h | 2 +- .../include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h | 2 +- Detectors/TPC/workflow/src/ClusterQCSpec.cxx | 2 +- Detectors/TPC/workflow/src/IDCToVectorSpec.cxx | 4 ++-- Detectors/TRD/qc/src/Tracking.cxx | 2 +- 37 files changed, 50 insertions(+), 47 deletions(-) diff --git a/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h b/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h index a0853b8f14c73..6c024186b3085 100644 --- a/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h +++ b/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h @@ -402,7 +402,8 @@ class EncodedBlocks if (getANSHeader() == ANSVersionUnspecified) { rans::DenseHistogram histogram{block.getDict(), block.getDict() + block.getNDict(), metadata.min}; size_t renormingBits = rans::utils::sanitizeRenormingBitRange(metadata.probabilityBits); - LOG_IF(debug, renormingBits != metadata.probabilityBits) << fmt::format("While reading metadata from external dictionary, rANSV1 is rounding renorming precision from {} to {}"); + LOG_IF(debug, renormingBits != metadata.probabilityBits) + << fmt::format("While reading metadata from external dictionary, rANSV1 is rounding renorming precision from {} to {}", metadata.probabilityBits, renormingBits); return rans::renorm(std::move(histogram), renormingBits, rans::RenormingPolicy::ForceIncompressible); } else 
{ // dictionary is elias-delta coded inside the block diff --git a/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h b/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h index 4bbe355a781c7..4cec730c620a5 100644 --- a/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h +++ b/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h @@ -285,7 +285,7 @@ void o2::globaltracking::RecoContainer::createTracksVariadic(T creator, GTrackID if (srcSel[currentSource]) { if (matchesTPCTOF.size() && !tracksTPCTOF.size()) { throw std::runtime_error(fmt::format("TPC-TOF matched tracks ({}) require TPCTOF matches ({}) and TPCTOF tracks ({})", - matchesTPCTOF.size(), tracksTPCTOF.size())); + -1, matchesTPCTOF.size(), tracksTPCTOF.size())); } for (unsigned i = 0; i < matchesTPCTOF.size(); i++) { const auto& match = matchesTPCTOF[i]; @@ -321,7 +321,7 @@ void o2::globaltracking::RecoContainer::createTracksVariadic(T creator, GTrackID if (srcSel[currentSource]) { if (matchesMCHMID.size() && !tracksMCH.size()) { throw std::runtime_error(fmt::format("MCH-MID matched tracks ({}) require MCHMID matches ({}) and MCH tracks ({})", - matchesMCHMID.size(), tracksMCH.size())); + -1, matchesMCHMID.size(), tracksMCH.size())); } for (unsigned i = 0; i < matchesMCHMID.size(); i++) { const auto& match = matchesMCHMID[i]; diff --git a/DataFormats/Parameters/src/GRPTool.cxx b/DataFormats/Parameters/src/GRPTool.cxx index 9b5078b60733c..413b08482d84a 100644 --- a/DataFormats/Parameters/src/GRPTool.cxx +++ b/DataFormats/Parameters/src/GRPTool.cxx @@ -205,7 +205,7 @@ bool publish(std::string const& filename, std::string const& path, std::string C try { o2::utils::createDirectoriesIfAbsent(targetdir); } catch (std::exception e) { - LOGP(error, fmt::format("Could not create local snapshot cache directory {}, reason: {}", targetdir, e.what())); + LOGP(error, "Could not create local snapshot cache directory {}, reason: {}", targetdir, e.what()); return false; } diff --git a/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx b/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx index 3fcb4f69d0b8f..ef3a0f8d3c2c4 100644 --- a/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx +++ b/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx @@ -134,7 +134,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - ctfInput.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + ctfInput.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } specs.push_back(o2::ctf::getCTFReaderSpec(ctfInput)); diff --git a/Detectors/DCS/src/AliasExpander.cxx b/Detectors/DCS/src/AliasExpander.cxx index b197d2fb78d67..41aaec8351f72 100644 --- a/Detectors/DCS/src/AliasExpander.cxx +++ b/Detectors/DCS/src/AliasExpander.cxx @@ -82,7 +82,7 @@ std::vector extractRange(std::string range) std::vector result; for (auto i = a; i <= b; i++) { - auto substituted = fmt::format(intFormat, i); + auto substituted = fmt::format(fmt::runtime(intFormat), i); result.push_back(substituted); } return 
result; @@ -124,7 +124,7 @@ std::vector expandAlias(const std::string& pattern) std::vector result; for (auto r : range) { - auto substituted = fmt::format(newPattern, r); + auto substituted = fmt::format(fmt::runtime(newPattern), r); result.emplace_back(substituted); } diff --git a/Detectors/EMCAL/base/src/RCUTrailer.cxx b/Detectors/EMCAL/base/src/RCUTrailer.cxx index 6d10d0cb93c1d..24aac7b5bb25a 100644 --- a/Detectors/EMCAL/base/src/RCUTrailer.cxx +++ b/Detectors/EMCAL/base/src/RCUTrailer.cxx @@ -172,7 +172,7 @@ void RCUTrailer::setTimeSamplePhaseNS(uint64_t triggertime, uint64_t timesample) sample = 2; break; default: - throw Error(Error::ErrorType_t::SAMPLINGFREQ_INVALID, fmt::format("invalid time sample: {:f}", timesample).data()); + throw Error(Error::ErrorType_t::SAMPLINGFREQ_INVALID, fmt::format(fmt::runtime("invalid time sample: {:f}"), timesample).data()); }; mAltroConfig.mSampleTime = sample; // calculate L1 phase @@ -275,4 +275,4 @@ std::ostream& o2::emcal::operator<<(std::ostream& stream, const o2::emcal::RCUTr { trailer.printStream(stream); return stream; -} \ No newline at end of file +} diff --git a/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx b/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx index 55bfe8fc101e7..7b3a88bf27385 100644 --- a/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx +++ b/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx @@ -395,7 +395,7 @@ double HMPIDDCSProcessor::calculatePhotonEnergy(int i) if (dp.id.get_type() == DeliveryType::DPVAL_DOUBLE) { lambda = o2::dcs::getValue(dp); } else { - LOGP(warn, "DP type is {}", dp.id.get_type()); + LOGP(warn, "DP type is {}", (int)dp.id.get_type()); LOGP(warn, "Not correct datatype for HMP_TRANPLANT_MEASURE_{}_WAVELENGTH --> Default wavelength used for iteration procTrans{}", i, i); lambda = arrWaveLenDefault[i]; } diff --git a/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx b/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx index af445c14f09c1..6220341f20054 100644 --- a/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx +++ b/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx @@ -1384,7 +1384,7 @@ int MillePede2::SolveGlobalMatEq() if (fgIterSol == MinResSolve::kSolFGMRes) { res = slv->SolveFGMRES(sol, fgMinResCondType, fgMinResMaxIter, fgMinResTol, fgNKrylovV); } else { - LOGF(warning, "MillePede2 - Undefined Iteritive Solver ID=%d, only %d are defined", fgIterSol, MinResSolve::kNSolvers); + LOGF(warning, "MillePede2 - Undefined Iteritive Solver ID=%d, only %d are defined", fgIterSol, (int)MinResSolve::kNSolvers); } } diff --git a/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx b/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx index a643f6fc4af1d..41b487786e90e 100644 --- a/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx +++ b/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx @@ -48,7 +48,7 @@ std::string ChipPixelData::getErrorDetails(int pos) const std::string rbuf = ": 0x<"; int nc = getNBytesInRawBuff(); for (int i = 0; i < nc; i++) { - rbuf += fmt::format(i ? " {:02x}" : "{:02x}", (int)getRawErrBuff()[i]); + rbuf += fmt::format(fmt::runtime(i ? 
" {:02x}" : "{:02x}"), (int)getRawErrBuff()[i]); } rbuf += '>'; return rbuf; diff --git a/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx b/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx index 38c11d96ddd27..a05fd0504049d 100644 --- a/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx +++ b/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx @@ -35,5 +35,5 @@ std::string ChipInfo::asString() const void ChipInfo::print() const { - LOGP(info, asString()); + LOGP(info, fmt::runtime(asString())); } diff --git a/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx b/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx index 0f5b377a4c92d..661bab7913b8e 100644 --- a/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx +++ b/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx @@ -120,7 +120,7 @@ void PedestalData::merge(const PedestalData* prev) void PedestalData::print() const { for (const auto& p : const_cast(*this)) { - LOGP(info, p.asString()); + LOGP(info, fmt::runtime(p.asString())); } } diff --git a/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx b/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx index ea173bd1fc7cc..fe40659bc9265 100644 --- a/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx +++ b/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx @@ -88,7 +88,7 @@ class DigitFilteringTask if (!isOK(error)) { if (error.nofOutOfBounds > 0) { - LOGP(error, asString(error)); + LOGP(error, "{}", asString(error)); LOGP(error, "in a TF with {} rofs and {} digits", iRofs.size(), iDigits.size()); abort = true; } diff --git a/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx b/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx index cc9d504d92e50..651a764820a1c 100644 --- a/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx +++ b/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx @@ -102,7 +102,7 @@ int main(int argc, char** argv) int nbad{0}; for (auto de : deIds) { - std::string filepath(fmt::format(filePattern, de)); + std::string filepath(fmt::format(fmt::runtime(filePattern), de)); std::ifstream in(filepath); if (!in) { std::cout << "Cannot open " << filepath << "\n"; diff --git a/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx b/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx index 2989401b55628..408ffda2ca9dc 100644 --- a/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx +++ b/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx @@ -83,7 +83,7 @@ class PreClusterFinderTask LOG(info) << "deinitializing preclusterizer in: " << std::chrono::duration(tEnd - tStart).count() << " ms"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }; ic.services().get().set(stop); @@ -117,7 +117,7 @@ class PreClusterFinderTask if (!isOK(error)) { if (error.nofOutOfBounds > 0) { // FIXME: replace this error log with a counters' message ? 
- LOGP(error, asString(error)); + LOGP(error, "{}", asString(error)); LOGP(error, "in a TF with {} rofs and {} digits", digitROFs.size(), digits.size()); abort = true; } diff --git a/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx b/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx index 43aefe520cb6e..519e56244aba2 100644 --- a/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx +++ b/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx @@ -369,7 +369,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(SyncInTheMiddleChargeSumModeTwoChannels, V, testTy 5); std::string r2 = "S361-J6-DS2-ch-63-ts-345-q-123456-cs-789\n"; r2 += "S361-J6-DS2-ch-63-ts-346-q-789012-cs-345\n"; - r2 += fmt::format("S361-J6-DS2-chip-5-error-{}\n", ErrorUnexpectedSyncPacket); + r2 += fmt::format("S361-J6-DS2-chip-5-error-{}\n", (int)ErrorUnexpectedSyncPacket); BOOST_CHECK_EQUAL(r, r2); } @@ -387,7 +387,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadClusterSize, V, testTypes) gsl::span page = CruPageBadClusterSize; std::string r = testPayloadDecodeCruPages(page); BOOST_CHECK_EQUAL(r, - fmt::format("S81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", ErrorBadClusterSize)); + fmt::format("S81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", (int)ErrorBadClusterSize)); } BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadN10bitWords, V, testTypes) @@ -396,7 +396,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadN10bitWords, V, testTypes) std::string r = testPayloadDecodeCruPages(page); std::string expected = fmt::format("S81-J0-DS0-ch-42-ts-87-q-2-1-0-0-1-0-0-0\nS81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", - ErrorBadIncompleteWord); + (int)ErrorBadIncompleteWord); BOOST_CHECK_EQUAL(r, expected); } diff --git a/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx b/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx index 7b63ac2969e56..17f5f4463b16a 100644 --- a/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx +++ b/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx @@ -20,7 +20,7 @@ void AssertBranch(ROOT::Internal::TTreeReaderValueBase& value) { if (value.GetSetupStatus() < 0) { throw std::invalid_argument(fmt::format("Error {} setting up tree reader for branch {}", - value.GetSetupStatus(), value.GetBranchName())); + (int)value.GetSetupStatus(), value.GetBranchName())); } } diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx index e545c15025d8e..2407796773f57 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx @@ -104,7 +104,7 @@ std::vector paginate(gsl::span buffer, const std::st return pages; } -const char* sampaClusterFormat = "{}-CH{}-{}"; +constexpr const char* sampaClusterFormat = "{}-CH{}-{}"; // Create a vector of SampaCluster from a string d // where d is of the form ts-#-bc-#-cs-#-q-# or diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx index 661fd7741c954..aac72b144a80f 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx @@ -28,7 +28,7 @@ using namespace o2::mch::raw; -const char* sampaClusterFormat = "{}-CH{}-{}"; +constexpr const char* sampaClusterFormat = "{}-CH{}-{}"; const bool useDummyElecMap = true; struct DePadId { diff --git a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx index 
de920200bf4ff..c40df5d4b448e 100644 --- a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx +++ b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx @@ -105,7 +105,7 @@ class TrackFinderTask mTrackFinder.printTimers(); LOG(info) << "tracking duration = " << mElapsedTime.count() << " s"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }; ic.services().get().set(stop); diff --git a/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx b/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx index be6e415c74324..5158cc436b277 100644 --- a/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx @@ -173,7 +173,7 @@ class ClusterFinderGEMTask mGEMDump = nullptr; } mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }); auto stop = [this]() { diff --git a/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx b/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx index 2007f0634960f..8344d2837b814 100644 --- a/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx @@ -73,7 +73,7 @@ class ClusterFinderOriginalTask ic.services().get().set([this]() { LOG(info) << "cluster finder duration = " << mTimeClusterFinder.count() << " s"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, fmt::runtime(error.asString())); }); this->mClusterFinder.deinit(); }); diff --git a/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx b/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx index facb8a880ba66..c468d1660fcc7 100644 --- a/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx +++ b/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx @@ -86,7 +86,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - rinp.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + rinp.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } WorkflowSpec specs; diff --git a/Detectors/Raw/src/RawDumpSpec.cxx b/Detectors/Raw/src/RawDumpSpec.cxx index 345eba5cefdc0..1cb667febe2a3 100644 --- a/Detectors/Raw/src/RawDumpSpec.cxx +++ b/Detectors/Raw/src/RawDumpSpec.cxx @@ -273,7 +273,7 @@ void RawDump::endOfStream(EndOfStreamContext& ec) auto fnm = fmt::format("{}{}{}raw.cfg", mOutDir, mOutDir.back() == '/' ? 
"" : "/", DetID::getName(id)); auto fh = std::fopen(fnm.c_str(), "w"); if (!fh) { - LOGP(fatal, "Failed to create configuration file {}"); + LOGP(fatal, "Failed to create configuration file {}", fnm); } auto ws = std::fwrite(mConfigEntries[id].data(), 1, mConfigEntries[id].size(), fh); if (ws != mConfigEntries[id].size()) { @@ -655,7 +655,7 @@ std::string RawDump::getBaseFileNameTRD(const header::RDHAny* rdh) int link = supermodule * 4 + side * 2 + ep, cru = link / 2; if (link >= NLinks) { auto flpname = fmt::format("flp-unknown_cru{}_ep{}_feeid0x{:05x}", cru, int(RDHUtils::getEndPointID(rdh)), RDHUtils::getFEEID(rdh)); - LOGP(error, "Got wrong link {}, setting TRF file name to unrecognized flp {}", flpname); + LOGP(error, "Got wrong link {}, setting TRF file name to unrecognized flp {}", link, flpname); return flpname; } return fmt::format("alio2-cr1-flp{}_cru{}_{}", trdHWMap[cru].flpid, trdHWMap[cru].cruHWID, ep); diff --git a/Detectors/Raw/src/rawfile-reader-workflow.cxx b/Detectors/Raw/src/rawfile-reader-workflow.cxx index d8cc44fd5adbd..3a5440f80ed2d 100644 --- a/Detectors/Raw/src/rawfile-reader-workflow.cxx +++ b/Detectors/Raw/src/rawfile-reader-workflow.cxx @@ -90,7 +90,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - rinp.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + rinp.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } o2::conf::ConfigurableParam::updateFromString(configcontext.options().get("configKeyValues")); auto hbfini = configcontext.options().get("hbfutils-config"); diff --git a/Detectors/TPC/base/include/TPCBase/CalDet.h b/Detectors/TPC/base/include/TPCBase/CalDet.h index d6b6b70119e4b..835e3638642df 100644 --- a/Detectors/TPC/base/include/TPCBase/CalDet.h +++ b/Detectors/TPC/base/include/TPCBase/CalDet.h @@ -463,7 +463,7 @@ void CalDet::initData() if (!hasData) { mData.push_back(CalType(mPadSubset, i)); } - mData[i].setName(fmt::format(frmt, mName, i)); + mData[i].setName(fmt::format(fmt::runtime(frmt), mName, i)); } } diff --git a/Detectors/TPC/base/src/CDBInterface.cxx b/Detectors/TPC/base/src/CDBInterface.cxx index 1180fd1cc8b1d..b359a04b385cb 100644 --- a/Detectors/TPC/base/src/CDBInterface.cxx +++ b/Detectors/TPC/base/src/CDBInterface.cxx @@ -633,5 +633,5 @@ void CDBStorage::printObjectSummary(std::string_view name, CDBType const type, M message += fmt::format("{:>20} = {}\n", key, value); } - LOGP(info, message); + LOGP(info, "{}", message); } diff --git a/Detectors/TPC/calibration/src/CalibTreeDump.cxx b/Detectors/TPC/calibration/src/CalibTreeDump.cxx index 3423be676566d..0748f1be55da5 100644 --- a/Detectors/TPC/calibration/src/CalibTreeDump.cxx +++ b/Detectors/TPC/calibration/src/CalibTreeDump.cxx @@ -223,7 +223,7 @@ void CalibTreeDump::addCalDetObjects(TTree* tree) std::string name = calDet.getName(); if (name == "PadCalibrationObject" || name.size() == 0) { - name = fmt::format("calDet_{%02d}", iter); + name = fmt::format(fmt::runtime("calDet_{%02d}"), iter); } std::string meanName = fmt::format("{}_mean", name); diff --git a/Detectors/TPC/calibration/src/CalibdEdx.cxx b/Detectors/TPC/calibration/src/CalibdEdx.cxx index c012a4a2b3ec9..6d6aa980b9680 100644 --- 
a/Detectors/TPC/calibration/src/CalibdEdx.cxx +++ b/Detectors/TPC/calibration/src/CalibdEdx.cxx @@ -215,7 +215,8 @@ void fitHist(const Hist& hist, CalibdEdxCorrection& corr, TLinearFitter& fitter, corr.setChi2(id, charge, fitter.GetChisquare()); corr.setEntries(id, charge, entries); } - LOGP(debug, "Sector: {}, gemType: {}, charge: {}, Fit pass: {} with {} % outliers in {} entries. Fitter Points: {}, mean fit: {}", id.sector, int(id.type), charge, fitPass, (float)outliers / (float)entries * 100, entries, fitter.GetNpoints(), params[0]); + LOGP(debug, "Sector: {}, gemType: {}, charge: {}, Fit pass: {} with {} % outliers in {} entries. Fitter Points: {}, mean fit: {}", + id.sector, int(id.type), int(charge), fitPass, (float)outliers / (float)entries * 100, entries, fitter.GetNpoints(), params[0]); } } } diff --git a/Detectors/TPC/calibration/src/VDriftHelper.cxx b/Detectors/TPC/calibration/src/VDriftHelper.cxx index 034888998e567..0df353adba91f 100644 --- a/Detectors/TPC/calibration/src/VDriftHelper.cxx +++ b/Detectors/TPC/calibration/src/VDriftHelper.cxx @@ -43,7 +43,8 @@ VDriftHelper::VDriftHelper() if (o2::conf::ConfigurableParam::getProvenance("TPCDetParam.DriftTimeOffset") == o2::conf::ConfigurableParam::EParamProvenance::kRT) { // we stick to this value mVD.creationTime = std::numeric_limits::max(); mForceParamOffset = true; - LOGP(info, "TPC dridt time offset was set from command line to {} mus ({} TB}, will neglect update from CCDB", mVD.refTimeOffset, detpar.DriftTimeOffset); + LOGP(info, "TPC dridt time offset was set from command line to {} mus ({} TB), will neglect update from CCDB", + mVD.refTimeOffset, detpar.DriftTimeOffset); } mUpdated = true; diff --git a/Detectors/TPC/dcs/src/DCSConfigSpec.cxx b/Detectors/TPC/dcs/src/DCSConfigSpec.cxx index 967b825d65a1d..34270cbca528d 100644 --- a/Detectors/TPC/dcs/src/DCSConfigSpec.cxx +++ b/Detectors/TPC/dcs/src/DCSConfigSpec.cxx @@ -289,7 +289,7 @@ void DCSConfigDevice::fillCRUConfig(gsl::span configBuff, bool updat } if (!update && (nLines != CRU::MaxCRU)) { - LOGP(error, "Full FEEConfig expected, but only {} / {} lines read for CRUConfig", nLines, CRU::MaxCRU); + LOGP(error, "Full FEEConfig expected, but only {} / {} lines read for CRUConfig", nLines, (int)CRU::MaxCRU); } else { LOGP(info, "updating CRUConfig for {} crus", nLines); } diff --git a/Detectors/TPC/dcs/src/DCSProcessor.cxx b/Detectors/TPC/dcs/src/DCSProcessor.cxx index 3a1572a6e6834..a26bce43e5c2e 100644 --- a/Detectors/TPC/dcs/src/DCSProcessor.cxx +++ b/Detectors/TPC/dcs/src/DCSProcessor.cxx @@ -69,7 +69,7 @@ float DCSProcessor::getValueF(const DPCOM& dp) const } else if (dp.id.get_type() == DeliveryType::DPVAL_DOUBLE) { return static_cast(o2::dcs::getValue(dp)); } else { - LOGP(warning, "Unexpected delivery type for {}: {}", dp.id.get_alias(), dp.id.get_type()); + LOGP(warning, "Unexpected delivery type for {}: {}", dp.id.get_alias(), (int)dp.id.get_type()); } return 0.f; @@ -94,7 +94,7 @@ void DCSProcessor::fillHV(const DPCOM& dp) // TODO: Remove once type is clear static bool statTypePrinted = false; if (!statTypePrinted) { - LOGP(info, "Delivery type for STATUS ({}): {}", alias, type); + LOGP(info, "Delivery type for STATUS ({}): {}", alias, (int)type); statTypePrinted = true; } if (type == DeliveryType::DPVAL_UINT) { @@ -109,7 +109,7 @@ void DCSProcessor::fillHV(const DPCOM& dp) // TODO: Remove once type is clear static bool uiTypePrinted = false; if (!uiTypePrinted) { - LOGP(info, "Delivery type for current, voltage ({}): {}", alias, type); + LOGP(info, "Delivery 
type for current, voltage ({}): {}", alias, (int)type); uiTypePrinted = true; } const auto value = getValueF(dp); diff --git a/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx b/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx index 40063f23612ae..3a552ebcce82f 100644 --- a/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx +++ b/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx @@ -973,7 +973,7 @@ void RawReaderCRU::writeGBTDataPerLink(std::string_view outputDirectory, int max const int ep = iLink >= 12; const int link = iLink - (ep)*12; - auto outputFileName = fmt::format("{}/CRU_{:02}_EP_{}_Link_{:02}", outputDirectory.data(), mCRU, ep, link); + auto outputFileName = fmt::format("{}/CRU_{:02}_EP_{}_Link_{:02}", outputDirectory.data(), (int)mCRU, ep, link); std::ofstream outputFile(outputFileName, std::ios_base::binary | std::ios_base::app); for (auto packetNumber : linkInfo.PacketPositions) { diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h index 69f90b2e71764..a4107690c941a 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h @@ -311,7 +311,7 @@ class TPCFactorizeIDCSpec : public o2::framework::Task o2::ccdb::CcdbObjectInfo ccdbInfoIDCDelta(CDBTypeMap.at(sideA ? CDBType::CalIDCDeltaA : CDBType::CalIDCDeltaC), std::string{}, std::string{}, std::map{}, timeStampStartDelta, timeStampEndDelta); if (mDumpIDCDelta) { - mIDCGrouping.dumpToFile(fmt::format("{}DeltaAveraged_chunk{:02}_{:02}_side{}.root", getCurrentType(), iChunk, timeStampStartDelta, side).data()); + mIDCGrouping.dumpToFile(fmt::format("{}DeltaAveraged_chunk{:02}_{:02}_side{}.root", getCurrentType(), iChunk, timeStampStartDelta, (int)side).data()); } auto startCCDBIDCDelta = timer::now(); diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h index bb8334901b8d9..c24bbd1b245ce 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h @@ -125,7 +125,7 @@ class TPCFourierTransformAggregatorSpec : public o2::framework::Task if (mDumpFFT) { LOGP(info, "dumping FT to file"); - mIDCFourierTransform[side].dumpToFile(fmt::format("FourierAGG_{:02}_side{}.root", processing_helpers::getCurrentTF(pc), side).data()); + mIDCFourierTransform[side].dumpToFile(fmt::format("FourierAGG_{:02}_side{}.root", processing_helpers::getCurrentTF(pc), (int)side).data()); } if (mSendOutDebug) { diff --git a/Detectors/TPC/workflow/src/ClusterQCSpec.cxx b/Detectors/TPC/workflow/src/ClusterQCSpec.cxx index 284085005ec15..8c93d2755b090 100644 --- a/Detectors/TPC/workflow/src/ClusterQCSpec.cxx +++ b/Detectors/TPC/workflow/src/ClusterQCSpec.cxx @@ -126,7 +126,7 @@ void ClusterQCDevice::endInterval() LOGP(info, "End interval for run: {}, TFs: {} - {}, creation: {} - {}, processed TFs: {}", mRunNumber, mFirstTF, mLastTF, mFirstCreation, mLastCreation, mClusterQC.getProcessedTFs()); - const auto outputFileName = fmt::format(mOutputFileName, fmt::arg("run", mRunNumber), + const auto outputFileName = fmt::format(fmt::runtime(mOutputFileName), fmt::arg("run", mRunNumber), fmt::arg("firstTF", mFirstTF), fmt::arg("lastTF", mLastTF), fmt::arg("firstCreation", mFirstCreation), fmt::arg("lastCreation", mLastCreation)); std::unique_ptr 
f(TFile::Open(outputFileName.data(), "recreate")); diff --git a/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx b/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx index e9b26209a6fd1..a1b77b2774228 100644 --- a/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx +++ b/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx @@ -124,7 +124,7 @@ class IDCToVectorDevice : public o2::framework::Task // open files if necessary if ((mWriteDebug || mWriteDebugOnError) && !mDebugStream) { - const auto debugFileName = fmt::format(mDebugStreamFileName, fmt::arg("run", runNumber)); + const auto debugFileName = fmt::format(fmt::runtime(mDebugStreamFileName), fmt::arg("run", runNumber)); LOGP(info, "creating debug stream {}", debugFileName); mDebugStream = std::make_unique(debugFileName.data(), "recreate"); } @@ -134,7 +134,7 @@ class IDCToVectorDevice : public o2::framework::Task if (mRawDataType == 4) { rawType = "idc.raw"; } - const auto rawFileName = fmt::format(mRawOutputFileName, fmt::arg("run", runNumber), fmt::arg("raw_type", rawType)); + const auto rawFileName = fmt::format(fmt::runtime(mRawOutputFileName), fmt::arg("run", runNumber), fmt::arg("raw_type", rawType)); LOGP(info, "creating raw debug file {}", rawFileName); mRawOutputFile.open(rawFileName, std::ios::binary); } diff --git a/Detectors/TRD/qc/src/Tracking.cxx b/Detectors/TRD/qc/src/Tracking.cxx index 98131ced41961..a827c3f5a1081 100644 --- a/Detectors/TRD/qc/src/Tracking.cxx +++ b/Detectors/TRD/qc/src/Tracking.cxx @@ -57,7 +57,7 @@ void Tracking::checkTrack(const TrackTRD& trkTrd, bool isTPCTRD) qcStruct.refGlobalTrackId = id; qcStruct.trackTRD = trkTrd; - LOGF(debug, "Got track with %i tracklets and ID %i", trkTrd.getNtracklets(), id); + LOGF(debug, "Got track with %i tracklets and ID %i", trkTrd.getNtracklets(), (int)id); o2::track::TrackParCov trk = isTPCTRD ? 
mTracksTPC[id].getParamOut() : mTracksITSTPC[id].getParamOut(); qcStruct.trackSeed = trk; if (mPID) { From 5fe1995516e022abe732ad23bbbeb4658436dc4a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:55:58 +0100 Subject: [PATCH 075/726] Support ROOT v6.30.01 --- Common/MathUtils/include/MathUtils/fit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Common/MathUtils/include/MathUtils/fit.h b/Common/MathUtils/include/MathUtils/fit.h index 2b1064b13fecb..00c39486a4ba0 100644 --- a/Common/MathUtils/include/MathUtils/fit.h +++ b/Common/MathUtils/include/MathUtils/fit.h @@ -59,7 +59,7 @@ template TFitResultPtr fit(const size_t nBins, const T* arr, const T xMin, const T xMax, TF1& func, std::string_view option = "") { Foption_t fitOption; - ROOT::Fit::FitOptionsMake(ROOT::Fit::kHistogram, option.data(), fitOption); + ROOT::Fit::FitOptionsMake(ROOT::Fit::EFitObjectType::kHistogram, option.data(), fitOption); ROOT::Fit::DataRange range(xMin, xMax); ROOT::Fit::DataOptions opt; From ff8b346486f537914c67310fc31ad4e47a994d38 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:55:58 +0100 Subject: [PATCH 076/726] C++20 does not allow aggregate initialisation if default ctor present --- Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h | 1 - .../Vertexing/include/DetectorsVertexing/PVertexerHelpers.h | 1 - 2 files changed, 2 deletions(-) diff --git a/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h b/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h index f38195dbeb6ec..6f26fca6f4d03 100644 --- a/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h +++ b/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h @@ -288,7 +288,6 @@ struct InteractionCandidate : public o2::InteractionRecord { int rofITS; // corresponding ITS ROF entry (in the ROFRecord vectors) uint32_t flag; // origin, etc. o2::dataformats::RangeReference seedsRef; // references to AB seeds - InteractionCandidate() = default; InteractionCandidate(const o2::InteractionRecord& ir, float t, float dt, int rof, uint32_t f = 0) : o2::InteractionRecord(ir), tBracket(t - dt, t + dt), rofITS(rof), flag(f) {} }; diff --git a/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h b/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h index ec11c6e289c71..34c2d7c15a011 100644 --- a/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h +++ b/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h @@ -277,7 +277,6 @@ struct InteractionCandidate : public o2::InteractionRecord { float time = 0; float amplitude = 0; uint32_t flag = 0; // origin, etc. 
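
For reference, the language rule behind the removal just below: since C++20 (P1008), any user-declared constructor, even one defaulted on its first declaration, makes a class a non-aggregate, so brace initialization that compiled as aggregate initialization under C++17 stops compiling. A minimal stand-alone illustration of that rule, with invented type names rather than the O2 structs touched in this patch:

    // Illustrative only; compare -std=c++17 and -std=c++20.
    struct WithDeclaredCtor {
      int rof = -1;
      unsigned flag = 0;
      WithDeclaredCtor() = default; // user-declared: not an aggregate in C++20
    };

    struct PlainStruct {
      int rof = -1;
      unsigned flag = 0; // no declared constructors: still an aggregate
    };

    int main()
    {
      PlainStruct ok{7, 1u}; // aggregate initialization, valid in C++17 and C++20
      // WithDeclaredCtor bad{7, 1u}; // accepted by C++17, rejected by C++20:
      //                              // no longer an aggregate and no matching ctor
      return ok.rof;
    }
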
- InteractionCandidate() = default; }; } // namespace vertexing From 12a0e812fe0c9dd19e7f92cc06ed6e603c729a67 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 11:55:58 +0100 Subject: [PATCH 077/726] DPL: adapt RCombinedDS to new ROOT API --- .../RootAnalysisHelpers/include/Framework/RCombinedDS.h | 8 ++++---- Framework/RootAnalysisHelpers/src/RCombinedDS.cxx | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h index b3b7979232467..c4dd7bce6a86f 100644 --- a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h +++ b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h @@ -341,14 +341,14 @@ class RCombinedDS final : public ROOT::RDF::RDataSource dummy += colName.data(); throw std::runtime_error(dummy); } - const std::vector& GetColumnNames() const override; + [[nodiscard]] const std::vector& GetColumnNames() const override; std::vector> GetEntryRanges() override; - std::string GetTypeName(std::string_view colName) const override; - bool HasColumn(std::string_view colName) const override; + [[nodiscard]] std::string GetTypeName(std::string_view colName) const override; + [[nodiscard]] bool HasColumn(std::string_view colName) const override; bool SetEntry(unsigned int slot, ULong64_t entry) override; void InitSlot(unsigned int slot, ULong64_t firstEntry) override; void SetNSlots(unsigned int nSlots) override; - void Initialise() override; + virtual void Initialise(); }; //////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx index f8ff71f8f6583..06958991cdf14 100644 --- a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx +++ b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx @@ -224,8 +224,8 @@ void RCombinedDS::Initialise() { fEntryRanges = fIndex->BuildIndex(fLeftDF, fRightDF); - fLeft->Initialise(); - fRight->Initialise(); + fLeft->Initialize(); + fRight->Initialize(); } /// Creates a RDataFrame using an arrow::Table as input. From 6499173e550fb03dad9dc881c1fba1a91e9fe3b4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 9 Nov 2023 16:05:11 +0100 Subject: [PATCH 078/726] More race conditions for exit 128 --- Framework/Core/src/DataProcessingDevice.cxx | 9 +++++++++ Framework/Core/src/DeviceSpecHelpers.cxx | 3 +++ 2 files changed, 12 insertions(+) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 5542be850fb70..851cb5e193d6f 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -982,6 +982,10 @@ void DataProcessingDevice::InitTask() uv_signal_init(state.loop, deviceContext.sigusr1Handle); uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1); } + // If there is any signal, we want to make sure they are active + for (auto& handle : state.activeSignals) { + handle->data = &state; + } // When we start, we must make sure that we do listen to the signal deviceContext.sigusr1Handle->data = &mServiceRegistry; @@ -1691,6 +1695,11 @@ void DataProcessingDevice::ResetTask() if (deviceContext.sigusr1Handle) { deviceContext.sigusr1Handle->data = nullptr; } + // Makes sure we do not have a working context on + // shutdown. 
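
The loop added just below nulls the data pointer of every registered signal handle, and the matching null check added to signal_callback turns a signal delivered during teardown into a no-op instead of a use-after-free. Stripped of the DPL plumbing, the guard pattern looks roughly like this; plain libuv calls, with the LoopState type and its field invented for the sketch:

    #include <uv.h>
    #include <csignal>

    struct LoopState {
      int loopReason = 0;
    };

    static void on_signal(uv_signal_t* handle, int /*signum*/)
    {
      auto* state = static_cast<LoopState*>(handle->data);
      if (!state) { // the handle may outlive its state during shutdown
        return;
      }
      state->loopReason |= 1;
    }

    int main()
    {
      uv_loop_t* loop = uv_default_loop();
      LoopState state;
      uv_signal_t sig;
      uv_signal_init(loop, &sig);
      sig.data = &state;
      uv_signal_start(&sig, on_signal, SIGUSR1);

      // ... event loop runs elsewhere ...

      // Teardown: detach the state before it is destroyed, then close the handle.
      sig.data = nullptr;
      uv_signal_stop(&sig);
      uv_close(reinterpret_cast<uv_handle_t*>(&sig), nullptr);
      uv_run(loop, UV_RUN_NOWAIT); // let libuv finish processing the close
      return 0;
    }
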
+ for (auto& handle : ref.get().activeSignals) { + handle->data = nullptr; + } } struct WaitBackpressurePolicy { diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 969a15fa40674..c54dd6cec7a99 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -90,6 +90,9 @@ void signal_callback(uv_signal_t* handle, int) { // We simply wake up the event loop. Nothing to be done here. auto* state = (DeviceState*)handle->data; + if (!state) { + return; + } state->loopReason |= DeviceState::SIGNAL_ARRIVED; state->loopReason |= DeviceState::DATA_INCOMING; } From 22192f9c1ca8f701bec165770a594b8d01a298eb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 08:37:45 +0100 Subject: [PATCH 079/726] GPU: support fmt 10.1.1 --- GPU/GPUTracking/Base/GPUReconstruction.cxx | 2 +- GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx | 6 +++--- GPU/TPCFastTransformation/MultivariatePolynomial.h | 8 ++++---- .../MultivariatePolynomialHelper.cxx | 2 +- GPU/TPCFastTransformation/NDPiecewisePolynomials.h | 8 ++++---- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/GPU/GPUTracking/Base/GPUReconstruction.cxx b/GPU/GPUTracking/Base/GPUReconstruction.cxx index 676ab0f6cf004..bf5ba46b6ab34 100644 --- a/GPU/GPUTracking/Base/GPUReconstruction.cxx +++ b/GPU/GPUTracking/Base/GPUReconstruction.cxx @@ -1215,7 +1215,7 @@ bool GPUReconstruction::CheckInstanceAvailable(DeviceType type) } else if (type == DeviceType::OCL2) { return sLibOCL2->LoadLibrary() == 0; } else { - GPUError("Error: Invalid device type %u", type); + GPUError("Error: Invalid device type %u", (unsigned)type); return false; } } diff --git a/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx b/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx index 435d2e3f56e35..aa57c8f85083e 100644 --- a/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx +++ b/GPU/GPUTracking/DataTypes/CalibdEdxTrackTopologyPol.cxx @@ -180,13 +180,13 @@ void CalibdEdxTrackTopologyPol::writeToFile(TFile& outf, const char* name) const void CalibdEdxTrackTopologyPol::setFromContainer(const CalibdEdxTrackTopologyPolContainer& container) { if (2 * FFits != container.mCalibPols.size()) { - LOGP(warning, fmt::format("wrong number of polynomials stored! this {} container {}", 2 * FFits, container.mCalibPols.size())); + LOGP(warning, "wrong number of polynomials stored! this {} container {}", 2 * FFits, container.mCalibPols.size()); return; } const auto nFacCont = container.mScalingFactorsqMax.size() + container.mScalingFactorsqTot.size(); if (2 * FFits != nFacCont) { - LOGP(warning, fmt::format("wrong number of scaling factors stored! this {} container {}", 2 * FFits, nFacCont)); + LOGP(warning, "wrong number of scaling factors stored! 
this {} container {}", 2 * FFits, nFacCont); return; } @@ -214,7 +214,7 @@ void CalibdEdxTrackTopologyPol::loadFromFile(const char* fileName, const char* n setFromContainer(*polTmp); delete polTmp; } else { - LOGP(info, fmt::format("couldnt load object {} from input file", name)); + LOGP(info, "couldnt load object {} from input file", name); } } diff --git a/GPU/TPCFastTransformation/MultivariatePolynomial.h b/GPU/TPCFastTransformation/MultivariatePolynomial.h index d2d80886a925c..bc980065f6263 100644 --- a/GPU/TPCFastTransformation/MultivariatePolynomial.h +++ b/GPU/TPCFastTransformation/MultivariatePolynomial.h @@ -178,7 +178,7 @@ void MultivariatePolynomial::loadFromFile(TFile& i delete polTmp; } else { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("couldnt load object {} from input file", name)); + LOGP(info, "couldnt load object {} from input file", name); #endif } } @@ -189,19 +189,19 @@ void MultivariatePolynomial::setFromContainer(cons if constexpr (Dim > 0 && Degree > 0) { if (this->getDim() != container.mDim) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of dimensions! this {} container {}", this->getDim(), container.mDim)); + LOGP(info, "wrong number of dimensions! this {} container {}", this->getDim(), container.mDim); #endif return; } if (this->getDegree() != container.mDegree) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of degrees! this {} container {}", this->getDegree(), container.mDegree)); + LOGP(info, "wrong number of degrees! this {} container {}", this->getDegree(), container.mDegree); #endif return; } if (this->isInteractionOnly() != container.mInteractionOnly) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("InteractionOnly is set for this object to {}, but stored as {} in the container", this->isInteractionOnly(), container.mInteractionOnly)); + LOGP(info, "InteractionOnly is set for this object to {}, but stored as {} in the container", this->isInteractionOnly(), container.mInteractionOnly); #endif return; } diff --git a/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx b/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx index 4d981293ca91f..1e40fcb51359e 100644 --- a/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx +++ b/GPU/TPCFastTransformation/MultivariatePolynomialHelper.cxx @@ -26,7 +26,7 @@ using namespace GPUCA_NAMESPACE::gpu; void MultivariatePolynomialHelper<0, 0, false>::print() const { #ifndef GPUCA_NO_FMT - LOGP(info, getFormula().c_str()); + LOGP(info, fmt::runtime(getFormula().c_str())); #endif } diff --git a/GPU/TPCFastTransformation/NDPiecewisePolynomials.h b/GPU/TPCFastTransformation/NDPiecewisePolynomials.h index caff727c36975..13ce49a142470 100644 --- a/GPU/TPCFastTransformation/NDPiecewisePolynomials.h +++ b/GPU/TPCFastTransformation/NDPiecewisePolynomials.h @@ -317,7 +317,7 @@ void NDPiecewisePolynomials::loadFromFile(TFile& i delete gridTmp; } else { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("couldnt load object {} from input file", name)); + LOGP(info, "couldnt load object {} from input file", name); #endif } } @@ -326,19 +326,19 @@ void NDPiecewisePolynomials::setFromContainer(cons { if (Dim != container.mDim) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of dimensions! this {} container {}", Dim, container.mDim)); + LOGP(info, "wrong number of dimensions! this {} container {}", Dim, container.mDim); #endif return; } if (Degree != container.mDegree) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("wrong number of degrees! 
this {} container {}", Degree, container.mDegree)); + LOGP(info, "wrong number of degrees! this {} container {}", Degree, container.mDegree); #endif return; } if (InteractionOnly != container.mInteractionOnly) { #ifndef GPUCA_ALIROOT_LIB - LOGP(info, fmt::format("InteractionOnly is set for this object to {}, but stored as {} in the container", InteractionOnly, container.mInteractionOnly)); + LOGP(info, "InteractionOnly is set for this object to {}, but stored as {} in the container", InteractionOnly, container.mInteractionOnly); #endif return; } From 00127b7aa4d7dcc0638c47ef74d8281aafc7946c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 08:37:45 +0100 Subject: [PATCH 080/726] Framework: make sure we support fmt 10.1.1 --- CCDB/src/CcdbApi.cxx | 2 +- Common/Field/src/MagneticField.cxx | 2 +- Detectors/AOD/src/AODProducerWorkflowSpec.cxx | 2 +- EventVisualisation/Workflow/src/FileProducer.cxx | 2 +- Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx | 4 ++-- Framework/Core/src/DPLWebSocket.cxx | 2 +- Framework/Core/src/DataOutputDirector.cxx | 2 +- Framework/Core/src/DataProcessingContext.cxx | 2 +- Framework/Core/src/HTTPParser.cxx | 4 ++-- Framework/Core/src/runDataProcessing.cxx | 2 +- Framework/Utils/include/DPLUtils/RawParser.h | 2 -- Framework/Utils/src/RawParser.cxx | 8 ++++---- 12 files changed, 16 insertions(+), 18 deletions(-) diff --git a/CCDB/src/CcdbApi.cxx b/CCDB/src/CcdbApi.cxx index aabc96cb779f8..904d979bfbd79 100644 --- a/CCDB/src/CcdbApi.cxx +++ b/CCDB/src/CcdbApi.cxx @@ -710,7 +710,7 @@ bool CcdbApi::retrieveBlob(std::string const& path, std::string const& targetdir try { o2::utils::createDirectoriesIfAbsent(fulltargetdir); } catch (std::exception e) { - LOGP(error, fmt::format("Could not create local snapshot cache directory {}, reason: {}", fulltargetdir, e.what())); + LOGP(error, "Could not create local snapshot cache directory {}, reason: {}", fulltargetdir, e.what()); return false; } diff --git a/Common/Field/src/MagneticField.cxx b/Common/Field/src/MagneticField.cxx index 2e6ca6d647b11..5df6bbc0b0d34 100644 --- a/Common/Field/src/MagneticField.cxx +++ b/Common/Field/src/MagneticField.cxx @@ -539,7 +539,7 @@ void MagneticField::rescaleField(float l3Cur, float diCur, bool uniform, int con float sclL3 = l3Cur, sclDip = diCur; MagFieldParam::BMap_t map = getFieldMapScale(sclL3, sclDip, uniform); if (map != mMapType) { - LOGP(fatal, "Provided L3current={} DipCurrent={} uniform={} leads to map type {}, incompatible with loaded {}", l3Cur, diCur, uniform, map, mMapType); + LOGP(fatal, "Provided L3current={} DipCurrent={} uniform={} leads to map type {}, incompatible with loaded {}", l3Cur, diCur, uniform, (int)map, (int)mMapType); } setFactorSolenoid(sclL3); setFactorDipole(sclDip); diff --git a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx index fa028535f379c..9b6151cb557b8 100644 --- a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx +++ b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx @@ -1221,7 +1221,7 @@ void AODProducerWorkflowDPL::fillSecondaryVertices(const o2::globaltracking::Rec if (item != mGIDToTableID.end()) { tableIdx[i] = item->second; } else { - LOG(warn) << fmt::format("Could not find a track index for prong ID {}", trIDs[i]); + LOG(warn) << fmt::format("Could not find a track index for prong ID {}", (int)trIDs[i]); missing = true; } } diff --git a/EventVisualisation/Workflow/src/FileProducer.cxx b/EventVisualisation/Workflow/src/FileProducer.cxx 
index 541afdefc4553..818ab546f3047 100644 --- a/EventVisualisation/Workflow/src/FileProducer.cxx +++ b/EventVisualisation/Workflow/src/FileProducer.cxx @@ -47,7 +47,7 @@ std::string FileProducer::newFileName() const gethostname(hostname, _POSIX_HOST_NAME_MAX); auto pid = getpid(); - auto result = fmt::format(this->mName, + auto result = fmt::format(fmt::runtime(this->mName), fmt::arg("hostname", hostname), fmt::arg("pid", pid), fmt::arg("timestamp", millisec_since_epoch), diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx index 1e20f46c2e464..092857d585a02 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.cxx @@ -246,11 +246,11 @@ AlgorithmSpec AODJAlienReaderHelpers::rootFileReaderCallback() // get first folder of next file ntf = 0; if (!didir->readTree(outputs, dh, fcnt, ntf, totalSizeCompressed, totalSizeUncompressed)) { - LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin, fcnt, ntf); + LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin.as(), fcnt, ntf); throw std::runtime_error("Processing is stopped!"); } } else { - LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin, fcnt, ntf); + LOGP(fatal, "Can not retrieve tree for table {}: fileCounter {}, timeFrame {}", concrete.origin.as(), fcnt, ntf); throw std::runtime_error("Processing is stopped!"); } } diff --git a/Framework/Core/src/DPLWebSocket.cxx b/Framework/Core/src/DPLWebSocket.cxx index 97755727cbbd9..146a13206b55a 100644 --- a/Framework/Core/src/DPLWebSocket.cxx +++ b/Framework/Core/src/DPLWebSocket.cxx @@ -375,7 +375,7 @@ void WSDPLHandler::write(std::vector& outputs) /// Helper to return an error void WSDPLHandler::error(int code, char const* message) { - static char const* errorFMT = "HTTP/1.1 {} {}\r\ncontent-type: text/plain\r\n\r\n{}: {}\r\n"; + static constexpr auto errorFMT = "HTTP/1.1 {} {}\r\ncontent-type: text/plain\r\n\r\n{}: {}\r\n"; std::string error = fmt::format(errorFMT, code, message, code, message); char* reply = strdup(error.data()); uv_buf_t bfr = uv_buf_init(reply, error.size()); diff --git a/Framework/Core/src/DataOutputDirector.cxx b/Framework/Core/src/DataOutputDirector.cxx index dcf7fe40b6214..078af891c9058 100644 --- a/Framework/Core/src/DataOutputDirector.cxx +++ b/Framework/Core/src/DataOutputDirector.cxx @@ -584,7 +584,7 @@ void DataOutputDirector::printOut() LOGP(info, " File name bases :"); for (auto const& fb : mfilenameBases) { - LOGP(info, fb); + LOGP(info, "{}", fb); } } diff --git a/Framework/Core/src/DataProcessingContext.cxx b/Framework/Core/src/DataProcessingContext.cxx index 365975c706722..9c023e5021a9b 100644 --- a/Framework/Core/src/DataProcessingContext.cxx +++ b/Framework/Core/src/DataProcessingContext.cxx @@ -52,7 +52,7 @@ void DataProcessorContext::preDanglingCallbacks(DanglingContext& danglingContext void DataProcessorContext::postDanglingCallbacks(DanglingContext& danglingContext) { for (auto& handle : postDanglingHandles) { - LOGP(debug, "Invoking postDanglingCallback for service {} {}", handle.spec.name); + LOGP(debug, "Invoking postDanglingCallback for service {}", handle.spec.name); handle.callback(danglingContext, handle.service); } } diff --git a/Framework/Core/src/HTTPParser.cxx b/Framework/Core/src/HTTPParser.cxx index 137706c5f07c9..04ca6e8fdce55 100644 --- 
a/Framework/Core/src/HTTPParser.cxx +++ b/Framework/Core/src/HTTPParser.cxx @@ -189,7 +189,7 @@ void decode_websocket(char* start, size_t size, WebSocketHandler& handler) std::string encode_websocket_handshake_request(const char* endpoint, const char* protocol, int version, char const* nonce, std::vector> headers) { - char const* res = + constexpr auto res = "GET {} HTTP/1.1\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" @@ -216,7 +216,7 @@ std::string HTTPParserHelpers::calculateAccept(const char* nonce) std::string encode_websocket_handshake_reply(char const* nonce) { - char const* res = + constexpr auto res = "HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 61fdef070d874..86cc15299e8f3 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -696,7 +696,7 @@ void spawnDevice(uv_loop_t* loop, close(childFds[ref.index].childstdout[1]); if (varmap.count("post-fork-command")) { auto templateCmd = varmap["post-fork-command"]; - auto cmd = fmt::format(templateCmd.as(), + auto cmd = fmt::format(fmt::runtime(templateCmd.as()), fmt::arg("pid", id), fmt::arg("id", spec.id), fmt::arg("cpu", parentCPU), diff --git a/Framework/Utils/include/DPLUtils/RawParser.h b/Framework/Utils/include/DPLUtils/RawParser.h index 2c35c008983f0..c1ba1ef4802b2 100644 --- a/Framework/Utils/include/DPLUtils/RawParser.h +++ b/Framework/Utils/include/DPLUtils/RawParser.h @@ -58,14 +58,12 @@ struct RDHFormatter { template <> struct RDHFormatter { using type = header::RAWDataHeaderV7; - static const char* sFormatString; static void apply(std::ostream&, type const&, FormatSpec, const char* = ""); }; template <> struct RDHFormatter { using type = header::RAWDataHeaderV6; - static const char* sFormatString; static void apply(std::ostream&, type const&, FormatSpec, const char* = ""); }; diff --git a/Framework/Utils/src/RawParser.cxx b/Framework/Utils/src/RawParser.cxx index 2ec48c7f93d36..c7cbbf3d28880 100644 --- a/Framework/Utils/src/RawParser.cxx +++ b/Framework/Utils/src/RawParser.cxx @@ -51,9 +51,9 @@ void RawParserHelper::warnDeadBeef(const o2::header::DataHeader* dh) } } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1} {:>2}"; void RDHFormatter::apply(std::ostream& os, V7 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1} {:>2}"; if (choice == FormatSpec::Info) { os << "RDH v7"; } else if (choice == FormatSpec::TableHeader) { @@ -73,9 +73,9 @@ void RDHFormatter::apply(std::ostream& os, V7 const& header, FormatSpec choi } os << delimiter; } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; void RDHFormatter::apply(std::ostream& os, V6 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; if (choice == FormatSpec::Info) { os << "RDH v6"; } else if (choice == FormatSpec::TableHeader) { @@ -95,9 +95,9 @@ void RDHFormatter::apply(std::ostream& os, V6 const& header, FormatSpec choi os << delimiter; } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; void RDHFormatter::apply(std::ostream& os, V5 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const 
char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>1}"; if (choice == FormatSpec::Info) { os << "RDH v5"; } else if (choice == FormatSpec::TableHeader) { @@ -116,9 +116,9 @@ void RDHFormatter::apply(std::ostream& os, V5 const& header, FormatSpec choi os << delimiter; } -const char* RDHFormatter::sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>10} {:>5} {:>1}"; void RDHFormatter::apply(std::ostream& os, V4 const& header, FormatSpec choice, const char* delimiter) { + static constexpr const char* sFormatString = "{:>5} {:>4} {:>4} {:>4} {:>3} {:>3} {:>3} {:>10} {:>5} {:>1}"; if (choice == FormatSpec::Info) { os << "RDH v4"; } else if (choice == FormatSpec::TableHeader) { From 22c06b25ef80e4738f36dcd87084e6cdd4f2eb25 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 08:37:46 +0100 Subject: [PATCH 081/726] Detectors: make sure fmt 10.1.1 is supported --- .../include/DetectorsCommonDataFormats/EncodedBlocks.h | 3 ++- .../RecoContainerCreateTracksVariadic.h | 4 ++-- DataFormats/Parameters/src/GRPTool.cxx | 2 +- Detectors/CTF/workflow/src/ctf-reader-workflow.cxx | 2 +- Detectors/DCS/src/AliasExpander.cxx | 4 ++-- Detectors/EMCAL/base/src/RCUTrailer.cxx | 4 ++-- Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx | 2 +- Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx | 2 +- Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx | 2 +- Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx | 2 +- Detectors/MUON/MCH/Calibration/src/PedestalData.cxx | 2 +- .../MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx | 2 +- Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx | 2 +- .../MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx | 4 ++-- .../MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx | 6 +++--- Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx | 2 +- Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx | 2 +- Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx | 2 +- Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx | 2 +- Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx | 2 +- .../MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx | 2 +- Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx | 2 +- Detectors/Raw/src/RawDumpSpec.cxx | 4 ++-- Detectors/Raw/src/rawfile-reader-workflow.cxx | 2 +- Detectors/TPC/base/include/TPCBase/CalDet.h | 2 +- Detectors/TPC/base/src/CDBInterface.cxx | 2 +- Detectors/TPC/base/src/FEEConfig.cxx | 2 +- Detectors/TPC/calibration/src/CalibTreeDump.cxx | 2 +- Detectors/TPC/calibration/src/CalibdEdx.cxx | 3 ++- Detectors/TPC/calibration/src/VDriftHelper.cxx | 3 ++- Detectors/TPC/dcs/src/DCSConfigSpec.cxx | 2 +- Detectors/TPC/dcs/src/DCSProcessor.cxx | 6 +++--- Detectors/TPC/reconstruction/src/RawReaderCRU.cxx | 2 +- .../TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h | 2 +- .../include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h | 2 +- Detectors/TPC/workflow/src/ClusterQCSpec.cxx | 2 +- Detectors/TPC/workflow/src/IDCToVectorSpec.cxx | 4 ++-- Detectors/TRD/qc/src/Tracking.cxx | 2 +- 38 files changed, 51 insertions(+), 48 deletions(-) diff --git a/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h b/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h index a0853b8f14c73..6c024186b3085 100644 --- a/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h +++ b/DataFormats/Detectors/Common/include/DetectorsCommonDataFormats/EncodedBlocks.h @@ -402,7 +402,8 @@ class 
EncodedBlocks if (getANSHeader() == ANSVersionUnspecified) { rans::DenseHistogram histogram{block.getDict(), block.getDict() + block.getNDict(), metadata.min}; size_t renormingBits = rans::utils::sanitizeRenormingBitRange(metadata.probabilityBits); - LOG_IF(debug, renormingBits != metadata.probabilityBits) << fmt::format("While reading metadata from external dictionary, rANSV1 is rounding renorming precision from {} to {}"); + LOG_IF(debug, renormingBits != metadata.probabilityBits) + << fmt::format("While reading metadata from external dictionary, rANSV1 is rounding renorming precision from {} to {}", metadata.probabilityBits, renormingBits); return rans::renorm(std::move(histogram), renormingBits, rans::RenormingPolicy::ForceIncompressible); } else { // dictionary is elias-delta coded inside the block diff --git a/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h b/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h index 4bbe355a781c7..4cec730c620a5 100644 --- a/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h +++ b/DataFormats/Detectors/GlobalTracking/include/DataFormatsGlobalTracking/RecoContainerCreateTracksVariadic.h @@ -285,7 +285,7 @@ void o2::globaltracking::RecoContainer::createTracksVariadic(T creator, GTrackID if (srcSel[currentSource]) { if (matchesTPCTOF.size() && !tracksTPCTOF.size()) { throw std::runtime_error(fmt::format("TPC-TOF matched tracks ({}) require TPCTOF matches ({}) and TPCTOF tracks ({})", - matchesTPCTOF.size(), tracksTPCTOF.size())); + -1, matchesTPCTOF.size(), tracksTPCTOF.size())); } for (unsigned i = 0; i < matchesTPCTOF.size(); i++) { const auto& match = matchesTPCTOF[i]; @@ -321,7 +321,7 @@ void o2::globaltracking::RecoContainer::createTracksVariadic(T creator, GTrackID if (srcSel[currentSource]) { if (matchesMCHMID.size() && !tracksMCH.size()) { throw std::runtime_error(fmt::format("MCH-MID matched tracks ({}) require MCHMID matches ({}) and MCH tracks ({})", - matchesMCHMID.size(), tracksMCH.size())); + -1, matchesMCHMID.size(), tracksMCH.size())); } for (unsigned i = 0; i < matchesMCHMID.size(); i++) { const auto& match = matchesMCHMID[i]; diff --git a/DataFormats/Parameters/src/GRPTool.cxx b/DataFormats/Parameters/src/GRPTool.cxx index 9b5078b60733c..413b08482d84a 100644 --- a/DataFormats/Parameters/src/GRPTool.cxx +++ b/DataFormats/Parameters/src/GRPTool.cxx @@ -205,7 +205,7 @@ bool publish(std::string const& filename, std::string const& path, std::string C try { o2::utils::createDirectoriesIfAbsent(targetdir); } catch (std::exception e) { - LOGP(error, fmt::format("Could not create local snapshot cache directory {}, reason: {}", targetdir, e.what())); + LOGP(error, "Could not create local snapshot cache directory {}, reason: {}", targetdir, e.what()); return false; } diff --git a/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx b/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx index 3fcb4f69d0b8f..ef3a0f8d3c2c4 100644 --- a/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx +++ b/Detectors/CTF/workflow/src/ctf-reader-workflow.cxx @@ -134,7 +134,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - ctfInput.metricChannel = 
fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + ctfInput.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } specs.push_back(o2::ctf::getCTFReaderSpec(ctfInput)); diff --git a/Detectors/DCS/src/AliasExpander.cxx b/Detectors/DCS/src/AliasExpander.cxx index b197d2fb78d67..41aaec8351f72 100644 --- a/Detectors/DCS/src/AliasExpander.cxx +++ b/Detectors/DCS/src/AliasExpander.cxx @@ -82,7 +82,7 @@ std::vector extractRange(std::string range) std::vector result; for (auto i = a; i <= b; i++) { - auto substituted = fmt::format(intFormat, i); + auto substituted = fmt::format(fmt::runtime(intFormat), i); result.push_back(substituted); } return result; @@ -124,7 +124,7 @@ std::vector expandAlias(const std::string& pattern) std::vector result; for (auto r : range) { - auto substituted = fmt::format(newPattern, r); + auto substituted = fmt::format(fmt::runtime(newPattern), r); result.emplace_back(substituted); } diff --git a/Detectors/EMCAL/base/src/RCUTrailer.cxx b/Detectors/EMCAL/base/src/RCUTrailer.cxx index 6d10d0cb93c1d..24aac7b5bb25a 100644 --- a/Detectors/EMCAL/base/src/RCUTrailer.cxx +++ b/Detectors/EMCAL/base/src/RCUTrailer.cxx @@ -172,7 +172,7 @@ void RCUTrailer::setTimeSamplePhaseNS(uint64_t triggertime, uint64_t timesample) sample = 2; break; default: - throw Error(Error::ErrorType_t::SAMPLINGFREQ_INVALID, fmt::format("invalid time sample: {:f}", timesample).data()); + throw Error(Error::ErrorType_t::SAMPLINGFREQ_INVALID, fmt::format(fmt::runtime("invalid time sample: {:f}"), timesample).data()); }; mAltroConfig.mSampleTime = sample; // calculate L1 phase @@ -275,4 +275,4 @@ std::ostream& o2::emcal::operator<<(std::ostream& stream, const o2::emcal::RCUTr { trailer.printStream(stream); return stream; -} \ No newline at end of file +} diff --git a/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx b/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx index 55bfe8fc101e7..7b3a88bf27385 100644 --- a/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx +++ b/Detectors/HMPID/calibration/src/HMPIDDCSProcessor.cxx @@ -395,7 +395,7 @@ double HMPIDDCSProcessor::calculatePhotonEnergy(int i) if (dp.id.get_type() == DeliveryType::DPVAL_DOUBLE) { lambda = o2::dcs::getValue(dp); } else { - LOGP(warn, "DP type is {}", dp.id.get_type()); + LOGP(warn, "DP type is {}", (int)dp.id.get_type()); LOGP(warn, "Not correct datatype for HMP_TRANPLANT_MEASURE_{}_WAVELENGTH --> Default wavelength used for iteration procTrans{}", i, i); lambda = arrWaveLenDefault[i]; } diff --git a/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx b/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx index af445c14f09c1..6220341f20054 100644 --- a/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx +++ b/Detectors/ITSMFT/MFT/alignment/src/MillePede2.cxx @@ -1384,7 +1384,7 @@ int MillePede2::SolveGlobalMatEq() if (fgIterSol == MinResSolve::kSolFGMRes) { res = slv->SolveFGMRES(sol, fgMinResCondType, fgMinResMaxIter, fgMinResTol, fgNKrylovV); } else { - LOGF(warning, "MillePede2 - Undefined Iteritive Solver ID=%d, only %d are defined", fgIterSol, MinResSolve::kNSolvers); + LOGF(warning, "MillePede2 - Undefined Iteritive Solver ID=%d, only %d are defined", fgIterSol, (int)MinResSolve::kNSolvers); } } diff --git a/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx b/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx index a643f6fc4af1d..41b487786e90e 100644 --- 
a/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx +++ b/Detectors/ITSMFT/common/reconstruction/src/PixelData.cxx @@ -48,7 +48,7 @@ std::string ChipPixelData::getErrorDetails(int pos) const std::string rbuf = ": 0x<"; int nc = getNBytesInRawBuff(); for (int i = 0; i < nc; i++) { - rbuf += fmt::format(i ? " {:02x}" : "{:02x}", (int)getRawErrBuff()[i]); + rbuf += fmt::format(fmt::runtime(i ? " {:02x}" : "{:02x}"), (int)getRawErrBuff()[i]); } rbuf += '>'; return rbuf; diff --git a/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx b/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx index 38c11d96ddd27..a05fd0504049d 100644 --- a/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx +++ b/Detectors/ITSMFT/common/reconstruction/src/RUInfo.cxx @@ -35,5 +35,5 @@ std::string ChipInfo::asString() const void ChipInfo::print() const { - LOGP(info, asString()); + LOGP(info, fmt::runtime(asString())); } diff --git a/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx b/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx index 0f5b377a4c92d..661bab7913b8e 100644 --- a/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx +++ b/Detectors/MUON/MCH/Calibration/src/PedestalData.cxx @@ -120,7 +120,7 @@ void PedestalData::merge(const PedestalData* prev) void PedestalData::print() const { for (const auto& p : const_cast(*this)) { - LOGP(info, p.asString()); + LOGP(info, fmt::runtime(p.asString())); } } diff --git a/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx b/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx index ea173bd1fc7cc..fe40659bc9265 100644 --- a/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx +++ b/Detectors/MUON/MCH/DigitFiltering/src/DigitFilteringSpec.cxx @@ -88,7 +88,7 @@ class DigitFilteringTask if (!isOK(error)) { if (error.nofOutOfBounds > 0) { - LOGP(error, asString(error)); + LOGP(error, "{}", asString(error)); LOGP(error, "in a TF with {} rofs and {} digits", iRofs.size(), iDigits.size()); abort = true; } diff --git a/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx b/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx index cc9d504d92e50..651a764820a1c 100644 --- a/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx +++ b/Detectors/MUON/MCH/Mapping/test/src/testPadIndices.cxx @@ -102,7 +102,7 @@ int main(int argc, char** argv) int nbad{0}; for (auto de : deIds) { - std::string filepath(fmt::format(filePattern, de)); + std::string filepath(fmt::format(fmt::runtime(filePattern), de)); std::ifstream in(filepath); if (!in) { std::cout << "Cannot open " << filepath << "\n"; diff --git a/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx b/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx index 2989401b55628..408ffda2ca9dc 100644 --- a/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx +++ b/Detectors/MUON/MCH/PreClustering/src/PreClusterFinderSpec.cxx @@ -83,7 +83,7 @@ class PreClusterFinderTask LOG(info) << "deinitializing preclusterizer in: " << std::chrono::duration(tEnd - tStart).count() << " ms"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }; ic.services().get().set(stop); @@ -117,7 +117,7 @@ class PreClusterFinderTask if (!isOK(error)) { if (error.nofOutOfBounds > 0) { // FIXME: replace this error log with a counters' message ? 
- LOGP(error, asString(error)); + LOGP(error, "{}", asString(error)); LOGP(error, "in a TF with {} rofs and {} digits", digitROFs.size(), digits.size()); abort = true; } diff --git a/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx b/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx index 43aefe520cb6e..519e56244aba2 100644 --- a/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx +++ b/Detectors/MUON/MCH/Raw/Decoder/src/testUserLogicEndpointDecoder.cxx @@ -369,7 +369,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(SyncInTheMiddleChargeSumModeTwoChannels, V, testTy 5); std::string r2 = "S361-J6-DS2-ch-63-ts-345-q-123456-cs-789\n"; r2 += "S361-J6-DS2-ch-63-ts-346-q-789012-cs-345\n"; - r2 += fmt::format("S361-J6-DS2-chip-5-error-{}\n", ErrorUnexpectedSyncPacket); + r2 += fmt::format("S361-J6-DS2-chip-5-error-{}\n", (int)ErrorUnexpectedSyncPacket); BOOST_CHECK_EQUAL(r, r2); } @@ -387,7 +387,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadClusterSize, V, testTypes) gsl::span page = CruPageBadClusterSize; std::string r = testPayloadDecodeCruPages(page); BOOST_CHECK_EQUAL(r, - fmt::format("S81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", ErrorBadClusterSize)); + fmt::format("S81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", (int)ErrorBadClusterSize)); } BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadN10bitWords, V, testTypes) @@ -396,7 +396,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(TestCruPageBadN10bitWords, V, testTypes) std::string r = testPayloadDecodeCruPages(page); std::string expected = fmt::format("S81-J0-DS0-ch-42-ts-87-q-2-1-0-0-1-0-0-0\nS81-J0-DS0-chip-1-error-{}\nS81-J0-DS0-ch-42-ts-0-q-1\n", - ErrorBadIncompleteWord); + (int)ErrorBadIncompleteWord); BOOST_CHECK_EQUAL(r, expected); } diff --git a/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx b/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx index 7b63ac2969e56..17f5f4463b16a 100644 --- a/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx +++ b/Detectors/MUON/MCH/Raw/Encoder/Digit/DigitTreeReader.cxx @@ -20,7 +20,7 @@ void AssertBranch(ROOT::Internal::TTreeReaderValueBase& value) { if (value.GetSetupStatus() < 0) { throw std::invalid_argument(fmt::format("Error {} setting up tree reader for branch {}", - value.GetSetupStatus(), value.GetBranchName())); + (int)value.GetSetupStatus(), value.GetBranchName())); } } diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx index e545c15025d8e..2407796773f57 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx @@ -104,7 +104,7 @@ std::vector paginate(gsl::span buffer, const std::st return pages; } -const char* sampaClusterFormat = "{}-CH{}-{}"; +constexpr const char* sampaClusterFormat = "{}-CH{}-{}"; // Create a vector of SampaCluster from a string d // where d is of the form ts-#-bc-#-cs-#-q-# or diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx index 661fd7741c954..aac72b144a80f 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDecDigit.cxx @@ -28,7 +28,7 @@ using namespace o2::mch::raw; -const char* sampaClusterFormat = "{}-CH{}-{}"; +constexpr const char* sampaClusterFormat = "{}-CH{}-{}"; const bool useDummyElecMap = true; struct DePadId { diff --git a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx index 
de920200bf4ff..c40df5d4b448e 100644 --- a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx +++ b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx @@ -105,7 +105,7 @@ class TrackFinderTask mTrackFinder.printTimers(); LOG(info) << "tracking duration = " << mElapsedTime.count() << " s"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }; ic.services().get().set(stop); diff --git a/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx b/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx index be6e415c74324..5158cc436b277 100644 --- a/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/ClusterFinderGEMSpec.cxx @@ -173,7 +173,7 @@ class ClusterFinderGEMTask mGEMDump = nullptr; } mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, "{}", error.asString()); }); }); auto stop = [this]() { diff --git a/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx b/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx index 2007f0634960f..8344d2837b814 100644 --- a/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/ClusterFinderOriginalSpec.cxx @@ -73,7 +73,7 @@ class ClusterFinderOriginalTask ic.services().get().set([this]() { LOG(info) << "cluster finder duration = " << mTimeClusterFinder.count() << " s"; mErrorMap.forEach([](Error error) { - LOGP(warning, error.asString()); + LOGP(warning, fmt::runtime(error.asString())); }); this->mClusterFinder.deinit(); }); diff --git a/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx b/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx index facb8a880ba66..c468d1660fcc7 100644 --- a/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx +++ b/Detectors/Raw/TFReaderDD/src/tf-reader-workflow.cxx @@ -86,7 +86,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - rinp.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + rinp.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } WorkflowSpec specs; diff --git a/Detectors/Raw/src/RawDumpSpec.cxx b/Detectors/Raw/src/RawDumpSpec.cxx index 345eba5cefdc0..1cb667febe2a3 100644 --- a/Detectors/Raw/src/RawDumpSpec.cxx +++ b/Detectors/Raw/src/RawDumpSpec.cxx @@ -273,7 +273,7 @@ void RawDump::endOfStream(EndOfStreamContext& ec) auto fnm = fmt::format("{}{}{}raw.cfg", mOutDir, mOutDir.back() == '/' ? 
"" : "/", DetID::getName(id)); auto fh = std::fopen(fnm.c_str(), "w"); if (!fh) { - LOGP(fatal, "Failed to create configuration file {}"); + LOGP(fatal, "Failed to create configuration file {}", fnm); } auto ws = std::fwrite(mConfigEntries[id].data(), 1, mConfigEntries[id].size(), fh); if (ws != mConfigEntries[id].size()) { @@ -655,7 +655,7 @@ std::string RawDump::getBaseFileNameTRD(const header::RDHAny* rdh) int link = supermodule * 4 + side * 2 + ep, cru = link / 2; if (link >= NLinks) { auto flpname = fmt::format("flp-unknown_cru{}_ep{}_feeid0x{:05x}", cru, int(RDHUtils::getEndPointID(rdh)), RDHUtils::getFEEID(rdh)); - LOGP(error, "Got wrong link {}, setting TRF file name to unrecognized flp {}", flpname); + LOGP(error, "Got wrong link {}, setting TRF file name to unrecognized flp {}", link, flpname); return flpname; } return fmt::format("alio2-cr1-flp{}_cru{}_{}", trdHWMap[cru].flpid, trdHWMap[cru].cruHWID, ep); diff --git a/Detectors/Raw/src/rawfile-reader-workflow.cxx b/Detectors/Raw/src/rawfile-reader-workflow.cxx index d8cc44fd5adbd..3a5440f80ed2d 100644 --- a/Detectors/Raw/src/rawfile-reader-workflow.cxx +++ b/Detectors/Raw/src/rawfile-reader-workflow.cxx @@ -90,7 +90,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& configcontext) int rateLimitingIPCID = std::stoi(configcontext.options().get("timeframes-rate-limit-ipcid")); std::string chanFmt = configcontext.options().get("metric-feedback-channel-format"); if (rateLimitingIPCID > -1 && !chanFmt.empty()) { - rinp.metricChannel = fmt::format(chanFmt, o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); + rinp.metricChannel = fmt::format(fmt::runtime(chanFmt), o2::framework::ChannelSpecHelpers::defaultIPCFolder(), rateLimitingIPCID); } o2::conf::ConfigurableParam::updateFromString(configcontext.options().get("configKeyValues")); auto hbfini = configcontext.options().get("hbfutils-config"); diff --git a/Detectors/TPC/base/include/TPCBase/CalDet.h b/Detectors/TPC/base/include/TPCBase/CalDet.h index d6b6b70119e4b..835e3638642df 100644 --- a/Detectors/TPC/base/include/TPCBase/CalDet.h +++ b/Detectors/TPC/base/include/TPCBase/CalDet.h @@ -463,7 +463,7 @@ void CalDet::initData() if (!hasData) { mData.push_back(CalType(mPadSubset, i)); } - mData[i].setName(fmt::format(frmt, mName, i)); + mData[i].setName(fmt::format(fmt::runtime(frmt), mName, i)); } } diff --git a/Detectors/TPC/base/src/CDBInterface.cxx b/Detectors/TPC/base/src/CDBInterface.cxx index 1180fd1cc8b1d..b359a04b385cb 100644 --- a/Detectors/TPC/base/src/CDBInterface.cxx +++ b/Detectors/TPC/base/src/CDBInterface.cxx @@ -633,5 +633,5 @@ void CDBStorage::printObjectSummary(std::string_view name, CDBType const type, M message += fmt::format("{:>20} = {}\n", key, value); } - LOGP(info, message); + LOGP(info, "{}", message); } diff --git a/Detectors/TPC/base/src/FEEConfig.cxx b/Detectors/TPC/base/src/FEEConfig.cxx index 033bab55cd8d7..e3685928b3dca 100644 --- a/Detectors/TPC/base/src/FEEConfig.cxx +++ b/Detectors/TPC/base/src/FEEConfig.cxx @@ -133,7 +133,7 @@ void FEEConfig::print() const fmt::print("{0:=^{1}}\n", message, boxWidth); if (cruConfig.size() != CRU::MaxCRU) { - LOGP(error, "Unexpected size of cru config:{} != {}", cruConfig.size(), CRU::MaxCRU); + LOGP(error, "Unexpected size of cru config:{} != {}", cruConfig.size(), (int)CRU::MaxCRU); } else { for (int iCRU = 0; iCRU < cruConfig.size(); ++iCRU) { const auto& c = cruConfig.at(iCRU); diff --git a/Detectors/TPC/calibration/src/CalibTreeDump.cxx 
b/Detectors/TPC/calibration/src/CalibTreeDump.cxx index 3423be676566d..0748f1be55da5 100644 --- a/Detectors/TPC/calibration/src/CalibTreeDump.cxx +++ b/Detectors/TPC/calibration/src/CalibTreeDump.cxx @@ -223,7 +223,7 @@ void CalibTreeDump::addCalDetObjects(TTree* tree) std::string name = calDet.getName(); if (name == "PadCalibrationObject" || name.size() == 0) { - name = fmt::format("calDet_{%02d}", iter); + name = fmt::format(fmt::runtime("calDet_{%02d}"), iter); } std::string meanName = fmt::format("{}_mean", name); diff --git a/Detectors/TPC/calibration/src/CalibdEdx.cxx b/Detectors/TPC/calibration/src/CalibdEdx.cxx index c012a4a2b3ec9..6d6aa980b9680 100644 --- a/Detectors/TPC/calibration/src/CalibdEdx.cxx +++ b/Detectors/TPC/calibration/src/CalibdEdx.cxx @@ -215,7 +215,8 @@ void fitHist(const Hist& hist, CalibdEdxCorrection& corr, TLinearFitter& fitter, corr.setChi2(id, charge, fitter.GetChisquare()); corr.setEntries(id, charge, entries); } - LOGP(debug, "Sector: {}, gemType: {}, charge: {}, Fit pass: {} with {} % outliers in {} entries. Fitter Points: {}, mean fit: {}", id.sector, int(id.type), charge, fitPass, (float)outliers / (float)entries * 100, entries, fitter.GetNpoints(), params[0]); + LOGP(debug, "Sector: {}, gemType: {}, charge: {}, Fit pass: {} with {} % outliers in {} entries. Fitter Points: {}, mean fit: {}", + id.sector, int(id.type), int(charge), fitPass, (float)outliers / (float)entries * 100, entries, fitter.GetNpoints(), params[0]); } } } diff --git a/Detectors/TPC/calibration/src/VDriftHelper.cxx b/Detectors/TPC/calibration/src/VDriftHelper.cxx index 034888998e567..0df353adba91f 100644 --- a/Detectors/TPC/calibration/src/VDriftHelper.cxx +++ b/Detectors/TPC/calibration/src/VDriftHelper.cxx @@ -43,7 +43,8 @@ VDriftHelper::VDriftHelper() if (o2::conf::ConfigurableParam::getProvenance("TPCDetParam.DriftTimeOffset") == o2::conf::ConfigurableParam::EParamProvenance::kRT) { // we stick to this value mVD.creationTime = std::numeric_limits::max(); mForceParamOffset = true; - LOGP(info, "TPC dridt time offset was set from command line to {} mus ({} TB}, will neglect update from CCDB", mVD.refTimeOffset, detpar.DriftTimeOffset); + LOGP(info, "TPC dridt time offset was set from command line to {} mus ({} TB), will neglect update from CCDB", + mVD.refTimeOffset, detpar.DriftTimeOffset); } mUpdated = true; diff --git a/Detectors/TPC/dcs/src/DCSConfigSpec.cxx b/Detectors/TPC/dcs/src/DCSConfigSpec.cxx index 967b825d65a1d..34270cbca528d 100644 --- a/Detectors/TPC/dcs/src/DCSConfigSpec.cxx +++ b/Detectors/TPC/dcs/src/DCSConfigSpec.cxx @@ -289,7 +289,7 @@ void DCSConfigDevice::fillCRUConfig(gsl::span configBuff, bool updat } if (!update && (nLines != CRU::MaxCRU)) { - LOGP(error, "Full FEEConfig expected, but only {} / {} lines read for CRUConfig", nLines, CRU::MaxCRU); + LOGP(error, "Full FEEConfig expected, but only {} / {} lines read for CRUConfig", nLines, (int)CRU::MaxCRU); } else { LOGP(info, "updating CRUConfig for {} crus", nLines); } diff --git a/Detectors/TPC/dcs/src/DCSProcessor.cxx b/Detectors/TPC/dcs/src/DCSProcessor.cxx index 3a1572a6e6834..a26bce43e5c2e 100644 --- a/Detectors/TPC/dcs/src/DCSProcessor.cxx +++ b/Detectors/TPC/dcs/src/DCSProcessor.cxx @@ -69,7 +69,7 @@ float DCSProcessor::getValueF(const DPCOM& dp) const } else if (dp.id.get_type() == DeliveryType::DPVAL_DOUBLE) { return static_cast(o2::dcs::getValue(dp)); } else { - LOGP(warning, "Unexpected delivery type for {}: {}", dp.id.get_alias(), dp.id.get_type()); + LOGP(warning, "Unexpected delivery type for {}: 
{}", dp.id.get_alias(), (int)dp.id.get_type()); } return 0.f; @@ -94,7 +94,7 @@ void DCSProcessor::fillHV(const DPCOM& dp) // TODO: Remove once type is clear static bool statTypePrinted = false; if (!statTypePrinted) { - LOGP(info, "Delivery type for STATUS ({}): {}", alias, type); + LOGP(info, "Delivery type for STATUS ({}): {}", alias, (int)type); statTypePrinted = true; } if (type == DeliveryType::DPVAL_UINT) { @@ -109,7 +109,7 @@ void DCSProcessor::fillHV(const DPCOM& dp) // TODO: Remove once type is clear static bool uiTypePrinted = false; if (!uiTypePrinted) { - LOGP(info, "Delivery type for current, voltage ({}): {}", alias, type); + LOGP(info, "Delivery type for current, voltage ({}): {}", alias, (int)type); uiTypePrinted = true; } const auto value = getValueF(dp); diff --git a/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx b/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx index 40063f23612ae..3a552ebcce82f 100644 --- a/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx +++ b/Detectors/TPC/reconstruction/src/RawReaderCRU.cxx @@ -973,7 +973,7 @@ void RawReaderCRU::writeGBTDataPerLink(std::string_view outputDirectory, int max const int ep = iLink >= 12; const int link = iLink - (ep)*12; - auto outputFileName = fmt::format("{}/CRU_{:02}_EP_{}_Link_{:02}", outputDirectory.data(), mCRU, ep, link); + auto outputFileName = fmt::format("{}/CRU_{:02}_EP_{}_Link_{:02}", outputDirectory.data(), (int)mCRU, ep, link); std::ofstream outputFile(outputFileName, std::ios_base::binary | std::ios_base::app); for (auto packetNumber : linkInfo.PacketPositions) { diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h index 69f90b2e71764..a4107690c941a 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFactorizeIDCSpec.h @@ -311,7 +311,7 @@ class TPCFactorizeIDCSpec : public o2::framework::Task o2::ccdb::CcdbObjectInfo ccdbInfoIDCDelta(CDBTypeMap.at(sideA ? 
CDBType::CalIDCDeltaA : CDBType::CalIDCDeltaC), std::string{}, std::string{}, std::map{}, timeStampStartDelta, timeStampEndDelta); if (mDumpIDCDelta) { - mIDCGrouping.dumpToFile(fmt::format("{}DeltaAveraged_chunk{:02}_{:02}_side{}.root", getCurrentType(), iChunk, timeStampStartDelta, side).data()); + mIDCGrouping.dumpToFile(fmt::format("{}DeltaAveraged_chunk{:02}_{:02}_side{}.root", getCurrentType(), iChunk, timeStampStartDelta, (int)side).data()); } auto startCCDBIDCDelta = timer::now(); diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h index bb8334901b8d9..c24bbd1b245ce 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformAggregatorSpec.h @@ -125,7 +125,7 @@ class TPCFourierTransformAggregatorSpec : public o2::framework::Task if (mDumpFFT) { LOGP(info, "dumping FT to file"); - mIDCFourierTransform[side].dumpToFile(fmt::format("FourierAGG_{:02}_side{}.root", processing_helpers::getCurrentTF(pc), side).data()); + mIDCFourierTransform[side].dumpToFile(fmt::format("FourierAGG_{:02}_side{}.root", processing_helpers::getCurrentTF(pc), (int)side).data()); } if (mSendOutDebug) { diff --git a/Detectors/TPC/workflow/src/ClusterQCSpec.cxx b/Detectors/TPC/workflow/src/ClusterQCSpec.cxx index 284085005ec15..8c93d2755b090 100644 --- a/Detectors/TPC/workflow/src/ClusterQCSpec.cxx +++ b/Detectors/TPC/workflow/src/ClusterQCSpec.cxx @@ -126,7 +126,7 @@ void ClusterQCDevice::endInterval() LOGP(info, "End interval for run: {}, TFs: {} - {}, creation: {} - {}, processed TFs: {}", mRunNumber, mFirstTF, mLastTF, mFirstCreation, mLastCreation, mClusterQC.getProcessedTFs()); - const auto outputFileName = fmt::format(mOutputFileName, fmt::arg("run", mRunNumber), + const auto outputFileName = fmt::format(fmt::runtime(mOutputFileName), fmt::arg("run", mRunNumber), fmt::arg("firstTF", mFirstTF), fmt::arg("lastTF", mLastTF), fmt::arg("firstCreation", mFirstCreation), fmt::arg("lastCreation", mLastCreation)); std::unique_ptr f(TFile::Open(outputFileName.data(), "recreate")); diff --git a/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx b/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx index e9b26209a6fd1..a1b77b2774228 100644 --- a/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx +++ b/Detectors/TPC/workflow/src/IDCToVectorSpec.cxx @@ -124,7 +124,7 @@ class IDCToVectorDevice : public o2::framework::Task // open files if necessary if ((mWriteDebug || mWriteDebugOnError) && !mDebugStream) { - const auto debugFileName = fmt::format(mDebugStreamFileName, fmt::arg("run", runNumber)); + const auto debugFileName = fmt::format(fmt::runtime(mDebugStreamFileName), fmt::arg("run", runNumber)); LOGP(info, "creating debug stream {}", debugFileName); mDebugStream = std::make_unique(debugFileName.data(), "recreate"); } @@ -134,7 +134,7 @@ class IDCToVectorDevice : public o2::framework::Task if (mRawDataType == 4) { rawType = "idc.raw"; } - const auto rawFileName = fmt::format(mRawOutputFileName, fmt::arg("run", runNumber), fmt::arg("raw_type", rawType)); + const auto rawFileName = fmt::format(fmt::runtime(mRawOutputFileName), fmt::arg("run", runNumber), fmt::arg("raw_type", rawType)); LOGP(info, "creating raw debug file {}", rawFileName); mRawOutputFile.open(rawFileName, std::ios::binary); } diff --git a/Detectors/TRD/qc/src/Tracking.cxx b/Detectors/TRD/qc/src/Tracking.cxx index 98131ced41961..a827c3f5a1081 
100644 --- a/Detectors/TRD/qc/src/Tracking.cxx +++ b/Detectors/TRD/qc/src/Tracking.cxx @@ -57,7 +57,7 @@ void Tracking::checkTrack(const TrackTRD& trkTrd, bool isTPCTRD) qcStruct.refGlobalTrackId = id; qcStruct.trackTRD = trkTrd; - LOGF(debug, "Got track with %i tracklets and ID %i", trkTrd.getNtracklets(), id); + LOGF(debug, "Got track with %i tracklets and ID %i", trkTrd.getNtracklets(), (int)id); o2::track::TrackParCov trk = isTPCTRD ? mTracksTPC[id].getParamOut() : mTracksITSTPC[id].getParamOut(); qcStruct.trackSeed = trk; if (mPID) { From 01391a1f67f1bc2632cd122af033824485249774 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 08:37:47 +0100 Subject: [PATCH 082/726] Support ROOT v6.30.01 --- Common/MathUtils/include/MathUtils/fit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Common/MathUtils/include/MathUtils/fit.h b/Common/MathUtils/include/MathUtils/fit.h index 2b1064b13fecb..00c39486a4ba0 100644 --- a/Common/MathUtils/include/MathUtils/fit.h +++ b/Common/MathUtils/include/MathUtils/fit.h @@ -59,7 +59,7 @@ template TFitResultPtr fit(const size_t nBins, const T* arr, const T xMin, const T xMax, TF1& func, std::string_view option = "") { Foption_t fitOption; - ROOT::Fit::FitOptionsMake(ROOT::Fit::kHistogram, option.data(), fitOption); + ROOT::Fit::FitOptionsMake(ROOT::Fit::EFitObjectType::kHistogram, option.data(), fitOption); ROOT::Fit::DataRange range(xMin, xMax); ROOT::Fit::DataOptions opt; From 0cb520d62df8b696232cd1d7b6855abbf4b77022 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 08:37:47 +0100 Subject: [PATCH 083/726] C++20 does not allow aggregate initialisation if default ctor present --- Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h | 1 - .../Vertexing/include/DetectorsVertexing/PVertexerHelpers.h | 1 - 2 files changed, 2 deletions(-) diff --git a/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h b/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h index f38195dbeb6ec..6f26fca6f4d03 100644 --- a/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h +++ b/Detectors/GlobalTracking/include/GlobalTracking/MatchTPCITS.h @@ -288,7 +288,6 @@ struct InteractionCandidate : public o2::InteractionRecord { int rofITS; // corresponding ITS ROF entry (in the ROFRecord vectors) uint32_t flag; // origin, etc. o2::dataformats::RangeReference seedsRef; // references to AB seeds - InteractionCandidate() = default; InteractionCandidate(const o2::InteractionRecord& ir, float t, float dt, int rof, uint32_t f = 0) : o2::InteractionRecord(ir), tBracket(t - dt, t + dt), rofITS(rof), flag(f) {} }; diff --git a/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h b/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h index ec11c6e289c71..34c2d7c15a011 100644 --- a/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h +++ b/Detectors/Vertexing/include/DetectorsVertexing/PVertexerHelpers.h @@ -277,7 +277,6 @@ struct InteractionCandidate : public o2::InteractionRecord { float time = 0; float amplitude = 0; uint32_t flag = 0; // origin, etc. 
- InteractionCandidate() = default; }; } // namespace vertexing From ff6ba8c1c39a7228f6bbab79b547c124d1e05c0a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 09:00:41 +0100 Subject: [PATCH 084/726] DPL: adapt RCombinedDS to new ROOT API --- .../include/Framework/RCombinedDS.h | 8 ++++---- .../RootAnalysisHelpers/src/RCombinedDS.cxx | 20 +++++++++++++++++-- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h index b3b7979232467..c65f53dfb1356 100644 --- a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h +++ b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h @@ -341,14 +341,14 @@ class RCombinedDS final : public ROOT::RDF::RDataSource dummy += colName.data(); throw std::runtime_error(dummy); } - const std::vector& GetColumnNames() const override; + [[nodiscard]] const std::vector& GetColumnNames() const override; std::vector> GetEntryRanges() override; - std::string GetTypeName(std::string_view colName) const override; - bool HasColumn(std::string_view colName) const override; + [[nodiscard]] std::string GetTypeName(std::string_view colName) const override; + [[nodiscard]] bool HasColumn(std::string_view colName) const override; bool SetEntry(unsigned int slot, ULong64_t entry) override; void InitSlot(unsigned int slot, ULong64_t firstEntry) override; void SetNSlots(unsigned int nSlots) override; - void Initialise() override; + virtual void Initialise() override; }; //////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx index f8ff71f8f6583..4102f4775a251 100644 --- a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx +++ b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx @@ -220,12 +220,28 @@ std::vector RCombinedDS::GetColumnReadersImpl(std::string_view colName, c O2_BUILTIN_UNREACHABLE(); } +struct DetectInitialise { + template + static auto call(U* u) -> decltype(std::declval().Initialise()) + { + u->Initialise(); + } + + template + static auto call(U* u) -> decltype(std::declval().Initialize()) + { + u->Initialize(); + } +}; + +} + void RCombinedDS::Initialise() { fEntryRanges = fIndex->BuildIndex(fLeftDF, fRightDF); - fLeft->Initialise(); - fRight->Initialise(); + DetectInitialise::call(fLeft); + DetectInitialise::call(fRight); } /// Creates a RDataFrame using an arrow::Table as input. From 999a2b05ac5538d3896e85794988ad226b28c8a9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 09:20:40 +0100 Subject: [PATCH 085/726] DPL: adapt RCombinedDS to new ROOT API --- Framework/CMakeLists.txt | 5 +++-- .../include/Framework/RCombinedDS.h | 8 ++++---- .../RootAnalysisHelpers/src/RCombinedDS.cxx | 6 +++--- Framework/TestWorkflows/CMakeLists.txt | 19 ++++++++++--------- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/Framework/CMakeLists.txt b/Framework/CMakeLists.txt index a007d6292f238..902713823c142 100644 --- a/Framework/CMakeLists.txt +++ b/Framework/CMakeLists.txt @@ -22,8 +22,9 @@ add_subdirectory(CCDBSupport) add_subdirectory(PhysicsSupport) add_subdirectory(DataTakingSupport) -add_subdirectory(RootAnalysisHelpers) - +# FIXME: disable for now, since ROOT made a non backward compatible change +# RCombinedDS. 
Will reenable once we move to ROOT 6.30.01 +#add_subdirectory(RootAnalysisHelpers) # Build the GUI support only if we have DebugGUI if (TARGET AliceO2::DebugGUI) diff --git a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h index b3b7979232467..c712ab025ecb7 100644 --- a/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h +++ b/Framework/RootAnalysisHelpers/include/Framework/RCombinedDS.h @@ -341,14 +341,14 @@ class RCombinedDS final : public ROOT::RDF::RDataSource dummy += colName.data(); throw std::runtime_error(dummy); } - const std::vector& GetColumnNames() const override; + [[nodiscard]] const std::vector& GetColumnNames() const override; std::vector> GetEntryRanges() override; - std::string GetTypeName(std::string_view colName) const override; - bool HasColumn(std::string_view colName) const override; + [[nodiscard]] std::string GetTypeName(std::string_view colName) const override; + [[nodiscard]] bool HasColumn(std::string_view colName) const override; bool SetEntry(unsigned int slot, ULong64_t entry) override; void InitSlot(unsigned int slot, ULong64_t firstEntry) override; void SetNSlots(unsigned int nSlots) override; - void Initialise() override; + void Initialize() override; }; //////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx index f8ff71f8f6583..1b354f8450726 100644 --- a/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx +++ b/Framework/RootAnalysisHelpers/src/RCombinedDS.cxx @@ -220,12 +220,12 @@ std::vector RCombinedDS::GetColumnReadersImpl(std::string_view colName, c O2_BUILTIN_UNREACHABLE(); } -void RCombinedDS::Initialise() +void RCombinedDS::Initialize() { fEntryRanges = fIndex->BuildIndex(fLeftDF, fRightDF); - fLeft->Initialise(); - fRight->Initialise(); + fLeft->Initialize(); + fRight->Initialize(); } /// Creates a RDataFrame using an arrow::Table as input. 
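A side note on the Initialise -> Initialize rename above: the RDataSource hooks are virtual, so once the base class in the targeted ROOT version spells the hook Initialize(), a derived class that keeps the old spelling silently drops out of dynamic dispatch unless the declaration carries `override`, in which case the mismatch becomes a compile error instead. A minimal, self-contained sketch of that failure mode, using generic class names rather than the actual ROOT types:

  #include <iostream>

  struct BaseOld { virtual ~BaseOld() = default; virtual void Initialise() { std::cout << "old base\n"; } };
  struct BaseNew { virtual ~BaseNew() = default; virtual void Initialize() { std::cout << "new base\n"; } };

  // Compiled against BaseNew, this method no longer overrides anything;
  // without `override` the code still builds and the hook is silently skipped.
  struct SourceOldSpelling : BaseNew {
    void Initialise() { std::cout << "never reached via the base interface\n"; }
  };

  // struct SourceChecked : BaseNew { void Initialise() override; }; // error: does not override

  int main()
  {
    SourceOldSpelling s;
    static_cast<BaseNew&>(s).Initialize(); // prints "new base": the derived method is not called
  }
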
diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 05c40f034b9d2..7a396c0f2c3a9 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -37,15 +37,16 @@ o2_add_dpl_workflow(sync-reconstruction-dummy SOURCES src/o2SyncReconstructionDummy.cxx COMPONENT_NAME TestWorkflows) -o2_add_dpl_workflow(d0-analysis - SOURCES src/o2D0Analysis.cxx - PUBLIC_LINK_LIBRARIES O2::FrameworkRootAnalysisHelpers - COMPONENT_NAME TestWorkflows) - -o2_add_dpl_workflow(simple-tracks-analysis - SOURCES src/o2SimpleTracksAnalysis.cxx - PUBLIC_LINK_LIBRARIES O2::FrameworkRootAnalysisHelpers - COMPONENT_NAME TestWorkflows) +# FIXME: renable when the new ROOT is used +#o2_add_dpl_workflow(d0-analysis +# SOURCES src/o2D0Analysis.cxx +# PUBLIC_LINK_LIBRARIES O2::FrameworkRootAnalysisHelpers +# COMPONENT_NAME TestWorkflows) +# +#o2_add_dpl_workflow(simple-tracks-analysis +# SOURCES src/o2SimpleTracksAnalysis.cxx +# PUBLIC_LINK_LIBRARIES O2::FrameworkRootAnalysisHelpers +# COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(analysis-task-example SOURCES src/o2AnalysisTaskExample.cxx From f0b830d3596932fd68093aeec1c7e83702b61091 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 11:46:19 +0100 Subject: [PATCH 086/726] Keep dropping data while in ready, unless a new state was requested --- Framework/Core/src/ExternalFairMQDeviceProxy.cxx | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 4d9d4c11df49f..50738ed96d116 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -694,10 +694,15 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, // Continue iterating until all channels have seen a new state. while (std::all_of(lastNewStatePending.begin(), lastNewStatePending.end(), [](bool b) { return b; }) != true) { - if (uv_now(deviceState.loop) - start > 5000) { + // In case there is a new state pending, we break out of the loop after 5 seconds. + if (device->NewStatePending() && uv_now(deviceState.loop) - start > 5000) { LOGP(info, "Timeout while draining messages, going to next state anyway."); break; } + // Reset the start time if we have not seen a new state yet. + if (!device->NewStatePending()) { + start = uv_now(deviceState.loop); + } fair::mq::Parts parts; for (size_t ci = 0; ci < deviceState.inputChannelInfos.size(); ++ci) { auto& info = deviceState.inputChannelInfos[ci]; From 317aff77cb0fe2dd3b2768a8a649f26dd10a66bf Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 14:46:45 +0100 Subject: [PATCH 087/726] DPL: keep dropping data while in ready, unless a new state was requested For some reason, before we were dropping data only for 5 seconds. To be seen if this was needed to prevent some timeout by ECS / ODC. This has now changed to keep dropping data until the "cleanup" property changes or until there is a NewStatePending(). 
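The change below snapshots DeviceState::cleanupCount before the Ready-state drain loop and keeps receiving on the raw FairMQ channels only as long as that snapshot is still current and no state transition has been requested; bumping the "cleanup" device property is what invalidates the snapshot. Note that the condition as first written here uses `||`; it is tightened to the AND form in PATCH 093 further down. A minimal sketch of the intended predicate (the helper name is illustrative, not part of the actual code):

  #include <atomic>
  #include <cstdint>

  // Keep draining only while no state change is pending and nobody bumped the cleanup counter.
  bool keepDraining(bool newStatePending, int64_t seenCleanupCount, const std::atomic<int64_t>& current)
  {
    return !newStatePending && current.load() == seenCleanupCount;
  }
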
--- .../include/Framework/DataProcessingDevice.h | 1 - .../Core/include/Framework/DeviceContext.h | 3 ++ .../Core/include/Framework/DeviceState.h | 1 + Framework/Core/src/DataProcessingDevice.cxx | 9 ++-- .../Core/src/ExternalFairMQDeviceProxy.cxx | 48 ++++--------------- 5 files changed, 19 insertions(+), 43 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingDevice.h b/Framework/Core/include/Framework/DataProcessingDevice.h index 0cbecdd4e3172..779e1aeff5340 100644 --- a/Framework/Core/include/Framework/DataProcessingDevice.h +++ b/Framework/Core/include/Framework/DataProcessingDevice.h @@ -119,7 +119,6 @@ class DataProcessingDevice : public fair::mq::Device /// Handle to wake up the main loop from other threads /// e.g. when FairMQ notifies some callback in an asynchronous way uv_async_t* mAwakeHandle = nullptr; - int64_t mCleanupCount = -1; }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DeviceContext.h b/Framework/Core/include/Framework/DeviceContext.h index 3777e7f608b75..dd0459548e3ad 100644 --- a/Framework/Core/include/Framework/DeviceContext.h +++ b/Framework/Core/include/Framework/DeviceContext.h @@ -15,6 +15,9 @@ #ifndef O2_FRAMEWORK_DEVICECONTEXT_H_ #define O2_FRAMEWORK_DEVICECONTEXT_H_ +#include +#include + typedef struct uv_timer_s uv_timer_t; typedef struct uv_signal_s uv_signal_t; diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 506abdc49fb24..ae3e6c8611fbc 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -59,6 +59,7 @@ struct DeviceState { std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; + std::atomic cleanupCount = -1; /// ComputingQuotaOffers which have not yet been /// evaluated by the ComputingQuotaEvaluator diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 5542be850fb70..cb17a78433267 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -136,15 +136,16 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi mServiceRegistry{registry}, mProcessingPolicies{policies} { - GetConfig()->Subscribe("dpl", [&cleanupCount = mCleanupCount, ®istry = mServiceRegistry](const std::string& key, std::string value) { + GetConfig()->Subscribe("dpl", [®istry = mServiceRegistry](const std::string& key, std::string value) { if (key == "cleanup") { + auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; + auto& deviceState = ref.get(); + int64_t cleanupCount = deviceState.cleanupCount.load(); int64_t newCleanupCount = std::stoll(value); if (newCleanupCount <= cleanupCount) { return; } - cleanupCount = newCleanupCount; - auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; - auto& deviceState = ref.get(); + deviceState.cleanupCount.store(newCleanupCount); for (auto& info : deviceState.inputChannelInfos) { fair::mq::Parts parts; while (info.channel->Receive(parts, 0)) { diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 4d9d4c11df49f..5117c861b0615 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -687,53 +687,25 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, if (fair::mq::State{state} != 
fair::mq::State::Ready) { return; } - // We keep track of whether or not all channels have seen a new state. - std::vector lastNewStatePending(deviceState.inputChannelInfos.size(), false); uv_update_time(deviceState.loop); - auto start = uv_now(deviceState.loop); - - // Continue iterating until all channels have seen a new state. - while (std::all_of(lastNewStatePending.begin(), lastNewStatePending.end(), [](bool b) { return b; }) != true) { - if (uv_now(deviceState.loop) - start > 5000) { - LOGP(info, "Timeout while draining messages, going to next state anyway."); - break; - } + bool doDrain = true; + // Cleanup count is set by the cleanup property of the device. + // It is incremented every time the device is cleaned up. + // We use it to detect when the device is cleaned up. + int64_t cleanupCount = deviceState.cleanupCount.load(); + + // Continue iterating we saw the cleanup property being reset or + // the device state changing. + while (doDrain) { + doDrain = device->NewStatePending() || deviceState.cleanupCount == cleanupCount; fair::mq::Parts parts; for (size_t ci = 0; ci < deviceState.inputChannelInfos.size(); ++ci) { auto& info = deviceState.inputChannelInfos[ci]; // We only care about rawfmq channels. if (info.channelType != ChannelAccountingType::RAWFMQ) { - lastNewStatePending[ci] = true; - continue; - } - // This means we have not set things up yet. I.e. the first iteration from - // ready to run has not happened yet. - if (info.channel == nullptr) { - lastNewStatePending[ci] = true; continue; } info.channel->Receive(parts, 10); - // Handle both cases of state changes: - // - // - The state has been changed from the outside and FairMQ knows about it. - // - The state has been changed from the GUI, and deviceState.nextFairMQState knows about it. - // - // This latter case is probably better handled from DPL itself, after all it's fair to - // assume we need to switch state as soon as the GUI notifies us. - // For now we keep it here to avoid side effects. - lastNewStatePending[ci] = device->NewStatePending() || (deviceState.nextFairMQState.empty() == false); - if (parts.Size() == 0) { - continue; - } - if (!lastNewStatePending[ci]) { - LOGP(warn, "Unexpected {} message on channel {} while in Ready state. Dropping.", parts.Size(), info.channel->GetName()); - } else if (lastNewStatePending[ci]) { - LOGP(detail, "Some {} parts were received on channel {} while switching away from Ready. Keeping.", parts.Size(), info.channel->GetName()); - for (int pi = 0; pi < parts.Size(); ++pi) { - info.parts.fParts.emplace_back(std::move(parts.At(pi))); - } - info.readPolled = true; - } } // Keep state transitions going also when running with the standalone GUI. 
uv_run(deviceState.loop, UV_RUN_NOWAIT); From d6b639c617d6c9b8d3893e599e2600a51727efa9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 14:46:44 +0100 Subject: [PATCH 088/726] Follow coding conventions --- .../Core/include/Framework/DataProcessingDevice.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingDevice.h b/Framework/Core/include/Framework/DataProcessingDevice.h index 2eade530edc3a..0cbecdd4e3172 100644 --- a/Framework/Core/include/Framework/DataProcessingDevice.h +++ b/Framework/Core/include/Framework/DataProcessingDevice.h @@ -107,15 +107,15 @@ class DataProcessingDevice : public fair::mq::Device std::unique_ptr mConfigRegistry; ServiceRegistry& mServiceRegistry; - uint64_t mLastSlowMetricSentTimestamp = 0; /// The timestamp of the last time we sent slow metrics - uint64_t mLastMetricFlushedTimestamp = 0; /// The timestamp of the last time we actually flushed metrics - uint64_t mBeginIterationTimestamp = 0; /// The timestamp of when the current ConditionalRun was started + uint64_t mLastSlowMetricSentTimestamp = 0; /// The timestamp of the last time we sent slow metrics + uint64_t mLastMetricFlushedTimestamp = 0; /// The timestamp of the last time we actually flushed metrics + uint64_t mBeginIterationTimestamp = 0; /// The timestamp of when the current ConditionalRun was started std::vector mPendingRegionInfos; /// A list of the region infos not yet notified. std::mutex mRegionInfoMutex; - ProcessingPolicies mProcessingPolicies; /// User policies related to data processing - bool mWasActive = false; /// Whether or not the device was active at last iteration. - std::vector mHandles; /// Handles to use to schedule work. - std::vector mStreams; /// Information about the task running in the associated mHandle. + ProcessingPolicies mProcessingPolicies; /// User policies related to data processing + bool mWasActive = false; /// Whether or not the device was active at last iteration. + std::vector mHandles; /// Handles to use to schedule work. + std::vector mStreams; /// Information about the task running in the associated mHandle. /// Handle to wake up the main loop from other threads /// e.g. when FairMQ notifies some callback in an asynchronous way uv_async_t* mAwakeHandle = nullptr; From 2d9ad4c6f4a76b3c3b3f7b8f1be039b17cc337f0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 14:47:43 +0100 Subject: [PATCH 089/726] DPL: keep dropping data while in ready, unless a new state was requested For some reason, before we were dropping data only for 5 seconds. To be seen if this was needed to prevent some timeout by ECS / ODC. This has now changed to keep dropping data until the "cleanup" property changes or until there is a NewStatePending(). 
--- .../include/Framework/DataProcessingDevice.h | 1 - .../Core/include/Framework/DeviceState.h | 1 + Framework/Core/src/DataProcessingDevice.cxx | 9 ++-- .../Core/src/ExternalFairMQDeviceProxy.cxx | 48 ++++--------------- 4 files changed, 16 insertions(+), 43 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingDevice.h b/Framework/Core/include/Framework/DataProcessingDevice.h index 0cbecdd4e3172..779e1aeff5340 100644 --- a/Framework/Core/include/Framework/DataProcessingDevice.h +++ b/Framework/Core/include/Framework/DataProcessingDevice.h @@ -119,7 +119,6 @@ class DataProcessingDevice : public fair::mq::Device /// Handle to wake up the main loop from other threads /// e.g. when FairMQ notifies some callback in an asynchronous way uv_async_t* mAwakeHandle = nullptr; - int64_t mCleanupCount = -1; }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 506abdc49fb24..ae3e6c8611fbc 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -59,6 +59,7 @@ struct DeviceState { std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; + std::atomic cleanupCount = -1; /// ComputingQuotaOffers which have not yet been /// evaluated by the ComputingQuotaEvaluator diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 5542be850fb70..cb17a78433267 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -136,15 +136,16 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi mServiceRegistry{registry}, mProcessingPolicies{policies} { - GetConfig()->Subscribe("dpl", [&cleanupCount = mCleanupCount, ®istry = mServiceRegistry](const std::string& key, std::string value) { + GetConfig()->Subscribe("dpl", [®istry = mServiceRegistry](const std::string& key, std::string value) { if (key == "cleanup") { + auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; + auto& deviceState = ref.get(); + int64_t cleanupCount = deviceState.cleanupCount.load(); int64_t newCleanupCount = std::stoll(value); if (newCleanupCount <= cleanupCount) { return; } - cleanupCount = newCleanupCount; - auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; - auto& deviceState = ref.get(); + deviceState.cleanupCount.store(newCleanupCount); for (auto& info : deviceState.inputChannelInfos) { fair::mq::Parts parts; while (info.channel->Receive(parts, 0)) { diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 4d9d4c11df49f..5117c861b0615 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -687,53 +687,25 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, if (fair::mq::State{state} != fair::mq::State::Ready) { return; } - // We keep track of whether or not all channels have seen a new state. - std::vector lastNewStatePending(deviceState.inputChannelInfos.size(), false); uv_update_time(deviceState.loop); - auto start = uv_now(deviceState.loop); - - // Continue iterating until all channels have seen a new state. 
- while (std::all_of(lastNewStatePending.begin(), lastNewStatePending.end(), [](bool b) { return b; }) != true) { - if (uv_now(deviceState.loop) - start > 5000) { - LOGP(info, "Timeout while draining messages, going to next state anyway."); - break; - } + bool doDrain = true; + // Cleanup count is set by the cleanup property of the device. + // It is incremented every time the device is cleaned up. + // We use it to detect when the device is cleaned up. + int64_t cleanupCount = deviceState.cleanupCount.load(); + + // Continue iterating we saw the cleanup property being reset or + // the device state changing. + while (doDrain) { + doDrain = device->NewStatePending() || deviceState.cleanupCount == cleanupCount; fair::mq::Parts parts; for (size_t ci = 0; ci < deviceState.inputChannelInfos.size(); ++ci) { auto& info = deviceState.inputChannelInfos[ci]; // We only care about rawfmq channels. if (info.channelType != ChannelAccountingType::RAWFMQ) { - lastNewStatePending[ci] = true; - continue; - } - // This means we have not set things up yet. I.e. the first iteration from - // ready to run has not happened yet. - if (info.channel == nullptr) { - lastNewStatePending[ci] = true; continue; } info.channel->Receive(parts, 10); - // Handle both cases of state changes: - // - // - The state has been changed from the outside and FairMQ knows about it. - // - The state has been changed from the GUI, and deviceState.nextFairMQState knows about it. - // - // This latter case is probably better handled from DPL itself, after all it's fair to - // assume we need to switch state as soon as the GUI notifies us. - // For now we keep it here to avoid side effects. - lastNewStatePending[ci] = device->NewStatePending() || (deviceState.nextFairMQState.empty() == false); - if (parts.Size() == 0) { - continue; - } - if (!lastNewStatePending[ci]) { - LOGP(warn, "Unexpected {} message on channel {} while in Ready state. Dropping.", parts.Size(), info.channel->GetName()); - } else if (lastNewStatePending[ci]) { - LOGP(detail, "Some {} parts were received on channel {} while switching away from Ready. Keeping.", parts.Size(), info.channel->GetName()); - for (int pi = 0; pi < parts.Size(); ++pi) { - info.parts.fParts.emplace_back(std::move(parts.At(pi))); - } - info.readPolled = true; - } } // Keep state transitions going also when running with the standalone GUI. uv_run(deviceState.loop, UV_RUN_NOWAIT); From b0ce3a22d9526f63145c05887c704ab0543868a2 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 15:19:22 +0100 Subject: [PATCH 090/726] Fix wrong index in test --- Detectors/CTF/test/test_ctf_io_zdc.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/CTF/test/test_ctf_io_zdc.cxx b/Detectors/CTF/test/test_ctf_io_zdc.cxx index 87563f128a737..3ae6f5b991ad1 100644 --- a/Detectors/CTF/test/test_ctf_io_zdc.cxx +++ b/Detectors/CTF/test/test_ctf_io_zdc.cxx @@ -81,7 +81,7 @@ BOOST_DATA_TEST_CASE(CTFTest, boost_data::make(ANSVersions), ansVersion) pedsdata[i].ir = irPed; for (int ic = 0; ic < NChannels; ic++) { pedsdata[i].data[ic] = gRandom->Integer(0xffff); - pedsdata[i].scaler[ic] = (i > 0 ? pedsdata[i].scaler[ic - 1] : 0) + gRandom->Integer(20); + pedsdata[i].scaler[ic] = (ic > 0 ? 
pedsdata[i].scaler[ic - 1] : 0) + gRandom->Integer(20); } irPed.orbit++; } From 7cba8dcde3d6ff971f352b0f379b5e77fe4da752 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 22:05:46 +0100 Subject: [PATCH 091/726] Follow coding conventions --- .../Core/include/Framework/DataProcessingDevice.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingDevice.h b/Framework/Core/include/Framework/DataProcessingDevice.h index 2eade530edc3a..0cbecdd4e3172 100644 --- a/Framework/Core/include/Framework/DataProcessingDevice.h +++ b/Framework/Core/include/Framework/DataProcessingDevice.h @@ -107,15 +107,15 @@ class DataProcessingDevice : public fair::mq::Device std::unique_ptr mConfigRegistry; ServiceRegistry& mServiceRegistry; - uint64_t mLastSlowMetricSentTimestamp = 0; /// The timestamp of the last time we sent slow metrics - uint64_t mLastMetricFlushedTimestamp = 0; /// The timestamp of the last time we actually flushed metrics - uint64_t mBeginIterationTimestamp = 0; /// The timestamp of when the current ConditionalRun was started + uint64_t mLastSlowMetricSentTimestamp = 0; /// The timestamp of the last time we sent slow metrics + uint64_t mLastMetricFlushedTimestamp = 0; /// The timestamp of the last time we actually flushed metrics + uint64_t mBeginIterationTimestamp = 0; /// The timestamp of when the current ConditionalRun was started std::vector mPendingRegionInfos; /// A list of the region infos not yet notified. std::mutex mRegionInfoMutex; - ProcessingPolicies mProcessingPolicies; /// User policies related to data processing - bool mWasActive = false; /// Whether or not the device was active at last iteration. - std::vector mHandles; /// Handles to use to schedule work. - std::vector mStreams; /// Information about the task running in the associated mHandle. + ProcessingPolicies mProcessingPolicies; /// User policies related to data processing + bool mWasActive = false; /// Whether or not the device was active at last iteration. + std::vector mHandles; /// Handles to use to schedule work. + std::vector mStreams; /// Information about the task running in the associated mHandle. /// Handle to wake up the main loop from other threads /// e.g. when FairMQ notifies some callback in an asynchronous way uv_async_t* mAwakeHandle = nullptr; From 036b41a457987b3c01e6afb31a723d30556861f3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 10 Nov 2023 22:06:38 +0100 Subject: [PATCH 092/726] DPL: keep dropping data while in ready, unless a new state was requested For some reason, before we were dropping data only for 5 seconds. To be seen if this was needed to prevent some timeout by ECS / ODC. This has now changed to keep dropping data until the "cleanup" property changes or until there is a NewStatePending(). 
--- .../include/Framework/DataProcessingDevice.h | 1 - .../Core/include/Framework/DeviceState.h | 2 + Framework/Core/src/DataProcessingDevice.cxx | 9 ++-- .../Core/src/ExternalFairMQDeviceProxy.cxx | 48 ++++--------------- 4 files changed, 17 insertions(+), 43 deletions(-) diff --git a/Framework/Core/include/Framework/DataProcessingDevice.h b/Framework/Core/include/Framework/DataProcessingDevice.h index 0cbecdd4e3172..779e1aeff5340 100644 --- a/Framework/Core/include/Framework/DataProcessingDevice.h +++ b/Framework/Core/include/Framework/DataProcessingDevice.h @@ -119,7 +119,6 @@ class DataProcessingDevice : public fair::mq::Device /// Handle to wake up the main loop from other threads /// e.g. when FairMQ notifies some callback in an asynchronous way uv_async_t* mAwakeHandle = nullptr; - int64_t mCleanupCount = -1; }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 506abdc49fb24..3be0302fd4113 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -19,6 +19,7 @@ #include #include #include +#include typedef struct uv_loop_s uv_loop_t; typedef struct uv_timer_s uv_timer_t; @@ -59,6 +60,7 @@ struct DeviceState { std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; + std::atomic cleanupCount = -1; /// ComputingQuotaOffers which have not yet been /// evaluated by the ComputingQuotaEvaluator diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 851cb5e193d6f..4d0cac7087611 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -136,15 +136,16 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi mServiceRegistry{registry}, mProcessingPolicies{policies} { - GetConfig()->Subscribe("dpl", [&cleanupCount = mCleanupCount, ®istry = mServiceRegistry](const std::string& key, std::string value) { + GetConfig()->Subscribe("dpl", [®istry = mServiceRegistry](const std::string& key, std::string value) { if (key == "cleanup") { + auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; + auto& deviceState = ref.get(); + int64_t cleanupCount = deviceState.cleanupCount.load(); int64_t newCleanupCount = std::stoll(value); if (newCleanupCount <= cleanupCount) { return; } - cleanupCount = newCleanupCount; - auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; - auto& deviceState = ref.get(); + deviceState.cleanupCount.store(newCleanupCount); for (auto& info : deviceState.inputChannelInfos) { fair::mq::Parts parts; while (info.channel->Receive(parts, 0)) { diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 4d9d4c11df49f..5117c861b0615 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -687,53 +687,25 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name, if (fair::mq::State{state} != fair::mq::State::Ready) { return; } - // We keep track of whether or not all channels have seen a new state. - std::vector lastNewStatePending(deviceState.inputChannelInfos.size(), false); uv_update_time(deviceState.loop); - auto start = uv_now(deviceState.loop); - - // Continue iterating until all channels have seen a new state. 
-    while (std::all_of(lastNewStatePending.begin(), lastNewStatePending.end(), [](bool b) { return b; }) != true) {
-      if (uv_now(deviceState.loop) - start > 5000) {
-        LOGP(info, "Timeout while draining messages, going to next state anyway.");
-        break;
-      }
+    bool doDrain = true;
+    // Cleanup count is set by the cleanup property of the device.
+    // It is incremented every time the device is cleaned up.
+    // We use it to detect when the device is cleaned up.
+    int64_t cleanupCount = deviceState.cleanupCount.load();
+
+    // Continue iterating until we see the cleanup property change or
+    // the device state change.
+    while (doDrain) {
+      doDrain = device->NewStatePending() || deviceState.cleanupCount == cleanupCount;
       fair::mq::Parts parts;
       for (size_t ci = 0; ci < deviceState.inputChannelInfos.size(); ++ci) {
         auto& info = deviceState.inputChannelInfos[ci];
         // We only care about rawfmq channels.
         if (info.channelType != ChannelAccountingType::RAWFMQ) {
-          lastNewStatePending[ci] = true;
-          continue;
-        }
-        // This means we have not set things up yet. I.e. the first iteration from
-        // ready to run has not happened yet.
-        if (info.channel == nullptr) {
-          lastNewStatePending[ci] = true;
           continue;
         }
         info.channel->Receive(parts, 10);
-        // Handle both cases of state changes:
-        //
-        // - The state has been changed from the outside and FairMQ knows about it.
-        // - The state has been changed from the GUI, and deviceState.nextFairMQState knows about it.
-        //
-        // This latter case is probably better handled from DPL itself, after all it's fair to
-        // assume we need to switch state as soon as the GUI notifies us.
-        // For now we keep it here to avoid side effects.
-        lastNewStatePending[ci] = device->NewStatePending() || (deviceState.nextFairMQState.empty() == false);
-        if (parts.Size() == 0) {
-          continue;
-        }
-        if (!lastNewStatePending[ci]) {
-          LOGP(warn, "Unexpected {} message on channel {} while in Ready state. Dropping.", parts.Size(), info.channel->GetName());
-        } else if (lastNewStatePending[ci]) {
-          LOGP(detail, "Some {} parts were received on channel {} while switching away from Ready. Keeping.", parts.Size(), info.channel->GetName());
-          for (int pi = 0; pi < parts.Size(); ++pi) {
-            info.parts.fParts.emplace_back(std::move(parts.At(pi)));
-          }
-          info.readPolled = true;
-        }
       }
       // Keep state transitions going also when running with the standalone GUI.
       uv_run(deviceState.loop, UV_RUN_NOWAIT);

From cd50cd9a0bc25c9fc58be8ed22e7b148d87c9cd2 Mon Sep 17 00:00:00 2001
From: Giulio Eulisse <10544+ktf@users.noreply.github.com>
Date: Mon, 13 Nov 2023 12:16:04 +0100
Subject: [PATCH 093/726] DPL: fix logic in the READY state flushing

---
 Framework/Core/src/ExternalFairMQDeviceProxy.cxx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx
index 5117c861b0615..efd66b8499ab0 100644
--- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx
+++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx
@@ -697,7 +697,7 @@ DataProcessorSpec specifyExternalFairMQDeviceProxy(char const* name,
     // Continue iterating until we see the cleanup property change or
     // the device state change.
while (doDrain) { - doDrain = device->NewStatePending() || deviceState.cleanupCount == cleanupCount; + doDrain = device->NewStatePending() == false && deviceState.cleanupCount == cleanupCount; fair::mq::Parts parts; for (size_t ci = 0; ci < deviceState.inputChannelInfos.size(); ++ci) { auto& info = deviceState.inputChannelInfos[ci]; From d080a1d0774b2c1746708e73abc85c7975c4e6ec Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 13 Nov 2023 12:39:54 +0100 Subject: [PATCH 094/726] DPL: print missing data when dropping timeframes --- Framework/Core/src/DataRelayer.cxx | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index 266f5b054add1..e9591a9cc76ea 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -297,11 +297,13 @@ void DataRelayer::setOldestPossibleInput(TimesliceId proposed, ChannelIndex chan continue; } mPruneOps.push_back(PruneOp{si}); + bool didDrop = false; for (size_t mi = 0; mi < mInputs.size(); ++mi) { auto& input = mInputs[mi]; auto& element = mCache[si * mInputs.size() + mi]; if (element.size() != 0) { if (input.lifetime != Lifetime::Condition && mCompletionPolicy.name != "internal-dpl-injected-dummy-sink") { + didDrop = true; LOGP(error, "Dropping incomplete {} Lifetime::{} data in slot {} with timestamp {} < {} as it can never be completed.", DataSpecUtils::describe(input), input.lifetime, si, timestamp.value, newOldest.timeslice.value); } else { LOGP(debug, @@ -312,6 +314,16 @@ void DataRelayer::setOldestPossibleInput(TimesliceId proposed, ChannelIndex chan } } } + // We did drop some data. Let's print what was missing. + if (didDrop) { + for (size_t mi = 0; mi < mInputs.size(); ++mi) { + auto& input = mInputs[mi]; + auto& element = mCache[si * mInputs.size() + mi]; + if (element.size() == 0) { + LOGP(error, "Missing {} (lifetime:{}) while dropping incomplete data in slot {} with timestamp {} < {}.", DataSpecUtils::describe(input), input.lifetime, si, timestamp.value, newOldest.timeslice.value); + } + } + } } } From 060946d80e2842e6e36b15772de134453d932b1f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 13 Nov 2023 14:06:59 +0100 Subject: [PATCH 095/726] DPL: complain if inputs and outputs are incompatible --- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/WorkflowHelpers.cxx | 26 ++++++++++++++++++++++++ Framework/Core/src/WorkflowHelpers.h | 8 ++++++++ 3 files changed, 36 insertions(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index c54dd6cec7a99..45a76570dbf1b 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1072,6 +1072,8 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf WorkflowHelpers::constructGraph(workflow, logicalEdges, outputs, availableForwardsInfo); + WorkflowHelpers::validateEdges(workflow, logicalEdges, outputs); + // We need to instanciate one device per (me, timeIndex) in the // DeviceConnectionEdge. 
For each device we need one new binding // server per (me, other) -> port Moreover for each (me, other, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index cd9c3913de228..68d6554ccd052 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1233,4 +1233,30 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons } #pragma diagnostic pop +void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs) +{ + // Iterate over all the edges. + // Get the input lifetime and the output lifetime. + // Output lifetime must be Timeframe if the input lifetime is Timeframe. + std::string errors; + for (auto& edge : edges) { + auto& producer = workflow[edge.producer]; + auto& consumer = workflow[edge.consumer]; + auto& output = outputs[edge.outputGlobalIndex]; + auto& input = consumer.inputs[edge.consumerInputIndex]; + // We cannot simply say "!Timeframe" because we want to allow + // e.g. Enumeration => Timeframe + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors += fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + } + } + if (!errors.empty()) { + throw std::runtime_error(errors); + } +} + } // namespace o2::framework diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index a167e99d4768c..67661dbdb979a 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -227,6 +227,14 @@ struct WorkflowHelpers { /// returns only dangling outputs static std::vector computeDanglingOutputs(WorkflowSpec const& workflow); + + /// Validate that the nodes at the ends of the edges of the graph + /// are actually compatible with each other. + /// For example we should make sure that Lifetime::Timeframe inputs of + /// one node is not connected to an Output of Lifetime::Sporadic of another node. 
+ static void validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs); }; } // namespace o2::framework From 8c28b720075e211086cc03af6866280370c1dbca Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 13 Nov 2023 15:33:27 +0100 Subject: [PATCH 096/726] DPL: complain if inputs and outputs are incompatible --- Framework/Core/src/CommonDataProcessors.cxx | 4 +++- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/WorkflowHelpers.cxx | 26 +++++++++++++++++++++ Framework/Core/src/WorkflowHelpers.h | 8 +++++++ 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 74e020595b345..e9cdc97e316ca 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -259,9 +259,11 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector port Moreover for each (me, other, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index cd9c3913de228..68d6554ccd052 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1233,4 +1233,30 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons } #pragma diagnostic pop +void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs) +{ + // Iterate over all the edges. + // Get the input lifetime and the output lifetime. + // Output lifetime must be Timeframe if the input lifetime is Timeframe. + std::string errors; + for (auto& edge : edges) { + auto& producer = workflow[edge.producer]; + auto& consumer = workflow[edge.consumer]; + auto& output = outputs[edge.outputGlobalIndex]; + auto& input = consumer.inputs[edge.consumerInputIndex]; + // We cannot simply say "!Timeframe" because we want to allow + // e.g. Enumeration => Timeframe + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors += fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + } + } + if (!errors.empty()) { + throw std::runtime_error(errors); + } +} + } // namespace o2::framework diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index a167e99d4768c..67661dbdb979a 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -227,6 +227,14 @@ struct WorkflowHelpers { /// returns only dangling outputs static std::vector computeDanglingOutputs(WorkflowSpec const& workflow); + + /// Validate that the nodes at the ends of the edges of the graph + /// are actually compatible with each other. + /// For example we should make sure that Lifetime::Timeframe inputs of + /// one node is not connected to an Output of Lifetime::Sporadic of another node. 
+ static void validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs); }; } // namespace o2::framework From cc9433c582436b75489b41cc058f14dae32b8d08 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:52:38 +0100 Subject: [PATCH 097/726] Keep code checker happy --- Detectors/TRD/base/macros/OCDB2CCDB.C | 14 +++++++------- Detectors/TRD/base/macros/OCDB2CCDBTrapConfig.C | 12 ++++++------ Detectors/TRD/base/macros/PrintTrapConfig.C | 12 ++++++------ Detectors/TRD/base/macros/Readocdb.C | 14 +++++++------- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/Detectors/TRD/base/macros/OCDB2CCDB.C b/Detectors/TRD/base/macros/OCDB2CCDB.C index 0d7be94536bcd..3f023d6398f9f 100644 --- a/Detectors/TRD/base/macros/OCDB2CCDB.C +++ b/Detectors/TRD/base/macros/OCDB2CCDB.C @@ -83,8 +83,8 @@ using namespace o2::trd; // histograms used for extracting the mean and RMS of calibration parameters // global constants -AliCDBStorage* storage = NULL; -AliCDBManager* manager = NULL; +AliCDBStorage* storage = nullptr; +AliCDBManager* manager = nullptr; Int_t Run(0); void MakeRunListFromOCDB(const Char_t* directory, const Char_t* outfile, Bool_t fromAlien = kFALSE); AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner = kTRUE); @@ -184,7 +184,7 @@ void UnpackGainTable(std::string& gainkey, CalOnlineGainTables* gtbl) AliTRDCalOnlineGainTable* tbl = 0; AliTRDCalOnlineGainTableROC* tblroc = 0; AliTRDCalOnlineGainTableMCM* tblmcm = 0; - AliCDBEntry* entry = NULL; + AliCDBEntry* entry = nullptr; if ((entry = GetCDBentry(Form("TRD/Calib/%s", gainkey.c_str()), 0))) { tbl = (AliTRDCalOnlineGainTable*)entry->GetObject(); for (int i = 0; i < 540; i++) { @@ -239,7 +239,7 @@ void OCDB2CCDB(long timeStamp = -1, TString ccdbPath = "http://localhost:8080", manager->SetDefaultStorage(storageURI); manager->SetCacheFlag(kTRUE); storage = manager->GetDefaultStorage(); - AliCDBEntry* entry = NULL; + AliCDBEntry* entry = nullptr; Run = run; std::string TRDCalBase = "TRD"; @@ -504,16 +504,16 @@ AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner) { TString spath = path; // ::Info("GetCDBentry", Form("QUERY RUN [%d] for \"%s\".", Run, spath.Data())); - AliCDBEntry* entry(NULL); + AliCDBEntry* entry(nullptr); storage->QueryCDB(Run, spath.Data()); cout << spath.Data(); if (!storage->GetQueryCDBList()->GetEntries()) { cout << "GetCDBentry" << Form("Missing \"%s\" in run %d.", spath.Data(), Run); - return NULL; + return nullptr; } else entry = manager->Get(spath.Data()); if (!entry) - return NULL; + return nullptr; entry->SetOwner(owner); // ::Info("GetCDBentry", Form("FOUND ENTRY @ [%p].", (void*)entry)); diff --git a/Detectors/TRD/base/macros/OCDB2CCDBTrapConfig.C b/Detectors/TRD/base/macros/OCDB2CCDBTrapConfig.C index 36c8d7aa9aa1d..5ada93ea73f99 100644 --- a/Detectors/TRD/base/macros/OCDB2CCDBTrapConfig.C +++ b/Detectors/TRD/base/macros/OCDB2CCDBTrapConfig.C @@ -82,8 +82,8 @@ using namespace o2::trd; // histograms used for extracting the mean and RMS of calibration parameters // global constants -AliCDBStorage* storage = NULL; -AliCDBManager* manager = NULL; +AliCDBStorage* storage = nullptr; +AliCDBManager* manager = nullptr; Int_t Run(0); void MakeRunListFromOCDB(const Char_t* directory, const Char_t* outfile, Bool_t fromAlien = kFALSE); AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner = kTRUE); @@ -181,7 +181,7 @@ void OCDB2CCDBTrapConfig(TString ccdbPath = "http://localhost:8080", Int_t run = 
manager->SetDefaultStorage(storageURI); manager->SetCacheFlag(kTRUE); storage = manager->GetDefaultStorage(); - AliCDBEntry* entry = NULL; + AliCDBEntry* entry = nullptr; Run = run; manager->SetRun(Run); @@ -331,16 +331,16 @@ AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner) { TString spath = path; // ::Info("GetCDBentry", Form("QUERY RUN [%d] for \"%s\".", Run, spath.Data())); - AliCDBEntry* entry(NULL); + AliCDBEntry* entry(nullptr); storage->QueryCDB(Run, spath.Data()); cout << spath.Data(); if (!storage->GetQueryCDBList()->GetEntries()) { cout << "GetCDBentry" << Form("Missing \"%s\" in run %d.", spath.Data(), Run); - return NULL; + return nullptr; } else entry = manager->Get(spath.Data()); if (!entry) - return NULL; + return nullptr; entry->SetOwner(owner); // ::Info("GetCDBentry", Form("FOUND ENTRY @ [%p].", (void*)entry)); diff --git a/Detectors/TRD/base/macros/PrintTrapConfig.C b/Detectors/TRD/base/macros/PrintTrapConfig.C index 949b8e4811b45..0fb39b52823b8 100644 --- a/Detectors/TRD/base/macros/PrintTrapConfig.C +++ b/Detectors/TRD/base/macros/PrintTrapConfig.C @@ -82,8 +82,8 @@ using namespace o2::trd; // histograms used for extracting the mean and RMS of calibration parameters // global constants -AliCDBStorage* storage = NULL; -AliCDBManager* manager = NULL; +AliCDBStorage* storage = nullptr; +AliCDBManager* manager = nullptr; Int_t Run(0); void MakeRunListFromOCDB(const Char_t* directory, const Char_t* outfile, Bool_t fromAlien = kFALSE); AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner = kTRUE); @@ -209,7 +209,7 @@ void PrintTrapConfig(Int_t run, const Char_t* storageURI = "alien://folder=/alic manager->SetDefaultStorage(storageURI); manager->SetCacheFlag(kTRUE); storage = manager->GetDefaultStorage(); - AliCDBEntry* entry = NULL; + AliCDBEntry* entry = nullptr; Run = run; std::string TRDCalBase = "TRD"; @@ -365,16 +365,16 @@ AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner) { TString spath = path; // ::Info("GetCDBentry", Form("QUERY RUN [%d] for \"%s\".", Run, spath.Data())); - AliCDBEntry* entry(NULL); + AliCDBEntry* entry(nullptr); storage->QueryCDB(Run, spath.Data()); cout << spath.Data(); if (!storage->GetQueryCDBList()->GetEntries()) { cout << "GetCDBentry" << Form("Missing \"%s\" in run %d.", spath.Data(), Run); - return NULL; + return nullptr; } else entry = manager->Get(spath.Data()); if (!entry) - return NULL; + return nullptr; entry->SetOwner(owner); // ::Info("GetCDBentry", Form("FOUND ENTRY @ [%p].", (void*)entry)); diff --git a/Detectors/TRD/base/macros/Readocdb.C b/Detectors/TRD/base/macros/Readocdb.C index c3e8c2978d789..12393fd8e3056 100644 --- a/Detectors/TRD/base/macros/Readocdb.C +++ b/Detectors/TRD/base/macros/Readocdb.C @@ -81,8 +81,8 @@ using namespace o2::trd; // histograms used for extracting the mean and RMS of calibration parameters // global constants -AliCDBStorage* storage = NULL; -AliCDBManager* manager = NULL; +AliCDBStorage* storage = nullptr; +AliCDBManager* manager = nullptr; Int_t Run(0); void MakeRunListFromOCDB(const Char_t* directory, const Char_t* outfile, Bool_t fromAlien = kFALSE); AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner = kTRUE); @@ -182,7 +182,7 @@ void UnpackGainTable(std::string& gainkey, CalOnlineGainTables* gtbl) AliTRDCalOnlineGainTable* tbl = 0; AliTRDCalOnlineGainTableROC* tblroc = 0; AliTRDCalOnlineGainTableMCM* tblmcm = 0; - AliCDBEntry* entry = NULL; + AliCDBEntry* entry = nullptr; if ((entry = GetCDBentry(Form("TRD/Calib/%s", gainkey.c_str()), 0))) { tbl = 
(AliTRDCalOnlineGainTable*)entry->GetObject(); for (int i = 0; i < 540; i++) { @@ -226,7 +226,7 @@ void Readocdb(Int_t run, const Char_t* storageURI = "alien://folder=/alice/data/ manager->SetDefaultStorage(storageURI); manager->SetCacheFlag(kTRUE); storage = manager->GetDefaultStorage(); - AliCDBEntry* entry = NULL; + AliCDBEntry* entry = nullptr; Run = run; manager->SetRun(Run); @@ -275,16 +275,16 @@ AliCDBEntry* GetCDBentry(const Char_t* path, Bool_t owner) { TString spath = path; // ::Info("GetCDBentry", Form("QUERY RUN [%d] for \"%s\".", Run, spath.Data())); - AliCDBEntry* entry(NULL); + AliCDBEntry* entry(nullptr); storage->QueryCDB(Run, spath.Data()); cout << spath.Data(); if (!storage->GetQueryCDBList()->GetEntries()) { cout << "GetCDBentry" << Form("Missing \"%s\" in run %d.", spath.Data(), Run); - return NULL; + return nullptr; } else entry = manager->Get(spath.Data()); if (!entry) - return NULL; + return nullptr; entry->SetOwner(owner); // ::Info("GetCDBentry", Form("FOUND ENTRY @ [%p].", (void*)entry)); From d66408e930a84c95c02818db722c65675a22f9c0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 11:14:18 +0100 Subject: [PATCH 098/726] Do not expose AODToHepMC.h to ROOT This is needed to fix macOS builds. Apparently ROOT injects some arrow incompatible system headers in the chain. --- Generators/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Generators/CMakeLists.txt b/Generators/CMakeLists.txt index 7112cada22686..2e5b5d022a0a5 100644 --- a/Generators/CMakeLists.txt +++ b/Generators/CMakeLists.txt @@ -104,13 +104,16 @@ if(pythia_FOUND) include/Generators/GeneratorFactory.h) endif() +list(APPEND root_headers ${headers}) if(HepMC3_FOUND) list(APPEND headers include/Generators/GeneratorHepMC.h) list(APPEND headers include/Generators/GeneratorHepMCParam.h) + list(APPEND root_headers include/Generators/GeneratorHepMC.h) + list(APPEND root_headers include/Generators/GeneratorHepMCParam.h) list(APPEND headers include/Generators/AODToHepMC.h) endif() -o2_target_root_dictionary(Generators HEADERS ${headers}) +o2_target_root_dictionary(Generators HEADERS ${root_headers}) o2_add_test_root_macro(share/external/extgen.C PUBLIC_LINK_LIBRARIES O2::Generators FairRoot::Base From ddf21d8b1556b4512786cbe9e120b31d28d74c38 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 11:16:52 +0100 Subject: [PATCH 099/726] Fix deprecation warning about implicit conversion of anonymous enum --- GPU/GPUTracking/TPCClusterFinder/GPUTPCCFCheckPadBaseline.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPU/GPUTracking/TPCClusterFinder/GPUTPCCFCheckPadBaseline.cxx b/GPU/GPUTracking/TPCClusterFinder/GPUTPCCFCheckPadBaseline.cxx index 497c63c5eb570..cfa3bb2707136 100644 --- a/GPU/GPUTracking/TPCClusterFinder/GPUTPCCFCheckPadBaseline.cxx +++ b/GPU/GPUTracking/TPCClusterFinder/GPUTPCCFCheckPadBaseline.cxx @@ -77,7 +77,7 @@ GPUd() void GPUTPCCFCheckPadBaseline::Thread<0>(int nBlocks, int nThreads, int i #else // CPU CODE - constexpr size_t ElemsInTileRow = TilingLayout>::WidthInTiles * TimebinsPerCacheline * PadsPerCacheline; + constexpr size_t ElemsInTileRow = (size_t)TilingLayout>::WidthInTiles * TimebinsPerCacheline * PadsPerCacheline; #ifndef GPUCA_NO_VC using UShort8 = Vc::fixed_size_simd; From c61a5bb25b2db0234f3a8fcdd0a40f9c86652eb9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 
11:16:53 +0100 Subject: [PATCH 100/726] DPL Proxies: add --sporadic-inputs / --sporadic-outputs option to proxies --- Framework/Utils/src/dpl-output-proxy.cxx | 11 +++++++++++ Framework/Utils/src/raw-proxy.cxx | 12 +++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/Framework/Utils/src/dpl-output-proxy.cxx b/Framework/Utils/src/dpl-output-proxy.cxx index 5a3e38572e1ac..fb70f7dfdf4ed 100644 --- a/Framework/Utils/src/dpl-output-proxy.cxx +++ b/Framework/Utils/src/dpl-output-proxy.cxx @@ -39,6 +39,10 @@ void customize(std::vector& workflowOptions) ConfigParamSpec{ "output-proxy-method", VariantType::String, "bind", {"proxy socket method: bind, connect"}}); + workflowOptions.push_back( + ConfigParamSpec{ + "sporadic-inputs", VariantType::Bool, false, {"consider all the inputs as sporadic"}}); + workflowOptions.push_back( ConfigParamSpec{ "output-proxy-address", VariantType::String, "0.0.0.0", {"address to connect / bind to"}}); @@ -74,6 +78,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& config) auto processorName = config.options().get("proxy-name"); auto inputConfig = config.options().get("dataspec"); int defaultPort = config.options().get("default-port"); + bool sporadicInputs = config.options().get("sporadic-inputs"); auto defaultTransportConfig = config.options().get("default-transport"); if (defaultTransportConfig == "zeromq") { // nothing to do for the moment @@ -87,6 +92,12 @@ WorkflowSpec defineDataProcessing(ConfigContext const& config) if (inputs.size() == 0) { throw std::runtime_error("invalid dataspec '" + inputConfig + "'"); } + // we need to set the lifetime of the inputs to sporadic if requested + if (sporadicInputs) { + for (auto& input : inputs) { + input.lifetime = Lifetime::Sporadic; + } + } // we build the default channel configuration from the binding of the first input // in order to have more than one we would need to possibility to have support for diff --git a/Framework/Utils/src/raw-proxy.cxx b/Framework/Utils/src/raw-proxy.cxx index 195e1bd61d081..e04e9879e1e0c 100644 --- a/Framework/Utils/src/raw-proxy.cxx +++ b/Framework/Utils/src/raw-proxy.cxx @@ -35,6 +35,10 @@ void customize(std::vector& workflowOptions) ConfigParamSpec{ "inject-missing-data", VariantType::Bool, false, {"inject missing data according to dataspec if not found in the input"}}); + workflowOptions.push_back( + ConfigParamSpec{ + "sporadic-outputs", VariantType::Bool, false, {"consider all the outputs as sporadic"}}); + workflowOptions.push_back( ConfigParamSpec{ "print-input-sizes", VariantType::Int, 0, {"print statistics about sizes per input spec every n TFs"}}); @@ -52,16 +56,18 @@ void customize(std::vector& workflowOptions) WorkflowSpec defineDataProcessing(ConfigContext const& config) { - std::string processorName = config.options().get("proxy-name"); - std::string outputconfig = config.options().get("dataspec"); + auto processorName = config.options().get("proxy-name"); + auto outputconfig = config.options().get("dataspec"); bool injectMissingData = config.options().get("inject-missing-data"); - unsigned int printSizes = config.options().get("print-input-sizes"); + bool sporadicOutputs = config.options().get("sporadic-outputs"); + auto printSizes = config.options().get("print-input-sizes"); bool throwOnUnmatched = config.options().get("throwOnUnmatched"); uint64_t minSHM = std::stoul(config.options().get("timeframes-shm-limit")); std::vector matchers = select(outputconfig.c_str()); Outputs readoutProxyOutput; for (auto const& matcher : matchers) { 
readoutProxyOutput.emplace_back(DataSpecUtils::asOutputSpec(matcher)); + readoutProxyOutput.back().lifetime = sporadicOutputs ? Lifetime::Sporadic : Lifetime::Timeframe; } // we use the same specs as filters in the dpl adaptor From 6af07a868e2d25a8731a47e253080c9ca9872e3f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 15:00:39 +0100 Subject: [PATCH 101/726] DPL: reset the async queue on STOP --- Framework/Core/include/Framework/AsyncQueue.h | 3 +++ Framework/Core/src/AsyncQueue.cxx | 6 ++++++ Framework/Core/src/CommonServices.cxx | 4 ++++ 3 files changed, 13 insertions(+) diff --git a/Framework/Core/include/Framework/AsyncQueue.h b/Framework/Core/include/Framework/AsyncQueue.h index 543dfd6994152..37971fbe03bfb 100644 --- a/Framework/Core/include/Framework/AsyncQueue.h +++ b/Framework/Core/include/Framework/AsyncQueue.h @@ -59,6 +59,9 @@ struct AsyncQueueHelpers { /// 2. then priority /// 3. only execute the highest (timeslice, debounce) value static void run(AsyncQueue& queue, TimesliceId oldestPossibleTimeslice); + + /// Reset the queue to its initial state + static void reset(AsyncQueue& queue); }; } // namespace o2::framework diff --git a/Framework/Core/src/AsyncQueue.cxx b/Framework/Core/src/AsyncQueue.cxx index 36d72bf1b3dd7..44115d4985e91 100644 --- a/Framework/Core/src/AsyncQueue.cxx +++ b/Framework/Core/src/AsyncQueue.cxx @@ -107,4 +107,10 @@ auto AsyncQueueHelpers::run(AsyncQueue& queue, TimesliceId oldestPossible) -> vo queue.tasks.end()); } +auto AsyncQueueHelpers::reset(AsyncQueue& queue) -> void +{ + queue.tasks.clear(); + queue.iteration = 0; +} + } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 4caddb9b98c78..66e0330841ea6 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -135,6 +135,10 @@ o2::framework::ServiceSpec CommonServices::asyncQueue() .name = "async-queue", .init = simpleServiceInit(), .configure = noConfiguration(), + .stop = [](ServiceRegistryRef services, void* service) { + auto& queue = services.get(); + AsyncQueueHelpers::reset(queue); + }, .kind = ServiceKind::Serial}; } From a510688914098b03730157d9ca0f37df11b76cd6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 15:07:34 +0100 Subject: [PATCH 102/726] DPL Utils: drop obsolete utils In the end we never used generic devices for scatter/gather because DPL provided internally such facility. 
--- Framework/Utils/CMakeLists.txt | 25 ------ Framework/Utils/src/DPLBroadcaster.cxx | 81 -------------------- Framework/Utils/src/DPLGatherer.cxx | 56 -------------- Framework/Utils/src/DPLMerger.cxx | 70 ----------------- Framework/Utils/src/DPLRouter.cxx | 54 ------------- Framework/Utils/test/DPLOutputTest.cxx | 102 ------------------------- 6 files changed, 388 deletions(-) delete mode 100644 Framework/Utils/src/DPLBroadcaster.cxx delete mode 100644 Framework/Utils/src/DPLGatherer.cxx delete mode 100644 Framework/Utils/src/DPLMerger.cxx delete mode 100644 Framework/Utils/src/DPLRouter.cxx delete mode 100644 Framework/Utils/test/DPLOutputTest.cxx diff --git a/Framework/Utils/CMakeLists.txt b/Framework/Utils/CMakeLists.txt index db234537a0b6f..fcbc53ef0e6f0 100644 --- a/Framework/Utils/CMakeLists.txt +++ b/Framework/Utils/CMakeLists.txt @@ -11,13 +11,7 @@ o2_add_library(DPLUtils SOURCES src/Utils.cxx - src/DPLBroadcaster.cxx - src/DPLGatherer.cxx - src/DPLMerger.cxx - src/DPLRouter.cxx src/RawParser.cxx - test/DPLBroadcasterMerger.cxx - test/DPLOutputTest.cxx test/RawPageTestData.cxx PUBLIC_LINK_LIBRARIES O2::Framework ROOT::Tree ROOT::TreePlayer O2::CommonUtils) @@ -37,25 +31,6 @@ o2_add_executable(output-proxy PUBLIC_LINK_LIBRARIES O2::Framework O2::DPLUtils) -o2_add_test(DPLBroadcasterMerger - SOURCES test/test_DPLBroadcasterMerger.cxx src/Utils.cxx - test/DPLBroadcasterMerger.cxx src/DPLMerger.cxx - src/DPLBroadcaster.cxx - NO_BOOST_TEST - PUBLIC_LINK_LIBRARIES O2::DPLUtils - COMPONENT_NAME DPLUtils - LABELS dplutils - COMMAND_LINE_ARGS ${DPL_WORKFLOW_TESTS_EXTRA_OPTIONS} --run) - -o2_add_test(DPLOutput - NO_BOOST_TEST - SOURCES test/test_DPLOutputTest.cxx src/Utils.cxx - test/DPLOutputTest.cxx - PUBLIC_LINK_LIBRARIES O2::DPLUtils - COMPONENT_NAME DPLUtils - LABELS long dplutils - COMMAND_LINE_ARGS ${DPL_WORKFLOW_TESTS_EXTRA_OPTIONS} --run) - o2_add_test(RootTreeWriterWorkflow NO_BOOST_TEST SOURCES test/test_RootTreeWriterWorkflow.cxx diff --git a/Framework/Utils/src/DPLBroadcaster.cxx b/Framework/Utils/src/DPLBroadcaster.cxx deleted file mode 100644 index 4ea5e5747cd42..0000000000000 --- a/Framework/Utils/src/DPLBroadcaster.cxx +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -/// \file DPLBroadcaster.cxx -/// \brief Implementation of generic DPL broadcaster, v0.1 -/// -/// \author Gabriele Gaetano Fronzé, gfronze@cern.ch - -#include "DPLUtils/Utils.h" -#include "Framework/DataProcessorSpec.h" -#include "Framework/DataProcessingHeader.h" -#include "Framework/DataRefUtils.h" -#include "Headers/DataHeader.h" - -namespace o2f = o2::framework; - -namespace o2 -{ -namespace workflows -{ - -// This is a possible implementation of a DPL compliant and generic broadcaster. -// Every other implementation should fall back to this one, after required translations. 
-o2f::DataProcessorSpec defineBroadcaster(std::string devName, o2f::InputSpec usrInput, o2f::Outputs usrOutputs, - std::function const func) -{ - return {devName, // Device name from user - o2f::Inputs{usrInput}, // User defined input as a vector of one InputSpec - usrOutputs, // user defined outputs as a vector of OutputSpecs - - o2f::AlgorithmSpec{[usrOutputs, func](o2f::InitContext&) { - // Creating shared ptrs to useful parameters - auto outputsPtr = getOutputList(usrOutputs); - auto funcPtr = std::make_shared const>(func); - - // Defining the ProcessCallback as returned object of InitCallback - return [outputsPtr, funcPtr](o2f::ProcessingContext& ctx) { - // Getting original input message and getting his size using the provided function - auto inputMsg = ctx.inputs().getByPos(0); - // Getting message size using provided std::function - auto msgSize = (*funcPtr)(inputMsg); - // Iterating over the OutputSpecs to push the input message to all the output destinations - for (const auto& itOutputs : (*outputsPtr)) { - auto& fwdMsg = ctx.outputs().newChunk(itOutputs, msgSize); - std::memcpy(fwdMsg.data(), inputMsg.payload, msgSize); - } - }; - }}}; -} - -// This is a shortcut for messages with fixed user-defined size -o2f::DataProcessorSpec defineBroadcaster(std::string devName, o2f::InputSpec usrInput, o2f::Outputs usrOutputs, - size_t fixMsgSize) -{ - // This lambda returns a fixed message size - auto funcSize = [fixMsgSize](o2f::DataRef d) -> size_t { return fixMsgSize; }; - // Callling complete implementation - return defineBroadcaster(devName, usrInput, usrOutputs, funcSize); -} - -// This is an implementation which retrieves the message size using the API -o2f::DataProcessorSpec defineBroadcaster(std::string devName, o2f::InputSpec usrInput, o2f::Outputs usrOutputs) -{ - // This lambda retrieves the message size using the API - auto funcSize = [](o2f::DataRef ref) -> size_t { - return o2::framework::DataRefUtils::getPayloadSize(ref); - }; - // Callling complete implementation - return defineBroadcaster(devName, usrInput, usrOutputs, funcSize); -} - -} // namespace workflows -} // namespace o2 diff --git a/Framework/Utils/src/DPLGatherer.cxx b/Framework/Utils/src/DPLGatherer.cxx deleted file mode 100644 index 039e6720980d1..0000000000000 --- a/Framework/Utils/src/DPLGatherer.cxx +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -/// \file DPLGatherer.cxx -/// \brief Implementation of generic DPL gatherer, v0.1 -/// -/// \author Gabriele Gaetano Fronzé, gfronze@cern.ch - -#include "DPLUtils/Utils.h" -#include "Framework/DataProcessorSpec.h" -#include "Framework/DataRefUtils.h" -#include - -namespace o2f = o2::framework; - -namespace o2 -{ -namespace workflows -{ - -// This is a possible implementation of a DPL compliant and generic gatherer -o2f::DataProcessorSpec defineGatherer(std::string devName, o2f::Inputs usrInputs, o2f::OutputSpec usrOutput) -{ - return {devName, // Device name from user - usrInputs, // User defined input as a vector of one InputSpec - o2f::Outputs{usrOutput}, // user defined outputs as a vector of OutputSpecs - - o2f::AlgorithmSpec{[usrOutput](o2f::InitContext&) { - // Creating shared ptrs to useful parameters - auto outputPtr = std::make_shared(getOutput(usrOutput)); - - // Defining the ProcessCallback as returned object of InitCallback - return [outputPtr](o2f::ProcessingContext& ctx) { - // Iterating over the Inputs to forward them on the same Output - for (const auto& itInputs : ctx.inputs()) { - // Retrieving message size from API - auto msgSize = o2::framework::DataRefUtils::getPayloadSize(itInputs); - // Allocating new chunk - auto& fwdMsg = ctx.outputs().newChunk((*outputPtr), msgSize); - // Moving the input to the output chunk - std::memmove(fwdMsg.data(), itInputs.payload, msgSize); - } - }; - }}}; -} - -} // namespace workflows -} // namespace o2 diff --git a/Framework/Utils/src/DPLMerger.cxx b/Framework/Utils/src/DPLMerger.cxx deleted file mode 100644 index 6f91df32a4cca..0000000000000 --- a/Framework/Utils/src/DPLMerger.cxx +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -/// \file DPLGatherer.cxx -/// \brief Implementation of generic DPL gatherer, v0.1 -/// -/// \author Gabriele Gaetano Fronzé, gfronze@cern.ch - -#include "DPLUtils/Utils.h" -#include "Framework/DataProcessorSpec.h" -#include "Framework/DataRefUtils.h" -#include - -namespace o2f = o2::framework; - -namespace o2 -{ -namespace workflows -{ - -// This is a possible implementation of a DPL compliant and generic gatherer -// Every other implementation should fall back to this one, after required translations. 
-o2f::DataProcessorSpec defineMerger(std::string devName, o2f::Inputs usrInputs, o2f::OutputSpec usrOutput, - std::function const mergerFunc) -{ - return {devName, // Device name from user - usrInputs, // User defined input as a vector of one InputSpec - o2f::Outputs{usrOutput}, // user defined outputs as a vector of OutputSpecs - - o2f::AlgorithmSpec{[usrOutput, mergerFunc](o2f::InitContext&) { - // Creating shared ptrs to useful parameters - auto outputPtr = std::make_shared(getOutput(usrOutput)); - auto mergerFuncPtr = std::make_shared const>(mergerFunc); - - // Defining the ProcessCallback as returned object of InitCallback - return [outputPtr, mergerFuncPtr](o2f::ProcessingContext& ctx) { - OutputBuffer outputBuffer = ctx.outputs().makeVector(*outputPtr); - // Iterating over the InputSpecs to aggregate msgs from the connected devices - for (const auto& itInputs : ctx.inputs()) { - (*mergerFuncPtr)(outputBuffer, itInputs); - } - // Adopting the buffer as new chunk - ctx.outputs().adoptContainer((*outputPtr), std::move(outputBuffer)); - }; - }}}; -} - -// This is a possible implementation of a DPL compliant and generic gatherer whit trivial messages concatenation -o2f::DataProcessorSpec defineMerger(std::string devName, o2f::Inputs usrInputs, o2f::OutputSpec usrOutput) -{ - // This lambda retrieves the payload size through the API and back-inserts it on the output buffer - auto funcMerge = [](OutputBuffer& buf, const o2f::DataRef ref) { - auto msgSize = o2::framework::DataRefUtils::getPayloadSize(ref); - buf.resize(buf.size() + msgSize); - std::copy(&(ref.payload[0]), &(ref.payload[msgSize - 1]), std::back_inserter(buf)); - }; - // Callling complete implementation - return defineMerger(devName, usrInputs, usrOutput, funcMerge); -} - -} // namespace workflows -} // namespace o2 diff --git a/Framework/Utils/src/DPLRouter.cxx b/Framework/Utils/src/DPLRouter.cxx deleted file mode 100644 index 7795cb37d4ea0..0000000000000 --- a/Framework/Utils/src/DPLRouter.cxx +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -/// \file DPLGatherer.cxx -/// \brief Implementation of generic DPL gatherer, v0.1 -/// -/// \author Gabriele Gaetano Fronzé, gfronze@cern.ch - -#include "DPLUtils/Utils.h" -#include "Framework/DataProcessorSpec.h" -#include "Framework/DataRefUtils.h" -#include - -namespace o2f = o2::framework; - -namespace o2 -{ -namespace workflows -{ -// This is a possible implementation of a DPL compliant and generic gatherer -o2f::DataProcessorSpec defineRouter(std::string devName, o2f::Inputs usrInput, o2f::Outputs usrOutputs, - std::function const mappingFunc) -{ - return {devName, // Device name from user - o2f::Inputs{usrInput}, // User defined input as a vector of one InputSpec - usrOutputs, // user defined outputs as a vector of OutputSpecs - - o2f::AlgorithmSpec{[usrOutputs, mappingFunc](o2f::InitContext&) { - // Creating shared ptrs to useful parameters - auto outputsPtr = getOutputList(usrOutputs); - auto mappingFuncPtr = std::make_shared const>(mappingFunc); - - // Defining the ProcessCallback as returned object of InitCallback - return [outputsPtr, mappingFuncPtr](o2f::ProcessingContext& ctx) { - auto inputMsg = ctx.inputs().getByPos(0); - auto msgSize = o2::framework::DataRefUtils::getPayloadSize(inputMsg); - auto& outputCh = (*outputsPtr)[(*mappingFuncPtr)(inputMsg)]; - - auto& fwdMsg = ctx.outputs().newChunk(outputCh, msgSize); - std::memcpy(fwdMsg.data(), inputMsg.payload, msgSize); - }; - }}}; -} -} // namespace workflows - -} // namespace o2 diff --git a/Framework/Utils/test/DPLOutputTest.cxx b/Framework/Utils/test/DPLOutputTest.cxx deleted file mode 100644 index df0d69c8d6710..0000000000000 --- a/Framework/Utils/test/DPLOutputTest.cxx +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019-2020 CERN and copyright holders of ALICE O2. -// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. -// All rights not expressly granted are reserved. -// -// This software is distributed under the terms of the GNU General Public -// License v3 (GPL Version 3), copied verbatim in the file "COPYING". -// -// In applying this license CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -/// \author Gabriele Gaetano Fronzé, gfronze@cern.ch - -#include "DPLOutputTest.h" -#include -#include "DPLUtils/Utils.h" -#include "Framework/DataProcessorSpec.h" -#include "random" -#include "Framework/Logger.h" -#include "Framework/ControlService.h" - -namespace o2f = o2::framework; - -namespace o2::workflows -{ - -o2f::DataProcessorSpec defineTestGenerator() -{ - return {"Generator", // Device name - {}, // No inputs for a generator - o2f::Outputs{{"TST", "ToSink", 0, o2f::Lifetime::Timeframe}}, // One simple output - - o2f::AlgorithmSpec{[](o2f::InitContext&) { - int msgCounter = 0; - auto msgCounter_shptr = std::make_shared(msgCounter); - - LOG(info) << ">>>>>>>>>>>>>> Generator initialised\n"; - - // Processing context in captured from return on InitCallback - return [msgCounter_shptr](o2f::ProcessingContext& ctx) { - int msgIndex = (*msgCounter_shptr)++; - if (msgIndex > 10) { - ctx.services().get().endOfStream(); - } - LOG(info) << ">>> MSG:" << msgIndex << "\n"; - - LOG(info) << ">>> Preparing MSG:" << msgIndex; - - auto& outputMsg = ctx.outputs().newChunk({"TST", "ToSink", 0, o2f::Lifetime::Timeframe}, - (31 + 1) * sizeof(uint32_t) / sizeof(char)); - - LOG(info) << ">>> Preparing1 MSG:" << msgIndex; - - auto payload = reinterpret_cast(outputMsg.data()); - - payload[0] = msgIndex; - - LOG(info) << ">>> Preparing2 MSG:" << msgIndex; - - for (int k = 0; k < 31; ++k) { - payload[k + 1] = (uint32_t)k; - LOG(info) << ">>>>\t" << payload[k + 1]; - } - - LOG(info) << ">>> Done MSG:" << msgIndex; - }; - }}}; -} - -o2f::DataProcessorSpec defineTestSink() -{ - return {"Sink", // Device name - o2f::Inputs{{"input", "TST", "ToSink", 0, o2f::Lifetime::Transient}}, // No inputs, for the moment - {}, - - o2f::AlgorithmSpec{[](o2f::InitContext&) { - LOG(info) << ">>>>>>>>>>>>>> Sink initialised\n"; - - // Processing context in captured from return on InitCallback - return [](o2f::ProcessingContext& ctx) { - auto inputMsg = ctx.inputs().getByPos(0); - auto payload = reinterpret_cast(inputMsg.payload); - - LOG(info) << "Received message containing" << payload[0] << "elements\n"; - for (int j = 0; j < payload[0]; ++j) { - LOG(info) << payload[j]; - } - }; - }}}; -} - -o2::framework::WorkflowSpec DPLOutputTest() -{ - auto lspec = o2f::WorkflowSpec(); - - // A generator of data - lspec.emplace_back(defineTestGenerator()); - lspec.emplace_back(defineTestSink()); - return std::move(lspec); -} - -} // namespace o2::workflows From 8b0a4b0c9253125bcdcd5f0d0e50ccc35cd9b602 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 15:40:08 +0100 Subject: [PATCH 103/726] Do not expose AODToHepMC.h to ROOT This is needed to fix macOS builds. Apparently ROOT injects some arrow incompatible system headers in the chain. 
--- Generators/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Generators/CMakeLists.txt b/Generators/CMakeLists.txt index 7112cada22686..587696bd52e53 100644 --- a/Generators/CMakeLists.txt +++ b/Generators/CMakeLists.txt @@ -107,10 +107,10 @@ endif() if(HepMC3_FOUND) list(APPEND headers include/Generators/GeneratorHepMC.h) list(APPEND headers include/Generators/GeneratorHepMCParam.h) - list(APPEND headers include/Generators/AODToHepMC.h) endif() -o2_target_root_dictionary(Generators HEADERS ${headers}) +o2_target_root_dictionary(Generators HEADERS ${headers} + $<$:include/Generators/AODToHepMC.h>) o2_add_test_root_macro(share/external/extgen.C PUBLIC_LINK_LIBRARIES O2::Generators FairRoot::Base From b6eba75a1f405b207c2a788db2d5c5075c3d1179 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:16:03 +0100 Subject: [PATCH 104/726] Do not expose AODToHepMC.h to ROOT This is needed to fix macOS builds. Apparently ROOT injects some arrow incompatible system headers in the chain. --- Generators/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/Generators/CMakeLists.txt b/Generators/CMakeLists.txt index 7112cada22686..f5ab384cef99a 100644 --- a/Generators/CMakeLists.txt +++ b/Generators/CMakeLists.txt @@ -107,7 +107,6 @@ endif() if(HepMC3_FOUND) list(APPEND headers include/Generators/GeneratorHepMC.h) list(APPEND headers include/Generators/GeneratorHepMCParam.h) - list(APPEND headers include/Generators/AODToHepMC.h) endif() o2_target_root_dictionary(Generators HEADERS ${headers}) From d629335c2717f2c338e3e374e30969e424e0c6bd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:45:15 +0100 Subject: [PATCH 105/726] DPL: complain if inputs and outputs are incompatible --- Framework/Core/src/CommonDataProcessors.cxx | 4 +++- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/WorkflowHelpers.cxx | 26 +++++++++++++++++++++ Framework/Core/src/WorkflowHelpers.h | 8 +++++++ 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 74e020595b345..e9cdc97e316ca 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -259,9 +259,11 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector port Moreover for each (me, other, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index cd9c3913de228..a731d7065cc4a 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1233,4 +1233,30 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons } #pragma diagnostic pop +void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs) +{ + // Iterate over all the edges. + // Get the input lifetime and the output lifetime. + // Output lifetime must be Timeframe if the input lifetime is Timeframe. + std::string errors; + for (auto& edge : edges) { + auto& producer = workflow[edge.producer]; + auto& consumer = workflow[edge.consumer]; + auto& output = outputs[edge.outputGlobalIndex]; + auto& input = consumer.inputs[edge.consumerInputIndex]; + // We cannot simply say "!Timeframe" because we want to allow + // e.g. 
Enumeration => Timeframe + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors += fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + } + } + if (!errors.empty()) { + throw std::runtime_error(errors); + } +} + } // namespace o2::framework diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index a167e99d4768c..67661dbdb979a 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -227,6 +227,14 @@ struct WorkflowHelpers { /// returns only dangling outputs static std::vector computeDanglingOutputs(WorkflowSpec const& workflow); + + /// Validate that the nodes at the ends of the edges of the graph + /// are actually compatible with each other. + /// For example we should make sure that Lifetime::Timeframe inputs of + /// one node is not connected to an Output of Lifetime::Sporadic of another node. + static void validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs); }; } // namespace o2::framework From fe23d4d49998845c84be7f54277b9733dbab6662 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 15 Nov 2023 10:32:03 +0100 Subject: [PATCH 106/726] DPL: fix typo in message --- Framework/Core/src/TimesliceIndex.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/TimesliceIndex.cxx b/Framework/Core/src/TimesliceIndex.cxx index d9b4eec68f2af..267341eafd105 100644 --- a/Framework/Core/src/TimesliceIndex.cxx +++ b/Framework/Core/src/TimesliceIndex.cxx @@ -126,7 +126,7 @@ TimesliceIndex::OldestInputInfo TimesliceIndex::setOldestPossibleInput(Timeslice { // Each channel oldest possible input must be monotoically increasing. if (timestamp.value < mChannels[channel.value].oldestForChannel.value) { - LOG(error) << "Received bogus oldest possible timeslice " << timestamp.value << " for channel " << channel.value << " Excpected >= " << mChannels[channel.value].oldestForChannel.value; + LOG(error) << "Received bogus oldest possible timeslice " << timestamp.value << " for channel " << channel.value << ". 
Expected >= " << mChannels[channel.value].oldestForChannel.value; } mChannels[channel.value].oldestForChannel = timestamp; OldestInputInfo result{timestamp, channel}; From 4c2a683a141d499671a0ed5e42b474c36e145e6f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 16 Nov 2023 11:09:46 +0100 Subject: [PATCH 107/726] Make sure brace_constructible_size works in C++20 --- Framework/Core/test/test_AnalysisTask.cxx | 1 + .../include/Framework/StructToTuple.h | 2 +- .../Foundation/test/test_StructToTuple.cxx | 28 ++++++++++++++++++- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/Framework/Core/test/test_AnalysisTask.cxx b/Framework/Core/test/test_AnalysisTask.cxx index c166aec7f5b85..eae4b2cc36c16 100644 --- a/Framework/Core/test/test_AnalysisTask.cxx +++ b/Framework/Core/test/test_AnalysisTask.cxx @@ -153,6 +153,7 @@ TEST_CASE("AdaptorCompilation") { auto cfgc = makeEmptyConfigContext(); + REQUIRE(brace_constructible_size() == 1); auto task1 = adaptAnalysisTask(*cfgc, TaskName{"test1"}); REQUIRE(task1.inputs.size() == 2); REQUIRE(task1.outputs.size() == 1); diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index 4ea93e113249d..d94c0e3aaae03 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -281,7 +281,7 @@ auto homogeneous_apply_refs(L l, T&& object) { using type = std::decay_t; constexpr int nesting = B ? 1 : 0; - constexpr unsigned long numElements = brace_constructible_size() - nesting; + constexpr unsigned long numElements = brace_constructible_size() - nesting; // clang-format off if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 9) else if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 8) diff --git a/Framework/Foundation/test/test_StructToTuple.cxx b/Framework/Foundation/test/test_StructToTuple.cxx index 06cf4b47a9f7c..59685a5f1d598 100644 --- a/Framework/Foundation/test/test_StructToTuple.cxx +++ b/Framework/Foundation/test/test_StructToTuple.cxx @@ -12,7 +12,11 @@ #include #include "Framework/StructToTuple.h" -struct Foo { +struct Foo0 { +}; + +struct Foo1 { + int foo = 1; }; // FIXME: this should really struct Bar : Foo, but a c++17 bug @@ -23,6 +27,28 @@ struct Bar { int bar = 2; }; +TEST_CASE("SimpleDestructuring") +{ + Foo0 foo0; + auto t0 = o2::framework::homogeneous_apply_refs([](auto i) -> bool { return i > 1; }, foo0); + REQUIRE(t0.size() == 0); + Foo1 foo1; + auto t1 = o2::framework::homogeneous_apply_refs([](auto i) -> bool { return i > 1; }, foo1); + + REQUIRE(t1.size() == 1); + + // Should work with refs as well. When moving to C++20 this was + // not the case initially. 
+ Foo1 const&& foo1ref = std::move(foo1); + auto t1ref = o2::framework::homogeneous_apply_refs([](auto i) -> bool { return i > 1; }, foo1ref); + REQUIRE(t1ref.size() == 1); + + Bar bar; + auto t = o2::framework::homogeneous_apply_refs([](auto i) -> bool { return i > 1; }, bar); + REQUIRE(t[0] == false); + REQUIRE(t[1] == true); +} + /// Largest supported struct struct FooMax { int foo01 = 1; From 73eddebedd8bd3dfb70df62c239046c0816d059c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 16 Nov 2023 11:53:27 +0100 Subject: [PATCH 108/726] DPL GUI: do not hang if quit requested but some devices are stuck --- Framework/Core/src/runDataProcessing.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 86cc15299e8f3..dbd626c32e568 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1073,7 +1073,7 @@ void gui_callback(uv_timer_s* ctx) if (gui->plugin->supportsDeferredClose()) { // For now, there is nothing for which we want to defer the close // so if the flag is set, we simply exit - if (*(gui->guiQuitRequested)) { + if (*(gui->guiQuitRequested) && *(gui->allChildrenGone)) { return; } void* draw_data = nullptr; From 7f758d447a4f273ce395a2e4c03a68fee0f53a0b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 16 Nov 2023 11:53:27 +0100 Subject: [PATCH 109/726] DPL GUI: add GUI to list devices preventing quitting --- Framework/Core/include/Framework/DebugGUI.h | 4 +- Framework/Core/src/runDataProcessing.cxx | 6 +- .../GUISupport/src/FrameworkGUIDebugger.cxx | 110 ++++++++++++------ .../GUISupport/src/FrameworkGUIDebugger.h | 4 +- Framework/GUISupport/src/Plugin.cxx | 5 +- 5 files changed, 87 insertions(+), 42 deletions(-) diff --git a/Framework/Core/include/Framework/DebugGUI.h b/Framework/Core/include/Framework/DebugGUI.h index c7a611d50a1d4..109fd36e27511 100644 --- a/Framework/Core/include/Framework/DebugGUI.h +++ b/Framework/Core/include/Framework/DebugGUI.h @@ -26,6 +26,7 @@ namespace o2::framework { struct ServiceRegistry; +struct GuiCallbackContext; /// Plugin interface for DPL GUIs. 
struct DebugGUI { virtual std::function getGUIDebugger(std::vector const& infos, @@ -35,7 +36,8 @@ struct DebugGUI { std::vector const& metricsInfos, o2::framework::DriverInfo const& driverInfo, std::vector& controls, - o2::framework::DriverControl& driverControl) = 0; + o2::framework::DriverControl& driverControl, + GuiCallbackContext& context) = 0; virtual void updateMousePos(float x, float y) = 0; virtual void updateMouseButton(bool isClicked) = 0; virtual void updateMouseWheel(int direction) = 0; diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index dbd626c32e568..f31629d90416b 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1302,7 +1302,8 @@ int runStateMachine(DataProcessorSpecs const& workflow, DebugGUI* debugGUI = nullptr; void* window = nullptr; - decltype(debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl)) debugGUICallback; + GuiCallbackContext guiContext; + decltype(debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl, guiContext)) debugGUICallback; // An empty frameworkId means this is the driver, so we initialise the GUI auto initDebugGUI = []() -> DebugGUI* { @@ -1389,7 +1390,6 @@ int runStateMachine(DataProcessorSpecs const& workflow, ServiceRegistryRef ref{serviceRegistry}; ref.registerService(ServiceRegistryHelpers::handleForService(devicesManager)); - GuiCallbackContext guiContext; guiContext.plugin = debugGUI; guiContext.frameLast = uv_hrtime(); guiContext.frameLatency = &driverInfo.frameLatency; @@ -1882,7 +1882,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, uv_timer_stop(gui_timer); } - auto callback = debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl); + auto callback = debugGUI->getGUIDebugger(infos, runningWorkflow.devices, allStates, dataProcessorInfos, metricsInfos, driverInfo, controls, driverControl, guiContext); guiContext.callback = [&serviceRegistry, &driverServices, &debugGUI, &infos, &runningWorkflow, &dataProcessorInfos, &metricsInfos, &driverInfo, &controls, &driverControl, callback]() { callback(); for (auto& service : driverServices) { diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx index 35e0bc563a22e..375afef1007f9 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx @@ -16,6 +16,7 @@ #include "DebugGUI/imgui_extras.h" #include "Framework/DriverControl.h" #include "Framework/DriverInfo.h" +#include "Framework/GuiCallbackContext.h" #include "Framework/DeviceMetricsHelper.h" #include "Framework/DeviceMetricsInfo.h" #include "FrameworkGUIDeviceInspector.h" @@ -506,7 +507,7 @@ void displayDeviceMetrics(const char* label, case MetricsDisplayStyle::Lines: { auto xAxisFlags = ImPlotAxisFlags_None; auto yAxisFlags = ImPlotAxisFlags_LockMin; - //ImPlot::FitNextPlotAxes(true, true, true, true); + // ImPlot::FitNextPlotAxes(true, true, true, true); if (ImPlot::BeginPlot("##Some plot", {-1, -1}, axisFlags)) { ImPlot::SetupAxes("time", "value", xAxisFlags, yAxisFlags); ImPlot::SetupAxisFormat(ImAxis_Y1, formatSI, nullptr); @@ -1008,6 +1009,71 @@ void popWindowColorDueToStatus() ImGui::PopStyleVar(1); } +int debugPID(int pid) +{ + std::string pidStr = 
std::to_string(pid); + setenv("O2DEBUGGEDPID", pidStr.c_str(), 1); +#ifdef __APPLE__ + std::string defaultAppleDebugCommand = + "osascript -e 'tell application \"Terminal\"'" + " -e 'activate'" + " -e 'do script \"lldb -p \" & (system attribute \"O2DEBUGGEDPID\") & \"; exit\"'" + " -e 'end tell'"; + setenv("O2DPLDEBUG", defaultAppleDebugCommand.c_str(), 0); +#else + setenv("O2DPLDEBUG", "xterm -hold -e gdb attach $O2DEBUGGEDPID &", 0); +#endif + return system(getenv("O2DPLDEBUG")); +} + +int profilePID(int pid) +{ + std::string pidStr = std::to_string(pid); + setenv("O2PROFILEDPID", pidStr.c_str(), 1); +#ifdef __APPLE__ + auto defaultAppleProfileCommand = fmt::format( + "osascript -e 'tell application \"Terminal\"'" + " -e 'activate'" + " -e 'do script \"xcrun xctrace record --output dpl-profile-{0}.trace" + " --instrument os_signpost --time-limit 30s --template Time\\\\ Profiler --attach {0} " + " && open dpl-profile-{0}.trace && exit\"'" + " -e 'end tell'", + pid); + std::cout << defaultAppleProfileCommand << std::endl; + setenv("O2DPLPROFILE", defaultAppleProfileCommand.c_str(), 0); +#else + setenv("O2DPLPROFILE", "xterm -hold -e perf record -a -g -p $O2PROFILEDPID > perf-$O2PROFILEDPID.data &", 0); +#endif + return system(getenv("O2DPLPROFILE")); +} + +/// Show a window with the list of devices which have not yet exited +/// and provide a few controls to interact with them. +void displayShutdownDialog(std::vector const& specs, std::vector const& infos) +{ + for (size_t i = 0; i < infos.size(); ++i) { + auto& info = infos[i]; + if (info.active == false) { + continue; + } + auto& spec = specs[i]; + ImGui::Text("Unable to quit due to the following devices:"); + ImGui::Text("%s (%d).", spec.name.c_str(), info.pid); + ImGui::SameLine(); + if (ImGui::Button("Force quit")) { + kill(info.pid, SIGKILL); + } + ImGui::SameLine(); + if (ImGui::Button("Debug")) { + debugPID(info.pid); + } + ImGui::SameLine(); + if (ImGui::Button("Profile")) { + profilePID(info.pid); + } + } +} + /// Display information window about the driver /// and its state. 
void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverControl) @@ -1035,42 +1101,12 @@ void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverContro } ImGui::SameLine(); if (ImGui::Button("Debug driver")) { - std::string pidStr = std::to_string(pid); - setenv("O2DEBUGGEDPID", pidStr.c_str(), 1); -#ifdef __APPLE__ - std::string defaultAppleDebugCommand = - "osascript -e 'tell application \"Terminal\"'" - " -e 'activate'" - " -e 'do script \"lldb -p \" & (system attribute \"O2DEBUGGEDPID\") & \"; exit\"'" - " -e 'end tell'"; - setenv("O2DPLDEBUG", defaultAppleDebugCommand.c_str(), 0); -#else - setenv("O2DPLDEBUG", "xterm -hold -e gdb attach $O2DEBUGGEDPID &", 0); -#endif - int retVal = system(getenv("O2DPLDEBUG")); - (void)retVal; + debugPID(pid); } ImGui::SameLine(); if (ImGui::Button("Profile")) { - std::string pidStr = std::to_string(pid); - setenv("O2PROFILEDPID", pidStr.c_str(), 1); -#ifdef __APPLE__ - auto defaultAppleProfileCommand = fmt::format( - "osascript -e 'tell application \"Terminal\"'" - " -e 'activate'" - " -e 'do script \"xcrun xctrace record --output dpl-profile-{0}.trace" - " --instrument os_signpost --time-limit 30s --template Time\\\\ Profiler --attach {0} " - " && open dpl-profile-{0}.trace && exit\"'" - " -e 'end tell'", - pid); - std::cout << defaultAppleProfileCommand << std::endl; - setenv("O2DPLPROFILE", defaultAppleProfileCommand.c_str(), 0); -#else - setenv("O2DPLPROFILE", "xterm -hold -e perf record -a -g -p $O2PROFILEDPID > perf-$O2PROFILEDPID.data &", 0); -#endif - int retVal = system(getenv("O2DPLPROFILE")); - (void)retVal; + profilePID(pid); } // FIXME: this should really be a combo box @@ -1110,7 +1146,8 @@ std::function getGUIDebugger(std::vector const& infos, std::vector const& metricsInfos, DriverInfo const& driverInfo, std::vector& controls, - DriverControl& driverControl) + DriverControl& driverControl, + GuiCallbackContext& gui) { static gui::WorkspaceGUIState globalGUIState; gui::WorkspaceGUIState& guiState = globalGUIState; @@ -1141,7 +1178,7 @@ std::function getGUIDebugger(std::vector const& infos, std::vector driverNodesInfos; driverNodesInfos.push_back(TopologyNodeInfo{"driver"}); - return [&infos, &devices, &allStates, &metadata, &controls, &metricsInfos, &driverInfo, &driverControl, deviceNodesInfos, driverNodesInfos]() { + return [&infos, &devices, &allStates, &metadata, &controls, &metricsInfos, &driverInfo, &driverControl, &gui, deviceNodesInfos, driverNodesInfos]() { ImGuiStyle& style = ImGui::GetStyle(); style.FrameRounding = 0.; style.WindowRounding = 0.; @@ -1158,6 +1195,9 @@ std::function getGUIDebugger(std::vector const& infos, metricsStore.specs[DRIVER_METRICS] = gsl::span(driverNodesInfos); displayMetrics(guiState, driverInfo, infos, metadata, controls, metricsStore); displayDriverInfo(driverInfo, driverControl); + if (*(gui.guiQuitRequested) && !*(gui.allChildrenGone)) { + displayShutdownDialog(devices, infos); + } int windowPosStepping = (ImGui::GetIO().DisplaySize.y - 500) / guiState.devices.size(); diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.h b/Framework/GUISupport/src/FrameworkGUIDebugger.h index 5de5e350ddf8d..98bb50b07fd44 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.h +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.h @@ -26,6 +26,7 @@ namespace o2::framework class DriverInfo; class DriverControl; +class GuiCallbackContext; namespace gui { @@ -37,7 +38,8 @@ std::function getGUIDebugger(std::vector const& infos, std::vector const& metricsInfos, DriverInfo 
const& driverInfo, std::vector& controls, - DriverControl& driverControl); + DriverControl& driverControl, + GuiCallbackContext& context); void updateMousePos(float x, float y); void updateMouseButton(bool clicked); diff --git a/Framework/GUISupport/src/Plugin.cxx b/Framework/GUISupport/src/Plugin.cxx index 7a356f6e55d9b..022b74174f632 100644 --- a/Framework/GUISupport/src/Plugin.cxx +++ b/Framework/GUISupport/src/Plugin.cxx @@ -36,9 +36,10 @@ struct ImGUIDebugGUI : o2::framework::DebugGUI { std::vector const& metricsInfos, DriverInfo const& driverInfo, std::vector& controls, - DriverControl& driverControl) override + DriverControl& driverControl, + GuiCallbackContext& context) override { - return o2::framework::gui::getGUIDebugger(infos, devices, allStates, metadata, metricsInfos, driverInfo, controls, driverControl); + return o2::framework::gui::getGUIDebugger(infos, devices, allStates, metadata, metricsInfos, driverInfo, controls, driverControl, context); } void updateMousePos(float x, float y) override From 0b08540a5f9c195dd0da911381cd817359f3a75e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 16 Nov 2023 11:53:27 +0100 Subject: [PATCH 110/726] DPL GUI: allow GUI to move between different states --- Framework/Core/src/DataProcessingDevice.cxx | 49 ++++++++++++++++++- Framework/Core/src/WSDriverClient.cxx | 24 +++++++++ Framework/Core/src/runDataProcessing.cxx | 13 ++++- .../GUISupport/src/FrameworkGUIDebugger.cxx | 2 +- .../src/FrameworkGUIDeviceInspector.cxx | 27 +++++++++- .../src/FrameworkGUIDeviceInspector.h | 5 +- .../src/FrameworkGUIDevicesGraph.cxx | 5 +- .../GUISupport/src/FrameworkGUIDevicesGraph.h | 8 ++- 8 files changed, 123 insertions(+), 10 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 4d0cac7087611..c9a01d07449e4 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -25,6 +25,7 @@ #include "Framework/DispatchPolicy.h" #include "Framework/DispatchControl.h" #include "Framework/DanglingContext.h" +#include "Framework/DriverInfo.h" #include "Framework/DomainInfoHeader.h" #include "Framework/DriverClient.h" #include "Framework/EndOfStreamContext.h" @@ -160,16 +161,60 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi std::function stateWatcher = [this, ®istry = mServiceRegistry](const fair::mq::State state) -> void { auto ref = ServiceRegistryRef{registry, ServiceRegistry::globalDeviceSalt()}; + auto controlKind = this->GetConfig()->GetPropertyAsString("control"); auto& deviceState = ref.get(); auto& control = ref.get(); auto& callbacks = ref.get(); - control.notifyDeviceState(fair::mq::GetStateName(state)); + auto stateName = fair::mq::GetStateName(state); + control.notifyDeviceState(stateName); callbacks.call(ServiceRegistryRef{ref}, (int)state); + LOG(detail) << "In state watcher callback " << stateName; + + // If the termination policy is not to wait, we simply ignore all + // user imposed state changes and keep running until we are done. 
+ if (controlKind != "gui") { + return; + } + + static bool runningOnce = false; if (deviceState.nextFairMQState.empty() == false) { + LOG(detail) << "State change requested, changing state to " << deviceState.nextFairMQState.back(); auto state = deviceState.nextFairMQState.back(); - (void)this->ChangeState(state); + bool changed = this->ChangeState(state); + if (!changed) { + LOG(error) << "Failed to change state to " << state; + } deviceState.nextFairMQState.pop_back(); + } else if (state == fair::mq::State::Running && deviceState.nextFairMQState.empty()) { + LOGP(detail, "Device is running and no transition expected. We are done."); + deviceState.transitionHandling = TransitionHandlingState::NoTransition; + } else { + while (runningOnce && deviceState.nextFairMQState.empty() && this->NewStatePending() == false) { + LOG(detail) << "No state change requested, waiting for next state change " << this->NewStatePending(); + if (stateName == "EXITING") { + // Send ctrl c to ourselves. To bad FairMQ does not seem to exit when + // reaching the EXITING state. + kill(getpid(), SIGTERM); + return; + } + uv_run(deviceState.loop, UV_RUN_ONCE); + LOG(detail) << "Woke up from event loop"; + } + if (runningOnce && deviceState.nextFairMQState.empty() == false) { + LOG(detail) << "State change requested, changing state to " << deviceState.nextFairMQState.back(); + auto state = deviceState.nextFairMQState.back(); + bool changed = this->ChangeState(state); + if (!changed) { + LOG(error) << "Failed to change state to " << state; + } + deviceState.nextFairMQState.pop_back(); + } + LOG(detail) << "Exiting callback for state " << state; + } + if (runningOnce == false && state == fair::mq::State::Running) { + LOG(detail) << "First iteration, next time we start the event loop"; + runningOnce = true; } }; diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index d4ed77b9a004e..91f011e61cda3 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -15,6 +15,8 @@ #include "Framework/ServiceRegistry.h" #include "Framework/DeviceSpec.h" #include "DriverClientContext.h" +#include "Framework/RawDeviceService.h" +#include "Device.h" #include "DPLWebSocket.h" #include #include @@ -134,6 +136,28 @@ void on_connect(uv_connect_t* connection, int status) state.nextFairMQState.emplace_back("STOP"); }); + client->observe("/shutdown", [ref = context->ref](std::string_view) { + auto currentStateName = ref.get().device()->GetCurrentStateName(); + LOGP(info, "Received shutdown request while in {}", currentStateName); + + auto& state = ref.get(); + state.nextFairMQState.emplace_back("END"); + if (currentStateName == "IDLE") { + return; + } + state.nextFairMQState.emplace_back("AUTO"); + state.nextFairMQState.emplace_back("RESET DEVICE"); + if (currentStateName == "DEVICE READY") { + return; + } + state.nextFairMQState.emplace_back("AUTO"); + state.nextFairMQState.emplace_back("RESET TASK"); + if (currentStateName == "READY") { + return; + } + state.nextFairMQState.emplace_back("STOP"); + }); + client->observe("/trace", [ref = context->ref](std::string_view cmd) { auto& state = ref.get(); static constexpr int prefixSize = std::string_view{"/trace "}.size(); diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index f31629d90416b..f89b5f95693a6 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -29,6 +29,7 @@ #include "Framework/DeviceMetricsInfo.h" #include 
"Framework/DeviceMetricsHelper.h" #include "Framework/DeviceConfigInfo.h" +#include "Framework/DeviceController.h" #include "Framework/DeviceSpec.h" #include "Framework/DeviceState.h" #include "Framework/DeviceConfig.h" @@ -2068,7 +2069,17 @@ int runStateMachine(DataProcessorSpecs const& workflow, // We send SIGCONT to make sure stopped children are resumed killChildren(infos, SIGCONT); // We send SIGTERM to make sure we do the STOP transition in FairMQ - killChildren(infos, SIGTERM); + if (driverInfo.processingPolicies.termination == TerminationPolicy::WAIT) { + for (size_t di = 0; di < infos.size(); ++di) { + auto& info = infos[di]; + auto& control = controls[di]; + if (info.active == true) { + control.controller->write("/shutdown", strlen("/shutdown")); + } + } + } else { + killChildren(infos, SIGTERM); + } // We have a timer to send SIGUSR1 to make sure we advance all devices // in a timely manner. force_step_timer.data = &infos; diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx index 375afef1007f9..cc5180093ec94 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx @@ -1185,7 +1185,7 @@ std::function getGUIDebugger(std::vector const& infos, style.Colors[ImGuiCol_WindowBg] = ImVec4(0x1b / 255.f, 0x1b / 255.f, 0x1b / 255.f, 1.00f); style.Colors[ImGuiCol_ScrollbarBg] = ImVec4(0x1b / 255.f, 0x1b / 255.f, 0x1b / 255.f, 1.00f); - showTopologyNodeGraph(guiState, infos, devices, allStates, metadata, controls, metricsInfos); + showTopologyNodeGraph(guiState, infos, devices, allStates, metadata, controls, metricsInfos, driverInfo.processingPolicies.termination); AllMetricsStore metricsStore; diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index a82753eb5af1f..9febb93858cb8 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -11,6 +11,7 @@ #include "FrameworkGUIDeviceInspector.h" #include "Framework/DataProcessorInfo.h" +#include "Framework/ProcessingPolicies.h" #include "Framework/DeviceControl.h" #include "Framework/DeviceSpec.h" @@ -251,7 +252,8 @@ void displayDeviceInspector(DeviceSpec const& spec, DataProcessingStates const& states, DeviceMetricsInfo const& metrics, DataProcessorInfo const& metadata, - DeviceControl& control) + DeviceControl& control, + TerminationPolicy terminationPolicy) { ImGui::Text("Name: %s", spec.name.c_str()); ImGui::Text("Executable: %s", metadata.executable.c_str()); @@ -340,12 +342,33 @@ void displayDeviceInspector(DeviceSpec const& spec, } if (control.requestedState > info.providedState) { - ImGui::Text(ICON_FA_CLOCK_O); + ImGui::TextUnformatted(ICON_FA_CLOCK_O "Requested transition in progress"); } else { + // We only allow navigation if the termination policy is "WAIT" + ImGui::BeginDisabled(terminationPolicy == TerminationPolicy::QUIT); if (ImGui::Button("Restart")) { control.requestedState = info.providedState + 1; control.controller->write("/restart", strlen("/restart")); } + if (info.deviceState == "RUNNING") { + ImGui::SameLine(); + if (ImGui::Button(ICON_FA_STOP)) { + control.requestedState = info.providedState + 1; + control.controller->write("/stop", strlen("/stop")); + } + } else if (info.deviceState == "READY") { + ImGui::SameLine(); + if (ImGui::Button(ICON_FA_PLAY)) { + control.requestedState = info.providedState + 1; + control.controller->write("/start", 
strlen("/start")); + } + ImGui::SameLine(); + if (ImGui::Button(ICON_FA_POWER_OFF)) { + control.requestedState = info.providedState + 1; + control.controller->write("/shutdown", strlen("/shutdown")); + } + } + ImGui::EndDisabled(); } } diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h index 8dac3204ea3ab..b69c6e9286215 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.h @@ -18,12 +18,15 @@ struct DeviceInfo; struct DeviceMetricsInfo; struct DataProcessorInfo; struct DataProcessingStates; +enum struct TerminationPolicy; namespace gui { /// Helper to display information about a device -void displayDeviceInspector(DeviceSpec const& spec, DeviceInfo const& info, DataProcessingStates const& states, DeviceMetricsInfo const& metrics, DataProcessorInfo const& metadata, DeviceControl& control); +void displayDeviceInspector(DeviceSpec const& spec, DeviceInfo const& info, DataProcessingStates const& states, + DeviceMetricsInfo const& metrics, DataProcessorInfo const& metadata, DeviceControl& control, + TerminationPolicy TerminationPolicy); } // namespace gui } // namespace o2::framework diff --git a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx index 0b6ca07a9c86d..639122153e599 100644 --- a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.cxx @@ -365,7 +365,8 @@ void showTopologyNodeGraph(WorkspaceGUIState& state, std::vector const& allStates, std::vector const& metadata, std::vector& controls, - std::vector const& metricsInfos) + std::vector const& metricsInfos, + enum TerminationPolicy terminationPolicy) { ImGui::SetNextWindowPos(ImVec2(0, 0), 0); if (state.bottomPaneVisible) { @@ -893,7 +894,7 @@ void showTopologyNodeGraph(WorkspaceGUIState& state, auto& metadatum = metadata[group.metadataId]; if (state.rightPaneVisible) { - gui::displayDeviceInspector(spec, info, states, metrics, metadatum, control); + gui::displayDeviceInspector(spec, info, states, metrics, metadatum, control, terminationPolicy); } } else { ImGui::TextWrapped("Select a node in the topology to display information about it"); diff --git a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h index 7950f1893ebba..d53cdf26dd5a8 100644 --- a/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h +++ b/Framework/GUISupport/src/FrameworkGUIDevicesGraph.h @@ -19,6 +19,11 @@ #include +namespace o2::framework +{ +enum struct TerminationPolicy; +} // namespace o2::framework + namespace o2::framework::gui { @@ -30,7 +35,8 @@ void showTopologyNodeGraph(WorkspaceGUIState& state, std::vector const& allStates, std::vector const& metadata, std::vector& controls, - std::vector const& metricsInfos); + std::vector const& metricsInfos, + TerminationPolicy terminationPolicy); } // namespace o2::framework::gui From 354819d1d8ad0954178d9aff29603ef23459b328 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 17 Nov 2023 11:01:54 +0100 Subject: [PATCH 111/726] Misc formatting cleanups --- .../MemoryResources/test/testMemoryResources.cxx | 2 +- Framework/Core/test/test_FairMQ.cxx | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/DataFormats/MemoryResources/test/testMemoryResources.cxx b/DataFormats/MemoryResources/test/testMemoryResources.cxx index 
592e76c8ef1f4..a77b291568701 100644 --- a/DataFormats/MemoryResources/test/testMemoryResources.cxx +++ b/DataFormats/MemoryResources/test/testMemoryResources.cxx @@ -165,7 +165,7 @@ BOOST_AUTO_TEST_CASE(adoptVector_test) testData::nconstructions = 0; - //Create a bogus message + // Create a bogus message auto message = factoryZMQ->CreateMessage(3 * sizeof(testData)); auto messageAddr = message.get(); testData tmpBuf[3] = {3, 2, 1}; diff --git a/Framework/Core/test/test_FairMQ.cxx b/Framework/Core/test/test_FairMQ.cxx index 2919dd1faf0eb..a8487165739ec 100644 --- a/Framework/Core/test/test_FairMQ.cxx +++ b/Framework/Core/test/test_FairMQ.cxx @@ -44,7 +44,7 @@ bool addDataBlock(fair::mq::Parts& parts, o2::header::Stack&& inputStack, Contai template ::value, int>::type = 0> bool addDataBlock(fair::mq::Parts& parts, o2::header::Stack&& inputStack, ContainerT&& dataMessage, o2::pmr::FairMQMemoryResource* targetResource = nullptr) { - //make sure the payload size in DataHeader corresponds to message size + // make sure the payload size in DataHeader corresponds to message size using o2::header::DataHeader; auto* dataHeader = const_cast(o2::header::get(inputStack.data())); dataHeader->payloadSize = dataMessage->GetSize(); @@ -113,7 +113,7 @@ TEST_CASE("getMessage_Stack") auto allocSHM = getTransportAllocator(factorySHM.get()); REQUIRE(allocSHM != nullptr); { - //check that a message is constructed properly with the default new_delete_resource + // check that a message is constructed properly with the default new_delete_resource Stack s1{DataHeader{gDataDescriptionInvalid, gDataOriginInvalid, DataHeader::SubSpecificationType{0}}, NameHeader<9>{"somename"}}; @@ -128,7 +128,7 @@ TEST_CASE("getMessage_Stack") REQUIRE(message->GetType() == fair::mq::Transport::ZMQ); } { - //check that a message is constructed properly, cross resource + // check that a message is constructed properly, cross resource Stack s1{allocZMQ, DataHeader{gDataDescriptionInvalid, gDataOriginInvalid, DataHeader::SubSpecificationType{0}}, NameHeader<9>{"somename"}}; REQUIRE(allocZMQ->getNumberOfMessages() == 1); @@ -159,7 +159,7 @@ TEST_CASE("addDataBlockForEach_test") REQUIRE(allocZMQ); { - //simple addition of a data block from an exisiting message + // simple addition of a data block from an exisiting message fair::mq::Parts message; auto simpleMessage = factoryZMQ->CreateMessage(10); addDataBlock(message, @@ -190,7 +190,7 @@ TEST_CASE("addDataBlockForEach_test") REQUIRE(message[1].GetSize() == 2 * sizeof(elem)); ; // check the size of the buffer is set correctly - //check contents + // check contents int sum{0}; forEach(message, [&](auto header, auto data) { const int* numbers = reinterpret_cast(data.data()); @@ -198,7 +198,7 @@ TEST_CASE("addDataBlockForEach_test") }); REQUIRE(sum == 10); - //add one more data block and check total size using forEach; + // add one more data block and check total size using forEach; addDataBlock(message, Stack{allocZMQ, DataHeader{gDataDescriptionInvalid, gDataOriginInvalid, DataHeader::SubSpecificationType{0}}}, factoryZMQ->CreateMessage(10)); @@ -206,7 +206,7 @@ TEST_CASE("addDataBlockForEach_test") forEach(message, [&](auto header, auto data) { size += header.size() + data.size(); }); REQUIRE(size == sizeofDataHeader + 2 * sizeof(elem) + sizeofDataHeader + 10); - //check contents (headers) + // check contents (headers) int checkOK{0}; forEach(message, [&](auto header, auto data) { auto dh = get(header.data()); From dcc2d884828c7f126207eefade9f257295211a48 Mon Sep 17 00:00:00 2001 From: 
Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 17 Nov 2023 11:01:18 +0100 Subject: [PATCH 112/726] Avoid UUID conflicts when running with boost 1.83.0 Apparently something broke and now we get all the UUIDs to be the same. --- DataFormats/MemoryResources/test/testMemoryResources.cxx | 8 ++++---- Framework/Core/test/test_FairMQ.cxx | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/DataFormats/MemoryResources/test/testMemoryResources.cxx b/DataFormats/MemoryResources/test/testMemoryResources.cxx index a77b291568701..339f100a01108 100644 --- a/DataFormats/MemoryResources/test/testMemoryResources.cxx +++ b/DataFormats/MemoryResources/test/testMemoryResources.cxx @@ -47,7 +47,7 @@ int testData::nconstructions = 0; BOOST_AUTO_TEST_CASE(transportallocatormap_test) { - size_t session{fair::mq::tools::UuidHash()}; + size_t session{(size_t)getpid() * 1000}; fair::mq::ProgOptions config; config.SetProperty("session", std::to_string(session)); @@ -64,7 +64,7 @@ using namespace boost::container::pmr; BOOST_AUTO_TEST_CASE(allocator_test) { - size_t session{fair::mq::tools::UuidHash()}; + size_t session{(size_t)getpid() * 1000 + 1}; fair::mq::ProgOptions config; config.SetProperty("session", std::to_string(session)); @@ -102,7 +102,7 @@ BOOST_AUTO_TEST_CASE(allocator_test) BOOST_AUTO_TEST_CASE(getMessage_test) { - size_t session{fair::mq::tools::UuidHash()}; + size_t session{(size_t)getpid() * 1000 + 2}; fair::mq::ProgOptions config; config.SetProperty("session", std::to_string(session)); @@ -154,7 +154,7 @@ BOOST_AUTO_TEST_CASE(getMessage_test) BOOST_AUTO_TEST_CASE(adoptVector_test) { - size_t session{fair::mq::tools::UuidHash()}; + size_t session{(size_t)getpid() * 1000 + 3}; fair::mq::ProgOptions config; config.SetProperty("session", std::to_string(session)); diff --git a/Framework/Core/test/test_FairMQ.cxx b/Framework/Core/test/test_FairMQ.cxx index a8487165739ec..883533855126d 100644 --- a/Framework/Core/test/test_FairMQ.cxx +++ b/Framework/Core/test/test_FairMQ.cxx @@ -100,7 +100,7 @@ auto forEach(fair::mq::Parts& parts, F&& function) TEST_CASE("getMessage_Stack") { - size_t session{fair::mq::tools::UuidHash()}; + size_t session{(size_t)getpid() * 1000 + 0}; fair::mq::ProgOptions config; config.SetProperty("session", std::to_string(session)); @@ -149,7 +149,7 @@ TEST_CASE("getMessage_Stack") TEST_CASE("addDataBlockForEach_test") { - size_t session{fair::mq::tools::UuidHash()}; + size_t session{(size_t)getpid() * 1000 + 1}; fair::mq::ProgOptions config; config.SetProperty("session", std::to_string(session)); From 9f1f86e762d577cd44984a7b010cb2c43442e354 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 20 Nov 2023 23:44:21 +0100 Subject: [PATCH 113/726] DPL: actually pass session when creating Transport this might explain some sporadic crashes in the affected tests, due to session overllap. 
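A minimal sketch of the pattern applied below, assuming the shmem transport only picks up the session when the ProgOptions holding it is passed to the factory (the factory id string used here is a placeholder):

  fair::mq::ProgOptions config;
  config.SetProperty("session", std::to_string((size_t)getpid() * 1000));
  // Without the explicit config argument the factory falls back to the default
  // session, so tests running in parallel can end up on the same shmem segment.
  auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem", "my_test", &config);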
--- DataFormats/MemoryResources/test/testMemoryResources.cxx | 8 ++++---- Framework/Core/test/test_FairMQ.cxx | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/DataFormats/MemoryResources/test/testMemoryResources.cxx b/DataFormats/MemoryResources/test/testMemoryResources.cxx index 339f100a01108..264fe59b1caac 100644 --- a/DataFormats/MemoryResources/test/testMemoryResources.cxx +++ b/DataFormats/MemoryResources/test/testMemoryResources.cxx @@ -52,7 +52,7 @@ BOOST_AUTO_TEST_CASE(transportallocatormap_test) config.SetProperty("session", std::to_string(session)); auto factoryZMQ = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); - auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem"); + auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem", "transportallocatormap_test", &config); auto allocZMQ = getTransportAllocator(factoryZMQ.get()); auto allocSHM = getTransportAllocator(factorySHM.get()); BOOST_CHECK(allocZMQ != nullptr && allocSHM != allocZMQ); @@ -69,7 +69,7 @@ BOOST_AUTO_TEST_CASE(allocator_test) config.SetProperty("session", std::to_string(session)); auto factoryZMQ = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); - auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem"); + auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem", "allocator_test", &config); auto allocZMQ = getTransportAllocator(factoryZMQ.get()); auto allocSHM = getTransportAllocator(factorySHM.get()); @@ -107,7 +107,7 @@ BOOST_AUTO_TEST_CASE(getMessage_test) config.SetProperty("session", std::to_string(session)); auto factoryZMQ = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); - auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem"); + auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem", "getMessage_test", &config); auto allocZMQ = getTransportAllocator(factoryZMQ.get()); auto allocSHM = getTransportAllocator(factorySHM.get()); @@ -159,7 +159,7 @@ BOOST_AUTO_TEST_CASE(adoptVector_test) config.SetProperty("session", std::to_string(session)); auto factoryZMQ = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); - auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem"); + auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem", "adoptVector_test", &config); auto allocZMQ = getTransportAllocator(factoryZMQ.get()); auto allocSHM = getTransportAllocator(factorySHM.get()); diff --git a/Framework/Core/test/test_FairMQ.cxx b/Framework/Core/test/test_FairMQ.cxx index 883533855126d..0ccfbf58f7576 100644 --- a/Framework/Core/test/test_FairMQ.cxx +++ b/Framework/Core/test/test_FairMQ.cxx @@ -105,7 +105,7 @@ TEST_CASE("getMessage_Stack") config.SetProperty("session", std::to_string(session)); auto factoryZMQ = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); - auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem"); + auto factorySHM = fair::mq::TransportFactory::CreateTransportFactory("shmem", "getMessage_Stack", &config); REQUIRE(factorySHM != nullptr); REQUIRE(factoryZMQ != nullptr); auto allocZMQ = getTransportAllocator(factoryZMQ.get()); From a0df6d3ac79e2724c56429c2b355816484fd4b8d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 21 Nov 2023 21:34:36 +0100 Subject: [PATCH 114/726] Fix coding convention --- Generators/src/GeneratorPythia8.cxx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) 
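The convention enforced in the hunk below is that an if body takes braces even when it is a single statement; as a small sketch of the rule (daughterId is a stand-in for the variables touched in the hunk):

  // discouraged
  if (daughterId > 0)
    addId(allDaughters, daughterId);

  // required
  if (daughterId > 0) {
    addId(allDaughters, daughterId);
  }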
diff --git a/Generators/src/GeneratorPythia8.cxx b/Generators/src/GeneratorPythia8.cxx index f72a214223307..a2a83ca74b35e 100644 --- a/Generators/src/GeneratorPythia8.cxx +++ b/Generators/src/GeneratorPythia8.cxx @@ -456,10 +456,12 @@ void GeneratorPythia8::pruneEvent(Pythia8::Event& event, Select select) auto& otherMother = pruned[otherMotherIdx]; int otherDaughter1 = otherMother.daughter1(); int otherDaughter2 = otherMother.daughter2(); - if (otherDaughter1 > 0) + if (otherDaughter1 > 0) { addId(allDaughters, otherDaughter1); - if (otherDaughter2 > 0) + } + if (otherDaughter2 > 0) { addId(allDaughters, otherDaughter2); + } } // At this point, we have added all mothers of current // daughter, and all daughters of those mothers. From 169a476c5653964813d0ac4f65e800f2af1179cb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 10:49:42 +0100 Subject: [PATCH 115/726] DPL: add edge validation support after topology generation --- Framework/Core/src/CommonDataProcessors.cxx | 4 +++- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/WorkflowHelpers.cxx | 26 ++++++++++++++++++++- Framework/Core/src/WorkflowHelpers.h | 8 +++++++ 4 files changed, 38 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 74e020595b345..e9cdc97e316ca 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -259,9 +259,11 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector port Moreover for each (me, other, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index cd9c3913de228..fb9c4aa1db319 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,5 +1232,29 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } -#pragma diagnostic pop +using Validator = std::function; +void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs) +{ + std::vector defaultValidators = {}; + std::stringstream errors; + // Iterate over all the edges. + // Get the input lifetime and the output lifetime. + // Output lifetime must be Timeframe if the input lifetime is Timeframe. + bool hasErrors = false; + for (auto& edge : edges) { + DataProcessorSpec const& producer = workflow[edge.producer]; + DataProcessorSpec const& consumer = workflow[edge.consumer]; + OutputSpec const& output = outputs[edge.outputGlobalIndex]; + InputSpec const& input = consumer.inputs[edge.consumerInputIndex]; + for (auto& validator : defaultValidators) { + hasErrors |= validator(errors, producer, output, consumer, input); + } + } + if (!hasErrors) { + throw std::runtime_error(errors.str()); + } +} + } // namespace o2::framework diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index a167e99d4768c..67661dbdb979a 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -227,6 +227,14 @@ struct WorkflowHelpers { /// returns only dangling outputs static std::vector computeDanglingOutputs(WorkflowSpec const& workflow); + + /// Validate that the nodes at the ends of the edges of the graph + /// are actually compatible with each other. 
+ /// For example we should make sure that Lifetime::Timeframe inputs of + /// one node is not connected to an Output of Lifetime::Sporadic of another node. + static void validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs); }; } // namespace o2::framework From 00bb2fd7a1d64a10f62ba58cffcecb173af25042 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 10:49:42 +0100 Subject: [PATCH 116/726] DPL: edge validator for Lifetime::Sporadic --- Framework/Core/src/WorkflowHelpers.cxx | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index fb46b295e17bc..a263d1befd32a 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,6 +1232,17 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + return false; + } + return true; +} + bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) { auto isExpendable = [](DataProcessorLabel const& label) { From 1793e650827a9b96d13805aaf41e670a045c8c36 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 10:49:42 +0100 Subject: [PATCH 117/726] DPL: add edge validator for expendable tasks --- Framework/Core/src/WorkflowHelpers.cxx | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index fb9c4aa1db319..fb46b295e17bc 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,12 +1232,31 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + auto isExpendable = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + auto isResilient = [](DataProcessorLabel const& label) { + return label.value == "expendable" || label.value == "resilient"; + }; + bool producerExpendable = std::find_if(producer.labels.begin(), producer.labels.end(), isExpendable) != producer.labels.end(); + bool consumerCritical = std::find_if(consumer.labels.begin(), consumer.labels.end(), isResilient) == consumer.labels.end(); + if (producerExpendable && consumerCritical) { + errors << fmt::format("Critical consumer {} depends on expendable producer {}\n", + consumer.name, + producer.name); + return false; + } + return true; +} + using Validator = std::function; void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {}; + std::vector defaultValidators = {validateExpendable}; std::stringstream errors; // 
Iterate over all the edges. // Get the input lifetime and the output lifetime. From 66ef04a80514ea17f207dc8efb85fb0db42eca49 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 10:52:59 +0100 Subject: [PATCH 118/726] DPL: edge validator for Lifetime::Sporadic --- Framework/Core/src/WorkflowHelpers.cxx | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index fb46b295e17bc..7823070c8be76 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,6 +1232,17 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + return false; + } + return true; +} + bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) { auto isExpendable = [](DataProcessorLabel const& label) { @@ -1256,7 +1267,7 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {validateExpendable}; + std::vector defaultValidators = {validateExpendable, validateLifetime}; std::stringstream errors; // Iterate over all the edges. // Get the input lifetime and the output lifetime. From 81e010a2ee50ac1b9935c9fb6c59407422936776 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 13:04:56 +0100 Subject: [PATCH 119/726] DPL: add edge validation support after topology generation --- Framework/Core/src/CommonDataProcessors.cxx | 4 +++- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/WorkflowHelpers.cxx | 26 ++++++++++++++++++++- Framework/Core/src/WorkflowHelpers.h | 8 +++++++ 4 files changed, 38 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 74e020595b345..e9cdc97e316ca 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -259,9 +259,11 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector port Moreover for each (me, other, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index cd9c3913de228..f8d6f351869dd 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,5 +1232,29 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } -#pragma diagnostic pop +using Validator = std::function; +void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs) +{ + std::vector defaultValidators = {}; + std::stringstream errors; + // Iterate over all the edges. + // Get the input lifetime and the output lifetime. + // Output lifetime must be Timeframe if the input lifetime is Timeframe. 
+ bool hasErrors = false; + for (auto& edge : edges) { + DataProcessorSpec const& producer = workflow[edge.producer]; + DataProcessorSpec const& consumer = workflow[edge.consumer]; + OutputSpec const& output = outputs[edge.outputGlobalIndex]; + InputSpec const& input = consumer.inputs[edge.consumerInputIndex]; + for (auto& validator : defaultValidators) { + hasErrors |= validator(errors, producer, output, consumer, input); + } + } + if (hasErrors) { + throw std::runtime_error(errors.str()); + } +} + } // namespace o2::framework diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index a167e99d4768c..67661dbdb979a 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -227,6 +227,14 @@ struct WorkflowHelpers { /// returns only dangling outputs static std::vector computeDanglingOutputs(WorkflowSpec const& workflow); + + /// Validate that the nodes at the ends of the edges of the graph + /// are actually compatible with each other. + /// For example we should make sure that Lifetime::Timeframe inputs of + /// one node is not connected to an Output of Lifetime::Sporadic of another node. + static void validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs); }; } // namespace o2::framework From 54354c89a31df88e486c83424a161223d3fccccc Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 13:05:16 +0100 Subject: [PATCH 120/726] DPL: add edge validator for expendable tasks --- Framework/Core/src/WorkflowHelpers.cxx | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index f8d6f351869dd..6f198621d84a8 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,12 +1232,31 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + auto isExpendable = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + auto isResilient = [](DataProcessorLabel const& label) { + return label.value == "expendable" || label.value == "resilient"; + }; + bool producerExpendable = std::find_if(producer.labels.begin(), producer.labels.end(), isExpendable) != producer.labels.end(); + bool consumerCritical = std::find_if(consumer.labels.begin(), consumer.labels.end(), isResilient) == consumer.labels.end(); + if (producerExpendable && consumerCritical) { + errors << fmt::format("Critical consumer {} depends on expendable producer {}\n", + consumer.name, + producer.name); + return false; + } + return true; +} + using Validator = std::function; void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {}; + std::vector defaultValidators = {validateExpendable}; std::stringstream errors; // Iterate over all the edges. // Get the input lifetime and the output lifetime. 
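For context, a sketch of the labels validateExpendable above looks at, with made-up processor names; the real plumbing appears further down in this series, where the DataSampling Dispatcher gets tagged "resilient":

  DataProcessorSpec producer;
  producer.name = "qc-sampler";
  producer.labels = {{"expendable"}};  // allowed to die without stopping the run
  DataProcessorSpec consumer;
  consumer.name = "qc-checker";
  consumer.labels = {{"resilient"}};   // accepts input from an expendable device
  // A consumer carrying neither label would make the check report an error.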
From 6fd39708f508963e83d5b3463608e6f35f3f5de8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 13:05:16 +0100 Subject: [PATCH 121/726] DPL: edge validator for Lifetime::Sporadic --- Framework/Core/src/WorkflowHelpers.cxx | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 6f198621d84a8..0864632d66e59 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,6 +1232,17 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + return false; + } + return true; +} + bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) { auto isExpendable = [](DataProcessorLabel const& label) { @@ -1256,7 +1267,7 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {validateExpendable}; + std::vector defaultValidators = {validateExpendable, validateLifetime}; std::stringstream errors; // Iterate over all the edges. // Get the input lifetime and the output lifetime. From d85d40b58edea20b7b2c8e31156de0d7b55983e1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 14:37:01 +0100 Subject: [PATCH 122/726] DPL: add edge validation support after topology generation --- Framework/Core/src/CommonDataProcessors.cxx | 4 +++- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/WorkflowHelpers.cxx | 26 ++++++++++++++++++++- Framework/Core/src/WorkflowHelpers.h | 8 +++++++ 4 files changed, 38 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 74e020595b345..e9cdc97e316ca 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -259,9 +259,11 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector port Moreover for each (me, other, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index cd9c3913de228..f320e788f5e07 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,5 +1232,29 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } -#pragma diagnostic pop +using Validator = std::function; +void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs) +{ + std::vector defaultValidators = {}; + std::stringstream errors; + // Iterate over all the edges. + // Get the input lifetime and the output lifetime. + // Output lifetime must be Timeframe if the input lifetime is Timeframe. 
+ bool hasErrors = false; + for (auto& edge : edges) { + DataProcessorSpec const& producer = workflow[edge.producer]; + DataProcessorSpec const& consumer = workflow[edge.consumer]; + OutputSpec const& output = outputs[edge.outputGlobalIndex]; + InputSpec const& input = consumer.inputs[edge.consumerInputIndex]; + for (auto& validator : defaultValidators) { + hasErrors |= !validator(errors, producer, output, consumer, input); + } + } + if (hasErrors) { + throw std::runtime_error(errors.str()); + } +} + } // namespace o2::framework diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index a167e99d4768c..67661dbdb979a 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -227,6 +227,14 @@ struct WorkflowHelpers { /// returns only dangling outputs static std::vector computeDanglingOutputs(WorkflowSpec const& workflow); + + /// Validate that the nodes at the ends of the edges of the graph + /// are actually compatible with each other. + /// For example we should make sure that Lifetime::Timeframe inputs of + /// one node is not connected to an Output of Lifetime::Sporadic of another node. + static void validateEdges(WorkflowSpec const& workflow, + std::vector const& edges, + std::vector const& outputs); }; } // namespace o2::framework From 0be97542d80e2f4b0473ad38665285670c39f84b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 14:37:01 +0100 Subject: [PATCH 123/726] DPL: add edge validator for expendable tasks --- Framework/Core/src/WorkflowHelpers.cxx | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index f320e788f5e07..ee5882ccc3f64 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,12 +1232,31 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + auto isExpendable = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + auto isResilient = [](DataProcessorLabel const& label) { + return label.value == "expendable" || label.value == "resilient"; + }; + bool producerExpendable = std::find_if(producer.labels.begin(), producer.labels.end(), isExpendable) != producer.labels.end(); + bool consumerCritical = std::find_if(consumer.labels.begin(), consumer.labels.end(), isResilient) == consumer.labels.end(); + if (producerExpendable && consumerCritical) { + errors << fmt::format("Critical consumer {} depends on expendable producer {}\n", + consumer.name, + producer.name); + return false; + } + return true; +} + using Validator = std::function; void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {}; + std::vector defaultValidators = {validateExpendable}; std::stringstream errors; // Iterate over all the edges. // Get the input lifetime and the output lifetime. 
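Taken together, the contract these validation commits converge on is that a validator returns true when the edge is acceptable and false after appending a message to errors; the caller then accumulates failures as in the latest revision above:

  bool hasErrors = false;
  for (auto& validator : defaultValidators) {
    hasErrors |= !validator(errors, producer, output, consumer, input);
  }
  if (hasErrors) {
    throw std::runtime_error(errors.str());
  }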
From 8a18c306b233605b8dbb925a26652b957b7c7de8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 14:37:01 +0100 Subject: [PATCH 124/726] DPL: edge validator for Lifetime::Sporadic --- Framework/Core/src/WorkflowHelpers.cxx | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index ee5882ccc3f64..11a24d1300e0a 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,6 +1232,17 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + return false; + } + return true; +} + bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) { auto isExpendable = [](DataProcessorLabel const& label) { @@ -1256,7 +1267,7 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {validateExpendable}; + std::vector defaultValidators = {validateExpendable, validateLifetime}; std::stringstream errors; // Iterate over all the edges. // Get the input lifetime and the output lifetime. 
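As a sketch of what validateLifetime rejects, with invented binding and data type names: a Timeframe input wired to a Sporadic output is reported, while matching lifetimes pass:

  OutputSpec sporadicOut{"TST", "DATA", 0, Lifetime::Sporadic};
  InputSpec timeframeIn{"in", "TST", "DATA", 0, Lifetime::Timeframe};  // flagged by the validator
  InputSpec sporadicIn{"in", "TST", "DATA", 0, Lifetime::Sporadic};    // accepted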
From 003d2f352bb7ebf3a644a92f7b053f99e30a32f0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 23 Nov 2023 22:58:04 +0100 Subject: [PATCH 125/726] DPL: edge validator for Lifetime::Sporadic --- Framework/Core/src/WorkflowHelpers.cxx | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index ee5882ccc3f64..11a24d1300e0a 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1232,6 +1232,17 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } +bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +{ + if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { + errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", + DataSpecUtils::describe(input).c_str(), consumer.name, + DataSpecUtils::describe(output).c_str(), producer.name); + return false; + } + return true; +} + bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) { auto isExpendable = [](DataProcessorLabel const& label) { @@ -1256,7 +1267,7 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, std::vector const& edges, std::vector const& outputs) { - std::vector defaultValidators = {validateExpendable}; + std::vector defaultValidators = {validateExpendable, validateLifetime}; std::stringstream errors; // Iterate over all the edges. // Get the input lifetime and the output lifetime. From 1c4c75d661c00e06c7af63172a4e5653911acf62 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 10:17:01 +0100 Subject: [PATCH 126/726] DPL: introduce "resilient" tasks Task with label "resilient" will not care about being sorted after "expendable" tasks, even if they are not expendable themselves. --- Framework/Core/src/TopologyPolicy.cxx | 38 +++++++++++++-------- Utilities/DataSampling/src/DataSampling.cxx | 2 +- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/Framework/Core/src/TopologyPolicy.cxx b/Framework/Core/src/TopologyPolicy.cxx index e0f7a208ed4e5..fb96eff5af2fc 100644 --- a/Framework/Core/src/TopologyPolicy.cxx +++ b/Framework/Core/src/TopologyPolicy.cxx @@ -82,33 +82,41 @@ bool expendableDataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) // If we are here we do not have any data dependency, // however we strill consider a dependent on b if // a has the "expendable" label and b does not. - bool isBExpendable = false; - bool isAExpendable = false; - for (auto const& label : b.labels) { + auto checkExpendable = [](DataProcessorLabel const& label) { if (label.value == "expendable") { - isBExpendable = true; - break; + return true; } - } - for (auto const& label : a.labels) { - if (label.value == "expendable") { - isAExpendable = true; - break; + return false; + }; + // A task marked as expendable or resilient can be put after an expendable task + auto checkResilient = [](DataProcessorLabel const& label) { + if (label.value == "resilient") { + return true; } - } - // If none is expendable. We simply return false. 
+ return false; + }; + bool isBExpendable = std::find_if(b.labels.begin(), b.labels.end(), checkExpendable) != b.labels.end(); + bool isAExpendable = std::find_if(a.labels.begin(), a.labels.end(), checkExpendable) != a.labels.end(); + bool bResilient = std::find_if(b.labels.begin(), b.labels.end(), checkResilient) != b.labels.end(); + + // If none is expendable. We simply return false and sort as usual. if (!isAExpendable && !isBExpendable) { - LOGP(debug, "Neither {} nor {} are expendable. No dependency.", a.name, b.name); + LOGP(debug, "Neither {} nor {} are expendable. No dependency beyond data deps.", a.name, b.name); return false; } - // If both are expendable. We return false. + // If both are expendable. We return false and sort as usual. if (isAExpendable && isBExpendable) { LOGP(debug, "Both {} and {} are expendable. No dependency.", a.name, b.name); return false; } + // If b is expendable but b is resilient, we can keep the same order. + if (isAExpendable && bResilient) { + LOGP(debug, "{} is expendable but b is resilient, no need to add an unneeded dependency", a.name, a.name, b.name); + return false; + } // If a is expendable we consider it as if there was a dependency from a to b, - // but we still need to check if there is not one already from b to a. + // however we still need to check if there is not one already from b to a. if (isAExpendable) { LOGP(debug, "{} is expendable. Checking if there is a dependency from {} to {}.", a.name, b.name, a.name); return !dataDeps(b, a); diff --git a/Utilities/DataSampling/src/DataSampling.cxx b/Utilities/DataSampling/src/DataSampling.cxx index cd30759629c9d..fee4cf6e4ca10 100644 --- a/Utilities/DataSampling/src/DataSampling.cxx +++ b/Utilities/DataSampling/src/DataSampling.cxx @@ -95,7 +95,7 @@ void DataSampling::DoGenerateInfrastructure(Dispatcher& dispatcher, WorkflowSpec spec.inputs = dispatcher.getInputSpecs(); spec.outputs = dispatcher.getOutputSpecs(); spec.maxInputTimeslices = threads; - spec.labels = {{"DataSampling"}, {"Dispatcher"}}; + spec.labels = {{"DataSampling"}, {"Dispatcher"}, {"resilient"}}; spec.options = dispatcher.getOptions(); spec.algorithm = adaptFromTask(std::move(dispatcher)); From fa52cc05eedd2a1c1f0561a7aa811f0677e0350d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 10:17:01 +0100 Subject: [PATCH 127/726] DPL: Improve debugging of bad topologies. Print dependency as graphviz digraph when topological sort fails. --- Framework/Core/src/runDataProcessing.cxx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 86cc15299e8f3..d5da17d9b0f45 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -2590,6 +2590,11 @@ std::string debugTopoInfo(std::vector const& specs, for (auto& d : specs) { out << "- " << d.name << std::endl; } + out << "digraph G {\n"; + for (auto& e : edges) { + out << fmt::format(" \"{}\" -> \"{}\"\n", specs[e.first].name, specs[e.second].name); + } + out << "}\n"; return out.str(); } From a6b2f4c260b63aeedd1822cb4f63e5487a142f8d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 12:18:05 +0100 Subject: [PATCH 128/726] DPL: introduce "resilient" tasks Task with label "resilient" will not care about being sorted after "expendable" tasks, even if they are not expendable themselves. 
In case a non-resilient / non-expendable task ends up depending on an expendable one, complain appropriately. --- Framework/Core/src/TopologyPolicy.cxx | 38 ++++++++++++++---------- Framework/Core/src/runDataProcessing.cxx | 26 ++++++++++++++++ 2 files changed, 49 insertions(+), 15 deletions(-) diff --git a/Framework/Core/src/TopologyPolicy.cxx b/Framework/Core/src/TopologyPolicy.cxx index e0f7a208ed4e5..fb96eff5af2fc 100644 --- a/Framework/Core/src/TopologyPolicy.cxx +++ b/Framework/Core/src/TopologyPolicy.cxx @@ -82,33 +82,41 @@ bool expendableDataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) // If we are here we do not have any data dependency, // however we strill consider a dependent on b if // a has the "expendable" label and b does not. - bool isBExpendable = false; - bool isAExpendable = false; - for (auto const& label : b.labels) { + auto checkExpendable = [](DataProcessorLabel const& label) { if (label.value == "expendable") { - isBExpendable = true; - break; + return true; } - } - for (auto const& label : a.labels) { - if (label.value == "expendable") { - isAExpendable = true; - break; + return false; + }; + // A task marked as expendable or resilient can be put after an expendable task + auto checkResilient = [](DataProcessorLabel const& label) { + if (label.value == "resilient") { + return true; } - } - // If none is expendable. We simply return false. + return false; + }; + bool isBExpendable = std::find_if(b.labels.begin(), b.labels.end(), checkExpendable) != b.labels.end(); + bool isAExpendable = std::find_if(a.labels.begin(), a.labels.end(), checkExpendable) != a.labels.end(); + bool bResilient = std::find_if(b.labels.begin(), b.labels.end(), checkResilient) != b.labels.end(); + + // If none is expendable. We simply return false and sort as usual. if (!isAExpendable && !isBExpendable) { - LOGP(debug, "Neither {} nor {} are expendable. No dependency.", a.name, b.name); + LOGP(debug, "Neither {} nor {} are expendable. No dependency beyond data deps.", a.name, b.name); return false; } - // If both are expendable. We return false. + // If both are expendable. We return false and sort as usual. if (isAExpendable && isBExpendable) { LOGP(debug, "Both {} and {} are expendable. No dependency.", a.name, b.name); return false; } + // If b is expendable but b is resilient, we can keep the same order. + if (isAExpendable && bResilient) { + LOGP(debug, "{} is expendable but b is resilient, no need to add an unneeded dependency", a.name, a.name, b.name); + return false; + } // If a is expendable we consider it as if there was a dependency from a to b, - // but we still need to check if there is not one already from b to a. + // however we still need to check if there is not one already from b to a. if (isAExpendable) { LOGP(debug, "{} is expendable. 
Checking if there is a dependency from {} to {}.", a.name, b.name, a.name); return !dataDeps(b, a); diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index d5da17d9b0f45..724383ce501e6 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -2565,6 +2565,30 @@ void apply_permutation( } } +// Check if the workflow is resiliant to failures +void checkNonResiliency(std::vector const& specs, + std::vector> const& edges) +{ + auto checkExpendable = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + auto checkResilient = [](DataProcessorLabel const& label) { + return label.value == "resilient" || label.value == "expendable"; + }; + + for (auto& edge : edges) { + auto& src = specs[edge.first]; + auto& dst = specs[edge.second]; + if (std::none_of(src.labels.begin(), src.labels.end(), checkExpendable)) { + continue; + } + if (std::any_of(dst.labels.begin(), dst.labels.end(), checkResilient)) { + continue; + } + throw std::runtime_error("Workflow is not resiliant to failures. Processor " + dst.name + " gets inputs from expendable devices, but is not marked as expendable or resilient itself."); + } +} + std::string debugTopoInfo(std::vector const& specs, std::vector const& infos, std::vector> const& edges) @@ -2833,6 +2857,8 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, auto topoInfos = WorkflowHelpers::topologicalSort(physicalWorkflow.size(), &edges[0].first, &edges[0].second, sizeof(std::pair), edges.size()); if (topoInfos.size() != physicalWorkflow.size()) { + // Check missing resilincy of one of the tasks + checkNonResiliency(physicalWorkflow, edges); throw std::runtime_error("Unable to do topological sort of the resulting workflow. Do you have loops?\n" + debugTopoInfo(physicalWorkflow, topoInfos, edges)); } // Sort by layer and then by name, to ensure stability. From cc7b6d3ee550aaeccb0a2852f686f06641ce10bf Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 13:17:50 +0100 Subject: [PATCH 129/726] DPL: C++20 fixes --- Framework/Core/src/DataProcessingDevice.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 4d0cac7087611..b03904c5b3776 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2234,7 +2234,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v buffer[ai] = record.isValid(ai) ? 
'2' : '0'; } buffer[record.size()] = 0; - states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), (int)(record.size() + buffer - relayerSlotState), relayerSlotState}); + states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), .size = (int)(record.size() + buffer - relayerSlotState), .data = relayerSlotState}); }; // This is the main dispatching loop From f75ba9f4a966c8750e2cbc0cb786c92ab0b45e18 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 14:49:38 +0100 Subject: [PATCH 130/726] Drop need to include ptree in ConfigurableParam.h --- Common/SimConfig/test/testSimCutParam.cxx | 1 + Common/Utils/CMakeLists.txt | 3 +- .../include/CommonUtils/ConfigurableParam.h | 62 ++++++--------- .../CommonUtils/ConfigurableParamReaders.h | 36 +++++++++ Common/Utils/src/ConfigurableParam.cxx | 70 +++++------------ Common/Utils/src/ConfigurableParamReaders.cxx | 75 +++++++++++++++++++ .../MUON/MCH/Raw/test/testClosureCoDec.cxx | 1 + .../MCH/Simulation/test/testDigitizer.cxx | 1 + .../MCH/Triggering/test/testEventFinder.cxx | 1 + Detectors/TPC/base/test/testTPCParameters.cxx | 7 +- .../include/TPCCalibration/IDCAverageGroup.h | 1 + .../include/TPCCalibration/IDCFactorization.h | 1 + .../include/TPCCalibration/SACFactorization.h | 1 + .../calibration/src/IDCGroupingParameter.cxx | 2 + .../TPC/reconstruction/macro/makeIonTail.C | 1 + Detectors/TPC/simulation/macro/toyCluster.C | 1 + .../TPC/workflow/src/ApplyCCDBCalibSpec.cxx | 1 + macro/o2sim.C | 1 + 18 files changed, 169 insertions(+), 97 deletions(-) create mode 100644 Common/Utils/include/CommonUtils/ConfigurableParamReaders.h create mode 100644 Common/Utils/src/ConfigurableParamReaders.cxx diff --git a/Common/SimConfig/test/testSimCutParam.cxx b/Common/SimConfig/test/testSimCutParam.cxx index 7a17ae9d1f2cf..468d76429cbba 100644 --- a/Common/SimConfig/test/testSimCutParam.cxx +++ b/Common/SimConfig/test/testSimCutParam.cxx @@ -15,6 +15,7 @@ #include #include "SimConfig/SimParams.h" #include "CommonUtils/ConfigurableParam.h" +#include using namespace o2::conf; diff --git a/Common/Utils/CMakeLists.txt b/Common/Utils/CMakeLists.txt index b0fa605cb6723..7f650b973990b 100644 --- a/Common/Utils/CMakeLists.txt +++ b/Common/Utils/CMakeLists.txt @@ -14,7 +14,7 @@ o2_add_library(CommonUtils src/RootChain.cxx src/CompStream.cxx src/ShmManager.cxx src/ValueMonitor.cxx src/StringUtils.cxx - src/ConfigurableParamHelper.cxx src/ConfigurableParam.cxx src/RootSerializableKeyValueStore.cxx + src/ConfigurableParamReaders.cxx src/ConfigurableParamHelper.cxx src/ConfigurableParam.cxx src/RootSerializableKeyValueStore.cxx src/KeyValParam.cxx src/FileSystemUtils.cxx src/FIFO.cxx @@ -39,6 +39,7 @@ o2_target_root_dictionary(CommonUtils include/CommonUtils/MemFileHelper.h include/CommonUtils/ConfigurableParam.h include/CommonUtils/ConfigurableParamHelper.h + include/CommonUtils/ConfigurableParamReaders.h include/CommonUtils/ConfigurationMacroHelper.h include/CommonUtils/RootSerializableKeyValueStore.h include/CommonUtils/KeyValParam.h diff --git a/Common/Utils/include/CommonUtils/ConfigurableParam.h b/Common/Utils/include/CommonUtils/ConfigurableParam.h index 7099e37d5bc50..717a4c425fc82 100644 --- a/Common/Utils/include/CommonUtils/ConfigurableParam.h +++ b/Common/Utils/include/CommonUtils/ConfigurableParam.h @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -176,15 +176,10 @@ class ConfigurableParam 
static void printAllRegisteredParamNames(); static void printAllKeyValuePairs(bool useLogger = false); - static const std::string& getInputDir() { return sInputDir; } static const std::string& getOutputDir() { return sOutputDir; } - static void setInputDir(const std::string& d) { sInputDir = d; } static void setOutputDir(const std::string& d) { sOutputDir = d; } - static boost::property_tree::ptree readINI(std::string const& filepath); - static boost::property_tree::ptree readJSON(std::string const& filepath); - static boost::property_tree::ptree readConfigFile(std::string const& filepath); static bool configFileExists(std::string const& filepath); // writes a human readable JSON file of all parameters @@ -196,10 +191,12 @@ class ConfigurableParam template static T getValueAs(std::string key) { - if (!sIsFullyInitialized) { - initialize(); - } - return sPtree->get(key); + return [](auto* tree, const std::string& key) -> T { + if (!sIsFullyInitialized) { + initialize(); + } + return tree->template get(key); + }(sPtree, key); } template @@ -208,19 +205,21 @@ class ConfigurableParam if (!sIsFullyInitialized) { initialize(); } - assert(sPtree); - try { - auto key = mainkey + "." + subkey; - if (sPtree->get_optional(key).is_initialized()) { - sPtree->put(key, x); - auto changed = updateThroughStorageMap(mainkey, subkey, typeid(T), (void*)&x); - if (changed != EParamUpdateStatus::Failed) { - sValueProvenanceMap->find(key)->second = kRT; // set to runtime + return [&subkey, &x, &mainkey](auto* tree) -> void { + assert(tree); + try { + auto key = mainkey + "." + subkey; + if (tree->template get_optional(key).is_initialized()) { + tree->put(key, x); + auto changed = updateThroughStorageMap(mainkey, subkey, typeid(T), (void*)&x); + if (changed != EParamUpdateStatus::Failed) { + sValueProvenanceMap->find(key)->second = kRT; // set to runtime + } } + } catch (std::exception const& e) { + std::cerr << "Error in setValue (T) " << e.what() << "\n"; } - } catch (std::exception const& e) { - std::cerr << "Error in setValue (T) " << e.what() << "\n"; - } + }(sPtree); } static void setProvenance(std::string const& mainkey, std::string const& subkey, EParamProvenance p) @@ -242,25 +241,7 @@ class ConfigurableParam // specialized for std::string // which means that the type will be converted internally - static void setValue(std::string const& key, std::string const& valuestring) - { - if (!sIsFullyInitialized) { - initialize(); - } - assert(sPtree); - try { - if (sPtree->get_optional(key).is_initialized()) { - sPtree->put(key, valuestring); - auto changed = updateThroughStorageMapWithConversion(key, valuestring); - if (changed != EParamUpdateStatus::Failed) { - sValueProvenanceMap->find(key)->second = kRT; // set to runtime - } - } - } catch (std::exception const& e) { - std::cerr << "Error in setValue (string) " << e.what() << "\n"; - } - } - + static void setValue(std::string const& key, std::string const& valuestring); static void setEnumValue(const std::string&, const std::string&); static void setArrayValue(const std::string&, const std::string&); @@ -321,7 +302,6 @@ class ConfigurableParam // (stored as a vector of pairs ) static EnumRegistry* sEnumRegistry; - static std::string sInputDir; static std::string sOutputDir; void setRegisterMode(bool b) { sRegisterMode = b; } diff --git a/Common/Utils/include/CommonUtils/ConfigurableParamReaders.h b/Common/Utils/include/CommonUtils/ConfigurableParamReaders.h new file mode 100644 index 0000000000000..d5ecd6cb97f7a --- /dev/null +++ 
b/Common/Utils/include/CommonUtils/ConfigurableParamReaders.h @@ -0,0 +1,36 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#ifndef O2_COMMON_UTILS_CONFIGURABLEPARAMREADERS_H_ +#define O2_COMMON_UTILS_CONFIGURABLEPARAMREADERS_H_ + +#include +#include + +namespace o2::conf +{ + +// Helpers to read ConfigurableParam from different file formats +class ConfigurableParamReaders +{ + public: + static void setInputDir(const std::string& d) { sInputDir = d; } + static const std::string& getInputDir() { return sInputDir; } + + static boost::property_tree::ptree readINI(std::string const& filepath); + static boost::property_tree::ptree readJSON(std::string const& filepath); + static boost::property_tree::ptree readConfigFile(std::string const& filepath); + + private: + static std::string sInputDir; +}; + +} // namespace o2::conf +#endif // O2_COMMON_UTILS_CONF_CONFIGURABLEPARAMREADERS_H_ diff --git a/Common/Utils/src/ConfigurableParam.cxx b/Common/Utils/src/ConfigurableParam.cxx index e581970fc81fb..0a3c9fe2d7595 100644 --- a/Common/Utils/src/ConfigurableParam.cxx +++ b/Common/Utils/src/ConfigurableParam.cxx @@ -9,11 +9,12 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. -//first version 8/2018, Sandro Wenzel +// first version 8/2018, Sandro Wenzel #include "CommonUtils/ConfigurableParam.h" #include "CommonUtils/StringUtils.h" #include "CommonUtils/KeyValParam.h" +#include "CommonUtils/ConfigurableParamReaders.h" #define BOOST_BIND_GLOBAL_PLACEHOLDERS #include #include @@ -47,7 +48,6 @@ std::vector* ConfigurableParam::sRegisteredParamClasses = nu boost::property_tree::ptree* ConfigurableParam::sPtree = nullptr; std::map>* ConfigurableParam::sKeyToStorageMap = nullptr; std::map* ConfigurableParam::sValueProvenanceMap = nullptr; -std::string ConfigurableParam::sInputDir = ""; std::string ConfigurableParam::sOutputDir = ""; EnumRegistry* ConfigurableParam::sEnumRegistry = nullptr; @@ -194,60 +194,28 @@ void ConfigurableParam::writeINI(std::string const& filename, std::string const& bool ConfigurableParam::configFileExists(std::string const& filepath) { - return std::filesystem::exists(o2::utils::Str::concat_string(sInputDir, filepath)); + return std::filesystem::exists(o2::utils::Str::concat_string(ConfigurableParamReaders::getInputDir(), filepath)); } // ------------------------------------------------------------------ -boost::property_tree::ptree ConfigurableParam::readConfigFile(std::string const& filepath) +void ConfigurableParam::setValue(std::string const& key, std::string const& valuestring) { - auto inpfilename = o2::utils::Str::concat_string(sInputDir, filepath); - if (!std::filesystem::exists(inpfilename)) { - LOG(fatal) << inpfilename << " : config file does not exist!"; - } - - boost::property_tree::ptree pt; - - if (boost::iends_with(inpfilename, ".ini")) { - pt = readINI(inpfilename); - } else if (boost::iends_with(inpfilename, ".json")) { - pt = readJSON(inpfilename); - } else { - LOG(fatal) << "Configuration file 
must have either .ini or .json extension"; - } - - return pt; -} - -// ------------------------------------------------------------------ - -boost::property_tree::ptree ConfigurableParam::readINI(std::string const& filepath) -{ - boost::property_tree::ptree pt; - try { - boost::property_tree::read_ini(filepath, pt); - } catch (const boost::property_tree::ptree_error& e) { - LOG(fatal) << "Failed to read INI config file " << filepath << " (" << e.what() << ")"; - } catch (...) { - LOG(fatal) << "Unknown error when reading INI config file "; + if (!sIsFullyInitialized) { + initialize(); } - - return pt; -} - -// ------------------------------------------------------------------ - -boost::property_tree::ptree ConfigurableParam::readJSON(std::string const& filepath) -{ - boost::property_tree::ptree pt; - + assert(sPtree); try { - boost::property_tree::read_json(filepath, pt); - } catch (const boost::property_tree::ptree_error& e) { - LOG(fatal) << "Failed to read JSON config file " << filepath << " (" << e.what() << ")"; + if (sPtree->get_optional(key).is_initialized()) { + sPtree->put(key, valuestring); + auto changed = updateThroughStorageMapWithConversion(key, valuestring); + if (changed != EParamUpdateStatus::Failed) { + sValueProvenanceMap->find(key)->second = kRT; // set to runtime + } + } + } catch (std::exception const& e) { + std::cerr << "Error in setValue (string) " << e.what() << "\n"; } - - return pt; } // ------------------------------------------------------------------ @@ -258,7 +226,7 @@ void ConfigurableParam::writeJSON(std::string const& filename, std::string const LOG(info) << "ignoring writing of json file " << filename; return; } - initPropertyTree(); // update the boost tree before writing + initPropertyTree(); // update the boost tree before writing auto outfilename = o2::utils::Str::concat_string(sOutputDir, filename); if (!keyOnly.empty()) { // write ini for selected key only try { @@ -409,7 +377,7 @@ void ConfigurableParam::updateFromFile(std::string const& configFile, std::strin return; } - boost::property_tree::ptree pt = readConfigFile(cfgfile); + boost::property_tree::ptree pt = ConfigurableParamReaders::readConfigFile(cfgfile); std::vector> keyValPairs; auto request = o2::utils::Str::tokenize(paramsList, ',', true); @@ -522,7 +490,7 @@ void ConfigurableParam::updateFromString(std::string const& configString) const auto& kv = o2::conf::KeyValParam::Instance(); if (getProvenance("keyval.input_dir") != kCODE) { - sInputDir = o2::utils::Str::concat_string(o2::utils::Str::rectifyDirectory(kv.input_dir)); + ConfigurableParamReaders::setInputDir(o2::utils::Str::concat_string(o2::utils::Str::rectifyDirectory(kv.input_dir))); } if (getProvenance("keyval.output_dir") != kCODE) { if (kv.output_dir == "/dev/null") { diff --git a/Common/Utils/src/ConfigurableParamReaders.cxx b/Common/Utils/src/ConfigurableParamReaders.cxx new file mode 100644 index 0000000000000..fe45a4fccfc1a --- /dev/null +++ b/Common/Utils/src/ConfigurableParamReaders.cxx @@ -0,0 +1,75 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". 
+// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +#include "CommonUtils/ConfigurableParamReaders.h" +#include "CommonUtils/StringUtils.h" +#include + +#include +#include +#include + +namespace o2::conf +{ +// ------------------------------------------------------------------ + +boost::property_tree::ptree ConfigurableParamReaders::readINI(std::string const& filepath) +{ + boost::property_tree::ptree pt; + try { + boost::property_tree::read_ini(filepath, pt); + } catch (const boost::property_tree::ptree_error& e) { + LOG(fatal) << "Failed to read INI config file " << filepath << " (" << e.what() << ")"; + } catch (...) { + LOG(fatal) << "Unknown error when reading INI config file "; + } + + return pt; +} + +// ------------------------------------------------------------------ + +boost::property_tree::ptree ConfigurableParamReaders::readJSON(std::string const& filepath) +{ + boost::property_tree::ptree pt; + + try { + boost::property_tree::read_json(filepath, pt); + } catch (const boost::property_tree::ptree_error& e) { + LOG(fatal) << "Failed to read JSON config file " << filepath << " (" << e.what() << ")"; + } + + return pt; +} + +boost::property_tree::ptree ConfigurableParamReaders::readConfigFile(std::string const& filepath) +{ + auto inpfilename = o2::utils::Str::concat_string(sInputDir, filepath); + if (!std::filesystem::exists(inpfilename)) { + LOG(fatal) << inpfilename << " : config file does not exist!"; + } + + boost::property_tree::ptree pt; + + if (boost::iends_with(inpfilename, ".ini")) { + pt = ConfigurableParamReaders::readINI(inpfilename); + } else if (boost::iends_with(inpfilename, ".json")) { + pt = ConfigurableParamReaders::readJSON(inpfilename); + } else { + LOG(fatal) << "Configuration file must have either .ini or .json extension"; + } + + return pt; +} + +std::string ConfigurableParamReaders::sInputDir = ""; + +} // namespace o2::conf diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx index 2407796773f57..b18ffd1abee63 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx @@ -16,6 +16,7 @@ #define BOOST_TEST_DYN_LINK #include +#include #include "DetectorsRaw/HBFUtils.h" #include "DetectorsRaw/RawFileWriter.h" #include "Framework/Logger.h" diff --git a/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx b/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx index 791ab8e7a3909..2a550220bb137 100644 --- a/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx +++ b/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx @@ -31,6 +31,7 @@ #include "TGeoManager.h" #include "boost/format.hpp" #include +#include #include #include diff --git a/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx b/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx index eb786fc1bb068..60275f2cad2f4 100644 --- a/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx +++ b/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx @@ -27,6 +27,7 @@ #include #include +#include #include "CommonUtils/ConfigurableParam.h" #include "SimulationDataFormat/MCCompLabel.h" diff --git a/Detectors/TPC/base/test/testTPCParameters.cxx b/Detectors/TPC/base/test/testTPCParameters.cxx index 015a6abd964ec..b8baa1e2a74ac 100644 --- a/Detectors/TPC/base/test/testTPCParameters.cxx +++ b/Detectors/TPC/base/test/testTPCParameters.cxx @@ 
-23,9 +23,9 @@ #include "TPCBase/ParameterGas.h" #include #include -namespace o2 -{ -namespace tpc +#include + +namespace o2::tpc { constexpr float NominalTimeBin = 8 * o2::constants::lhc::LHCBunchSpacingNS * 1e-3; @@ -367,5 +367,4 @@ BOOST_AUTO_TEST_CASE(ParameterGEM_test2) BOOST_CHECK_CLOSE(o2::conf::ConfigurableParam::getValueAs("TPCGEMParam.KappaStack"), 32, 1e-12); BOOST_CHECK_CLOSE(o2::conf::ConfigurableParam::getValueAs("TPCGEMParam.EfficiencyStack"), 33, 1e-3); } -} // namespace tpc } // namespace o2 diff --git a/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h b/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h index fecbe9f14f18b..dd9a5fc257935 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h @@ -20,6 +20,7 @@ #include "TPCCalibration/IDCAverageGroupBase.h" #include "TPCBase/Sector.h" #include "TPCBase/CalDet.h" +#include #include #include "Rtypes.h" diff --git a/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h b/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h index 13c9d0b98412f..4c7f119efcfe0 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h @@ -24,6 +24,7 @@ #include "TPCCalibration/IDCContainer.h" #include "TPCCalibration/IDCGroupHelperSector.h" #include "DataFormatsTPC/Defs.h" +#include namespace o2::tpc { diff --git a/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h b/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h index b1b4430c11a00..7141750b8ce37 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h @@ -21,6 +21,7 @@ #include "Rtypes.h" #include "TPCCalibration/IDCContainer.h" #include "DataFormatsTPC/Defs.h" +#include namespace o2::tpc { diff --git a/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx b/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx index bbecb4df8f8fd..895b6957d8a01 100644 --- a/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx +++ b/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx @@ -12,6 +12,8 @@ #include "TPCCalibration/IDCGroupingParameter.h" #include "Framework/Logger.h" #include "Algorithm/RangeTokenizer.h" +#include +#include #include using namespace o2::tpc; diff --git a/Detectors/TPC/reconstruction/macro/makeIonTail.C b/Detectors/TPC/reconstruction/macro/makeIonTail.C index 98db6540d20f7..d8d1be6830626 100644 --- a/Detectors/TPC/reconstruction/macro/makeIonTail.C +++ b/Detectors/TPC/reconstruction/macro/makeIonTail.C @@ -32,6 +32,7 @@ #include "TPCBase/CRUCalibHelpers.h" #include "TPCBase/CRU.h" #include "CommonUtils/TreeStreamRedirector.h" +#include using namespace o2::tpc; size_t digitsInSaturateion(std::vector& digits, bool correctCharge = false, CalPad* pedestals = nullptr, o2::utils::TreeStreamRedirector* stream = nullptr); diff --git a/Detectors/TPC/simulation/macro/toyCluster.C b/Detectors/TPC/simulation/macro/toyCluster.C index 257adaf5f79fc..2087a7773fe22 100644 --- a/Detectors/TPC/simulation/macro/toyCluster.C +++ b/Detectors/TPC/simulation/macro/toyCluster.C @@ -58,6 +58,7 @@ #include "TPCReconstruction/HwClusterer.h" #include "TPCSimulation/GEMAmplification.h" #endif +#include using namespace o2::tpc; void fillTPCHits(const float theta, const float phi, const float dedx, std::vector& 
hitGroupSector, std::pair& trackInfo); diff --git a/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx b/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx index 37429a04c4b34..9c588ff50a45c 100644 --- a/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx +++ b/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx @@ -19,6 +19,7 @@ #include "TPCBase/ParameterGas.h" #include "DataFormatsTPC/LtrCalibData.h" #include "TPCWorkflow/ApplyCCDBCalibSpec.h" +#include using namespace o2::framework; diff --git a/macro/o2sim.C b/macro/o2sim.C index 5257a225d3ed9..f610be730eeb7 100644 --- a/macro/o2sim.C +++ b/macro/o2sim.C @@ -39,6 +39,7 @@ #include #endif #include "migrateSimFiles.C" +#include void check_notransport() { From 5eca1f19be52bd4817663c86dd5b945cea1bf428 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 15:22:46 +0100 Subject: [PATCH 131/726] DPL: fixes for C++20 --- .../test_FrameworkDataFlowToO2Control.cxx | 42 ++++++++----------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx b/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx index 4ef883f3b32de..7b0ffa462a9db 100644 --- a/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx +++ b/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx @@ -31,32 +31,26 @@ namespace { WorkflowSpec defineDataProcessing() { - return {{"A", // - Inputs{}, // - Outputs{OutputSpec{"TST", "A1"}, OutputSpec{"TST", "A2"}}, // A1 will be consumed twice, A2 is dangling - AlgorithmSpec{}, // - {ConfigParamSpec{"channel-config", VariantType::String, // raw input channel - "name=into_dpl,type=pull,method=connect,address=ipc:///tmp/pipe-into-dpl,transport=shmem,rateLogging=10,rcvBufSize=789", - {"Out-of-band channel config"}}}}, - {"B", // producer, no inputs - Inputs{}, - Outputs{OutputSpec{"TST", "B1"}}, + return {{.name = "A", // + .outputs = Outputs{OutputSpec{"TST", "A1"}, OutputSpec{"TST", "A2"}}, // A1 will be consumed twice, A2 is dangling + .algorithm = AlgorithmSpec{}, // + .options = {ConfigParamSpec{"channel-config", VariantType::String, // raw input channel + "name=into_dpl,type=pull,method=connect,address=ipc:///tmp/pipe-into-dpl,transport=shmem,rateLogging=10,rcvBufSize=789", + {"Out-of-band channel config"}}}}, + {.name = "B", // producer, no inputs + .outputs = Outputs{OutputSpec{"TST", "B1"}}, .metadata = {{ecs::cpuKillThreshold, "3.0"}}}, - {"C", // first consumer of A1, consumer of B1 - {InputSpec{"y", "TST", "A1"}, InputSpec{"y", "TST", "B1"}}, - Outputs{}, + {.name = "C", // first consumer of A1, consumer of B1 + .inputs = {InputSpec{"y", "TST", "A1"}, InputSpec{"y", "TST", "B1"}}, .metadata = {{ecs::privateMemoryKillThresholdMB, "5000"}}}, - {"D", // second consumer of A1 - Inputs{ - InputSpec{"x", "TST", "A1"}}, - Outputs{}, - AlgorithmSpec{}, - {ConfigParamSpec{"a-param", VariantType::Int, 1, {"A parameter which should not be escaped"}}, - ConfigParamSpec{"b-param", VariantType::String, "", {"a parameter which will be escaped"}}, - ConfigParamSpec{"c-param", VariantType::String, "foo;bar", {"another parameter which will be escaped"}}, - ConfigParamSpec{"channel-config", VariantType::String, // raw output channel - "name=outta_dpl,type=push,method=bind,address=ipc:///tmp/pipe-outta-dpl,transport=shmem,rateLogging=10", - {"Out-of-band channel config"}}}}}; + {.name = "D", // second consumer of A1 + .inputs = Inputs{InputSpec{"x", "TST", "A1"}}, + .options = {ConfigParamSpec{"a-param", VariantType::Int, 1, {"A 
parameter which should not be escaped"}}, + ConfigParamSpec{"b-param", VariantType::String, "", {"a parameter which will be escaped"}}, + ConfigParamSpec{"c-param", VariantType::String, "foo;bar", {"another parameter which will be escaped"}}, + ConfigParamSpec{"channel-config", VariantType::String, // raw output channel + "name=outta_dpl,type=push,method=bind,address=ipc:///tmp/pipe-outta-dpl,transport=shmem,rateLogging=10", + {"Out-of-band channel config"}}}}}; } char* strdiffchr(const char* s1, const char* s2) From de8c022334e36fb0836ed8beb242861b9c515c48 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 18:17:26 +0100 Subject: [PATCH 132/726] Drop need to include ptree in ConfigurableParam.h --- Common/SimConfig/test/testSimCutParam.cxx | 1 + Common/Utils/CMakeLists.txt | 3 +- .../include/CommonUtils/ConfigurableParam.h | 62 +++++---------- .../CommonUtils/ConfigurableParamReaders.h | 36 +++++++++ Common/Utils/src/ConfigurableParam.cxx | 70 +++++------------ Common/Utils/src/ConfigurableParamReaders.cxx | 76 +++++++++++++++++++ .../MUON/MCH/Raw/test/testClosureCoDec.cxx | 1 + .../MCH/Simulation/test/testDigitizer.cxx | 1 + .../MCH/Triggering/test/testEventFinder.cxx | 1 + Detectors/TPC/base/test/testTPCParameters.cxx | 7 +- .../include/TPCCalibration/IDCAverageGroup.h | 1 + .../include/TPCCalibration/IDCFactorization.h | 1 + .../include/TPCCalibration/SACFactorization.h | 1 + .../calibration/src/IDCGroupingParameter.cxx | 2 + .../TPC/reconstruction/macro/makeIonTail.C | 1 + Detectors/TPC/simulation/macro/toyCluster.C | 1 + .../TPC/workflow/src/ApplyCCDBCalibSpec.cxx | 1 + macro/o2sim.C | 1 + 18 files changed, 170 insertions(+), 97 deletions(-) create mode 100644 Common/Utils/include/CommonUtils/ConfigurableParamReaders.h create mode 100644 Common/Utils/src/ConfigurableParamReaders.cxx diff --git a/Common/SimConfig/test/testSimCutParam.cxx b/Common/SimConfig/test/testSimCutParam.cxx index 7a17ae9d1f2cf..468d76429cbba 100644 --- a/Common/SimConfig/test/testSimCutParam.cxx +++ b/Common/SimConfig/test/testSimCutParam.cxx @@ -15,6 +15,7 @@ #include #include "SimConfig/SimParams.h" #include "CommonUtils/ConfigurableParam.h" +#include using namespace o2::conf; diff --git a/Common/Utils/CMakeLists.txt b/Common/Utils/CMakeLists.txt index b0fa605cb6723..7f650b973990b 100644 --- a/Common/Utils/CMakeLists.txt +++ b/Common/Utils/CMakeLists.txt @@ -14,7 +14,7 @@ o2_add_library(CommonUtils src/RootChain.cxx src/CompStream.cxx src/ShmManager.cxx src/ValueMonitor.cxx src/StringUtils.cxx - src/ConfigurableParamHelper.cxx src/ConfigurableParam.cxx src/RootSerializableKeyValueStore.cxx + src/ConfigurableParamReaders.cxx src/ConfigurableParamHelper.cxx src/ConfigurableParam.cxx src/RootSerializableKeyValueStore.cxx src/KeyValParam.cxx src/FileSystemUtils.cxx src/FIFO.cxx @@ -39,6 +39,7 @@ o2_target_root_dictionary(CommonUtils include/CommonUtils/MemFileHelper.h include/CommonUtils/ConfigurableParam.h include/CommonUtils/ConfigurableParamHelper.h + include/CommonUtils/ConfigurableParamReaders.h include/CommonUtils/ConfigurationMacroHelper.h include/CommonUtils/RootSerializableKeyValueStore.h include/CommonUtils/KeyValParam.h diff --git a/Common/Utils/include/CommonUtils/ConfigurableParam.h b/Common/Utils/include/CommonUtils/ConfigurableParam.h index 7099e37d5bc50..717a4c425fc82 100644 --- a/Common/Utils/include/CommonUtils/ConfigurableParam.h +++ b/Common/Utils/include/CommonUtils/ConfigurableParam.h @@ -18,7 +18,7 @@ #include #include #include 
-#include +#include #include #include #include @@ -176,15 +176,10 @@ class ConfigurableParam static void printAllRegisteredParamNames(); static void printAllKeyValuePairs(bool useLogger = false); - static const std::string& getInputDir() { return sInputDir; } static const std::string& getOutputDir() { return sOutputDir; } - static void setInputDir(const std::string& d) { sInputDir = d; } static void setOutputDir(const std::string& d) { sOutputDir = d; } - static boost::property_tree::ptree readINI(std::string const& filepath); - static boost::property_tree::ptree readJSON(std::string const& filepath); - static boost::property_tree::ptree readConfigFile(std::string const& filepath); static bool configFileExists(std::string const& filepath); // writes a human readable JSON file of all parameters @@ -196,10 +191,12 @@ class ConfigurableParam template static T getValueAs(std::string key) { - if (!sIsFullyInitialized) { - initialize(); - } - return sPtree->get(key); + return [](auto* tree, const std::string& key) -> T { + if (!sIsFullyInitialized) { + initialize(); + } + return tree->template get(key); + }(sPtree, key); } template @@ -208,19 +205,21 @@ class ConfigurableParam if (!sIsFullyInitialized) { initialize(); } - assert(sPtree); - try { - auto key = mainkey + "." + subkey; - if (sPtree->get_optional(key).is_initialized()) { - sPtree->put(key, x); - auto changed = updateThroughStorageMap(mainkey, subkey, typeid(T), (void*)&x); - if (changed != EParamUpdateStatus::Failed) { - sValueProvenanceMap->find(key)->second = kRT; // set to runtime + return [&subkey, &x, &mainkey](auto* tree) -> void { + assert(tree); + try { + auto key = mainkey + "." + subkey; + if (tree->template get_optional(key).is_initialized()) { + tree->put(key, x); + auto changed = updateThroughStorageMap(mainkey, subkey, typeid(T), (void*)&x); + if (changed != EParamUpdateStatus::Failed) { + sValueProvenanceMap->find(key)->second = kRT; // set to runtime + } } + } catch (std::exception const& e) { + std::cerr << "Error in setValue (T) " << e.what() << "\n"; } - } catch (std::exception const& e) { - std::cerr << "Error in setValue (T) " << e.what() << "\n"; - } + }(sPtree); } static void setProvenance(std::string const& mainkey, std::string const& subkey, EParamProvenance p) @@ -242,25 +241,7 @@ class ConfigurableParam // specialized for std::string // which means that the type will be converted internally - static void setValue(std::string const& key, std::string const& valuestring) - { - if (!sIsFullyInitialized) { - initialize(); - } - assert(sPtree); - try { - if (sPtree->get_optional(key).is_initialized()) { - sPtree->put(key, valuestring); - auto changed = updateThroughStorageMapWithConversion(key, valuestring); - if (changed != EParamUpdateStatus::Failed) { - sValueProvenanceMap->find(key)->second = kRT; // set to runtime - } - } - } catch (std::exception const& e) { - std::cerr << "Error in setValue (string) " << e.what() << "\n"; - } - } - + static void setValue(std::string const& key, std::string const& valuestring); static void setEnumValue(const std::string&, const std::string&); static void setArrayValue(const std::string&, const std::string&); @@ -321,7 +302,6 @@ class ConfigurableParam // (stored as a vector of pairs ) static EnumRegistry* sEnumRegistry; - static std::string sInputDir; static std::string sOutputDir; void setRegisterMode(bool b) { sRegisterMode = b; } diff --git a/Common/Utils/include/CommonUtils/ConfigurableParamReaders.h b/Common/Utils/include/CommonUtils/ConfigurableParamReaders.h new file 
mode 100644 index 0000000000000..d5ecd6cb97f7a --- /dev/null +++ b/Common/Utils/include/CommonUtils/ConfigurableParamReaders.h @@ -0,0 +1,36 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#ifndef O2_COMMON_UTILS_CONFIGURABLEPARAMREADERS_H_ +#define O2_COMMON_UTILS_CONFIGURABLEPARAMREADERS_H_ + +#include +#include + +namespace o2::conf +{ + +// Helpers to read ConfigurableParam from different file formats +class ConfigurableParamReaders +{ + public: + static void setInputDir(const std::string& d) { sInputDir = d; } + static const std::string& getInputDir() { return sInputDir; } + + static boost::property_tree::ptree readINI(std::string const& filepath); + static boost::property_tree::ptree readJSON(std::string const& filepath); + static boost::property_tree::ptree readConfigFile(std::string const& filepath); + + private: + static std::string sInputDir; +}; + +} // namespace o2::conf +#endif // O2_COMMON_UTILS_CONF_CONFIGURABLEPARAMREADERS_H_ diff --git a/Common/Utils/src/ConfigurableParam.cxx b/Common/Utils/src/ConfigurableParam.cxx index e581970fc81fb..0a3c9fe2d7595 100644 --- a/Common/Utils/src/ConfigurableParam.cxx +++ b/Common/Utils/src/ConfigurableParam.cxx @@ -9,11 +9,12 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-//first version 8/2018, Sandro Wenzel +// first version 8/2018, Sandro Wenzel #include "CommonUtils/ConfigurableParam.h" #include "CommonUtils/StringUtils.h" #include "CommonUtils/KeyValParam.h" +#include "CommonUtils/ConfigurableParamReaders.h" #define BOOST_BIND_GLOBAL_PLACEHOLDERS #include #include @@ -47,7 +48,6 @@ std::vector* ConfigurableParam::sRegisteredParamClasses = nu boost::property_tree::ptree* ConfigurableParam::sPtree = nullptr; std::map>* ConfigurableParam::sKeyToStorageMap = nullptr; std::map* ConfigurableParam::sValueProvenanceMap = nullptr; -std::string ConfigurableParam::sInputDir = ""; std::string ConfigurableParam::sOutputDir = ""; EnumRegistry* ConfigurableParam::sEnumRegistry = nullptr; @@ -194,60 +194,28 @@ void ConfigurableParam::writeINI(std::string const& filename, std::string const& bool ConfigurableParam::configFileExists(std::string const& filepath) { - return std::filesystem::exists(o2::utils::Str::concat_string(sInputDir, filepath)); + return std::filesystem::exists(o2::utils::Str::concat_string(ConfigurableParamReaders::getInputDir(), filepath)); } // ------------------------------------------------------------------ -boost::property_tree::ptree ConfigurableParam::readConfigFile(std::string const& filepath) +void ConfigurableParam::setValue(std::string const& key, std::string const& valuestring) { - auto inpfilename = o2::utils::Str::concat_string(sInputDir, filepath); - if (!std::filesystem::exists(inpfilename)) { - LOG(fatal) << inpfilename << " : config file does not exist!"; - } - - boost::property_tree::ptree pt; - - if (boost::iends_with(inpfilename, ".ini")) { - pt = readINI(inpfilename); - } else if (boost::iends_with(inpfilename, ".json")) { - pt = readJSON(inpfilename); - } else { - LOG(fatal) << "Configuration file must have either .ini or .json extension"; - } - - return pt; -} - -// ------------------------------------------------------------------ - -boost::property_tree::ptree ConfigurableParam::readINI(std::string const& filepath) -{ - boost::property_tree::ptree pt; - try { - boost::property_tree::read_ini(filepath, pt); - } catch (const boost::property_tree::ptree_error& e) { - LOG(fatal) << "Failed to read INI config file " << filepath << " (" << e.what() << ")"; - } catch (...) 
{ - LOG(fatal) << "Unknown error when reading INI config file "; + if (!sIsFullyInitialized) { + initialize(); } - - return pt; -} - -// ------------------------------------------------------------------ - -boost::property_tree::ptree ConfigurableParam::readJSON(std::string const& filepath) -{ - boost::property_tree::ptree pt; - + assert(sPtree); try { - boost::property_tree::read_json(filepath, pt); - } catch (const boost::property_tree::ptree_error& e) { - LOG(fatal) << "Failed to read JSON config file " << filepath << " (" << e.what() << ")"; + if (sPtree->get_optional(key).is_initialized()) { + sPtree->put(key, valuestring); + auto changed = updateThroughStorageMapWithConversion(key, valuestring); + if (changed != EParamUpdateStatus::Failed) { + sValueProvenanceMap->find(key)->second = kRT; // set to runtime + } + } + } catch (std::exception const& e) { + std::cerr << "Error in setValue (string) " << e.what() << "\n"; } - - return pt; } // ------------------------------------------------------------------ @@ -258,7 +226,7 @@ void ConfigurableParam::writeJSON(std::string const& filename, std::string const LOG(info) << "ignoring writing of json file " << filename; return; } - initPropertyTree(); // update the boost tree before writing + initPropertyTree(); // update the boost tree before writing auto outfilename = o2::utils::Str::concat_string(sOutputDir, filename); if (!keyOnly.empty()) { // write ini for selected key only try { @@ -409,7 +377,7 @@ void ConfigurableParam::updateFromFile(std::string const& configFile, std::strin return; } - boost::property_tree::ptree pt = readConfigFile(cfgfile); + boost::property_tree::ptree pt = ConfigurableParamReaders::readConfigFile(cfgfile); std::vector> keyValPairs; auto request = o2::utils::Str::tokenize(paramsList, ',', true); @@ -522,7 +490,7 @@ void ConfigurableParam::updateFromString(std::string const& configString) const auto& kv = o2::conf::KeyValParam::Instance(); if (getProvenance("keyval.input_dir") != kCODE) { - sInputDir = o2::utils::Str::concat_string(o2::utils::Str::rectifyDirectory(kv.input_dir)); + ConfigurableParamReaders::setInputDir(o2::utils::Str::concat_string(o2::utils::Str::rectifyDirectory(kv.input_dir))); } if (getProvenance("keyval.output_dir") != kCODE) { if (kv.output_dir == "/dev/null") { diff --git a/Common/Utils/src/ConfigurableParamReaders.cxx b/Common/Utils/src/ConfigurableParamReaders.cxx new file mode 100644 index 0000000000000..2f36f8b27f5fa --- /dev/null +++ b/Common/Utils/src/ConfigurableParamReaders.cxx @@ -0,0 +1,76 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +#include "CommonUtils/ConfigurableParamReaders.h" +#include "CommonUtils/StringUtils.h" +#include +#include + +#include +#include +#include + +namespace o2::conf +{ +// ------------------------------------------------------------------ + +boost::property_tree::ptree ConfigurableParamReaders::readINI(std::string const& filepath) +{ + boost::property_tree::ptree pt; + try { + boost::property_tree::read_ini(filepath, pt); + } catch (const boost::property_tree::ptree_error& e) { + LOG(fatal) << "Failed to read INI config file " << filepath << " (" << e.what() << ")"; + } catch (...) { + LOG(fatal) << "Unknown error when reading INI config file "; + } + + return pt; +} + +// ------------------------------------------------------------------ + +boost::property_tree::ptree ConfigurableParamReaders::readJSON(std::string const& filepath) +{ + boost::property_tree::ptree pt; + + try { + boost::property_tree::read_json(filepath, pt); + } catch (const boost::property_tree::ptree_error& e) { + LOG(fatal) << "Failed to read JSON config file " << filepath << " (" << e.what() << ")"; + } + + return pt; +} + +boost::property_tree::ptree ConfigurableParamReaders::readConfigFile(std::string const& filepath) +{ + auto inpfilename = o2::utils::Str::concat_string(sInputDir, filepath); + if (!std::filesystem::exists(inpfilename)) { + LOG(fatal) << inpfilename << " : config file does not exist!"; + } + + boost::property_tree::ptree pt; + + if (boost::iends_with(inpfilename, ".ini")) { + pt = ConfigurableParamReaders::readINI(inpfilename); + } else if (boost::iends_with(inpfilename, ".json")) { + pt = ConfigurableParamReaders::readJSON(inpfilename); + } else { + LOG(fatal) << "Configuration file must have either .ini or .json extension"; + } + + return pt; +} + +std::string ConfigurableParamReaders::sInputDir = ""; + +} // namespace o2::conf diff --git a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx index 2407796773f57..b18ffd1abee63 100644 --- a/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx +++ b/Detectors/MUON/MCH/Raw/test/testClosureCoDec.cxx @@ -16,6 +16,7 @@ #define BOOST_TEST_DYN_LINK #include +#include #include "DetectorsRaw/HBFUtils.h" #include "DetectorsRaw/RawFileWriter.h" #include "Framework/Logger.h" diff --git a/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx b/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx index 791ab8e7a3909..2a550220bb137 100644 --- a/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx +++ b/Detectors/MUON/MCH/Simulation/test/testDigitizer.cxx @@ -31,6 +31,7 @@ #include "TGeoManager.h" #include "boost/format.hpp" #include +#include #include #include diff --git a/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx b/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx index eb786fc1bb068..60275f2cad2f4 100644 --- a/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx +++ b/Detectors/MUON/MCH/Triggering/test/testEventFinder.cxx @@ -27,6 +27,7 @@ #include #include +#include #include "CommonUtils/ConfigurableParam.h" #include "SimulationDataFormat/MCCompLabel.h" diff --git a/Detectors/TPC/base/test/testTPCParameters.cxx b/Detectors/TPC/base/test/testTPCParameters.cxx index 015a6abd964ec..b8baa1e2a74ac 100644 --- a/Detectors/TPC/base/test/testTPCParameters.cxx +++ b/Detectors/TPC/base/test/testTPCParameters.cxx @@ -23,9 +23,9 @@ #include "TPCBase/ParameterGas.h" #include #include -namespace o2 -{ -namespace tpc +#include + +namespace o2::tpc { constexpr float NominalTimeBin = 8 * 
o2::constants::lhc::LHCBunchSpacingNS * 1e-3; @@ -367,5 +367,4 @@ BOOST_AUTO_TEST_CASE(ParameterGEM_test2) BOOST_CHECK_CLOSE(o2::conf::ConfigurableParam::getValueAs("TPCGEMParam.KappaStack"), 32, 1e-12); BOOST_CHECK_CLOSE(o2::conf::ConfigurableParam::getValueAs("TPCGEMParam.EfficiencyStack"), 33, 1e-3); } -} // namespace tpc } // namespace o2 diff --git a/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h b/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h index fecbe9f14f18b..dd9a5fc257935 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/IDCAverageGroup.h @@ -20,6 +20,7 @@ #include "TPCCalibration/IDCAverageGroupBase.h" #include "TPCBase/Sector.h" #include "TPCBase/CalDet.h" +#include #include #include "Rtypes.h" diff --git a/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h b/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h index 13c9d0b98412f..4c7f119efcfe0 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/IDCFactorization.h @@ -24,6 +24,7 @@ #include "TPCCalibration/IDCContainer.h" #include "TPCCalibration/IDCGroupHelperSector.h" #include "DataFormatsTPC/Defs.h" +#include namespace o2::tpc { diff --git a/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h b/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h index b1b4430c11a00..7141750b8ce37 100644 --- a/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h +++ b/Detectors/TPC/calibration/include/TPCCalibration/SACFactorization.h @@ -21,6 +21,7 @@ #include "Rtypes.h" #include "TPCCalibration/IDCContainer.h" #include "DataFormatsTPC/Defs.h" +#include namespace o2::tpc { diff --git a/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx b/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx index bbecb4df8f8fd..895b6957d8a01 100644 --- a/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx +++ b/Detectors/TPC/calibration/src/IDCGroupingParameter.cxx @@ -12,6 +12,8 @@ #include "TPCCalibration/IDCGroupingParameter.h" #include "Framework/Logger.h" #include "Algorithm/RangeTokenizer.h" +#include +#include #include using namespace o2::tpc; diff --git a/Detectors/TPC/reconstruction/macro/makeIonTail.C b/Detectors/TPC/reconstruction/macro/makeIonTail.C index 98db6540d20f7..d8d1be6830626 100644 --- a/Detectors/TPC/reconstruction/macro/makeIonTail.C +++ b/Detectors/TPC/reconstruction/macro/makeIonTail.C @@ -32,6 +32,7 @@ #include "TPCBase/CRUCalibHelpers.h" #include "TPCBase/CRU.h" #include "CommonUtils/TreeStreamRedirector.h" +#include using namespace o2::tpc; size_t digitsInSaturateion(std::vector& digits, bool correctCharge = false, CalPad* pedestals = nullptr, o2::utils::TreeStreamRedirector* stream = nullptr); diff --git a/Detectors/TPC/simulation/macro/toyCluster.C b/Detectors/TPC/simulation/macro/toyCluster.C index 257adaf5f79fc..2087a7773fe22 100644 --- a/Detectors/TPC/simulation/macro/toyCluster.C +++ b/Detectors/TPC/simulation/macro/toyCluster.C @@ -58,6 +58,7 @@ #include "TPCReconstruction/HwClusterer.h" #include "TPCSimulation/GEMAmplification.h" #endif +#include using namespace o2::tpc; void fillTPCHits(const float theta, const float phi, const float dedx, std::vector& hitGroupSector, std::pair& trackInfo); diff --git a/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx b/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx index 
37429a04c4b34..9c588ff50a45c 100644 --- a/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx +++ b/Detectors/TPC/workflow/src/ApplyCCDBCalibSpec.cxx @@ -19,6 +19,7 @@ #include "TPCBase/ParameterGas.h" #include "DataFormatsTPC/LtrCalibData.h" #include "TPCWorkflow/ApplyCCDBCalibSpec.h" +#include using namespace o2::framework; diff --git a/macro/o2sim.C b/macro/o2sim.C index 5257a225d3ed9..f610be730eeb7 100644 --- a/macro/o2sim.C +++ b/macro/o2sim.C @@ -39,6 +39,7 @@ #include #endif #include "migrateSimFiles.C" +#include void check_notransport() { From f5f18ff8cc0764ec7422122d1889675a548670f0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 24 Nov 2023 23:12:49 +0100 Subject: [PATCH 133/726] DPL: more operator== ambiguities fixed --- Framework/Core/include/Framework/ASoA.h | 8 ++++---- Framework/Core/include/Framework/ASoAHelpers.h | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Framework/Core/include/Framework/ASoA.h b/Framework/Core/include/Framework/ASoA.h index a0a45a833efeb..898273b63d043 100644 --- a/Framework/Core/include/Framework/ASoA.h +++ b/Framework/Core/include/Framework/ASoA.h @@ -614,14 +614,14 @@ struct DefaultIndexPolicy : IndexPolicyBase { this->setCursor(mMaxRow); } - bool operator!=(DefaultIndexPolicy const& other) const + friend bool operator!=(DefaultIndexPolicy const& lh, DefaultIndexPolicy const& rh) { - return O2_BUILTIN_LIKELY(this->mRowIndex != other.mRowIndex); + return O2_BUILTIN_LIKELY(lh.mRowIndex != rh.mRowIndex); } - bool operator==(DefaultIndexPolicy const& other) const + friend bool operator==(DefaultIndexPolicy const& lh, DefaultIndexPolicy const& rh) { - return O2_BUILTIN_UNLIKELY(this->mRowIndex == other.mRowIndex); + return O2_BUILTIN_UNLIKELY(lh.mRowIndex == rh.mRowIndex); } bool operator!=(RowViewSentinel const& sentinel) const diff --git a/Framework/Core/include/Framework/ASoAHelpers.h b/Framework/Core/include/Framework/ASoAHelpers.h index 1573ce3abf235..932f4fd029298 100644 --- a/Framework/Core/include/Framework/ASoAHelpers.h +++ b/Framework/Core/include/Framework/ASoAHelpers.h @@ -1253,13 +1253,13 @@ struct CombinationsGenerator { { return this->mCurrent; } - bool operator==(const CombinationsIterator& rh) + friend bool operator==(const CombinationsIterator& lh, const CombinationsIterator& rh) { - return (this->mIsEnd && rh.mIsEnd) || (this->mCurrent == rh.mCurrent); + return (lh.mIsEnd && rh.mIsEnd) || (lh.mCurrent == rh.mCurrent); } - bool operator!=(const CombinationsIterator& rh) + friend bool operator!=(const CombinationsIterator& lh, const CombinationsIterator& rh) { - return !(*this == rh); + return !(lh == rh); } }; From 937d667c9db1e44385b3cac3db9d2cf086b683bc Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 25 Nov 2023 21:53:24 +0100 Subject: [PATCH 134/726] Fix more ambiguous operators --- Algorithm/include/Algorithm/PageParser.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Algorithm/include/Algorithm/PageParser.h b/Algorithm/include/Algorithm/PageParser.h index e382fc318352e..3ca01d87bcba3 100644 --- a/Algorithm/include/Algorithm/PageParser.h +++ b/Algorithm/include/Algorithm/PageParser.h @@ -255,12 +255,12 @@ class PageParser return mElement; } // comparison - bool operator==(const SelfType& rh) + bool operator==(const SelfType& rh) const { return mPosition == rh.mPosition; } // comparison - bool operator!=(const SelfType& rh) + bool operator!=(const SelfType& rh) const { return mPosition != 
rh.mPosition; } From 8e934a5bc36d570cd52cd42d0240db3d947b832a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 25 Nov 2023 21:56:57 +0100 Subject: [PATCH 135/726] DPL: fix ambiguous operator for C++20 --- Framework/Core/include/Framework/GroupedCombinations.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Framework/Core/include/Framework/GroupedCombinations.h b/Framework/Core/include/Framework/GroupedCombinations.h index 4d2d7678493dc..5904916613d2e 100644 --- a/Framework/Core/include/Framework/GroupedCombinations.h +++ b/Framework/Core/include/Framework/GroupedCombinations.h @@ -137,11 +137,11 @@ struct GroupedCombinationsGenerator { { return *mCurrentGrouped; } - bool operator==(const GroupedIterator& rh) + bool operator==(const GroupedIterator& rh) const { return (this->mIsEnd && rh.mIsEnd) || (this->mCurrent == rh.mCurrent); } - bool operator!=(const GroupedIterator& rh) + bool operator!=(const GroupedIterator& rh) const { return !(*this == rh); } From 87656e97980c10a1a90551286264c3dbb461d42a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 27 Nov 2023 11:39:59 +0100 Subject: [PATCH 136/726] More methods which could be const made const --- Common/Field/src/MagFieldFast.cxx | 2 +- Utilities/DataCompression/test/DataGenerator.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Common/Field/src/MagFieldFast.cxx b/Common/Field/src/MagFieldFast.cxx index e652dffa396c5..5caad34d56dd4 100644 --- a/Common/Field/src/MagFieldFast.cxx +++ b/Common/Field/src/MagFieldFast.cxx @@ -238,7 +238,7 @@ bool MagFieldFast::Field(const math_utils::Point3D xyz, double bxyz[3]) bool MagFieldFast::GetSegment(float x, float y, float z, int& zSeg, int& rSeg, int& quadrant) const { // get segment of point location - const float zGridSpaceInv = 1.f / (kSolZMax * 2 / kNSolZRanges); + const float zGridSpaceInv = 1.f / (kSolZMax * 2 / (float)kNSolZRanges); zSeg = -1; if (z < kSolZMax) { if (z > -kSolZMax) { diff --git a/Utilities/DataCompression/test/DataGenerator.h b/Utilities/DataCompression/test/DataGenerator.h index 6c209f9007f6f..3fe0740ccbad8 100644 --- a/Utilities/DataCompression/test/DataGenerator.h +++ b/Utilities/DataCompression/test/DataGenerator.h @@ -154,8 +154,8 @@ class DataGenerator // pointer operator->() const {return &mValue;} // reference operator[](size_type n) const; - bool operator==(const self_type& other) { return mCount == other.mCount; } - bool operator!=(const self_type& other) { return not(*this == other); } + bool operator==(const self_type& other) const { return mCount == other.mCount; } + bool operator!=(const self_type& other) const { return not(*this == other); } private: const ContainerT& mParent; From 41641d15b9c069420f689853e4b9a3c50b7a3b0e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 27 Nov 2023 12:36:55 +0100 Subject: [PATCH 137/726] Misc fixes for C++20 Apparently specifying the template specialization for trivial case is now an error. 
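A hypothetical minimal reproducer of the construct being removed (illustrative only, not taken from the files touched below): declaring a constructor with a template-id, i.e. with the class's own template arguments spelled out, is what C++20-mode compilers now reject, so the redundant argument list is dropped.

    // Older compilers tolerated the commented-out spelling; C++20 mode rejects it.
    template <typename T>
    struct trackWrapper {
      // trackWrapper<T>() = default;     // error in C++20: template-id as ctor name
      trackWrapper() = default;           // correct: plain injected-class-name
      explicit trackWrapper(const T& v) : value(v) {}
      T value{};
    };
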
--- .../MUON/MCH/Tracking/src/TrackFinderSpec.cxx | 2 +- .../MUON/MID/Workflow/src/RawCheckerSpec.cxx | 2 +- .../DataTypes/GPUTRDInterfaceO2Track.h | 12 +++---- .../TRDTracking/GPUTRDInterfaces.h | 36 +++++++++---------- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx index c40df5d4b448e..6239186309dc3 100644 --- a/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx +++ b/Detectors/MUON/MCH/Tracking/src/TrackFinderSpec.cxx @@ -66,7 +66,7 @@ class TrackFinderTask { public: //_________________________________________________________________________________________________ - TrackFinderTask(bool computeTime, bool digits, std::shared_ptr req) + TrackFinderTask(bool computeTime, bool digits, std::shared_ptr req) : mComputeTime(computeTime), mDigits(digits), mCCDBRequest(req) {} //_________________________________________________________________________________________________ diff --git a/Detectors/MUON/MID/Workflow/src/RawCheckerSpec.cxx b/Detectors/MUON/MID/Workflow/src/RawCheckerSpec.cxx index c2e9e3fc5c635..58774a6336294 100644 --- a/Detectors/MUON/MID/Workflow/src/RawCheckerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/RawCheckerSpec.cxx @@ -48,7 +48,7 @@ template class RawCheckerDeviceDPL { public: - RawCheckerDeviceDPL(const std::vector& feeIds, const CrateMasks& crateMasks, const ElectronicsDelay& electronicsDelay) : mFeeIds(feeIds), mCrateMasks(crateMasks), mElectronicsDelay(electronicsDelay) {} + RawCheckerDeviceDPL(const std::vector& feeIds, const CrateMasks& crateMasks, const ElectronicsDelay& electronicsDelay) : mFeeIds(feeIds), mCrateMasks(crateMasks), mElectronicsDelay(electronicsDelay) {} void init(o2::framework::InitContext& ic) { diff --git a/GPU/GPUTracking/DataTypes/GPUTRDInterfaceO2Track.h b/GPU/GPUTracking/DataTypes/GPUTRDInterfaceO2Track.h index 19cad3649fa42..fb8f6d1b47d52 100644 --- a/GPU/GPUTracking/DataTypes/GPUTRDInterfaceO2Track.h +++ b/GPU/GPUTracking/DataTypes/GPUTRDInterfaceO2Track.h @@ -47,10 +47,10 @@ template <> class trackInterface : public o2::track::TrackParCov { public: - GPUdDefault() trackInterface() = default; - trackInterface(const o2::track::TrackParCov& param) = delete; - GPUd() trackInterface(const o2::dataformats::TrackTPCITS& trkItsTpc) : o2::track::TrackParCov(trkItsTpc.getParamOut()) {} - GPUd() trackInterface(const o2::tpc::TrackTPC& trkTpc) : o2::track::TrackParCov(trkTpc.getParamOut()) {} + GPUdDefault() trackInterface() = default; + trackInterface(const o2::track::TrackParCov& param) = delete; + GPUd() trackInterface(const o2::dataformats::TrackTPCITS& trkItsTpc) : o2::track::TrackParCov(trkItsTpc.getParamOut()) {} + GPUd() trackInterface(const o2::tpc::TrackTPC& trkTpc) : o2::track::TrackParCov(trkTpc.getParamOut()) {} GPUd() void set(float x, float alpha, const float* param, const float* cov) { @@ -63,8 +63,8 @@ class trackInterface : public o2::track::TrackParCov setCov(cov[i], i); } } - GPUd() trackInterface(const GPUTPCGMMergedTrack& trk); - GPUd() trackInterface(const gputpcgmmergertypes::GPUTPCOuterParam& param); + GPUd() trackInterface(const GPUTPCGMMergedTrack& trk); + GPUd() trackInterface(const gputpcgmmergertypes::GPUTPCOuterParam& param); GPUd() void updateCovZ2(float addZerror) { updateCov(addZerror, o2::track::CovLabels::kSigZ2); } GPUd() o2::track::TrackLTIntegral& getLTIntegralOut() { return mLTOut; } GPUd() const o2::track::TrackLTIntegral& getLTIntegralOut() const { return mLTOut; } diff --git 
a/GPU/GPUTracking/TRDTracking/GPUTRDInterfaces.h b/GPU/GPUTracking/TRDTracking/GPUTRDInterfaces.h index ab8db40e90e2e..6359f880e2860 100644 --- a/GPU/GPUTracking/TRDTracking/GPUTRDInterfaces.h +++ b/GPU/GPUTracking/TRDTracking/GPUTRDInterfaces.h @@ -101,9 +101,9 @@ class propagatorInterface : public AliTrackerBase public: typedef void propagatorParam; - propagatorInterface(const propagatorParam* = nullptr) : AliTrackerBase(), mParam(nullptr){}; - propagatorInterface(const propagatorInterface&) CON_DELETE; - propagatorInterface& operator=(const propagatorInterface&) CON_DELETE; + propagatorInterface(const propagatorParam* = nullptr) : AliTrackerBase(), mParam(nullptr){}; + propagatorInterface(const propagatorInterface&) CON_DELETE; + propagatorInterface& operator=(const propagatorInterface&) CON_DELETE; bool propagateToX(float x, float maxSnp, float maxStep) { return PropagateTrackToBxByBz(mParam, x, 0.13957, maxStep, false, maxSnp); } int getPropagatedYZ(float x, float& projY, float& projZ) @@ -147,9 +147,9 @@ class propagatorInterface { public: typedef o2::base::Propagator propagatorParam; - GPUd() propagatorInterface(const propagatorParam* prop) : mProp(prop){}; - GPUd() propagatorInterface(const propagatorInterface&) = delete; - GPUd() propagatorInterface& operator=(const propagatorInterface&) = delete; + GPUd() propagatorInterface(const propagatorParam* prop) : mProp(prop){}; + GPUd() propagatorInterface(const propagatorInterface&) = delete; + GPUd() propagatorInterface& operator=(const propagatorInterface&) = delete; GPUdi() bool propagateToX(float x, float maxSnp, float maxStep) { return mProp->PropagateToXBxByBz(*mParam, x, maxSnp, maxStep); } GPUdi() int getPropagatedYZ(float x, float& projY, float& projZ) { return static_cast(mParam->getYZAt(x, mProp->getNominalBz(), projY, projZ)); } @@ -206,10 +206,10 @@ template <> class trackInterface : public GPUTPCGMTrackParam { public: - GPUdDefault() trackInterface() CON_DEFAULT; - GPUd() trackInterface(const GPUTPCGMTrackParam& param) CON_DELETE; - GPUd() trackInterface(const GPUTPCGMMergedTrack& trk) : GPUTPCGMTrackParam(trk.GetParam()), mAlpha(trk.GetAlpha()) {} - GPUd() trackInterface(const gputpcgmmergertypes::GPUTPCOuterParam& param) : GPUTPCGMTrackParam(), mAlpha(param.alpha) + GPUdDefault() trackInterface() CON_DEFAULT; + GPUd() trackInterface(const GPUTPCGMTrackParam& param) CON_DELETE; + GPUd() trackInterface(const GPUTPCGMMergedTrack& trk) : GPUTPCGMTrackParam(trk.GetParam()), mAlpha(trk.GetAlpha()) {} + GPUd() trackInterface(const gputpcgmmergertypes::GPUTPCOuterParam& param) : GPUTPCGMTrackParam(), mAlpha(param.alpha) { SetX(param.X); for (int i = 0; i < 5; i++) { @@ -220,11 +220,11 @@ class trackInterface : public GPUTPCGMTrackParam } }; #ifdef GPUCA_NOCOMPAT - GPUdDefault() trackInterface(const trackInterface& param) = default; - GPUdDefault() trackInterface& operator=(const trackInterface& param) = default; + GPUdDefault() trackInterface(const trackInterface& param) = default; + GPUdDefault() trackInterface& operator=(const trackInterface& param) = default; #endif #ifdef GPUCA_ALIROOT_LIB - trackInterface(const AliHLTExternalTrackParam& param) : GPUTPCGMTrackParam(), mAlpha(param.fAlpha) + trackInterface(const AliHLTExternalTrackParam& param) : GPUTPCGMTrackParam(), mAlpha(param.fAlpha) { SetX(param.fX); SetPar(0, param.fY); @@ -238,7 +238,7 @@ class trackInterface : public GPUTPCGMTrackParam }; #endif #if defined(GPUCA_HAVE_O2HEADERS) - GPUd() trackInterface(const o2::dataformats::TrackTPCITS& param) : 
GPUTPCGMTrackParam(), mAlpha(param.getParamOut().getAlpha()) + GPUd() trackInterface(const o2::dataformats::TrackTPCITS& param) : GPUTPCGMTrackParam(), mAlpha(param.getParamOut().getAlpha()) { SetX(param.getParamOut().getX()); SetPar(0, param.getParamOut().getY()); @@ -250,7 +250,7 @@ class trackInterface : public GPUTPCGMTrackParam SetCov(i, param.getParamOut().getCov()[i]); } } - GPUd() trackInterface(const o2::tpc::TrackTPC& param) : GPUTPCGMTrackParam(), mAlpha(param.getParamOut().getAlpha()) + GPUd() trackInterface(const o2::tpc::TrackTPC& param) : GPUTPCGMTrackParam(), mAlpha(param.getParamOut().getAlpha()) { SetX(param.getParamOut().getX()); SetPar(0, param.getParamOut().getY()); @@ -307,7 +307,7 @@ class propagatorInterface : public GPUTPCGMPropagator { public: typedef GPUTPCGMPolynomialField propagatorParam; - GPUd() propagatorInterface(const propagatorParam* pField) : GPUTPCGMPropagator(), mTrack(nullptr) + GPUd() propagatorInterface(const propagatorParam* pField) : GPUTPCGMPropagator(), mTrack(nullptr) { this->SetMaterialTPC(); this->SetPolynomialField(pField); @@ -316,8 +316,8 @@ class propagatorInterface : public GPUTPCGMPropagator this->SetFitInProjections(0); this->SelectFieldRegion(GPUTPCGMPropagator::TRD); }; - propagatorInterface(const propagatorInterface&) CON_DELETE; - propagatorInterface& operator=(const propagatorInterface&) CON_DELETE; + propagatorInterface(const propagatorInterface&) CON_DELETE; + propagatorInterface& operator=(const propagatorInterface&) CON_DELETE; GPUd() void setTrack(trackInterface* trk) { SetTrack(trk, trk->getAlpha()); From 77a3db35b839e0b2ec50598d7976339fbc2e0527 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 28 Nov 2023 15:07:11 +0100 Subject: [PATCH 138/726] C++20 fixes * Using "{T}" in a format string is not actually allowed because it's interpreted as the name of the placeholder. * GlobalFwdTrack needs an explicit constructor from o2::track::TrackParCovFwd --- .../ReconstructionDataFormats/GlobalFwdTrack.h | 1 + Detectors/TPC/qc/src/Tracks.cxx | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/DataFormats/Reconstruction/include/ReconstructionDataFormats/GlobalFwdTrack.h b/DataFormats/Reconstruction/include/ReconstructionDataFormats/GlobalFwdTrack.h index 15d816b7b7727..5d13a216316ef 100644 --- a/DataFormats/Reconstruction/include/ReconstructionDataFormats/GlobalFwdTrack.h +++ b/DataFormats/Reconstruction/include/ReconstructionDataFormats/GlobalFwdTrack.h @@ -31,6 +31,7 @@ class GlobalFwdTrack : public o2::track::TrackParCovFwd, public o2::dataformats: public: GlobalFwdTrack() = default; GlobalFwdTrack(const GlobalFwdTrack& t) = default; + GlobalFwdTrack(o2::track::TrackParCovFwd const& t) { *this = t; } ~GlobalFwdTrack() = default; SMatrix5 computeResiduals2Cov(const o2::track::TrackParCovFwd& t) const diff --git a/Detectors/TPC/qc/src/Tracks.cxx b/Detectors/TPC/qc/src/Tracks.cxx index 86f13d60a00fa..7e077487a9ef5 100644 --- a/Detectors/TPC/qc/src/Tracks.cxx +++ b/Detectors/TPC/qc/src/Tracks.cxx @@ -71,26 +71,26 @@ void Tracks::initializeHistograms() mMapHist["h2DQOverPtPhiAside"] = std::make_unique("h2DQOverPtPhiAside", "Charger over p_T vs. phi, A side;phi;q/p_T", 360, 0., 2 * M_PI, 400, -20., 20.); mMapHist["h2DQOverPtPhiCside"] = std::make_unique("h2DQOverPtPhiCside", "Charger over p_T vs. 
phi, C side;phi;q/p_T", 360, 0., 2 * M_PI, 400, -20., 20.); // eta vs pt and phi vs pt possitive and negative signs - mMapHist["hEtaVsPtPos"] = std::make_unique("hEtaVsPtPos", "#eta vs. p_{T} (Pos.);p_{T};eta", logPtBinning.size() - 1, logPtBinning.data(), 400, -2., 2.); - mMapHist["hEtaVsPtNeg"] = std::make_unique("hEtaVsPtNeg", "#eta vs. p_{T} (Neg.);p_{T};eta", logPtBinning.size() - 1, logPtBinning.data(), 400, -2., 2.); - mMapHist["hPhiVsPtPos"] = std::make_unique("hPhiVsPtPos", "#phi vs. p_{T} (Pos.);p_{T};phi", logPtBinning.size() - 1, logPtBinning.data(), 360, 0., 2 * M_PI); - mMapHist["hPhiVsPtNeg"] = std::make_unique("hPhiVsPtNeg", "#phi vs. p_{T} (Neg.);p_{T};phi", logPtBinning.size() - 1, logPtBinning.data(), 360, 0., 2 * M_PI); + mMapHist["hEtaVsPtPos"] = std::make_unique("hEtaVsPtPos", "#eta vs. p_{{T}} (Pos.);p_{{T}};eta", logPtBinning.size() - 1, logPtBinning.data(), 400, -2., 2.); + mMapHist["hEtaVsPtNeg"] = std::make_unique("hEtaVsPtNeg", "#eta vs. p_{{T}} (Neg.);p_{{T}};eta", logPtBinning.size() - 1, logPtBinning.data(), 400, -2., 2.); + mMapHist["hPhiVsPtPos"] = std::make_unique("hPhiVsPtPos", "#phi vs. p_{{T}} (Pos.);p_{{T}};phi", logPtBinning.size() - 1, logPtBinning.data(), 360, 0., 2 * M_PI); + mMapHist["hPhiVsPtNeg"] = std::make_unique("hPhiVsPtNeg", "#phi vs. p_{{T}} (Neg.);p_{{T}};phi", logPtBinning.size() - 1, logPtBinning.data(), 360, 0., 2 * M_PI); // 1d histograms mMapHist["hEtaRatio"] = std::make_unique("hEtaRatio", "Pseudorapidity, ratio neg./pos. ;eta", 400, -2., 2.); mMapHist["hPhiAsideRatio"] = std::make_unique("hPhiAsideRatio", "Azimuthal angle, A side, ratio neg./pos. ;phi", 360, 0., 2 * M_PI); mMapHist["hPhiCsideRatio"] = std::make_unique("hPhiCsideRatio", "Azimuthal angle, C side, ratio neg./pos. ;phi", 360, 0., 2 * M_PI); - mMapHist["hPtRatio"] = std::make_unique("hPtRatio", "Transverse momentum, ratio neg./pos. ;p_T", logPtBinning.size() - 1, logPtBinning.data()); + mMapHist["hPtRatio"] = std::make_unique("hPtRatio", "Transverse momentum, ratio neg./pos. 
;p_{{T}}", logPtBinning.size() - 1, logPtBinning.data()); // DCA Histograms for (const auto type : types) { mMapHist[fmt::format("hDCAr_{}", type).data()] = std::make_unique(fmt::format("hDCAr_{}", type).data(), fmt::format("DCAr {};phi;DCAr (cm)", type).data(), 360, 0, o2::math_utils::twoPid(), 250, -10., 10.); } // DCA vs variables Histograms - mMapHist["hDCArVsPtPos"] = std::make_unique("hDCArVsPtPos", "DCAr Pos;#it{p}_{T}T (GeV/#it{c});DCAr (cm)", logPtBinning.size() - 1, logPtBinning.data(), 250, -10., 10.); + mMapHist["hDCArVsPtPos"] = std::make_unique("hDCArVsPtPos", "DCAr Pos;#it{p}_{{T}}T (GeV/#it{c});DCAr (cm)", logPtBinning.size() - 1, logPtBinning.data(), 250, -10., 10.); mMapHist["hDCArVsEtaPos"] = std::make_unique("hDCArVsEtaPos", "DCAr Pos;#eta;DCAr (cm)", 400, -2., 2., 250, -10., 10.); mMapHist["hDCArVsNClsPos"] = std::make_unique("hDCArVsNClsPos", "DCAr Pos;NClusters;DCAr (cm)", 400, -0.5, 399.5, 250, -10., 10.); - mMapHist["hDCArVsPtNeg"] = std::make_unique("hDCArVsPtNeg", "DCAr Neg;#it{p}_{T}T (GeV/#it{c});DCAr (cm)", logPtBinning.size() - 1, logPtBinning.data(), 250, -10., 10.); + mMapHist["hDCArVsPtNeg"] = std::make_unique("hDCArVsPtNeg", "DCAr Neg;#it{p}_{{T}}T (GeV/#it{c});DCAr (cm)", logPtBinning.size() - 1, logPtBinning.data(), 250, -10., 10.); mMapHist["hDCArVsEtaNeg"] = std::make_unique("hDCArVsEtaNeg", "DCAr Neg;#eta;DCAr (cm)", 400, -2., 2., 250, -10., 10.); mMapHist["hDCArVsNClsNeg"] = std::make_unique("hDCArVsNClsNeg", "DCAr Neg;NClusters;DCAr (cm)", 400, -0.5, 399.5, 250, -10., 10.); } @@ -173,7 +173,7 @@ bool Tracks::processTrack(const o2::tpc::TrackTPC& track) reported = true; } dcaHist->SetTitle(fmt::format("DCAr {} o2::base::Propagator not properly initialized", types[type]).data()); - dcaHistPT->SetTitle(fmt::format("DCAr p_{T} {} o2::base::Propagator not properly initialized", signType).data()); + dcaHistPT->SetTitle(fmt::format("DCAr p_{{T}} {} o2::base::Propagator not properly initialized", signType).data()); dcaHistEta->SetTitle(fmt::format("DCAr eta {} o2::base::Propagator not properly initialized", signType).data()); dcaHistNCluster->SetTitle(fmt::format("DCAr nClusters {} o2::base::Propagator not properly initialized", signType).data()); } From 6bc6a196c45c177b1fdd4cb28a678cc4bac43319 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 28 Nov 2023 15:24:56 +0100 Subject: [PATCH 139/726] C++20 fixes --- Detectors/MUON/MCH/Raw/Decoder/src/UserLogicEndpointDecoder.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/MUON/MCH/Raw/Decoder/src/UserLogicEndpointDecoder.h b/Detectors/MUON/MCH/Raw/Decoder/src/UserLogicEndpointDecoder.h index 8471497c18708..8b49a006aca0e 100644 --- a/Detectors/MUON/MCH/Raw/Decoder/src/UserLogicEndpointDecoder.h +++ b/Detectors/MUON/MCH/Raw/Decoder/src/UserLogicEndpointDecoder.h @@ -158,7 +158,7 @@ size_t UserLogicEndpointDecoder::append(Payload buffer) } mElinkDecoders.emplace(static_cast(gbt), - impl::makeArray<40>([=](size_t i) { + impl::makeArray<40>([solarId, *this](size_t i) { DsElecId dselec{solarId.value(), static_cast(i / 5), static_cast(i % 5)}; return ElinkDecoder(dselec, mDecodedDataHandlers); })); From c2b5ea091383108236823a55613ca83665bb7273 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 28 Nov 2023 15:54:02 +0100 Subject: [PATCH 140/726] More asymmetric operator== --- .../include/DataCompression/dc_primitives.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/Utilities/DataCompression/include/DataCompression/dc_primitives.h b/Utilities/DataCompression/include/DataCompression/dc_primitives.h index fc0f941fe2358..0e43c07ee7e9c 100644 --- a/Utilities/DataCompression/include/DataCompression/dc_primitives.h +++ b/Utilities/DataCompression/include/DataCompression/dc_primitives.h @@ -170,9 +170,9 @@ class ExampleAlphabet // reference reference operator*(); // comparison - bool operator==(const self_type& other); + bool operator==(const self_type& other) const; // comparison - bool operator!=(const self_type& other); + bool operator!=(const self_type& other) const; private: }; @@ -300,8 +300,8 @@ class ContiguousAlphabet // pointer operator->() const {return &mValue;} // reference operator[](size_type n) const; - bool operator==(const self_type& other) { return mValue == other.mValue && mIsEnd == other.mIsEnd; } - bool operator!=(const self_type& other) { return not(*this == other); } + bool operator==(const self_type& other) const { return mValue == other.mValue && mIsEnd == other.mIsEnd; } + bool operator!=(const self_type& other) const { return not(*this == other); } private: value_type mValue; From fd3ec296323aa63e8ca93ca779fe81d8bcb8e41f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 28 Nov 2023 17:54:56 +0100 Subject: [PATCH 141/726] DPL: mark dummy sink as resilient to expendable task failures --- Framework/Core/src/CommonDataProcessors.cxx | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index e9cdc97e316ca..3c86f921c01b8 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -599,9 +599,8 @@ DataProcessorSpec CommonDataProcessors::getDummySink(std::vector cons .options = !rateLimitingChannelConfig.empty() ? std::vector{{"channel-config", VariantType::String, // raw input channel rateLimitingChannelConfig, {"Out-of-band channel config"}}} - : std::vector() - - }; + : std::vector(), + .labels = {{"resilient"}}}; } AlgorithmSpec CommonDataProcessors::wrapWithRateLimiting(AlgorithmSpec spec) From 65d05ab1ff55e03b6142bc42439a4c1a7ea15677 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 29 Nov 2023 10:05:51 +0100 Subject: [PATCH 142/726] Fix more enums used as int for C++20 --- Detectors/ITSMFT/ITS/reconstruction/src/CookedTracker.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Detectors/ITSMFT/ITS/reconstruction/src/CookedTracker.cxx b/Detectors/ITSMFT/ITS/reconstruction/src/CookedTracker.cxx index e68d26a4d12d3..5c804f6705dfd 100644 --- a/Detectors/ITSMFT/ITS/reconstruction/src/CookedTracker.cxx +++ b/Detectors/ITSMFT/ITS/reconstruction/src/CookedTracker.cxx @@ -739,7 +739,7 @@ void CookedTracker::Layer::init() Float_t phi = xyz.Phi(); o2::math_utils::bringTo02Pi(phi); mPhi.push_back(phi); - Int_t s = phi * kNSectors / k2PI; + Int_t s = phi * (int)kNSectors / k2PI; mSectors[s < kNSectors ? 
s : kNSectors - 1].emplace_back(i, c->getZ()); } @@ -792,8 +792,8 @@ void CookedTracker::Layer::selectClusters(std::vector& selec, Float_t phi Float_t dphi = dy / mR; - int smin = (phi - dphi) / k2PI * kNSectors; - int ds = (phi + dphi) / k2PI * kNSectors - smin + 1; + int smin = (phi - dphi) / k2PI * (int)kNSectors; + int ds = (phi + dphi) / k2PI * (int)kNSectors - smin + 1; smin = (smin + kNSectors) % kNSectors; From 0805af741cd7aa5f4c48386b25dcc7c05420b416 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 29 Nov 2023 14:24:57 +0100 Subject: [PATCH 143/726] Fix more enums used as integers --- .../GRP/calibration/include/GRPCalibration/GRPDCSDPsProcessor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/GRP/calibration/include/GRPCalibration/GRPDCSDPsProcessor.h b/Detectors/GRP/calibration/include/GRPCalibration/GRPDCSDPsProcessor.h index 0270136abbf77..6bb93f26066f7 100644 --- a/Detectors/GRP/calibration/include/GRPCalibration/GRPDCSDPsProcessor.h +++ b/Detectors/GRP/calibration/include/GRPCalibration/GRPDCSDPsProcessor.h @@ -174,7 +174,7 @@ struct GRPLHCInfo { static constexpr std::string_view bptxPhaseShiftAliases[NBPTXPhaseShiftAliases] = {"BPTX_Phase_Shift_B1", "BPTX_Phase_Shift_B2"}; static constexpr std::string_view lumiAliases[NLumiAliases] = {"ALI_Lumi_Total_Inst"}; static constexpr std::string_view lhcStringAliases[NLHCStringAliases] = {"ALI_Lumi_Source_Name", "BEAM_MODE", "MACHINE_MODE"}; - static constexpr int nAliasesLHC = NCollimatorAliases + NBeamAliases + NBkgAliases + NBPTXAliases + NBPTXPhaseAliases + NBPTXPhaseRMSAliases + NBPTXPhaseShiftAliases + NLumiAliases + NLHCStringAliases; + static constexpr int nAliasesLHC = (int)NCollimatorAliases + (int)NBeamAliases + (int)NBkgAliases + (int)NBPTXAliases + (int)NBPTXPhaseAliases + (int)NBPTXPhaseRMSAliases + (int)NBPTXPhaseShiftAliases + (int)NLumiAliases + (int)NLHCStringAliases; std::array>, 2> mIntensityBeam; std::array>, 3> mBackground; From 66d63444fb4d45253cc5bec5f4d5a915aad86343 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 30 Nov 2023 13:52:00 +0100 Subject: [PATCH 144/726] DPL: add support for Lifetime::sporadic to query builder --- Framework/Core/src/DataDescriptorQueryBuilder.cxx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Framework/Core/src/DataDescriptorQueryBuilder.cxx b/Framework/Core/src/DataDescriptorQueryBuilder.cxx index f79698ea2a08d..41a14d06f3acc 100644 --- a/Framework/Core/src/DataDescriptorQueryBuilder.cxx +++ b/Framework/Core/src/DataDescriptorQueryBuilder.cxx @@ -156,6 +156,9 @@ std::vector DataDescriptorQueryBuilder::parse(char const* config) if (attribute.name == "lifetime" && attribute.defaultValue.get() == "condition") { lifetime = Lifetime::Condition; } + if (attribute.name == "lifetime" && attribute.defaultValue.get() == "sporadic") { + lifetime = Lifetime::Sporadic; + } } return InputSpec{binding, std::move(*lastMatcher.release()), lifetime, attributes}; }; From 0217381a66bb0131a20f6e2c86d31c80ac2b640c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 30 Nov 2023 16:20:00 +0100 Subject: [PATCH 145/726] DPL: better message when child dies --- Framework/Core/src/runDataProcessing.cxx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 724383ce501e6..0b29bfcdcb599 100644 --- 
a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -864,10 +864,9 @@ bool processSigChild(DeviceInfos& infos, DeviceSpecs& specs) int status; pid_t pid = waitpid((pid_t)(-1), &status, WNOHANG); if (pid > 0) { + // Normal exit int es = WEXITSTATUS(status); - if (WIFEXITED(status) == false || es != 0) { - es = WIFEXITED(status) ? es : 128 + es; // Look for the name associated to the pid in the infos std::string id = "unknown"; assert(specs.size() == infos.size()); @@ -882,10 +881,11 @@ bool processSigChild(DeviceInfos& infos, DeviceSpecs& specs) } else if (forceful_exit) { LOGP(error, "pid {} ({}) was forcefully terminated after being requested to quit", pid, id); } else { - if (es == 128) { - LOGP(error, "Workflow crashed - pid {} ({}) was killed abnormally with exit code {}, could be out of memory killer, segfault, unhandled exception, SIGKILL, etc...", pid, id, es); + if (WIFSIGNALED(status)) { + int exitSignal = WTERMSIG(status); + LOGP(error, "Workflow crashed - pid {} ({}) was killed abnormally with {} and exited with {}", pid, id, strsignal(exitSignal), es); } else { - LOGP(error, "pid {} ({}) crashed with or was killed with exit code {}", pid, id, es); + LOGP(error, "pid {} ({}) exited with {}", pid, id, es); } } hasError |= true; From 88c53204fcd0ac78f1db69b6e7992baa1c245e6d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 30 Nov 2023 19:00:43 +0100 Subject: [PATCH 146/726] DPL: ignored inputs are "Sporadic" by construction. --- Framework/Core/src/WorkflowHelpers.cxx | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 2484f021ff0e2..0e26762829e14 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -732,10 +732,7 @@ void WorkflowHelpers::injectServiceDevices(WorkflowSpec& workflow, ConfigContext std::vector ignored = unmatched; ignored.insert(ignored.end(), redirectedOutputsInputs.begin(), redirectedOutputsInputs.end()); for (auto& ignoredInput : ignored) { - if (ignoredInput.lifetime == Lifetime::OutOfBand) { - // FIXME: Use Lifetime::Dangling when fully working? - ignoredInput.lifetime = Lifetime::Timeframe; - } + ignoredInput.lifetime = Lifetime::Sporadic; } extraSpecs.push_back(CommonDataProcessors::getDummySink(ignored, rateLimitingChannelConfigOutput)); From 494b491ac315a5d7cc92458939b26ddd81d98355 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Dec 2023 10:29:59 +0100 Subject: [PATCH 147/726] DPL: better message when child dies --- Framework/Core/src/runDataProcessing.cxx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 724383ce501e6..54af8e9b4a25e 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -864,10 +864,9 @@ bool processSigChild(DeviceInfos& infos, DeviceSpecs& specs) int status; pid_t pid = waitpid((pid_t)(-1), &status, WNOHANG); if (pid > 0) { + // Normal exit int es = WEXITSTATUS(status); - if (WIFEXITED(status) == false || es != 0) { - es = WIFEXITED(status) ? 
es : 128 + es; // Look for the name associated to the pid in the infos std::string id = "unknown"; assert(specs.size() == infos.size()); @@ -882,8 +881,9 @@ bool processSigChild(DeviceInfos& infos, DeviceSpecs& specs) } else if (forceful_exit) { LOGP(error, "pid {} ({}) was forcefully terminated after being requested to quit", pid, id); } else { - if (es == 128) { - LOGP(error, "Workflow crashed - pid {} ({}) was killed abnormally with exit code {}, could be out of memory killer, segfault, unhandled exception, SIGKILL, etc...", pid, id, es); + if (WIFSIGNALED(status)) { + int exitSignal = WTERMSIG(status); + LOGP(error, "Workflow crashed - pid {} ({}) was killed abnormally with {} and exited with {}", pid, id, strsignal(exitSignal), es); } else { LOGP(error, "pid {} ({}) crashed with or was killed with exit code {}", pid, id, es); } From 955d9432cde30f58f596723b75143e60fcd1a5f4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Dec 2023 12:38:50 +0100 Subject: [PATCH 148/726] DPL: explicit cast enum to float Required by C++20. --- GPU/GPUTracking/TRDTracking/GPUTRDTracker.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPU/GPUTracking/TRDTracking/GPUTRDTracker.cxx b/GPU/GPUTracking/TRDTracking/GPUTRDTracker.cxx index 81c452926d7af..46479091d7c3a 100644 --- a/GPU/GPUTracking/TRDTracking/GPUTRDTracker.cxx +++ b/GPU/GPUTracking/TRDTracking/GPUTRDTracker.cxx @@ -998,7 +998,7 @@ GPUd() int GPUTRDTracker_t::GetSector(float alpha) const } else if (alpha >= 2.f * CAMath::Pi()) { alpha -= 2.f * CAMath::Pi(); } - return (int)(alpha * kNSectors / (2.f * CAMath::Pi())); + return (int)(alpha * (float)kNSectors / (2.f * CAMath::Pi())); } template From 917237204dbb86b0835f867c9150f17054dc5178 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:53:34 +0100 Subject: [PATCH 149/726] Remove default Lifetime::Timeframe when instantiating an Output This lifetime field will be removed soon, and this is unambiguous, because it's in any case the default.
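At each call site the change below is mechanical: the explicit Lifetime::Timeframe argument is dropped from the Output constructor because it merely restates the default, for example (taken from the AOD producer diff below):

// before
pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber);
// after: the omitted lifetime defaults to Timeframe
pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber);

Dropping the redundant argument now presumably means these call sites will not need touching again once the field itself is removed.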
--- .../AOD/src/AODMcProducerWorkflowSpec.cxx | 8 +-- Detectors/AOD/src/AODProducerWorkflowSpec.cxx | 8 +-- .../src/CPVBadMapCalibDevice.cxx | 2 +- .../src/CPVGainCalibDevice.cxx | 2 +- .../src/CPVPedestalCalibDevice.cxx | 2 +- .../CPV/workflow/src/ClusterReaderSpec.cxx | 6 +-- .../CPV/workflow/src/ClusterizerSpec.cxx | 16 +++--- .../CPV/workflow/src/DigitReaderSpec.cxx | 6 +-- .../CPV/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../workflow/src/RawToDigitConverterSpec.cxx | 18 +++---- .../CTP/workflow/src/EntropyEncoderSpec.cxx | 2 +- Detectors/CTP/workflow/src/RawDecoderSpec.cxx | 8 +-- .../CTP/workflowIO/src/DigitReaderSpec.cxx | 4 +- .../testWorkflow/src/DCSDataReplaySpec.cxx | 2 +- .../src/DCSRandomDataGeneratorSpec.cxx | 2 +- .../include/EMCALWorkflow/PublisherSpec.h | 11 ++-- .../workflow/src/AnalysisClusterSpec.cxx | 2 +- .../EMCAL/workflow/src/CellConverterSpec.cxx | 6 +-- .../workflow/src/CellRecalibratorSpec.cxx | 10 ++-- .../EMCAL/workflow/src/ClusterizerSpec.cxx | 8 +-- .../EMCAL/workflow/src/EMCALDigitizerSpec.cxx | 10 ++-- .../EMCAL/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../workflow/src/RawToCellConverterSpec.cxx | 6 +-- .../src/StandaloneAODProducerSpec.cxx | 4 +- .../src/emc-channel-data-producer.cxx | 6 +-- .../include/FDDWorkflow/RawReaderFDD.h | 4 +- .../FIT/FDD/workflow/src/DigitReaderSpec.cxx | 8 +-- .../FDD/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../FDD/workflow/src/RecPointReaderSpec.cxx | 4 +- .../FDD/workflow/src/ReconstructorSpec.cxx | 4 +- .../FT0CalibSlewingCollectorSpec.h | 4 +- .../FT0TimeSpectraProcessor-Workflow.cxx | 2 +- .../FIT/FT0/workflow/src/DigitReaderSpec.cxx | 8 +-- .../FT0/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../FT0/workflow/src/RecPointReaderSpec.cxx | 4 +- .../FT0/workflow/src/ReconstructionSpec.cxx | 4 +- .../FIT/FV0/workflow/src/DigitReaderSpec.cxx | 8 +-- .../FV0/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../FV0/workflow/src/RecPointReaderSpec.cxx | 4 +- .../FV0/workflow/src/ReconstructionSpec.cxx | 4 +- .../include/FITWorkflow/RawReaderFIT.h | 14 ++--- .../FOCAL/workflow/src/RawDecoderSpec.cxx | 10 ++-- .../Filtering/src/FilteredTFReaderSpec.cxx | 14 ++--- .../helpers/src/NoInpDummyOutSpec.cxx | 2 +- .../qc/src/ITSTPCMatchingQCSpec.cxx | 2 +- .../readers/src/GlobalFwdTrackReaderSpec.cxx | 4 +- .../readers/src/IRFrameReaderSpec.cxx | 2 +- .../readers/src/MatchedMFTMCHReaderSpec.cxx | 2 +- .../readers/src/PrimaryVertexReaderSpec.cxx | 8 +-- .../readers/src/SecondaryVertexReaderSpec.cxx | 18 +++---- .../src/StrangenessTrackingReaderSpec.cxx | 6 +-- .../readers/src/TrackCosmicsReaderSpec.cxx | 4 +- .../readers/src/TrackTPCITSReaderSpec.cxx | 10 ++-- .../src/CosmicsMatchingSpec.cxx | 4 +- .../src/GlobalFwdMatchingSpec.cxx | 12 ++--- .../src/HMPMatcherSpec.cxx | 4 +- .../src/PrimaryVertexingSpec.cxx | 8 +-- .../src/StrangenessTrackingSpec.cxx | 6 +-- .../src/TOFMatcherSpec.cxx | 54 +++++++++---------- .../src/TPCITSMatchingSpec.cxx | 12 ++--- .../src/VertexTrackMatcherSpec.cxx | 4 +- .../study/src/TPCDataFilter.cxx | 10 ++-- .../src/TPCInterpolationSpec.cxx | 12 ++--- .../src/TPCUnbinnedResidualReaderSpec.cxx | 6 +-- .../HMPID/workflow/src/ClustersReaderSpec.cxx | 4 +- .../HMPID/workflow/src/DataDecoderSpec.cxx | 4 +- .../HMPID/workflow/src/DataDecoderSpec2.cxx | 4 +- .../HMPID/workflow/src/DigitsReaderSpec.cxx | 4 +- .../workflow/src/DigitsToClustersSpec.cxx | 4 +- .../HMPID/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../workflow/src/HMPMatchedReaderSpec.cxx | 4 +- .../src/TestDataReader.cxx | 28 +++++----- 
.../ITSMFT/ITS/workflow/src/ClustererSpec.cxx | 10 ++-- .../ITS/workflow/src/CookedTrackerSpec.cxx | 16 +++--- .../ITS/workflow/src/DCSGeneratorSpec.cxx | 2 +- .../ITS/workflow/src/TrackReaderSpec.cxx | 14 ++--- .../ITSMFT/ITS/workflow/src/TrackerSpec.cxx | 18 +++---- .../ITS/workflow/src/VertexReaderSpec.cxx | 4 +- .../ITSMFT/MFT/workflow/src/ClustererSpec.cxx | 10 ++-- .../MFT/workflow/src/TrackReaderSpec.cxx | 8 +-- .../ITSMFT/MFT/workflow/src/TrackerSpec.cxx | 10 ++-- .../common/workflow/src/ClusterReaderSpec.cxx | 12 ++--- .../common/workflow/src/DigitReaderSpec.cxx | 12 ++--- .../workflow/src/EntropyEncoderSpec.cxx | 2 +- .../common/workflow/src/STFDecoderSpec.cxx | 20 +++---- Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx | 10 ++-- .../MCH/Workflow/src/TrackAtVertexSpec.cxx | 2 +- .../Workflow/src/entropy-encoder-workflow.cxx | 2 +- .../MUON/MID/Workflow/src/ClusterizerSpec.cxx | 8 +-- .../MID/Workflow/src/ColumnDataSpecsUtils.cxx | 4 +- .../src/DecodedDataAggregatorSpec.cxx | 4 +- .../MUON/MID/Workflow/src/DigitReaderSpec.cxx | 10 ++-- .../MID/Workflow/src/EntropyDecoderSpec.cxx | 4 +- .../MID/Workflow/src/EntropyEncoderSpec.cxx | 2 +- .../MUON/MID/Workflow/src/MaskMakerSpec.cxx | 4 +- .../MID/Workflow/src/RawGBTDecoderSpec.cxx | 4 +- .../MUON/MID/Workflow/src/TrackerSpec.cxx | 12 ++--- .../MID/Workflow/src/ZeroSuppressionSpec.cxx | 4 +- .../PHOS/calib/src/PHOSBadMapCalibDevice.cxx | 2 +- .../calib/src/PHOSHGLGRatioCalibDevice.cxx | 2 +- .../calib/src/PHOSPedestalCalibDevice.cxx | 2 +- .../PHOS/workflow/src/CellConverterSpec.cxx | 12 ++--- .../PHOS/workflow/src/CellReaderSpec.cxx | 6 +-- .../PHOS/workflow/src/ClusterizerSpec.cxx | 16 +++--- .../PHOS/workflow/src/DigitReaderSpec.cxx | 6 +-- .../PHOS/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../PHOS/workflow/src/EventBuilderSpec.cxx | 4 +- .../workflow/src/RawToCellConverterSpec.cxx | 12 ++--- Detectors/PHOS/workflow/src/ReaderSpec.cxx | 22 ++++---- .../src/StandaloneAODProducerSpec.cxx | 4 +- .../testWorkflow/TOFCalibCollectorSpec.h | 4 +- .../workflow/src/CompressedDecodingTask.cxx | 12 ++--- .../TOF/workflow/src/EntropyDecoderSpec.cxx | 2 +- .../TOF/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../TOF/workflow/src/TOFClusterizerSpec.cxx | 14 ++--- .../workflowIO/src/CalibClusReaderSpec.cxx | 8 +-- .../workflowIO/src/CalibInfoReaderSpec.cxx | 4 +- .../TOF/workflowIO/src/ClusterReaderSpec.cxx | 6 +-- .../TOF/workflowIO/src/DigitReaderSpec.cxx | 12 ++--- .../workflowIO/src/TOFMatchedReaderSpec.cxx | 6 +-- .../include/TPCWorkflow/TPCFLPIDCSpec.h | 6 +-- .../TPCWorkflow/TPCFourierTransformEPNSpec.h | 2 +- .../include/TPCWorkflow/TPCIntegrateIDCSpec.h | 4 +- .../include/TPCReaderWorkflow/PublisherSpec.h | 7 ++- .../workflow/readers/src/PublisherSpec.cxx | 4 +- .../workflow/readers/src/TrackReaderSpec.cxx | 6 +-- .../readers/src/TriggerReaderSpec.cxx | 2 +- .../workflow/src/ChunkedDigitPublisher.cxx | 5 +- .../workflow/src/ClusterDecoderRawSpec.cxx | 8 +-- Detectors/TPC/workflow/src/ClustererSpec.cxx | 8 +-- .../TPC/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../TPC/workflow/src/KryptonClustererSpec.cxx | 2 +- .../TPC/workflow/src/KryptonRawFilterSpec.cxx | 2 +- .../TPC/workflow/src/LinkZSToDigitsSpec.cxx | 2 +- .../TPC/workflow/src/MIPTrackFilterSpec.cxx | 2 +- .../TPC/workflow/src/RawToDigitsSpec.cxx | 4 +- .../TPC/workflow/src/SACProcessorSpec.cxx | 4 +- Detectors/TPC/workflow/src/ZSSpec.cxx | 6 +-- .../workflow/test/test_ft_EPN_Aggregator.cxx | 2 +- .../TRD/reconstruction/src/EventRecord.cxx | 10 ++-- 
.../include/TRDWorkflow/KrClustererSpec.h | 4 +- .../TRDWorkflow/TRDGlobalTrackingQCSpec.h | 2 +- .../include/TRDWorkflow/TRDPulseHeightSpec.h | 4 +- .../workflow/io/src/TRDCalibReaderSpec.cxx | 2 +- .../workflow/io/src/TRDDigitReaderSpec.cxx | 6 +-- .../TRD/workflow/io/src/TRDPHReaderSpec.cxx | 2 +- .../workflow/io/src/TRDTrackReaderSpec.cxx | 16 +++--- .../workflow/io/src/TRDTrackletReaderSpec.cxx | 10 ++-- .../TRD/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../TRD/workflow/src/TRDDigitizerSpec.cxx | 8 +-- .../workflow/src/TRDGlobalTrackingSpec.cxx | 16 +++--- .../src/TRDTrackletTransformerSpec.cxx | 4 +- .../TRD/workflow/src/TRDTrapSimulatorSpec.cxx | 8 +-- .../TRD/workflow/src/TrackBasedCalibSpec.cxx | 4 +- .../ITS3/workflow/src/ClustererSpec.cxx | 10 ++-- .../ITS3/workflow/src/DigitReaderSpec.cxx | 10 ++-- .../ITS3/workflow/src/TrackReaderSpec.cxx | 14 ++--- .../ITS3/workflow/src/TrackerSpec.cxx | 18 +++---- .../ITS3/workflow/src/VertexReaderSpec.cxx | 4 +- Detectors/Vertexing/src/SVertexer.cxx | 24 ++++----- Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx | 6 +-- Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx | 4 +- .../ZDC/calib/src/WaveformCalibEPNSpec.cxx | 2 +- .../ZDC/raw/include/ZDCRaw/RawReaderZDC.h | 6 +-- .../ZDC/workflow/src/DigitReaderSpec.cxx | 8 +-- Detectors/ZDC/workflow/src/DigitRecoSpec.cxx | 10 ++-- .../ZDC/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../ZDC/workflow/src/RecEventReaderSpec.cxx | 8 +-- Detectors/ZDC/workflow/src/RecoReaderSpec.cxx | 8 +-- GPU/Workflow/src/GPUWorkflowITS.cxx | 18 +++---- GPU/Workflow/src/GPUWorkflowPipeline.cxx | 2 +- GPU/Workflow/src/GPUWorkflowSpec.cxx | 14 ++--- .../src/CPVDigitizerSpec.cxx | 8 +-- .../src/CTPDigitizerSpec.cxx | 4 +- .../src/FDDDigitizerSpec.cxx | 10 ++-- .../src/FT0DigitizerSpec.cxx | 10 ++-- .../src/FV0DigitizerSpec.cxx | 10 ++-- .../src/HMPIDDigitizerSpec.cxx | 8 +-- .../src/ITS3DigitizerSpec.cxx | 10 ++-- .../src/ITSMFTDigitizerSpec.cxx | 10 ++-- .../src/MCHDigitizerSpec.cxx | 8 +-- .../DigitizerWorkflow/src/MCTruthReaderSpec.h | 4 +- .../src/MCTruthSourceSpec.cxx | 4 +- .../src/MCTruthWriterSpec.cxx | 2 +- .../src/MIDDigitizerSpec.cxx | 8 +-- .../src/PHOSDigitizerSpec.cxx | 8 +-- .../src/TOFDigitizerSpec.cxx | 12 ++--- .../src/TPCDigitizerSpec.cxx | 12 ++--- .../src/ZDCDigitizerSpec.cxx | 10 ++-- .../DataSampling/src/DataSamplingPolicy.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 1 - run/dpl_eventgen.cxx | 4 +- run/o2sim_hepmc_publisher.cxx | 4 +- run/o2sim_kine_publisher.cxx | 4 +- run/o2sim_mctracks_to_aod.cxx | 4 +- 195 files changed, 680 insertions(+), 688 deletions(-) diff --git a/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx b/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx index 2e60b70c9855a..4c1623188e55c 100644 --- a/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx +++ b/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx @@ -296,13 +296,13 @@ void AODMcProducerWorkflowDPL::run(ProcessingContext& pc) TString ROOTVersion = ROOT_RELEASE; mMetaDataKeys = {"DataType", "Run", "O2Version", "ROOTVersion", "RecoPassName", "AnchorProduction", "AnchorPassName", "LPMProductionTag"}; mMetaDataVals = {dataType, "3", O2Version, ROOTVersion, mRecoPass, mAnchorProd, mAnchorPass, mLPMProdTag}; - pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0, Lifetime::Timeframe}, mMetaDataKeys); - pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0, Lifetime::Timeframe}, mMetaDataVals); + pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0}, mMetaDataKeys); + pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 
0}, mMetaDataVals); mIsMDSent = true; } - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx index 323d4b9f7f41a..e97c90f14f64f 100644 --- a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx +++ b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx @@ -2241,11 +2241,11 @@ void AODProducerWorkflowDPL::run(ProcessingContext& pc) TString ROOTVersion = ROOT_RELEASE; mMetaDataKeys = {"DataType", "Run", "O2Version", "ROOTVersion", "RecoPassName", "AnchorProduction", "AnchorPassName", "LPMProductionTag"}; mMetaDataVals = {dataType, "3", O2Version, ROOTVersion, mRecoPass, mAnchorProd, mAnchorPass, mLPMProdTag}; - pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0, Lifetime::Timeframe}, mMetaDataKeys); - pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0, Lifetime::Timeframe}, mMetaDataVals); + pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0}, mMetaDataKeys); + pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0}, mMetaDataVals); - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); mTimer.Stop(); } diff --git a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx index b1c61d25da9c8..f2a08e280f954 100644 --- a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx +++ b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx @@ -174,7 +174,7 @@ void CPVBadMapCalibDevice::sendOutput(DataAllocator& output) output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "CPV_BadChanMap", subSpec}, info); } - output.snapshot(o2::framework::Output{"CPV", "BADMAPCHANGE", 0, o2::framework::Lifetime::Timeframe}, mMapDiff); + output.snapshot(o2::framework::Output{"CPV", "BADMAPCHANGE", 0}, mMapDiff); } bool CPVBadMapCalibDevice::differFromCurrent() diff --git a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx index b5e98f00d021f..d7f187e3a88cc 100644 --- a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx +++ b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx @@ -143,7 +143,7 @@ void CPVGainCalibDevice::sendOutput(DataAllocator& output) fout.Close(); } // Anyway send change to QC - output.snapshot(o2::framework::Output{"CPV", "GAINDIFF", 0, o2::framework::Lifetime::Timeframe}, mGainRatio); + output.snapshot(o2::framework::Output{"CPV", "GAINDIFF", 0}, mGainRatio); } void CPVGainCalibDevice::calculateGains() diff --git a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx index 125e8e1bc8c40..5dd414ab7aeb8 100644 --- a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx +++ b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx @@ -118,7 +118,7 @@ void CPVPedestalCalibDevice::sendOutput(DataAllocator& 
output) } // Anyway send change to QC LOG(info) << "[CPVPedestalCalibDevice - run] Writing "; - output.snapshot(o2::framework::Output{"CPV", "PEDDIFF", 0, o2::framework::Lifetime::Timeframe}, mPedDiff); + output.snapshot(o2::framework::Output{"CPV", "PEDDIFF", 0}, mPedDiff); // Write pedestal distributions to calculate bad map std::string filename = mPath + "CPVPedestals.root"; diff --git a/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx b/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx index f9d82eb23bf68..f9d0817325c36 100644 --- a/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx +++ b/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx @@ -44,10 +44,10 @@ void ClusterReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mClusters.size() << " Clusters in " << mTRs.size() << " TriggerRecords at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "CLUSTERS", 0, Lifetime::Timeframe}, mClusters); - pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRIGRECS", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERS", 0}, mClusters); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRIGRECS", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRUEMC", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRUEMC", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/CPV/workflow/src/ClusterizerSpec.cxx b/Detectors/CPV/workflow/src/ClusterizerSpec.cxx index 87d0b86a98f5c..e004c3cec8949 100644 --- a/Detectors/CPV/workflow/src/ClusterizerSpec.cxx +++ b/Detectors/CPV/workflow/src/ClusterizerSpec.cxx @@ -49,14 +49,14 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) if (!digits.size()) { // nothing to process LOG(info) << "ClusterizerSpec::run() : no digits; moving on"; mOutputClusters.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0}, mOutputClusters); mOutputClusterTrigRecs.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0}, mOutputClusterTrigRecs); mCalibDigits.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0, o2::framework::Lifetime::Timeframe}, mCalibDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0}, mCalibDigits); if (mPropagateMC) { mOutputTruthCont.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } return; } @@ -73,12 +73,12 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) LOG(debug) << "CPVClusterizer::run() : Received " << digitsTR.size() << " TR, calling clusterizer ..."; - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0}, 
mOutputClusterTrigRecs); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0, o2::framework::Lifetime::Timeframe}, mCalibDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0}, mCalibDigits); LOG(info) << "Finished, wrote " << mOutputClusters.size() << " clusters, " << mOutputClusterTrigRecs.size() << "TR and " << mOutputTruthCont.getIndexedSize() << " Labels"; } o2::framework::DataProcessorSpec o2::cpv::reco_workflow::getClusterizerSpec(bool propagateMC) diff --git a/Detectors/CPV/workflow/src/DigitReaderSpec.cxx b/Detectors/CPV/workflow/src/DigitReaderSpec.cxx index ba74cb88b10d2..20fe497eb5d0c 100644 --- a/Detectors/CPV/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/CPV/workflow/src/DigitReaderSpec.cxx @@ -44,10 +44,10 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mDigits.size() << " Digits in " << mTRs.size() << " TriggerRecords at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx b/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx index 97f69fe928342..31ed720e66335 100644 --- a/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx @@ -54,7 +54,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"CPV", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"CPV", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, clusters); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx b/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx index fcdc75c38c67f..766902a2fdc95 100644 --- a/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx +++ b/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx @@ -136,11 +136,11 @@ void RawToDigitConverterSpec::run(framework::ProcessingContext& ctx) contDeadBeef == maxWarn ? fmt::format(". 
{} such inputs in row received, stopping reporting", contDeadBeef) : ""); } mOutputDigits.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0}, mOutputDigits); mOutputTriggerRecords.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0}, mOutputTriggerRecords); mOutputHWErrors.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0}, mOutputHWErrors); return; // empty TF, nothing to process } } @@ -272,11 +272,11 @@ void RawToDigitConverterSpec::run(framework::ProcessingContext& ctx) if (skipTF) { // Send no digits mOutputDigits.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0}, mOutputDigits); mOutputTriggerRecords.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0}, mOutputTriggerRecords); // Send errors - ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0}, mOutputHWErrors); return; } } @@ -305,9 +305,9 @@ void RawToDigitConverterSpec::run(framework::ProcessingContext& ctx) digitBuffer.clear(); LOG(info) << "[CPVRawToDigitConverter - run] Sending " << mOutputDigits.size() << " digits in " << mOutputTriggerRecords.size() << "trigger records."; - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0}, mOutputHWErrors); } //_____________________________________________________________________________ o2::framework::DataProcessorSpec o2::cpv::reco_workflow::getRawToDigitConverterSpec(bool askDISTSTF, bool isPedestal, bool useBadChannelMap, bool useGainCalibration) diff --git a/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx b/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx index 7ad4f821c12a9..ca16d10087fa6 100644 --- a/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx @@ -64,7 +64,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) lumi = lumiPrev; } } - auto& buffer = pc.outputs().make>(Output{"CTP", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"CTP", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, lumi); pc.outputs().snapshot({"ctfrep", 0}, iosize); mTimer.Stop(); diff --git 
a/Detectors/CTP/workflow/src/RawDecoderSpec.cxx b/Detectors/CTP/workflow/src/RawDecoderSpec.cxx index 674c2f4208d8f..5f0cb3b759c48 100644 --- a/Detectors/CTP/workflow/src/RawDecoderSpec.cxx +++ b/Detectors/CTP/workflow/src/RawDecoderSpec.cxx @@ -80,10 +80,10 @@ void RawDecoderSpec::run(framework::ProcessingContext& ctx) auto& inputs = ctx.inputs(); auto dummyOutput = [&ctx, this]() { if (this->mDoDigits) { - ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, this->mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0}, this->mOutputDigits); } if (this->mDoLumi) { - ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0, o2::framework::Lifetime::Timeframe}, this->mOutputLumiInfo); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0}, this->mOutputLumiInfo); } }; // if we see requested data type input with 0xDEADBEEF subspec and 0 payload this means that the "delayed message" @@ -117,7 +117,7 @@ void RawDecoderSpec::run(framework::ProcessingContext& ctx) } if (mDoDigits) { LOG(info) << "[CTPRawToDigitConverter - run] Writing " << mOutputDigits.size() << " digits. IR rejected:" << mDecoder.getIRRejected() << " TCR rejected:" << mDecoder.getTCRRejected(); - ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0}, mOutputDigits); } if (mDoLumi) { uint32_t tfCountsT = 0; @@ -157,7 +157,7 @@ void RawDecoderSpec::run(framework::ProcessingContext& ctx) mOutputLumiInfo.printInputs(); LOGP(info, "Orbit {}: {}/{} counts inp1/inp2 in {}/{} HBFs -> lumi_inp1 = {:.3e}+-{:.3e} lumi_inp2 = {:.3e}+-{:.3e}", mOutputLumiInfo.orbit, mCountsT, mCountsV, mNHBIntegratedT, mNHBIntegratedV, mOutputLumiInfo.getLumi(), mOutputLumiInfo.getLumiError(), mOutputLumiInfo.getLumiFV0(), mOutputLumiInfo.getLumiFV0Error()); } - ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0, o2::framework::Lifetime::Timeframe}, mOutputLumiInfo); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0}, mOutputLumiInfo); } } o2::framework::DataProcessorSpec o2::ctp::reco_workflow::getRawDecoderSpec(bool askDISTSTF, bool digits, bool lumi) diff --git a/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx b/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx index 59f608bef2efb..ecf1c2e19b660 100644 --- a/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx +++ b/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx @@ -76,8 +76,8 @@ void DigitReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "DigitReader pushes " << mDigits.size() << " digits at entry " << ent; - pc.outputs().snapshot(Output{"CTP", "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{"CTP", "LUMI", 0, Lifetime::Timeframe}, mLumi); + pc.outputs().snapshot(Output{"CTP", "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{"CTP", "LUMI", 0}, mLumi); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx b/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx index 05daf522f372a..783f6ae76e707 100644 --- a/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx +++ b/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx @@ -88,7 +88,7 @@ void DCSDataReplayer::run(o2::framework::ProcessingContext& pc) LOG(info) << "***************** TF " << tfid << " has generated 
" << dpcoms.size() << " DPs"; - pc.outputs().snapshot(Output{"DCS", mDataDescription, 0, Lifetime::Timeframe}, dpcoms); + pc.outputs().snapshot(Output{"DCS", mDataDescription, 0}, dpcoms); mTFs++; } } // namespace diff --git a/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx b/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx index a82a5b4164e2a..899a4e110aea8 100644 --- a/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx +++ b/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx @@ -153,7 +153,7 @@ void DCSRandomDataGenerator::run(o2::framework::ProcessingContext& pc) auto timeNow = std::chrono::system_clock::now(); timingInfo.creation = std::chrono::duration_cast(timeNow.time_since_epoch()).count(); // in ms - pc.outputs().snapshot(Output{"DCS", mDataDescription, 0, Lifetime::Timeframe}, dpcoms); + pc.outputs().snapshot(Output{"DCS", mDataDescription, 0}, dpcoms); mTFs++; } } // namespace diff --git a/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h b/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h index f583653f1c3cb..468e66c43a0ab 100644 --- a/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h +++ b/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h @@ -60,23 +60,22 @@ framework::DataProcessorSpec getPublisherSpec(PublisherConf const& config, uint3 // a creator callback for the actual reader instance auto creator = [dto, tro, mco, subspec, propagateMC](const char* treename, const char* filename, int nofEvents, Reader::PublishingMode publishingMode, const char* branchname, const char* triggerbranchname, const char* mcbranchname) { - constexpr auto persistency = o2::framework::Lifetime::Timeframe; if (propagateMC) { return std::make_shared(treename, filename, nofEvents, publishingMode, - Output{mco.origin, mco.description, subspec, persistency}, + Output{mco.origin, mco.description, subspec}, mcbranchname, - Reader::BranchDefinition{Output{dto.origin, dto.description, subspec, persistency}, branchname}, - Reader::BranchDefinition{Output{tro.origin, tro.description, subspec, persistency}, triggerbranchname}); + Reader::BranchDefinition{Output{dto.origin, dto.description, subspec}, branchname}, + Reader::BranchDefinition{Output{tro.origin, tro.description, subspec}, triggerbranchname}); } else { return std::make_shared(treename, filename, nofEvents, publishingMode, - Reader::BranchDefinition{Output{dto.origin, dto.description, subspec, persistency}, branchname}, - Reader::BranchDefinition{Output{tro.origin, tro.description, subspec, persistency}, triggerbranchname}); + Reader::BranchDefinition{Output{dto.origin, dto.description, subspec}, branchname}, + Reader::BranchDefinition{Output{tro.origin, tro.description, subspec}, triggerbranchname}); } }; diff --git a/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx b/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx index 2c98a100ac17a..7b81fec681c14 100644 --- a/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx +++ b/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx @@ -148,7 +148,7 @@ void AnalysisClusterSpec::run(framework::ProcessingContext& ctx) } LOG(debug) << "[EMCALClusterizer - run] Writing " << mOutputAnaClusters->size() << " clusters ..."; - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "ANALYSISCLUSTERS", 0, o2::framework::Lifetime::Timeframe}, *mOutputAnaClusters); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "ANALYSISCLUSTERS", 0}, *mOutputAnaClusters); } 
o2::framework::DataProcessorSpec o2::emcal::reco_workflow::getAnalysisClusterSpec(bool useDigits) diff --git a/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx b/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx index 489546a86eeaf..5fa7353e907e2 100644 --- a/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx +++ b/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx @@ -161,10 +161,10 @@ void CellConverterSpec::run(framework::ProcessingContext& ctx) ncellsTrigger = 0; } LOG(debug) << "[EMCALCellConverter - run] Writing " << mOutputCells.size() << " cells ..."; - ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLS", mSubspecificationOut, o2::framework::Lifetime::Timeframe}, mOutputCells); - ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSTRGR", mSubspecificationOut, o2::framework::Lifetime::Timeframe}, mOutputTriggers); + ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLS", mSubspecificationOut}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSTRGR", mSubspecificationOut}, mOutputTriggers); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSMCTR", mSubspecificationOut, o2::framework::Lifetime::Timeframe}, mOutputLabels); + ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSMCTR", mSubspecificationOut}, mOutputLabels); } } diff --git a/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx b/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx index 493c205223161..3f19b49cd73bd 100644 --- a/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx +++ b/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx @@ -113,15 +113,15 @@ void CellRecalibratorSpec::run(framework::ProcessingContext& ctx) } // send recalibrated objects - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", mOutputSubspec, o2::framework::Lifetime::Timeframe}, outputcells); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", mOutputSubspec, o2::framework::Lifetime::Timeframe}, outputtriggers); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", mOutputSubspec}, outputcells); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", mOutputSubspec}, outputtriggers); if (outputMCLabels.has_value()) { LOG(info) << "Timeframe: " << inputMCLabels->getIndexedSize() << " label entries read, " << outputMCLabels->getIndexedSize() << " label entries kept"; - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSMCTR", mOutputSubspec, o2::framework::Lifetime::Timeframe}, outputMCLabels.value()); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSMCTR", mOutputSubspec}, outputMCLabels.value()); } if (mLEDsettings == LEDEventSettings::REDIRECT) { - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", 10, o2::framework::Lifetime::Timeframe}, ledcells); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", 10, o2::framework::Lifetime::Timeframe}, ledtriggers); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", 10}, ledcells); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", 10}, ledtriggers); } } diff --git a/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx b/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx index eaa00d6d3d0b6..f938d02ce7e3f 100644 --- a/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx +++ 
b/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx @@ -107,11 +107,11 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) currentStartIndices = mOutputCellDigitIndices->size(); } LOG(debug) << "[EMCALClusterizer - run] Writing " << mOutputClusters->size() << " clusters ..."; - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, *mOutputClusters); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICES", 0, o2::framework::Lifetime::Timeframe}, *mOutputCellDigitIndices); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERS", 0}, *mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICES", 0}, *mOutputCellDigitIndices); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERSTRGR", 0, o2::framework::Lifetime::Timeframe}, *mOutputTriggerRecord); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICESTRGR", 0, o2::framework::Lifetime::Timeframe}, *mOutputTriggerRecordIndices); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERSTRGR", 0}, *mOutputTriggerRecord); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICESTRGR", 0}, *mOutputTriggerRecordIndices); mTimer.Stop(); } diff --git a/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx b/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx index 052e65af00bda..becf3c6cdcfc3 100644 --- a/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx +++ b/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx @@ -140,15 +140,15 @@ void DigitizerSpec::run(framework::ProcessingContext& ctx) mDigitizer.finish(); // here we have all digits and we can send them to consumer (aka snapshot it onto output) - ctx.outputs().snapshot(Output{"EMC", "DIGITS", 0, Lifetime::Timeframe}, mDigitizer.getDigits()); - ctx.outputs().snapshot(Output{"EMC", "TRGRDIG", 0, Lifetime::Timeframe}, mDigitizer.getTriggerRecords()); + ctx.outputs().snapshot(Output{"EMC", "DIGITS", 0}, mDigitizer.getDigits()); + ctx.outputs().snapshot(Output{"EMC", "TRGRDIG", 0}, mDigitizer.getTriggerRecords()); if (ctx.outputs().isAllowed({"EMC", "DIGITSMCTR", 0})) { - ctx.outputs().snapshot(Output{"EMC", "DIGITSMCTR", 0, Lifetime::Timeframe}, mDigitizer.getMCLabels()); + ctx.outputs().snapshot(Output{"EMC", "DIGITSMCTR", 0}, mDigitizer.getMCLabels()); } // EMCAL is always a triggering detector const o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::TRIGGERING; LOG(info) << "EMCAL: Sending ROMode= " << roMode << " to GRPUpdater"; - ctx.outputs().snapshot(Output{"EMC", "ROMode", 0, Lifetime::Timeframe}, roMode); + ctx.outputs().snapshot(Output{"EMC", "ROMode", 0}, roMode); // Create CTP digits std::vector triggerinputs; for (auto& trg : mDigitizer.getTriggerRecords()) { @@ -161,7 +161,7 @@ void DigitizerSpec::run(framework::ProcessingContext& ctx) nextdigit.inputsMask.set(0); triggerinputs.push_back(nextdigit); } - ctx.outputs().snapshot(Output{"EMC", "TRIGGERINPUT", 0, Lifetime::Timeframe}, triggerinputs); + ctx.outputs().snapshot(Output{"EMC", "TRIGGERINPUT", 0}, triggerinputs); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx b/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx index da34fcfd8c534..773c4c65fc9fe 100644 --- a/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx +++ 
b/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx @@ -52,7 +52,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) auto triggers = pc.inputs().get>("triggers"); auto cells = pc.inputs().get>("cells"); - auto& buffer = pc.outputs().make>(Output{"EMC", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"EMC", "CTFDATA", 0}); if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } diff --git a/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx b/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx index 178f4c4418b40..ac09b842d9ab8 100644 --- a/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx +++ b/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx @@ -722,11 +722,11 @@ void RawToCellConverterSpec::handleMinorPageError(const RawReaderMemory::MinorEr void RawToCellConverterSpec::sendData(framework::ProcessingContext& ctx, const std::vector& cells, const std::vector& triggers, const std::vector& decodingErrors) const { constexpr auto originEMC = o2::header::gDataOriginEMC; - ctx.outputs().snapshot(framework::Output{originEMC, "CELLS", mSubspecification, framework::Lifetime::Timeframe}, cells); - ctx.outputs().snapshot(framework::Output{originEMC, "CELLSTRGR", mSubspecification, framework::Lifetime::Timeframe}, triggers); + ctx.outputs().snapshot(framework::Output{originEMC, "CELLS", mSubspecification}, cells); + ctx.outputs().snapshot(framework::Output{originEMC, "CELLSTRGR", mSubspecification}, triggers); if (mCreateRawDataErrors) { LOG(debug) << "Sending " << decodingErrors.size() << " decoding errors"; - ctx.outputs().snapshot(framework::Output{originEMC, "DECODERERR", mSubspecification, framework::Lifetime::Timeframe}, decodingErrors); + ctx.outputs().snapshot(framework::Output{originEMC, "DECODERERR", mSubspecification}, decodingErrors); } } diff --git a/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx b/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx index 152e9dd54119d..73987ce6d1c1b 100644 --- a/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx +++ b/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx @@ -136,8 +136,8 @@ void StandaloneAODProducerSpec::run(ProcessingContext& pc) } // end of event loop // std::cout << "Finished cell loop" << std::endl; - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); mTimer.Stop(); } diff --git a/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx b/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx index f878a933b2d8d..75e2cdbd9fce3 100644 --- a/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx +++ b/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx @@ -160,7 +160,7 @@ DataProcessorSpec generateData(const std::string nameRootFile, const std::string o2::pmr::vector TriggerOutput; TriggerOutput.emplace_back(0, 0, 0, CellOutput.size()); - ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLS", 0, Lifetime::Timeframe}, std::move(CellOutput)); - ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLSTRGR", 0, Lifetime::Timeframe}, std::move(TriggerOutput)); + ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLS", 0}, std::move(CellOutput)); + ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLSTRGR", 
0}, std::move(TriggerOutput)); }}}; -} \ No newline at end of file +} diff --git a/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h b/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h index e9612ae8c8bda..2d92c907c1d79 100644 --- a/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h +++ b/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h @@ -65,8 +65,8 @@ class RawReaderFDD : public RawReaderFDDBaseNorm } void makeSnapshot(o2::framework::ProcessingContext& pc) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mVecDigits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mVecChannelData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSBC", 0}, mVecDigits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSCH", 0}, mVecChannelData); } bool mDumpData; std::vector mVecDigits; diff --git a/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx b/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx index 441c679a327e3..628a2160c6d0c 100644 --- a/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx @@ -81,18 +81,18 @@ void DigitReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "FDD DigitReader pushes " << digitsBC->size() << " digits"; - pc.outputs().snapshot(Output{mOrigin, "DIGITSBC", 0, Lifetime::Timeframe}, *digitsBC); - pc.outputs().snapshot(Output{mOrigin, "DIGITSCH", 0, Lifetime::Timeframe}, *digitsCh); + pc.outputs().snapshot(Output{mOrigin, "DIGITSBC", 0}, *digitsBC); + pc.outputs().snapshot(Output{mOrigin, "DIGITSCH", 0}, *digitsCh); if (mUseMC) { // TODO: To be replaced with sending ConstMCTruthContainer as soon as reco workflow supports it - pc.outputs().snapshot(Output{mOrigin, "TRIGGERINPUT", 0, Lifetime::Timeframe}, *digitsTrig); + pc.outputs().snapshot(Output{mOrigin, "TRIGGERINPUT", 0}, *digitsTrig); std::vector flatbuffer; mcTruthRootBuffer->copyandflatten(flatbuffer); o2::dataformats::MCTruthContainer mcTruth; mcTruth.restore_from(flatbuffer.data(), flatbuffer.size()); - pc.outputs().snapshot(Output{mOrigin, "DIGITLBL", 0, Lifetime::Timeframe}, mcTruth); + pc.outputs().snapshot(Output{mOrigin, "DIGITLBL", 0}, mcTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx b/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx index b79c033771743..abb2518e5ae0b 100644 --- a/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx @@ -54,7 +54,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"FDD", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"FDD", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, channels); if (mSelIR) { mCTFCoder.getIRFramesSelector().clear(); diff --git a/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx b/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx index 3da258a672611..9b612c31d28e6 100644 --- a/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx @@ -51,8 +51,8 @@ void RecPointReader::run(ProcessingContext& pc) 
mTree->GetEntry(ent); LOG(info) << "FDD RecPointReader pushes " << mRecPoints->size() << " recpoints with " << mChannelData->size() << " channels at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, *mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, *mChannelData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, *mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, *mChannelData); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx b/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx index 8d76bc4ee45b5..b7a0b9876a2ee 100644 --- a/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx @@ -54,8 +54,8 @@ void FDDReconstructorDPL::run(ProcessingContext& pc) } // do we ignore MC in this task? LOG(debug) << "FDD reconstruction pushes " << mRecPoints.size() << " RecPoints"; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, mRecChData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, mRecChData); } DataProcessorSpec getFDDReconstructorSpec(bool useMC) diff --git a/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h b/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h index e01fed6f20588..8a1a071a0a043 100644 --- a/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h +++ b/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h @@ -88,8 +88,8 @@ class FT0CalibCollectorDevice : public o2::framework::Task auto entries = collectedInfo.size(); // this means that we are ready to send the output auto entriesPerChannel = mCollector->getEntriesPerChannel(); - output.snapshot(Output{o2::header::gDataOriginFT0, "COLLECTEDINFO", 0, Lifetime::Timeframe}, collectedInfo); - output.snapshot(Output{o2::header::gDataOriginFT0, "ENTRIESCH", 0, Lifetime::Timeframe}, entriesPerChannel); + output.snapshot(Output{o2::header::gDataOriginFT0, "COLLECTEDINFO", 0}, collectedInfo); + output.snapshot(Output{o2::header::gDataOriginFT0, "ENTRIESCH", 0}, entriesPerChannel); mCollector->initOutput(); // reset the output for the next round } } diff --git a/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx b/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx index 541f5a9827a4a..f1dd64a250a3b 100644 --- a/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx +++ b/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx @@ -101,7 +101,7 @@ class FT0TimeSpectraProcessor final : public o2::framework::Task } } - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TIME_SPECTRA", 0, o2::framework::Lifetime::Timeframe}, timeSpectraInfoObject.getBase()); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TIME_SPECTRA", 0}, timeSpectraInfoObject.getBase()); } }; diff --git a/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx b/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx index e4f11d3f5bd1e..09586d778ac15 100644 --- a/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx @@ -64,13 
+64,13 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "FT0DigitReader pushed " << channels.size() << " channels in " << digits.size() << " digits"; - pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0, Lifetime::Timeframe}, digits); - pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0, Lifetime::Timeframe}, channels); + pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0}, digits); + pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0}, channels); if (mUseMC) { - pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0}, labels); } if (mUseTrgInput) { - pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, trgInput); + pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0}, trgInput); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx b/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx index edf86b3d7da0d..81bdc2e729bb4 100644 --- a/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx @@ -55,7 +55,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"FT0", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"FT0", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, channels); if (mSelIR) { mCTFCoder.getIRFramesSelector().clear(); diff --git a/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx b/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx index 69f21abc369c6..ba5ae4aa1356c 100644 --- a/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx @@ -49,8 +49,8 @@ void RecPointReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(debug) << "FT0 RecPointReader pushes " << mRecPoints->size() << " recpoints with " << mChannelData->size() << " channels at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, *mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, *mChannelData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, *mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, *mChannelData); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx b/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx index da5281d7e2ba1..40bc96ebca58e 100644 --- a/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx @@ -74,8 +74,8 @@ void ReconstructionDPL::run(ProcessingContext& pc) mReco.processTF(digits, channels, mRecPoints, mRecChData); // do we ignore MC in this task? 
LOG(debug) << "FT0 reconstruction pushes " << mRecPoints.size() << " RecPoints"; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, mRecChData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, mRecChData); mTimer.Stop(); } diff --git a/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx b/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx index 08a6c4cbb2e24..a49bda2cec18b 100644 --- a/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx @@ -65,13 +65,13 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "FV0DigitReader pushed " << channels.size() << " channels in " << digits.size() << " digits"; - pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0, Lifetime::Timeframe}, digits); - pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0, Lifetime::Timeframe}, channels); + pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0}, digits); + pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0}, channels); if (mUseMC) { - pc.outputs().snapshot(Output{"FV0", "DIGITSMCTR", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"FV0", "DIGITSMCTR", 0}, labels); } if (mUseTrgInput) { - pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, trgInput); + pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0}, trgInput); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx b/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx index a115712a0c980..a25c16a5d697c 100644 --- a/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx @@ -55,7 +55,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"FV0", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"FV0", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, channels); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx b/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx index 163beacf0c140..5997cac500ee6 100644 --- a/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx +++ b/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx @@ -49,8 +49,8 @@ void RecPointReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(debug) << "FV0 RecPointReader pushes " << mRecPoints->size() << " recpoints with " << mChannelData->size() << " channels at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, *mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, *mChannelData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, *mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, *mChannelData); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx b/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx index d48b96fd4e626..520ac4dbaa563 100644 --- a/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx +++ 
b/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx @@ -68,8 +68,8 @@ void ReconstructionDPL::run(ProcessingContext& pc) } LOG(debug) << "FV0 reconstruction pushes " << mRecPoints.size() << " RecPoints"; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, mRecChData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, mRecChData); mTimer.Stop(); } diff --git a/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h b/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h index 5c2cfce0a620c..cfe41e4bb474f 100644 --- a/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h +++ b/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h @@ -165,28 +165,28 @@ class RawReaderFIT : public RawReaderType } void makeSnapshot(o2::framework::ProcessingContext& pc) const { - pc.outputs().snapshot(o2::framework::Output{mDataOrigin, Digit_t::sChannelNameDPL, 0, o2::framework::Lifetime::Timeframe}, mVecDigit); + pc.outputs().snapshot(o2::framework::Output{mDataOrigin, Digit_t::sChannelNameDPL, 0}, mVecDigit); if constexpr (sSubDigitExists) { std::apply([&](const auto&... subDigit) { - ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0, o2::framework::Lifetime::Timeframe}, subDigit)), ...); + ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0}, subDigit)), ...); }, mVecSubDigit); } if constexpr (sSingleSubDigitExists) { std::apply([&](const auto&... singleSubDigit) { - ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0, o2::framework::Lifetime::Timeframe}, singleSubDigit)), ...); + ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0}, singleSubDigit)), ...); }, mVecSingleSubDigit); } if constexpr (sUseTrgInput) { - pc.outputs().snapshot(o2::framework::Output{mDataOrigin, DetTrigInput_t::sChannelNameDPL, 0, o2::framework::Lifetime::Timeframe}, mVecTrgInput); + pc.outputs().snapshot(o2::framework::Output{mDataOrigin, DetTrigInput_t::sChannelNameDPL, 0}, mVecTrgInput); } - pc.outputs().snapshot(o2::framework::Output{mDataOrigin, "RawDataMetric", 0, o2::framework::Lifetime::Timeframe}, mVecRawDataMetric); + pc.outputs().snapshot(o2::framework::Output{mDataOrigin, "RawDataMetric", 0}, mVecRawDataMetric); } template auto& getRefVec(o2::framework::ProcessingContext& pc) { - auto& refVec = pc.outputs().make(o2::framework::Output{mDataOrigin, VecDigitType::value_type::sChannelNameDPL, 0, o2::framework::Lifetime::Timeframe}); + auto& refVec = pc.outputs().make(o2::framework::Output{mDataOrigin, VecDigitType::value_type::sChannelNameDPL, 0}); return refVec; } void enableEmptyTFprotection() @@ -204,4 +204,4 @@ class RawReaderFIT : public RawReaderType } // namespace fit } // namespace o2 -#endif \ No newline at end of file +#endif diff --git a/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx b/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx index d6004561f852d..c7260ae3c9815 100644 --- a/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx +++ b/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx @@ -274,10 +274,10 @@ void RawDecoderSpec::endOfStream(o2::framework::EndOfStreamContext& ec) void RawDecoderSpec::sendOutput(framework::ProcessingContext& ctx) { - 
ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PADLAYERS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputPadLayers); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELHITS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputPixelHits); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELCHIPS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputPixelChips); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "TRIGGERS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PADLAYERS", mOutputSubspec}, mOutputPadLayers); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELHITS", mOutputSubspec}, mOutputPixelHits); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELCHIPS", mOutputSubspec}, mOutputPixelChips); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "TRIGGERS", mOutputSubspec}, mOutputTriggerRecords); } void RawDecoderSpec::resetContainers() @@ -689,4 +689,4 @@ o2::framework::DataProcessorSpec o2::focal::reco_workflow::getRawDecoderSpec(boo {"displayInconsistent", o2::framework::VariantType::Bool, false, {"Display information about inconsistent timeframes"}}, {"pixeltype", o2::framework::VariantType::String, "OB", {"Pixel mapping type"}}, {"pixelmapping", o2::framework::VariantType::String, "default", {"File with pixel mapping"}}}}; -} \ No newline at end of file +} diff --git a/Detectors/Filtering/src/FilteredTFReaderSpec.cxx b/Detectors/Filtering/src/FilteredTFReaderSpec.cxx index 3165208aef246..22fe1370040db 100644 --- a/Detectors/Filtering/src/FilteredTFReaderSpec.cxx +++ b/Detectors/Filtering/src/FilteredTFReaderSpec.cxx @@ -45,15 +45,15 @@ void FilteredTFReader::run(ProcessingContext& pc) LOG(info) << "Pushing filtered TF: " << mFiltTF.header.asString(); // ITS - pc.outputs().snapshot(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, mFiltTF.ITSTrackROFs); - pc.outputs().snapshot(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}, mFiltTF.ITSTracks); - pc.outputs().snapshot(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}, mFiltTF.ITSClusterIndices); + pc.outputs().snapshot(Output{"ITS", "ITSTrackROF", 0}, mFiltTF.ITSTrackROFs); + pc.outputs().snapshot(Output{"ITS", "TRACKS", 0}, mFiltTF.ITSTracks); + pc.outputs().snapshot(Output{"ITS", "TRACKCLSID", 0}, mFiltTF.ITSClusterIndices); if (mUseMC) { - pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}, mFiltTF.ITSTrackMCTruth); + pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0}, mFiltTF.ITSTrackMCTruth); } - pc.outputs().snapshot(Output{"ITS", "CLUSTERSROF", 0, Lifetime::Timeframe}, mFiltTF.ITSClusterROFs); - pc.outputs().snapshot(Output{"ITS", "COMPCLUSTERS", 0, Lifetime::Timeframe}, mFiltTF.ITSClusters); - pc.outputs().snapshot(Output{"ITS", "PATTERNS", 0, Lifetime::Timeframe}, mFiltTF.ITSClusterPatterns); + pc.outputs().snapshot(Output{"ITS", "CLUSTERSROF", 0}, mFiltTF.ITSClusterROFs); + pc.outputs().snapshot(Output{"ITS", "COMPCLUSTERS", 0}, mFiltTF.ITSClusters); + pc.outputs().snapshot(Output{"ITS", "PATTERNS", 0}, mFiltTF.ITSClusterPatterns); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx b/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx index 34f445ed003da..17ad5f09027e1 100644 
--- a/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx @@ -47,7 +47,7 @@ void NoInpDummyOut::run(ProcessingContext& pc) { static int counter = 0; // send just once dummy output to trigger the ccdb-fetcher - pc.outputs().make>(Output{"GLO", "DUMMY_OUT", 0, Lifetime::Timeframe}); + pc.outputs().make>(Output{"GLO", "DUMMY_OUT", 0}); if (mLoops >= 0 && ++counter >= mLoops) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(framework::QuitRequest::Me); diff --git a/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx b/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx index 3653fd8bcc90b..c8a402a48d659 100644 --- a/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx @@ -71,7 +71,7 @@ void ITSTPCMatchingQCDevice::sendOutput(DataAllocator& output) TObjArray objar; mMatchITSTPCQC->getHistos(objar); - output.snapshot(Output{"GLO", "ITSTPCMATCHQC", 0, Lifetime::Sporadic}, objar); + output.snapshot(Output{"GLO", "ITSTPCMATCHQC", 0}, objar); TFile* f = new TFile(Form("outITSTPCmatchingQC.root"), "RECREATE"); objar.Write("ObjArray", TObject::kSingleKey); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx index 1fdc7b49616c3..11fa58333f89b 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx @@ -65,9 +65,9 @@ void GlobalFwdTrackReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " Global Forward tracks at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "GLFWD", 0, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{"GLO", "GLFWD", 0}, mTracks); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0}, mLabels); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx index 012542e448d19..c1810a1deb743 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx @@ -63,7 +63,7 @@ void IRFrameReaderSpec::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "Pushing " << mIRF.size() << " IR-frames in at entry " << ent; - pc.outputs().snapshot(Output{mDataOrigin, "IRFRAMES", mSubSpec, Lifetime::Timeframe}, mIRF); + pc.outputs().snapshot(Output{mDataOrigin, "IRFRAMES", mSubSpec}, mIRF); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx index 08c5c5871db84..5f02beebd1746 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx @@ -65,7 +65,7 @@ void MatchMFTMCHReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " MFTMCH matches at entry " << ent; - 
pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0}, mTracks); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx index 06bdf8ff95a45..6e1aba8b2e1f3 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx @@ -84,12 +84,12 @@ void PrimaryVertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mVerticesPtr->size() << " vertices at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "PVTX", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0, Lifetime::Timeframe}, mPV2MatchIdx); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0, Lifetime::Timeframe}, mPV2MatchIdxRef); + pc.outputs().snapshot(Output{"GLO", "PVTX", 0}, mVertices); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0}, mPV2MatchIdx); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0}, mPV2MatchIdxRef); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0}, mLabels); } if (mVerbose) { diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx index e440a075d2558..4dd6324c83675 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx @@ -93,15 +93,15 @@ void SecondaryVertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mV0s.size() << " V0s and " << mCascs.size() << " cascades at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "V0S_IDX", 0, Lifetime::Timeframe}, mV0sIdx); - pc.outputs().snapshot(Output{"GLO", "V0S", 0, Lifetime::Timeframe}, mV0s); - pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0, Lifetime::Timeframe}, mPV2V0Ref); - pc.outputs().snapshot(Output{"GLO", "CASCS_IDX", 0, Lifetime::Timeframe}, mCascsIdx); - pc.outputs().snapshot(Output{"GLO", "CASCS", 0, Lifetime::Timeframe}, mCascs); - pc.outputs().snapshot(Output{"GLO", "PVTX_CASCREFS", 0, Lifetime::Timeframe}, mPV2CascRef); - pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY_IDX", 0, Lifetime::Timeframe}, m3BodysIdx); - pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY", 0, Lifetime::Timeframe}, m3Bodys); - pc.outputs().snapshot(Output{"GLO", "PVTX_3BODYREFS", 0, Lifetime::Timeframe}, mPV23BodyRef); + pc.outputs().snapshot(Output{"GLO", "V0S_IDX", 0}, mV0sIdx); + pc.outputs().snapshot(Output{"GLO", "V0S", 0}, mV0s); + pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0}, mPV2V0Ref); + pc.outputs().snapshot(Output{"GLO", "CASCS_IDX", 0}, mCascsIdx); + pc.outputs().snapshot(Output{"GLO", "CASCS", 0}, mCascs); + pc.outputs().snapshot(Output{"GLO", "PVTX_CASCREFS", 0}, mPV2CascRef); + pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY_IDX", 0}, m3BodysIdx); + pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY", 0}, m3Bodys); + pc.outputs().snapshot(Output{"GLO", "PVTX_3BODYREFS", 0}, mPV23BodyRef); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git 
a/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx index 23eff571105e6..8c7f87a720925 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx @@ -76,14 +76,14 @@ void StrangenessTrackingReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mStrangeTrack.size() << " strange tracks at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0, Lifetime::Timeframe}, mStrangeTrack); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0}, mStrangeTrack); if (mUseMC) { LOG(info) << "Pushing " << mStrangeTrackMC.size() << " strange tracks MC labels at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0, Lifetime::Timeframe}, mStrangeTrackMC); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0}, mStrangeTrackMC); } - // pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0, Lifetime::Timeframe}, mPV2V0Ref); + // pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0}, mPV2V0Ref); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx index 90db5d08acc58..7e3cdffd84a6d 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx @@ -41,9 +41,9 @@ void TrackCosmicsReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " Cosmic Tracks at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0}, mTracks); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0}, mLabels); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx index 8ae55e478603e..c7fd0d543ecf6 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx @@ -68,12 +68,12 @@ void TrackTPCITSReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " TPC-ITS matches at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "TPCITS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{"GLO", "TPCITSAB_REFS", 0, Lifetime::Timeframe}, mABTrkClusRefs); - pc.outputs().snapshot(Output{"GLO", "TPCITSAB_CLID", 0, Lifetime::Timeframe}, mABTrkClIDs); + pc.outputs().snapshot(Output{"GLO", "TPCITS", 0}, mTracks); + pc.outputs().snapshot(Output{"GLO", "TPCITSAB_REFS", 0}, mABTrkClusRefs); + pc.outputs().snapshot(Output{"GLO", "TPCITSAB_CLID", 0}, mABTrkClIDs); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "TPCITS_MC", 0, Lifetime::Timeframe}, mLabels); - pc.outputs().snapshot(Output{"GLO", "TPCITSAB_MC", 0, Lifetime::Timeframe}, mLabelsAB); + pc.outputs().snapshot(Output{"GLO", "TPCITS_MC", 0}, mLabels); + 
pc.outputs().snapshot(Output{"GLO", "TPCITSAB_MC", 0}, mLabelsAB); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx index d8242fc2ccd79..a4c9d302019a5 100644 --- a/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx @@ -99,9 +99,9 @@ void CosmicsMatchingSpec::run(ProcessingContext& pc) updateTimeDependentParams(pc); // Make sure this is called after recoData.collectData, which may load some conditions mMatching.process(recoData); - pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0, Lifetime::Timeframe}, mMatching.getCosmicTracks()); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0}, mMatching.getCosmicTracks()); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0, Lifetime::Timeframe}, mMatching.getCosmicTracksLbl()); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0}, mMatching.getCosmicTracksLbl()); } mTimer.Stop(); } diff --git a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx index 582f2be3c5f84..03dc823c62c42 100644 --- a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx @@ -92,18 +92,18 @@ void GlobalFwdMatchingDPL::run(ProcessingContext& pc) const auto& matchingParam = GlobalFwdMatchingParam::Instance(); if (matchingParam.saveMode == kSaveTrainingData) { - pc.outputs().snapshot(Output{"GLO", "GLFWDMFT", 0, Lifetime::Timeframe}, mMatching.getMFTMatchingPlaneParams()); - pc.outputs().snapshot(Output{"GLO", "GLFWDMCH", 0, Lifetime::Timeframe}, mMatching.getMCHMatchingPlaneParams()); - pc.outputs().snapshot(Output{"GLO", "GLFWDINF", 0, Lifetime::Timeframe}, mMatching.getMFTMCHMatchInfo()); + pc.outputs().snapshot(Output{"GLO", "GLFWDMFT", 0}, mMatching.getMFTMatchingPlaneParams()); + pc.outputs().snapshot(Output{"GLO", "GLFWDMCH", 0}, mMatching.getMCHMatchingPlaneParams()); + pc.outputs().snapshot(Output{"GLO", "GLFWDINF", 0}, mMatching.getMFTMCHMatchInfo()); } else { - pc.outputs().snapshot(Output{"GLO", "GLFWD", 0, Lifetime::Timeframe}, mMatching.getMatchedFwdTracks()); + pc.outputs().snapshot(Output{"GLO", "GLFWD", 0}, mMatching.getMatchedFwdTracks()); } if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0, Lifetime::Timeframe}, mMatching.getMatchLabels()); + pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0}, mMatching.getMatchLabels()); } if (mMatchRootOutput) { - pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0, Lifetime::Timeframe}, mMatching.getMFTMCHMatchInfo()); + pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0}, mMatching.getMFTMCHMatchInfo()); } mTimer.Stop(); } diff --git a/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx index f8666b5561610..b0562fd4f3a1e 100644 --- a/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx @@ -128,9 +128,9 @@ void HMPMatcherSpec::run(ProcessingContext& pc) mMatcher.run(recoData); - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); + pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0}, 
mMatcher.getMatchedTrackVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0, Lifetime::Timeframe}, mMatcher.getMatchedHMPLabelsVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); + pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0}, mMatcher.getMatchedHMPLabelsVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx index e70122e7a01b5..ff45790a95921 100644 --- a/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx @@ -152,12 +152,12 @@ void PrimaryVertexingSpec::run(ProcessingContext& pc) mVertexer.process(tracks, gids, ft0Data, vertices, vertexTrackIDs, v2tRefs, tracksMCInfo, lblVtx); } - pc.outputs().snapshot(Output{"GLO", "PVTX", 0, Lifetime::Timeframe}, vertices); - pc.outputs().snapshot(Output{"GLO", "PVTX_CONTIDREFS", 0, Lifetime::Timeframe}, v2tRefs); - pc.outputs().snapshot(Output{"GLO", "PVTX_CONTID", 0, Lifetime::Timeframe}, vertexTrackIDs); + pc.outputs().snapshot(Output{"GLO", "PVTX", 0}, vertices); + pc.outputs().snapshot(Output{"GLO", "PVTX_CONTIDREFS", 0}, v2tRefs); + pc.outputs().snapshot(Output{"GLO", "PVTX_CONTID", 0}, vertexTrackIDs); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0, Lifetime::Timeframe}, lblVtx); + pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0}, lblVtx); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx index 46136ac6e2397..37b35df06ea72 100644 --- a/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx @@ -75,11 +75,11 @@ void StrangenessTrackerSpec::run(framework::ProcessingContext& pc) mTracker.loadData(recoData); mTracker.prepareITStracks(); mTracker.process(); - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0, Lifetime::Timeframe}, mTracker.getStrangeTrackVec()); - pc.outputs().snapshot(Output{"GLO", "CLUSUPDATES", 0, Lifetime::Timeframe}, mTracker.getClusAttachments()); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0}, mTracker.getStrangeTrackVec()); + pc.outputs().snapshot(Output{"GLO", "CLUSUPDATES", 0}, mTracker.getClusAttachments()); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0, Lifetime::Timeframe}, mTracker.getStrangeTrackLabels()); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0}, mTracker.getStrangeTrackLabels()); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx index 134ffbb75f341..ca4d3e2fc5dc8 100644 --- a/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx @@ -171,8 +171,8 @@ void TOFMatcherSpec::run(ProcessingContext& pc) static pmr::vector dummyMCLab; if (isTPCused) { - auto& mtcInfo = pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MTC_TPC", ss, Lifetime::Timeframe}); - auto& mclabels = mUseMC ? pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MCMTC_TPC", ss, Lifetime::Timeframe}) : dummyMCLab; + auto& mtcInfo = pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MTC_TPC", ss}); + auto& mclabels = mUseMC ?
pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MCMTC_TPC", ss}) : dummyMCLab; auto& tracksTPCTOF = pc.outputs().make>(OutputRef{"tpctofTracks", ss}); auto nmatch = mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPC).size(); LOG(debug) << (mDoTPCRefit ? "Refitting " : "Shifting Z for ") << nmatch << " matched TPC tracks with TOF time info"; @@ -180,48 +180,48 @@ void TOFMatcherSpec::run(ProcessingContext& pc) } if (isITSTPCused) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPC", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPC", 0}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPC", 0, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPC", 0}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); } } if (isTPCTRDused) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_TPCTRD", ss, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_TPCTRD", ss}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_TPCTRD", ss, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_TPCTRD", ss}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); } } if (isITSTPCTRDused) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPCTRD", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPCTRD", 0}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPCTRD", 0, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPCTRD", 0}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); } } // TODO: TRD-matched tracks - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CALIBDATA", 0, Lifetime::Timeframe}, mMatcher.getCalibVector()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CALIBDATA", 0}, mMatcher.getCalibVector()); if (mPushMatchable) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_0", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(0)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_1", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(1)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_2", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(2)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_3", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(3)); - 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_4", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(4)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_5", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(5)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_6", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(6)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_7", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(7)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_8", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(8)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_9", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(9)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_10", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(10)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_11", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(11)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_12", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(12)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_13", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(13)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_14", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(14)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_15", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(15)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_16", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(16)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_17", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(17)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_0", 0}, mMatcher.getMatchedTracksPair(0)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_1", 0}, mMatcher.getMatchedTracksPair(1)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_2", 0}, mMatcher.getMatchedTracksPair(2)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_3", 0}, mMatcher.getMatchedTracksPair(3)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_4", 0}, mMatcher.getMatchedTracksPair(4)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_5", 0}, mMatcher.getMatchedTracksPair(5)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_6", 0}, mMatcher.getMatchedTracksPair(6)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_7", 0}, mMatcher.getMatchedTracksPair(7)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_8", 0}, mMatcher.getMatchedTracksPair(8)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_9", 0}, mMatcher.getMatchedTracksPair(9)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_10", 0}, mMatcher.getMatchedTracksPair(10)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_11", 0}, mMatcher.getMatchedTracksPair(11)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_12", 0}, mMatcher.getMatchedTracksPair(12)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_13", 0}, mMatcher.getMatchedTracksPair(13)); + 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_14", 0}, mMatcher.getMatchedTracksPair(14)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_15", 0}, mMatcher.getMatchedTracksPair(15)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_16", 0}, mMatcher.getMatchedTracksPair(16)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_17", 0}, mMatcher.getMatchedTracksPair(17)); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx index 1a061ee9707f5..aebcd96c4faa7 100644 --- a/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx @@ -103,12 +103,12 @@ void TPCITSMatchingDPL::run(ProcessingContext& pc) static pmr::vector dummyMCLab, dummyMCLabAB; static pmr::vector> dummyCalib; - auto& matchedTracks = pc.outputs().make>(Output{"GLO", "TPCITS", 0, Lifetime::Timeframe}); - auto& ABTrackletRefs = pc.outputs().make>(Output{"GLO", "TPCITSAB_REFS", 0, Lifetime::Timeframe}); - auto& ABTrackletClusterIDs = pc.outputs().make>(Output{"GLO", "TPCITSAB_CLID", 0, Lifetime::Timeframe}); - auto& matchLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITS_MC", 0, Lifetime::Timeframe}) : dummyMCLab; - auto& ABTrackletLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITSAB_MC", 0, Lifetime::Timeframe}) : dummyMCLabAB; - auto& calib = mCalibMode ? pc.outputs().make>>(Output{"GLO", "TPCITS_VDTGL", 0, Lifetime::Timeframe}) : dummyCalib; + auto& matchedTracks = pc.outputs().make>(Output{"GLO", "TPCITS", 0}); + auto& ABTrackletRefs = pc.outputs().make>(Output{"GLO", "TPCITSAB_REFS", 0}); + auto& ABTrackletClusterIDs = pc.outputs().make>(Output{"GLO", "TPCITSAB_CLID", 0}); + auto& matchLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITS_MC", 0}) : dummyMCLab; + auto& ABTrackletLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITSAB_MC", 0}) : dummyMCLabAB; + auto& calib = mCalibMode ? 
pc.outputs().make>>(Output{"GLO", "TPCITS_VDTGL", 0}) : dummyCalib; mMatching.run(recoData, matchedTracks, ABTrackletRefs, ABTrackletClusterIDs, matchLabels, ABTrackletLabels, calib); diff --git a/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx index 281fd06dea7eb..e5d2afb1ee700 100644 --- a/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx @@ -78,8 +78,8 @@ void VertexTrackMatcherSpec::run(ProcessingContext& pc) mMatcher.process(recoData, trackIndex, vtxRefs); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0, Lifetime::Timeframe}, trackIndex); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0, Lifetime::Timeframe}, vtxRefs); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0}, trackIndex); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0}, vtxRefs); mTimer.Stop(); LOG(info) << "Made " << trackIndex.size() << " track associations for " << recoData.getPrimaryVertices().size() diff --git a/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx b/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx index ed06e014184b6..8a962686d743e 100644 --- a/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx +++ b/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx @@ -107,10 +107,10 @@ void TPCDataFilter::run(ProcessingContext& pc) void TPCDataFilter::sendOutput(ProcessingContext& pc) { - pc.outputs().snapshot(Output{"TPC", "TRACKSF", 0, Lifetime::Timeframe}, mTracksFiltered); - pc.outputs().snapshot(Output{"TPC", "CLUSREFSF", 0, Lifetime::Timeframe}, mTrackClusIdxFiltered); + pc.outputs().snapshot(Output{"TPC", "TRACKSF", 0}, mTracksFiltered); + pc.outputs().snapshot(Output{"TPC", "CLUSREFSF", 0}, mTrackClusIdxFiltered); if (mUseMC) { - pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBLF", 0, Lifetime::Timeframe}, mTPCTrkLabelsFiltered); + pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBLF", 0}, mTPCTrkLabelsFiltered); } o2::tpc::TPCSectorHeader clusterOutputSectorHeader{0}; @@ -118,7 +118,7 @@ void TPCDataFilter::sendOutput(ProcessingContext& pc) for (int i = 0; i < o2::tpc::constants::MAXSECTOR; i++) { clusterOutputSectorHeader.sectorBits = (1ul << i); o2::header::DataHeader::SubSpecificationType subspec = i; - char* buffer = pc.outputs().make({o2::header::gDataOriginTPC, "CLUSTERNATIVEF", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, + char* buffer = pc.outputs().make({o2::header::gDataOriginTPC, "CLUSTERNATIVEF", subspec, {clusterOutputSectorHeader}}, mClusFiltered.nClustersSector[i] * sizeof(*mClusFiltered.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)) .data(); o2::tpc::ClusterCountIndex* outIndex = reinterpret_cast(buffer); @@ -138,7 +138,7 @@ void TPCDataFilter::sendOutput(ProcessingContext& pc) } o2::dataformats::ConstMCLabelContainer contflat; cont.flatten_to(contflat); - pc.outputs().snapshot({o2::header::gDataOriginTPC, "CLNATIVEMCLBLF", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, contflat); + pc.outputs().snapshot({o2::header::gDataOriginTPC, "CLNATIVEMCLBLF", subspec, {clusterOutputSectorHeader}}, contflat); } } } diff --git a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx index 69554d3d495a4..89e3ae19cf65b 100644 --- a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx +++ 
b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx @@ -133,18 +133,18 @@ void TPCInterpolationDPL::run(ProcessingContext& pc) LOGF(info, "TPC interpolation timing: Cpu: %.3e Real: %.3e s", mTimer.CpuTime(), mTimer.RealTime()); if (SpacePointsCalibConfParam::Instance().writeUnfiltered) { // these are the residuals and tracks before outlier rejection; they are not used in production - pc.outputs().snapshot(Output{"GLO", "TPCINT_RES", 0, Lifetime::Timeframe}, mInterpolation.getClusterResidualsUnfiltered()); + pc.outputs().snapshot(Output{"GLO", "TPCINT_RES", 0}, mInterpolation.getClusterResidualsUnfiltered()); if (mSendTrackData) { - pc.outputs().snapshot(Output{"GLO", "TPCINT_TRK", 0, Lifetime::Timeframe}, mInterpolation.getReferenceTracksUnfiltered()); + pc.outputs().snapshot(Output{"GLO", "TPCINT_TRK", 0}, mInterpolation.getReferenceTracksUnfiltered()); } } - pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0, Lifetime::Timeframe}, mInterpolation.getClusterResiduals()); - pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0, Lifetime::Timeframe}, mInterpolation.getTrackDataCompact()); + pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0}, mInterpolation.getClusterResiduals()); + pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0}, mInterpolation.getTrackDataCompact()); if (mSendTrackData) { - pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0, Lifetime::Timeframe}, mInterpolation.getReferenceTracks()); + pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0}, mInterpolation.getReferenceTracks()); } if (mDebugOutput) { - pc.outputs().snapshot(Output{"GLO", "TRKDATAEXT", 0, Lifetime::Timeframe}, mInterpolation.getTrackDataExtended()); + pc.outputs().snapshot(Output{"GLO", "TRKDATAEXT", 0}, mInterpolation.getTrackDataExtended()); } mInterpolation.reset(); } diff --git a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx index af50b32bc8bba..55da5a5e71e44 100644 --- a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx @@ -56,11 +56,11 @@ void TPCUnbinnedResidualReader::run(ProcessingContext& pc) assert(currEntry < mTreeIn->GetEntries()); // this should not happen mTreeIn->GetEntry(currEntry); LOG(info) << "Pushing " << mUnbinnedResid.size() << " unbinned residuals at entry " << currEntry; - pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0, Lifetime::Timeframe}, mUnbinnedResid); - pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0, Lifetime::Timeframe}, mTrackDataCompact); + pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0}, mUnbinnedResid); + pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0}, mTrackDataCompact); if (mTrackInput) { LOG(info) << "Pushing " << mTrackData.size() << " reference tracks for these residuals"; - pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0, Lifetime::Timeframe}, mTrackData); + pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0}, mTrackData); } if (mTreeIn->GetReadEntry() + 1 >= mTreeIn->GetEntries()) { diff --git a/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx b/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx index e71cd42845dd4..9ac5074acb505 100644 --- a/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx +++ b/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx @@ -71,8 +71,8 @@ void ClusterReaderTask::run(ProcessingContext& pc) 
assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); - pc.outputs().snapshot(Output{"HMP", "CLUSTERS", 0, Lifetime::Timeframe}, mClustersFromFile); - pc.outputs().snapshot(Output{"HMP", "INTRECORDS1", 0, Lifetime::Timeframe}, mClusterTriggersFromFile); + pc.outputs().snapshot(Output{"HMP", "CLUSTERS", 0}, mClustersFromFile); + pc.outputs().snapshot(Output{"HMP", "INTRECORDS1", 0}, mClusterTriggersFromFile); mClustersReceived += mClustersFromFile.size(); LOG(info) << "[HMPID ClusterReader - run() ] clusters = " << mClustersFromFile.size(); diff --git a/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx b/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx index 3a3b5a6338d81..1a6509fc75654 100644 --- a/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx +++ b/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx @@ -87,8 +87,8 @@ void DataDecoderTask::run(framework::ProcessingContext& pc) // decodeReadout(pc); // decodeRawFile(pc); - pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mDeco->mDigits); - pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0, o2::framework::Lifetime::Timeframe}, mDeco->mIntReco); + pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0}, mDeco->mDigits); + pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0}, mDeco->mIntReco); LOG(debug) << "Writing Digitis=" << mDeco->mDigits.size() << "/" << mTotalDigits << " Frame=" << mTotalFrames << " IntRec " << mDeco->mIntReco; mExTimer.elapseMes("Decoding... Digits decoded = " + std::to_string(mTotalDigits) + " Frames received = " + std::to_string(mTotalFrames)); diff --git a/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx b/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx index 61051ab48577e..92a966938786a 100644 --- a/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx +++ b/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx @@ -93,8 +93,8 @@ void DataDecoderTask2::run(framework::ProcessingContext& pc) // Output the Digits/Triggers vector orderTriggers(); - pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mDeco->mDigits); - pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0, o2::framework::Lifetime::Timeframe}, mTriggers); + pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0}, mDeco->mDigits); + pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0}, mTriggers); mExTimer.elapseMes("Decoding... 
Digits decoded = " + std::to_string(mTotalDigits) + " Frames received = " + std::to_string(mTotalFrames)); return; diff --git a/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx b/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx index ed83db476b01a..88f6df2bce2e7 100644 --- a/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx +++ b/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx @@ -115,8 +115,8 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); - pc.outputs().snapshot(Output{"HMP", "DIGITS", 0, Lifetime::Timeframe}, mDigitsFromFile); - pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0, Lifetime::Timeframe}, mTriggersFromFile); + pc.outputs().snapshot(Output{"HMP", "DIGITS", 0}, mDigitsFromFile); + pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0}, mTriggersFromFile); mDigitsReceived += mDigitsFromFile.size(); LOG(info) << "[HMPID DigitsReader - run() ] digits = " << mDigitsFromFile.size(); diff --git a/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx b/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx index 377bc0f7af45e..81c04a9875c51 100644 --- a/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx +++ b/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx @@ -116,8 +116,8 @@ void DigitsToClustersTask::run(framework::ProcessingContext& pc) mDigitsReceived += digits.size(); mClustersReceived += clusters.size(); - pc.outputs().snapshot(o2::framework::Output{"HMP", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, clusters); - pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS1", 0, o2::framework::Lifetime::Timeframe}, clusterTriggers); + pc.outputs().snapshot(o2::framework::Output{"HMP", "CLUSTERS", 0}, clusters); + pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS1", 0}, clusterTriggers); mExTimer.elapseMes("Clusterization of Digits received = " + std::to_string(mDigitsReceived)); mExTimer.elapseMes("Clusterization of Clusters received = " + std::to_string(mClustersReceived)); diff --git a/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx b/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx index 15bd0987a0ccc..95723f42d0fd6 100644 --- a/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx @@ -72,7 +72,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"HMP", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"HMP", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, digits); pc.outputs().snapshot({"ctfrep", 0}, iosize); diff --git a/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx b/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx index e466a8a4e00c4..3ddb71e282a96 100644 --- a/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx +++ b/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx @@ -62,9 +62,9 @@ void HMPMatchedReader::run(ProcessingContext& pc) mTree->GetEntry(currEntry); LOG(debug) << "Pushing " << mMatches.size() << " HMP matchings at entry " << currEntry; - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0, Lifetime::Timeframe}, mMatches); + pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0}, mMatches); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0, Lifetime::Timeframe}, mLabelHMP); + 
pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0}, mLabelHMP); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx b/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx index d6ef8c947ed49..964f342c58b15 100644 --- a/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx +++ b/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx @@ -139,14 +139,14 @@ void TestDataReader::run(ProcessingContext& pc) if (mDiffFolderName.size() == 0) { cout << "No New Run -- No Need to Reset" << endl; mResetCommand = 0; - pc.outputs().snapshot(Output{"ITS", "TEST", 0, Lifetime::Timeframe}, mResetCommand); + pc.outputs().snapshot(Output{"ITS", "TEST", 0}, mResetCommand); } // New folders found, send the reset signal and reload configuration if (mDiffFolderName.size() > 0) { cout << "New Run Started -- Reset All Histograms" << endl; mResetCommand = 1; - pc.outputs().snapshot(Output{"ITS", "TEST", 0, Lifetime::Timeframe}, mResetCommand); + pc.outputs().snapshot(Output{"ITS", "TEST", 0}, mResetCommand); for (int i = 0; i < sNError; i++) { mErrors[i] = 0; } @@ -264,11 +264,11 @@ void TestDataReader::run(ProcessingContext& pc) mErrorsVecTest.push_back(mErrors); mFileDone = 1; mFileInfo = mFileDone + mFileRemain * 10; - pc.outputs().snapshot(Output{"ITS", "Run", 0, Lifetime::Timeframe}, mRunNumber); - pc.outputs().snapshot(Output{"ITS", "File", 0, Lifetime::Timeframe}, mFileID); - pc.outputs().snapshot(Output{"ITS", "Error", 0, Lifetime::Timeframe}, mErrorsVecTest[0]); - pc.outputs().snapshot(Output{"ITS", "Finish", 0, Lifetime::Timeframe}, mFileInfo); - pc.outputs().snapshot(Output{"ITS", "DIGITS", 0, Lifetime::Timeframe}, mMultiDigitsTest); + pc.outputs().snapshot(Output{"ITS", "Run", 0}, mRunNumber); + pc.outputs().snapshot(Output{"ITS", "File", 0}, mFileID); + pc.outputs().snapshot(Output{"ITS", "Error", 0}, mErrorsVecTest[0]); + pc.outputs().snapshot(Output{"ITS", "Finish", 0}, mFileInfo); + pc.outputs().snapshot(Output{"ITS", "DIGITS", 0}, mMultiDigitsTest); mNewFileInj = 0; mErrorsVecTest.clear(); mDigitsTest.clear(); @@ -422,10 +422,10 @@ void TestDataReader::run(ProcessingContext& pc) cout << "RunIDS = " << mRunNumber << " FileIDS = " << mFileID << endl; - pc.outputs().snapshot(Output{"ITS", "Run", 0, Lifetime::Timeframe}, mRunNumber); - pc.outputs().snapshot(Output{"ITS", "File", 0, Lifetime::Timeframe}, mFileID); + pc.outputs().snapshot(Output{"ITS", "Run", 0}, mRunNumber); + pc.outputs().snapshot(Output{"ITS", "File", 0}, mFileID); - pc.outputs().snapshot(Output{"ITS", "Error", 0, Lifetime::Timeframe}, mErrorsVec[j]); + pc.outputs().snapshot(Output{"ITS", "Error", 0}, mErrorsVec[j]); mIndexPushEx = mIndexPush + mNDigits[j]; LOG(debug) << "IndexPushEx = " << mIndexPushEx << " mDigits.size() " << mDigits.size(); if (mIndexPushEx > mDigits.size() - 5) { @@ -436,11 +436,11 @@ void TestDataReader::run(ProcessingContext& pc) mFileInfo = mFileDone + mFileRemain * 10; - pc.outputs().snapshot(Output{"ITS", "Finish", 0, Lifetime::Timeframe}, mFileInfo); + pc.outputs().snapshot(Output{"ITS", "Finish", 0}, mFileInfo); LOG(debug) << "mIndexPush = " << mIndexPush << " Chip ID Pushing " << mDigits[mIndexPush].getChipIndex(); - pc.outputs().snapshot(Output{"ITS", "DIGITS", 0, Lifetime::Timeframe}, mMultiDigits); + pc.outputs().snapshot(Output{"ITS", "DIGITS", 0}, mMultiDigits); mMultiDigits.clear(); mIndexPush = mIndexPush + mNDigits[j]; @@ -453,7 +453,7 @@ void 
TestDataReader::run(ProcessingContext& pc) // << "mIndexPush = " << mIndexPush << " mDigits.size() = " << mDigits.size(); // while (mIndexPush < mDigits.size()) { // LOG(debug) << "mDigits.size() = " << mDigits.size(); - // pc.outputs().snapshot(Output{"ITS", "DIGITS", 0, Lifetime::Timeframe}, mDigits[mIndexPush++]); + // pc.outputs().snapshot(Output{"ITS", "DIGITS", 0}, mDigits[mIndexPush++]); // if (mIndexPush % 100000 == 0) // LOG(debug) << "mIndexPush = " << mIndexPush << " Chip ID Pushing " << mDigits[mIndexPush].getChipIndex(); // } @@ -483,7 +483,7 @@ void TestDataReader::run(ProcessingContext& pc) j = 0; mNDigits.clear(); mFileDone = 1; - pc.outputs().snapshot(Output{"TST", "Finish", 0, Lifetime::Timeframe}, mFileDone); + pc.outputs().snapshot(Output{"TST", "Finish", 0}, mFileDone); PercentDone = 0; mErrorsVec.clear(); } diff --git a/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx index 89596e8357b74..71987d0480566 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx @@ -88,17 +88,17 @@ void ClustererDPL::run(ProcessingContext& pc) clusterLabels = std::make_unique>(); } mClusterer->process(mNThreads, reader, &clusCompVec, &clusPattVec, &clusROFVec, clusterLabels.get()); - pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); if (mUseMC) { - pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, *clusterLabels.get()); // at the moment requires snapshot + pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0}, *clusterLabels.get()); // at the moment requires snapshot std::vector clusterMC2ROframes(mc2rofs.size()); for (int i = mc2rofs.size(); i--;) { clusterMC2ROframes[i] = mc2rofs[i]; // Simply, replicate it from digits ? 
} - pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, clusterMC2ROframes); + pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0}, clusterMC2ROframes); } // TODO: in principle, after masking "overflow" pixels the MC2ROFRecord maxROF supposed to change, nominally to minROF diff --git a/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx index e3f585c81cd97..e4c9d43541e1f 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx @@ -95,7 +95,7 @@ void CookedTrackerDPL::run(ProcessingContext& pc) physTriggers = pc.inputs().get>("phystrig"); } - auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); + auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0}, rofsinput.begin(), rofsinput.end()); std::unique_ptr> labels; gsl::span mc2rofs; @@ -116,11 +116,11 @@ void CookedTrackerDPL::run(ProcessingContext& pc) o2::its::ROframe event(0, 7); mVertexerPtr->adoptTimeFrame(mTimeFrame); - auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}); - auto& tracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}); - auto& clusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}); - auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0}); + auto& tracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0}); + auto& clusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0}); + auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0}); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB int nBCPerTF = mTracker.getContinuousMode() ? 
alpParams.roFrameLengthInBC : alpParams.roFrameLengthTrig; @@ -201,8 +201,8 @@ void CookedTrackerDPL::run(ProcessingContext& pc) LOG(info) << "ITSCookedTracker pushed " << tracks.size() << " tracks and " << vertices.size() << " vertices"; if (mUseMC) { - pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}, trackLabels); - pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0, Lifetime::Timeframe}, mc2rofs); + pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0}, trackLabels); + pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0}, mc2rofs); } mTimer.Stop(); } diff --git a/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx index 48bbaed171a85..2ea0c833f3871 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx @@ -117,7 +117,7 @@ void ITSDCSDataGenerator::run(o2::framework::ProcessingContext& pc) auto timeNow = std::chrono::system_clock::now(); timingInfo.creation = std::chrono::duration_cast(timeNow.time_since_epoch()).count(); // in ms - pc.outputs().snapshot(Output{"ITS", mDataDescription, 0, Lifetime::Timeframe}, dpcoms); + pc.outputs().snapshot(Output{"ITS", mDataDescription, 0}, dpcoms); mTFs++; } } // namespace diff --git a/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx index 416ba5b3b07cc..8e72faae9fd37 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx @@ -44,14 +44,14 @@ void TrackReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " track in " << mROFRec.size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "ITSTrackROF", 0, Lifetime::Timeframe}, mROFRec); - pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0, Lifetime::Timeframe}, mClusInd); - pc.outputs().snapshot(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{mOrigin, "ITSTrackROF", 0}, mROFRec); + pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0}, mTracks); + pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0}, mClusInd); + pc.outputs().snapshot(Output{"ITS", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0}, mVerticesROFRec); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0, Lifetime::Timeframe}, mMCTruth); - pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0, Lifetime::Timeframe}, mMCVertTruth); + pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0}, mMCVertTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx index 35671f646bd04..331e42907530a 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx @@ -150,8 +150,8 @@ void TrackerDPL::run(ProcessingContext& pc) } auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); - auto& irFrames 
= pc.outputs().make>(Output{"ITS", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0}, rofsinput.begin(), rofsinput.end()); + auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0}); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB irFrames.reserve(rofs.size()); @@ -164,19 +164,19 @@ void TrackerDPL::run(ProcessingContext& pc) if (mIsMC) { labels = pc.inputs().get*>("itsmclabels").release(); // get the array as read-only span, a snapshot is sent forward - pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0, Lifetime::Timeframe}, pc.inputs().get>("ITSMC2ROframes")); + pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0}, pc.inputs().get>("ITSMC2ROframes")); LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}); - auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}); - auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0}); + auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0}); + auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0}); // MC static pmr::vector dummyMCLabTracks, dummyMCLabVerts; - auto& allTrackLabels = mIsMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}) : dummyMCLabTracks; - auto& allVerticesLabels = mIsMC ? pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0, Lifetime::Timeframe}) : dummyMCLabVerts; + auto& allTrackLabels = mIsMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0}) : dummyMCLabTracks; + auto& allVerticesLabels = mIsMC ? 
pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0}) : dummyMCLabVerts; std::uint32_t roFrame = 0; diff --git a/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx index bbf033a31abee..e92f08af23c0d 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx @@ -41,8 +41,8 @@ void VertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mVerticesPtr->size() << " vertices in " << mVerticesROFRecPtr->size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{"ITS", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0}, mVerticesROFRec); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx index f6279be58cfd9..bc8568112e447 100644 --- a/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx +++ b/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx @@ -89,17 +89,17 @@ void ClustererDPL::run(ProcessingContext& pc) clusterLabels = std::make_unique>(); } mClusterer->process(mNThreads, reader, &clusCompVec, &clusPattVec, &clusROFVec, clusterLabels.get()); - pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); if (mUseMC) { - pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, *clusterLabels.get()); // at the moment requires snapshot + pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0}, *clusterLabels.get()); // at the moment requires snapshot std::vector clusterMC2ROframes(mc2rofs.size()); for (int i = mc2rofs.size(); i--;) { clusterMC2ROframes[i] = mc2rofs[i]; // Simply, replicate it from digits ? 
 }
- pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, clusterMC2ROframes);
+ pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0}, clusterMC2ROframes);
 }
 // TODO: in principle, after masking "overflow" pixels the MC2ROFRecord maxROF supposed to change, nominally to minROF
diff --git a/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx
index 8d9b889d4d2aa..1a2ae573af536 100644
--- a/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx
+++ b/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx
@@ -45,11 +45,11 @@ void TrackReader::run(ProcessingContext& pc)
 assert(ent < mTree->GetEntries()); // this should not happen
 mTree->GetEntry(ent);
 LOG(info) << "Pushing " << mTracks.size() << " track in " << mROFRec.size() << " ROFs at entry " << ent;
- pc.outputs().snapshot(Output{mOrigin, "MFTTrackROF", 0, Lifetime::Timeframe}, mROFRec);
- pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0, Lifetime::Timeframe}, mTracks);
- pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0, Lifetime::Timeframe}, mClusInd);
+ pc.outputs().snapshot(Output{mOrigin, "MFTTrackROF", 0}, mROFRec);
+ pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0}, mTracks);
+ pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0}, mClusInd);
 if (mUseMC) {
- pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0, Lifetime::Timeframe}, mMCTruth);
+ pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0}, mMCTruth);
 }
 if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) {
diff --git a/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx
index 7bf7d27a69e4c..5d22a6f2d2306 100644
--- a/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx
+++ b/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx
@@ -72,7 +72,7 @@ void TrackerDPL::run(ProcessingContext& pc)
 // the output vector however is created directly inside the message memory thus avoiding copy by
 // snapshot
 auto rofsinput = pc.inputs().get>("ROframes");
- auto& rofs = pc.outputs().make>(Output{"MFT", "MFTTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end());
+ auto& rofs = pc.outputs().make>(Output{"MFT", "MFTTrackROF", 0}, rofsinput.begin(), rofsinput.end());
 ROFFilter filter = [](const o2::itsmft::ROFRecord& r) { return true; };
@@ -104,12 +104,12 @@ void TrackerDPL::run(ProcessingContext& pc)
 LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events";
 }
- auto& allClusIdx = pc.outputs().make>(Output{"MFT", "TRACKCLSID", 0, Lifetime::Timeframe});
+ auto& allClusIdx = pc.outputs().make>(Output{"MFT", "TRACKCLSID", 0});
 std::vector trackLabels;
 std::vector allTrackLabels;
 std::vector tracks;
 std::vector tracksL;
- auto& allTracksMFT = pc.outputs().make>(Output{"MFT", "TRACKS", 0, Lifetime::Timeframe});
+ auto& allTracksMFT = pc.outputs().make>(Output{"MFT", "TRACKS", 0});
 std::uint32_t roFrameId = 0;
 int nROFs = rofs.size();
@@ -327,8 +327,8 @@ void TrackerDPL::run(ProcessingContext& pc)
 LOG(info) << "MFTTracker pushed " << allTracksMFT.size() << " tracks";
 if (mUseMC) {
- pc.outputs().snapshot(Output{"MFT", "TRACKSMCTR", 0, Lifetime::Timeframe}, allTrackLabels);
- pc.outputs().snapshot(Output{"MFT", "TRACKSMC2ROF", 0, Lifetime::Timeframe}, mc2rofs);
+ pc.outputs().snapshot(Output{"MFT", "TRACKSMCTR", 0}, allTrackLabels);
+ pc.outputs().snapshot(Output{"MFT", "TRACKSMC2ROF", 0}, mc2rofs);
 }
 mTimer[SWTot].Stop();
diff --git a/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx
index ad7f26b1b4680..ea906056c7898 100644
--- a/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx
+++ b/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx
@@ -58,18 +58,18 @@ void ClusterReader::run(ProcessingContext& pc)
 // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name.
 // To be fixed once the names-definition class is ready
- pc.outputs().snapshot(Output{mOrigin, "CLUSTERSROF", 0, Lifetime::Timeframe}, mClusROFRec);
- pc.outputs().snapshot(Output{mOrigin, "COMPCLUSTERS", 0, Lifetime::Timeframe}, mClusterCompArray);
+ pc.outputs().snapshot(Output{mOrigin, "CLUSTERSROF", 0}, mClusROFRec);
+ pc.outputs().snapshot(Output{mOrigin, "COMPCLUSTERS", 0}, mClusterCompArray);
 if (mUsePatterns) {
- pc.outputs().snapshot(Output{mOrigin, "PATTERNS", 0, Lifetime::Timeframe}, mPatternsArray);
+ pc.outputs().snapshot(Output{mOrigin, "PATTERNS", 0}, mPatternsArray);
 }
 if (mUseMC) {
- pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mClusterMCTruth);
- pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, mClusMC2ROFs);
+ pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMCTR", 0}, mClusterMCTruth);
+ pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMC2ROF", 0}, mClusMC2ROFs);
 }
 if (mTriggerOut) {
 std::vector dummyTrig;
- pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0, Lifetime::Timeframe}, dummyTrig);
+ pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0}, dummyTrig);
 }
 if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) {
 pc.services().get().endOfStream();
diff --git a/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx
index fbdd63c73d782..ef9494c85bf4b 100644
--- a/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx
+++ b/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx
@@ -74,20 +74,20 @@ void DigitReader::run(ProcessingContext& pc)
 // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name.
// To be fixed once the names-definition class is ready - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mDigROFRec); - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mDigROFRec); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); if (mUseCalib) { - pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0, Lifetime::Timeframe}, mCalib); + pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0}, mCalib); } if (mTriggerOut) { std::vector dummyTrig; - pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0, Lifetime::Timeframe}, dummyTrig); + pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0}, dummyTrig); } if (mUseMC) { - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); plabels->copyandflatten(sharedlabels); delete plabels; - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mDigMC2ROFs); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mDigMC2ROFs); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx index ccb6f0539909e..4b35f6cc44e39 100644 --- a/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx @@ -56,7 +56,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{mOrigin, "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{mOrigin, "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, rofs, compClusters, pspan, mPattIdConverter, mStrobeLength); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx index 73132163d102c..a31ec0f1a4af2 100644 --- a/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx +++ b/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx @@ -139,7 +139,7 @@ void STFDecoder::run(ProcessingContext& pc) std::vector digVec; std::vector calVec; std::vector digROFVec; - auto& chipStatus = pc.outputs().make>(Output{orig, "CHIPSSTATUS", 0, Lifetime::Timeframe}, (size_t)Mapping::getNChips()); + auto& chipStatus = pc.outputs().make>(Output{orig, "CHIPSSTATUS", 0}, (size_t)Mapping::getNChips()); try { mDecoder->startNewTF(pc.inputs()); @@ -202,29 +202,29 @@ void STFDecoder::run(ProcessingContext& pc) } } if (mDoDigits) { - pc.outputs().snapshot(Output{orig, "DIGITS", 0, Lifetime::Timeframe}, digVec); - pc.outputs().snapshot(Output{orig, "DIGITSROF", 0, Lifetime::Timeframe}, digROFVec); + pc.outputs().snapshot(Output{orig, "DIGITS", 0}, digVec); + pc.outputs().snapshot(Output{orig, "DIGITSROF", 0}, digROFVec); mEstNDig = std::max(mEstNDig, size_t(digVec.size() * 1.2)); mEstNROF = std::max(mEstNROF, size_t(digROFVec.size() * 1.2)); if (mDoCalibData) { - pc.outputs().snapshot(Output{orig, "GBTCALIB", 0, Lifetime::Timeframe}, calVec); + pc.outputs().snapshot(Output{orig, "GBTCALIB", 0}, calVec); mEstNCalib = std::max(mEstNCalib, size_t(calVec.size() * 1.2)); } } if (mDoClusters) { // we are not obliged to create vectors which are not requested, but other devices might not know the options of this one - 
pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); mEstNClus = std::max(mEstNClus, size_t(clusCompVec.size() * 1.2)); mEstNClusPatt = std::max(mEstNClusPatt, size_t(clusPattVec.size() * 1.2)); mEstNROF = std::max(mEstNROF, size_t(clusROFVec.size() * 1.2)); } - auto& linkErrors = pc.outputs().make>(Output{orig, "LinkErrors", 0, Lifetime::Timeframe}); - auto& decErrors = pc.outputs().make>(Output{orig, "ChipErrors", 0, Lifetime::Timeframe}); + auto& linkErrors = pc.outputs().make>(Output{orig, "LinkErrors", 0}); + auto& decErrors = pc.outputs().make>(Output{orig, "ChipErrors", 0}); mDecoder->collectDecodingErrors(linkErrors, decErrors); - pc.outputs().snapshot(Output{orig, "PHYSTRIG", 0, Lifetime::Timeframe}, mDecoder->getExternalTriggers()); + pc.outputs().snapshot(Output{orig, "PHYSTRIG", 0}, mDecoder->getExternalTriggers()); if (mDumpOnError != int(GBTLink::RawDataDumps::DUMP_NONE)) { mDecoder->produceRawDataDumps(mDumpOnError, pc.services().get()); diff --git a/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx b/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx index da4c37035c82b..541e6c5fa26c0 100644 --- a/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx +++ b/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx @@ -54,18 +54,18 @@ class DigitsReaderDeviceDPL mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[0], 0, Lifetime::Timeframe}, "MCHDigit"}, + Output{header::gDataOriginMCH, mDescriptions[0], 0}, "MCHDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[1], 0, Lifetime::Timeframe}, "MCHROFRecords"}, + Output{header::gDataOriginMCH, mDescriptions[1], 0}, "MCHROFRecords"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[2], 0, Lifetime::Timeframe}, "MCHMCLabels"}); + Output{header::gDataOriginMCH, mDescriptions[2], 0}, "MCHMCLabels"}); } else { mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[0], 0, Lifetime::Timeframe}, "MCHDigit"}, + Output{header::gDataOriginMCH, mDescriptions[0], 0}, "MCHDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[1], 0, Lifetime::Timeframe}, "MCHROFRecords"}); + Output{header::gDataOriginMCH, mDescriptions[1], 0}, "MCHROFRecords"}); } } diff --git a/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx b/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx index 080c36706a0eb..8b981d4ee4abd 100644 --- a/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx @@ -139,7 +139,7 @@ class TrackAtVertexTask } // create the output message - auto msgOut = pc.outputs().make(Output{"MCH", "TRACKSATVERTEX", 0, Lifetime::Timeframe}, + auto msgOut = pc.outputs().make(Output{"MCH", "TRACKSATVERTEX", 0}, mTracksAtVtx.size() * sizeof(int) + nTracksTot * sizeof(TrackAtVtxStruct)); // write the tracks diff --git 
a/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx b/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx index 50e085e238d55..058202dfb802b 100644 --- a/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx +++ b/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx @@ -72,7 +72,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"MCH", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"MCH", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, rofs, digits); pc.outputs().snapshot({"ctfrep", 0}, iosize); mTimer.Stop(); diff --git a/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx b/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx index 5657fd375f118..c544ce19fcdea 100644 --- a/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx @@ -99,13 +99,13 @@ class ClusterizerDeviceDPL // Clear the index correlations that will be used in the next cluster processing mCorrelation.clear(); - pc.outputs().snapshot(of::Output{"MID", "CLUSTERSLABELS", 0, of::Lifetime::Timeframe}, mClusterLabeler.getContainer()); + pc.outputs().snapshot(of::Output{"MID", "CLUSTERSLABELS", 0}, mClusterLabeler.getContainer()); LOG(debug) << "Sent " << mClusterLabeler.getContainer().getIndexedSize() << " indexed clusters"; } - pc.outputs().snapshot(of::Output{"MID", "CLUSTERS", 0, of::Lifetime::Timeframe}, mClusterizer.getClusters()); + pc.outputs().snapshot(of::Output{"MID", "CLUSTERS", 0}, mClusterizer.getClusters()); LOG(debug) << "Sent " << mClusterizer.getClusters().size() << " clusters"; - pc.outputs().snapshot(of::Output{"MID", "CLUSTERSROF", 0, of::Lifetime::Timeframe}, mClusterizer.getROFRecords()); + pc.outputs().snapshot(of::Output{"MID", "CLUSTERSROF", 0}, mClusterizer.getROFRecords()); LOG(debug) << "Sent " << mClusterizer.getROFRecords().size() << " ROF"; mTimer += std::chrono::high_resolution_clock::now() - tStart; @@ -141,4 +141,4 @@ framework::DataProcessorSpec getClusterizerSpec(bool isMC, std::string_view inDa of::AlgorithmSpec{of::adaptFromTask(isMC)}}; } } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx b/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx index 98f9b8c08c654..e0d41cd8d91d2 100644 --- a/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx +++ b/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx @@ -127,7 +127,7 @@ std::vector buildOutputs(std::vector o std::vector outputs; for (auto& outSpec : outputSpecs) { auto matcher = framework::DataSpecUtils::asConcreteDataMatcher(outSpec); - outputs.emplace_back(framework::Output{matcher.origin, matcher.description, matcher.subSpec, framework::Lifetime::Timeframe}); + outputs.emplace_back(framework::Output{matcher.origin, matcher.description, matcher.subSpec}); } return outputs; } @@ -161,4 +161,4 @@ std::unique_ptr> getLabels(fram } // namespace specs } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx b/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx index f03eb6715798a..192b4c52be9cc 100644 --- a/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx @@ -60,8 +60,8 @@ class 
DecodedDataAggregatorDeviceDPL for (o2::header::DataHeader::SubSpecificationType subSpec = 0; subSpec < 3; ++subSpec) { EventType evtType = static_cast(subSpec); - pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATA", subSpec, of::Lifetime::Timeframe}, mAggregator.getData(evtType)); - pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATAROF", subSpec, of::Lifetime::Timeframe}, mAggregator.getROFRecords(evtType)); + pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATA", subSpec}, mAggregator.getData(evtType)); + pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATAROF", subSpec}, mAggregator.getROFRecords(evtType)); } mTimer += std::chrono::high_resolution_clock::now() - tStart; diff --git a/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx b/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx index 3445fad66642e..7f380d1afe13a 100644 --- a/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx @@ -55,19 +55,19 @@ class DigitsReaderDeviceDPL mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[0], 0, Lifetime::Timeframe}, "MIDDigit"}, + Output{header::gDataOriginMID, mDescriptions[0], 0}, "MIDDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[1], 0, Lifetime::Timeframe}, "MIDROFRecords"}, + Output{header::gDataOriginMID, mDescriptions[1], 0}, "MIDROFRecords"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[2], 0, Lifetime::Timeframe}, "MIDDigitMCLabels"}, + Output{header::gDataOriginMID, mDescriptions[2], 0}, "MIDDigitMCLabels"}, &mPublishDigits); } else { mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[0], 0, Lifetime::Timeframe}, "MIDDigit"}, + Output{header::gDataOriginMID, mDescriptions[0], 0}, "MIDDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[1], 0, Lifetime::Timeframe}, "MIDROFRecords"}, + Output{header::gDataOriginMID, mDescriptions[1], 0}, "MIDROFRecords"}, &mPublishDigits); } } diff --git a/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx b/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx index e0030eb102c8d..5a8df6f8e81cb 100644 --- a/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx @@ -66,9 +66,9 @@ void EntropyDecoderSpec::run(ProcessingContext& pc) size_t insize = 0; for (uint32_t it = 0; it < NEvTypes; it++) { insize += cols[it].size() * sizeof(o2::mid::ColumnData); - pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATA", it, Lifetime::Timeframe}, cols[it]); + pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATA", it}, cols[it]); insize += rofs[it].size() * sizeof(o2::mid::ROFRecord); - pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATAROF", it, Lifetime::Timeframe}, rofs[it]); + pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATAROF", it}, rofs[it]); } iosize.rawIn = insize; pc.outputs().snapshot({"ctfrep", 0}, iosize); diff --git a/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx b/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx index 44bf3dcc0ae6f..a472d6e28ff16 100644 --- a/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx +++ 
b/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx @@ -83,7 +83,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) // build references for looping over the data in BC increasing direction tfData.buildReferences(mCTFCoder.getIRFramesSelector()); - auto& buffer = pc.outputs().make>(Output{header::gDataOriginMID, "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{header::gDataOriginMID, "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, tfData); pc.outputs().snapshot({"ctfrep", 0}, iosize); iosize.rawIn = insize; diff --git a/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx b/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx index 99acaa08ac5f0..28d2ff953ea23 100644 --- a/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx @@ -117,7 +117,7 @@ class MaskMakerDeviceDPL if (mCounterSinceReset >= mNReset) { for (size_t itype = 0; itype < 2; ++itype) { auto masks = o2::mid::makeMasks(mScalers[itype], mCounterSinceReset, mThreshold, mRefMasks); - pc.outputs().snapshot(of::Output{header::gDataOriginMID, "MASKS", static_cast(itype + 1), of::Lifetime::Timeframe}, masks); + pc.outputs().snapshot(of::Output{header::gDataOriginMID, "MASKS", static_cast(itype + 1)}, masks); } mCounterSinceReset = 0; for (auto& scaler : mScalers) { @@ -160,4 +160,4 @@ framework::DataProcessorSpec getMaskMakerSpec(const FEEIdConfig& feeIdConfig, co of::Options{{"mid-mask-threshold", of::VariantType::Double, 0.9, {"Tolerated occupancy before producing a map"}}, {"mid-mask-reset", of::VariantType::Int, 100, {"Number of calibration events to be checked before resetting the scalers"}}}}; } } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx b/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx index 6d23ce9cdab1f..ba15644e2b17a 100644 --- a/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx @@ -82,8 +82,8 @@ class RawGBTDecoderDeviceDPL mTimerAlgo += std::chrono::high_resolution_clock::now() - tAlgoStart; - pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODED", dh->subSpecification, o2::framework::Lifetime::Timeframe}, data); - pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODEDROF", dh->subSpecification, o2::framework::Lifetime::Timeframe}, rofRecords); + pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODED", dh->subSpecification}, data); + pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODEDROF", dh->subSpecification}, rofRecords); mTimer += std::chrono::high_resolution_clock::now() - tStart; mNROFs += rofRecords.size(); diff --git a/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx b/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx index be1dd9124315c..e1044768e8ea6 100644 --- a/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx @@ -80,20 +80,20 @@ class TrackerDeviceDPL if (mIsMC) { std::unique_ptr> labels = pc.inputs().get*>("mid_clusterlabels"); mTrackLabeler.process(mTracker->getClusters(), tracks, *labels); - pc.outputs().snapshot(of::Output{"MID", "TRACKLABELS", 0, of::Lifetime::Timeframe}, mTrackLabeler.getTracksLabels()); + pc.outputs().snapshot(of::Output{"MID", "TRACKLABELS", 0}, mTrackLabeler.getTracksLabels()); LOG(debug) << "Sent " << mTrackLabeler.getTracksLabels().size() << " indexed tracks."; - 
pc.outputs().snapshot(of::Output{"MID", "TRCLUSLABELS", 0, of::Lifetime::Timeframe}, mTrackLabeler.getTrackClustersLabels()); + pc.outputs().snapshot(of::Output{"MID", "TRCLUSLABELS", 0}, mTrackLabeler.getTrackClustersLabels()); LOG(debug) << "Sent " << mTrackLabeler.getTrackClustersLabels().getIndexedSize() << " indexed track clusters."; } - pc.outputs().snapshot(of::Output{"MID", "TRACKS", 0, of::Lifetime::Timeframe}, tracks); + pc.outputs().snapshot(of::Output{"MID", "TRACKS", 0}, tracks); LOG(debug) << "Sent " << tracks.size() << " tracks."; - pc.outputs().snapshot(of::Output{"MID", "TRACKCLUSTERS", 0, of::Lifetime::Timeframe}, mTracker->getClusters()); + pc.outputs().snapshot(of::Output{"MID", "TRACKCLUSTERS", 0}, mTracker->getClusters()); LOG(debug) << "Sent " << mTracker->getClusters().size() << " track clusters."; - pc.outputs().snapshot(of::Output{"MID", "TRACKROFS", 0, of::Lifetime::Timeframe}, mTracker->getTrackROFRecords()); + pc.outputs().snapshot(of::Output{"MID", "TRACKROFS", 0}, mTracker->getTrackROFRecords()); LOG(debug) << "Sent " << mTracker->getTrackROFRecords().size() << " ROFs."; - pc.outputs().snapshot(of::Output{"MID", "TRCLUSROFS", 0, of::Lifetime::Timeframe}, mTracker->getClusterROFRecords()); + pc.outputs().snapshot(of::Output{"MID", "TRCLUSROFS", 0}, mTracker->getClusterROFRecords()); LOG(debug) << "Sent " << mTracker->getClusterROFRecords().size() << " ROFs."; mTimer += std::chrono::high_resolution_clock::now() - tStart; diff --git a/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx b/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx index ccfe1641b87b6..5d89eee81c629 100644 --- a/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx @@ -90,7 +90,7 @@ class ZeroSuppressionDeviceDPL } if (mUseMC) { - pc.outputs().snapshot(of::Output{header::gDataOriginMID, "DATALABELS", 0, of::Lifetime::Timeframe}, outMCContainer); + pc.outputs().snapshot(of::Output{header::gDataOriginMID, "DATALABELS", 0}, outMCContainer); } } @@ -113,4 +113,4 @@ framework::DataProcessorSpec getZeroSuppressionSpec(bool useMC, std::string_view of::AlgorithmSpec{of::adaptFromTask(useMC)}}; } } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx index 226630a8b1d81..4f81300431934 100644 --- a/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx @@ -177,7 +177,7 @@ void PHOSBadMapCalibDevice::sendOutput(DataAllocator& output) // Send change to QC LOG(info) << "[PHOSBadMapCalibDevice - run] Sending QC "; - output.snapshot(o2::framework::Output{"PHS", "BADMAPDIFF", 0, o2::framework::Lifetime::Timeframe}, mBadMapDiff); + output.snapshot(o2::framework::Output{"PHS", "BADMAPDIFF", 0}, mBadMapDiff); } bool PHOSBadMapCalibDevice::calculateBadMap() diff --git a/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx index 0a37589d3abe7..5c07c6fee8493 100644 --- a/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx @@ -229,7 +229,7 @@ void PHOSHGLGRatioCalibDevice::sendOutput(DataAllocator& output) } // Anyway send change to QC LOG(info) << "[PHOSHGLGRatioCalibDevice - sendOutput] Sending QC "; - output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0, o2::framework::Lifetime::Timeframe}, mRatioDiff); + 
output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0}, mRatioDiff); } DataProcessorSpec o2::phos::getHGLGRatioCalibSpec(bool useCCDB, bool forceUpdate) diff --git a/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx index 96f680f710fb0..42289a910debb 100644 --- a/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx @@ -135,7 +135,7 @@ void PHOSPedestalCalibDevice::sendOutput(DataAllocator& output) } // Anyway send change to QC LOG(info) << "[PHOSPedestalCalibDevice - run] Sending QC "; - output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0, o2::framework::Lifetime::Timeframe}, mPedDiff); + output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0}, mPedDiff); } void PHOSPedestalCalibDevice::calculatePedestals() diff --git a/Detectors/PHOS/workflow/src/CellConverterSpec.cxx b/Detectors/PHOS/workflow/src/CellConverterSpec.cxx index 330dd27c45251..370df48a8fc40 100644 --- a/Detectors/PHOS/workflow/src/CellConverterSpec.cxx +++ b/Detectors/PHOS/workflow/src/CellConverterSpec.cxx @@ -44,12 +44,12 @@ void CellConverterSpec::run(framework::ProcessingContext& ctx) auto digitsTR = ctx.inputs().get>("digitTriggerRecords"); if (!digitsTR.size()) { // nothing to process mOutputCells.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0, o2::framework::Lifetime::Timeframe}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0}, mOutputCells); mOutputCellTrigRecs.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputCellTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0}, mOutputCellTrigRecs); if (mPropagateMC) { mOutputTruthCont.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0}, mOutputTruthCont); } return; } @@ -128,10 +128,10 @@ void CellConverterSpec::run(framework::ProcessingContext& ctx) } LOG(info) << "[PHOSCellConverter - run] Writing " << mOutputCells.size() << " cells, " << mOutputCellTrigRecs.size() << " Trig Records " << mOutputTruthCont.getNElements() << " PHOS labels "; - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0, o2::framework::Lifetime::Timeframe}, mOutputCells); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputCellTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0}, mOutputCellTrigRecs); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0}, mOutputTruthCont); } } diff --git a/Detectors/PHOS/workflow/src/CellReaderSpec.cxx b/Detectors/PHOS/workflow/src/CellReaderSpec.cxx index 64f54c1baf189..c7d93fc20301f 100644 --- a/Detectors/PHOS/workflow/src/CellReaderSpec.cxx +++ b/Detectors/PHOS/workflow/src/CellReaderSpec.cxx @@ -44,10 +44,10 @@ void CellReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mCells.size() << " Cells in " << mTRs.size() << " TriggerRecords at entry " << ent; - 
pc.outputs().snapshot(Output{mOrigin, "CELLS", 0, Lifetime::Timeframe}, mCells); - pc.outputs().snapshot(Output{mOrigin, "CELLTRIGREC", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "CELLS", 0}, mCells); + pc.outputs().snapshot(Output{mOrigin, "CELLTRIGREC", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "CELLSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "CELLSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx b/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx index 27bce0f8745f4..13aa21258125d 100644 --- a/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx +++ b/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx @@ -78,16 +78,16 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) auto digitsTR = ctx.inputs().get>("digitTriggerRecords"); if (!digitsTR.size()) { // nothing to process mOutputClusters.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0}, mOutputClusters); if (mFullCluOutput) { mOutputCluElements.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0, o2::framework::Lifetime::Timeframe}, mOutputCluElements); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0}, mOutputCluElements); } mOutputClusterTrigRecs.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0}, mOutputClusterTrigRecs); if (mPropagateMC) { mOutputTruthCont.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } return; } @@ -120,13 +120,13 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) } else { LOG(debug) << "[PHOSClusterizer - run] Writing " << mOutputClusters.size() << " clusters and " << mOutputClusterTrigRecs.size() << " TR"; } - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0}, mOutputClusters); if (mFullCluOutput) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0, o2::framework::Lifetime::Timeframe}, mOutputCluElements); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0}, mOutputCluElements); } - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0}, mOutputClusterTrigRecs); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } } diff --git a/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx b/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx index 737856051af0c..70f5077b2f0c9 100644 --- a/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx @@ -44,10 +44,10 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < 
mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mDigits.size() << " Digits in " << mTRs.size() << " TriggerRecords at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx b/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx index 226184c1179c6..a932a45f1bb53 100644 --- a/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx @@ -54,7 +54,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"PHS", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"PHS", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, cells); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx b/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx index d6562fdadfee6..9a1289889424e 100644 --- a/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx +++ b/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx @@ -178,8 +178,8 @@ void EventBuilderSpec::run(framework::ProcessingContext& ctx) } } - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLS", 0, framework::Lifetime::Timeframe}, outputCells); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLTRIGREC", 0, framework::Lifetime::Timeframe}, outputTriggers); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLS", 0}, outputCells); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLTRIGREC", 0}, outputTriggers); } o2::framework::DataProcessorSpec o2::phos::getEventBuilderSpec() diff --git a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx index ea0336f0ab19a..292e083d4bb48 100644 --- a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx +++ b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx @@ -108,11 +108,11 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) contDeadBeef == maxWarn ? fmt::format(". 
{} such inputs in row received, stopping reporting", contDeadBeef) : ""); } mOutputCells.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId, o2::framework::Lifetime::Timeframe}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId}, mOutputCells); mOutputTriggerRecords.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId}, mOutputTriggerRecords); mOutputHWErrors.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { mOutputFitChi.clear(); ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); @@ -290,9 +290,9 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) mLastSize = 1.1 * mOutputCells.size(); LOG(debug) << "[PHOSRawToCellConverter - run] Writing " << mOutputCells.size() << " cells ..."; - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId, o2::framework::Lifetime::Timeframe}, mOutputCells); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); } diff --git a/Detectors/PHOS/workflow/src/ReaderSpec.cxx b/Detectors/PHOS/workflow/src/ReaderSpec.cxx index b68046b47c156..9a1fb23a553df 100644 --- a/Detectors/PHOS/workflow/src/ReaderSpec.cxx +++ b/Detectors/PHOS/workflow/src/ReaderSpec.cxx @@ -58,7 +58,6 @@ DataProcessorSpec getDigitsReaderSpec(bool propagateMC) processAttributes->terminateOnEod = ic.options().get("terminate-on-eod"); processAttributes->finished = false; processAttributes->datatype = "PHOSDigit"; - constexpr auto persistency = Lifetime::Timeframe; o2::header::DataHeader::SubSpecificationType subSpec = 0; if (propagateMC) { processAttributes->reader = std::make_shared(treename.c_str(), // tree name @@ -66,19 +65,19 @@ DataProcessorSpec getDigitsReaderSpec(bool propagateMC) nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - Output{"PHS", "DIGITS", subSpec, persistency}, "PHOSDigit"}, + Output{"PHS", "DIGITS", subSpec}, "PHOSDigit"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "DIGITTRIGREC", subSpec, persistency}, "PHOSDigitTrigRecords"}, - Output{"PHS", "DIGITSMCTR", subSpec, persistency}, "PHOSDigitMCTruth"); // name of mc label branch + Output{"PHS", "DIGITTRIGREC", subSpec}, "PHOSDigitTrigRecords"}, + Output{"PHS", "DIGITSMCTR", subSpec}, "PHOSDigitMCTruth"); // name of mc label branch } else { processAttributes->reader = std::make_shared(treename.c_str(), // tree name filename.c_str(), // input file name nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - 
Output{"PHS", "DIGITS", subSpec, persistency}, "PHOSDigit"}, + Output{"PHS", "DIGITS", subSpec}, "PHOSDigit"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "DIGITTRIGREC", subSpec, persistency}, "PHOSDigitTrigRecords"}); + Output{"PHS", "DIGITTRIGREC", subSpec}, "PHOSDigitTrigRecords"}); } } @@ -156,7 +155,6 @@ DataProcessorSpec getCellReaderSpec(bool propagateMC) processAttributes->terminateOnEod = ic.options().get("terminate-on-eod"); processAttributes->finished = false; processAttributes->datatype = "PHOSCell"; - constexpr auto persistency = Lifetime::Timeframe; o2::header::DataHeader::SubSpecificationType subSpec = 0; if (propagateMC) { processAttributes->reader = std::make_shared(treename.c_str(), // tree name @@ -164,10 +162,10 @@ DataProcessorSpec getCellReaderSpec(bool propagateMC) nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLS", subSpec, persistency}, "PHOSCell"}, + Output{"PHS", "CELLS", subSpec}, "PHOSCell"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLTRIGREC", subSpec, persistency}, "PHOSCellTrigRec"}, - Output{"PHS", "CELLSMCTR", subSpec, persistency}, + Output{"PHS", "CELLTRIGREC", subSpec}, "PHOSCellTrigRec"}, + Output{"PHS", "CELLSMCTR", subSpec}, "PHOSCellTrueMC"); // name of mc label branch } else { processAttributes->reader = std::make_shared(treename.c_str(), // tree name @@ -175,9 +173,9 @@ DataProcessorSpec getCellReaderSpec(bool propagateMC) nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLS", subSpec, persistency}, "PHOSCell"}, + Output{"PHS", "CELLS", subSpec}, "PHOSCell"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLTRIGREC", subSpec, persistency}, "PHOSCellTrigRec"}); + Output{"PHS", "CELLTRIGREC", subSpec}, "PHOSCellTrigRec"}); } } diff --git a/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx b/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx index 736a569cacd34..454be7a5fcb83 100644 --- a/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx +++ b/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx @@ -136,8 +136,8 @@ void StandaloneAODProducerSpec::run(ProcessingContext& pc) } // end of event loop // std::cout << "Finished cell loop" << std::endl; - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); mTimer.Stop(); } diff --git a/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h b/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h index f42c5676d55e5..4beef9541eb55 100644 --- a/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h +++ b/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h @@ -113,8 +113,8 @@ class TOFCalibCollectorDevice : public o2::framework::Task auto entries = collectedInfo.size(); // this means that we are ready to send the output auto entriesPerChannel = mCollector->getEntriesPerChannel(); - output.snapshot(Output{o2::header::gDataOriginTOF, "COLLECTEDINFO", 0, Lifetime::Timeframe}, collectedInfo); - output.snapshot(Output{o2::header::gDataOriginTOF, "ENTRIESCH", 0, Lifetime::Timeframe}, entriesPerChannel); + output.snapshot(Output{o2::header::gDataOriginTOF, "COLLECTEDINFO", 0}, collectedInfo); + output.snapshot(Output{o2::header::gDataOriginTOF, "ENTRIESCH", 0}, 
entriesPerChannel); mCollector->initOutput(); // reset the output for the next round } } diff --git a/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx b/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx index ebc61f05e85c0..fc0e2b9b2c30d 100644 --- a/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx +++ b/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx @@ -109,18 +109,18 @@ void CompressedDecodingTask::postData(ProcessingContext& pc) // LOG(info) << "TOF: N tof window decoded = " << n_tof_window << "(orbits = " << n_orbits << ") with " << digit_size << " digits"; // add digits in the output snapshot - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0, Lifetime::Timeframe}, *alldigits); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0, Lifetime::Timeframe}, *row); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0}, *alldigits); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0}, *row); std::vector& patterns = mDecoder.getPatterns(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0, Lifetime::Timeframe}, patterns); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0}, patterns); std::vector& errors = mDecoder.getErrors(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ERRORS", 0, Lifetime::Timeframe}, errors); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ERRORS", 0}, errors); DigitHeader& digitH = mDecoder.getDigitHeader(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0, Lifetime::Timeframe}, digitH); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0}, digitH); auto diagnosticFrequency = mDecoder.getDiagnosticFrequency(); diagnosticFrequency.setTimeStamp(mCreationTime / 1000); @@ -130,7 +130,7 @@ void CompressedDecodingTask::postData(ProcessingContext& pc) diagnosticFrequency.setTFIDInfo(tfinfo); //diagnosticFrequency.print(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0, Lifetime::Timeframe}, diagnosticFrequency); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0}, diagnosticFrequency); mDecoder.clear(); diff --git a/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx b/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx index b2c735af87241..400914c64021f 100644 --- a/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx +++ b/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx @@ -81,7 +81,7 @@ void EntropyDecoderSpec::run(ProcessingContext& pc) o2::dataformats::TFIDInfo tfinfo; o2::base::TFIDInfoHelper::fillTFIDInfo(pc, tfinfo); diagnostic.setTFIDInfo(tfinfo); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0, Lifetime::Timeframe}, diagnostic); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0}, diagnostic); pc.outputs().snapshot({"ctfrep", 0}, iosize); mTimer.Stop(); LOG(info) << "Decoded " << digits.size() << " digits in " << row.size() << " ROF, (" << iosize.asString() << ") in " << mTimer.CpuTime() - cput << " s"; diff --git a/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx b/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx index 8f215a0572961..3fc47955f53c0 100644 --- a/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx @@ -55,7 +55,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = 
pc.outputs().make>(Output{o2::header::gDataOriginTOF, "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{o2::header::gDataOriginTOF, "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, rofs, compDigits, pspan); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx b/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx index 4d3e0589c186d..48b1508692c24 100644 --- a/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx +++ b/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx @@ -224,25 +224,25 @@ class TOFDPLClustererTask } // send clusters - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0, Lifetime::Timeframe}, mClustersArray); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0, Lifetime::Timeframe}, mMultPerLongBC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0}, mClustersArray); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0}, mMultPerLongBC); // send labels if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mClsLabels); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0}, mClsLabels); } if (mIsCalib) { std::vector* clusterCalInfo = mClusterer.getInfoFromCluster(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0, Lifetime::Timeframe}, *clusterCalInfo); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0}, *clusterCalInfo); } if (mIsCosmic) { std::vector* cosmicInfo = mCosmicProcessor.getCosmicInfo(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0, Lifetime::Timeframe}, *cosmicInfo); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0}, *cosmicInfo); std::vector* cosmicTrack = mCosmicProcessor.getCosmicTrack(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0, Lifetime::Timeframe}, *cosmicTrack); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0}, *cosmicTrack); std::vector* cosmicTrackSize = mCosmicProcessor.getCosmicTrackSize(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0, Lifetime::Timeframe}, *cosmicTrackSize); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0}, *cosmicTrackSize); } mTimer.Stop(); diff --git a/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx b/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx index 9be38858250ca..116f93a06c208 100644 --- a/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx @@ -39,13 +39,13 @@ void CalibClusReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "Pushing " << mPclusInfos->size() << " TOF clusters calib info at entry " << ent; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0, Lifetime::Timeframe}, mClusInfos); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0}, mClusInfos); if (mIsCosmics) { LOG(debug) << "Pushing " << mPcosmicInfo->size() << " TOF cosmics info at entry " << ent; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0, Lifetime::Timeframe}, mCosmicInfo); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0, Lifetime::Timeframe}, mCosmicTrack); - 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0, Lifetime::Timeframe}, mCosmicTrackSize); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0}, mCosmicInfo); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0}, mCosmicTrack); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0}, mCosmicTrackSize); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx b/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx index d81905fb38888..1854be2f1b88f 100644 --- a/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx @@ -90,9 +90,9 @@ void CalibInfoReader::run(ProcessingContext& pc) LOG(debug) << "Current entry " << mCurrentEntry; LOG(debug) << "Send " << mVect.size() << " calib infos"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, mTOFTPC ? ddCalib_tpc : ddCalib, 0, Lifetime::Timeframe}, mVect); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, mTOFTPC ? ddCalib_tpc : ddCalib, 0}, mVect); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddDia, 0, Lifetime::Timeframe}, mDia); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddDia, 0}, mDia); usleep(100); } mGlobalEntry++; diff --git a/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx b/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx index 70f10b109a70e..e2979a8fc0dbf 100644 --- a/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx @@ -44,10 +44,10 @@ void ClusterReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(debug) << "Pushing " << mClustersPtr->size() << " TOF clusters at entry " << ent; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0, Lifetime::Timeframe}, mClusters); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0, Lifetime::Timeframe}, mClustersMult); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0}, mClusters); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0}, mClustersMult); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0}, mLabels); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx b/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx index f9fd57cf6b58f..30ddea9d5b9e7 100644 --- a/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx @@ -81,18 +81,18 @@ void DigitReader::run(ProcessingContext& pc) mDiagnostic = mFiller.getDiagnosticFrequency(); // add digits loaded in the output snapshot - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0, Lifetime::Timeframe}, mRow); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0, Lifetime::Timeframe}, mPatterns); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0, Lifetime::Timeframe}, mDiagnostic); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0}, mRow); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, 
"PATTERNS", 0}, mPatterns); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0}, mDiagnostic); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0}, mLabels); } static o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::CONTINUOUS; LOG(debug) << "TOF: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0}, roMode); } else { LOG(error) << "Cannot read the TOF digits !"; return; diff --git a/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx b/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx index 43264eb70cb58..48ccb83ca3cc3 100644 --- a/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx @@ -74,12 +74,12 @@ void TOFMatchedReader::run(ProcessingContext& pc) LOG(debug) << "Pushing " << mMatches.size() << " TOF matchings at entry " << currEntry; uint32_t tpcMatchSS = o2::globaltracking::getSubSpec(mSubSpecStrict && (!mMode) ? o2::globaltracking::MatchingType::Strict : o2::globaltracking::MatchingType::Standard); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMatchInfo[mMode], tpcMatchSS, Lifetime::Timeframe}, mMatches); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMatchInfo[mMode], tpcMatchSS}, mMatches); if (mReadTracks && (!mMode)) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "TOFTRACKS_TPC", tpcMatchSS, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "TOFTRACKS_TPC", tpcMatchSS}, mTracks); } if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMCMatchTOF[mMode], tpcMatchSS, Lifetime::Timeframe}, mLabelTOF); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMCMatchTOF[mMode], tpcMatchSS}, mLabelTOF); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h index 6316824966856..02857398f0e07 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h @@ -195,14 +195,14 @@ class TPCFLPIDCDevice : public o2::framework::Task fill1DIDCs(cru); LOGP(debug, "Sending 1D-IDCs to EPNs of size {} and weights of size {}", mOneDIDCs.first.size(), mOneDIDCs.second.size()); - output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPN(), subSpec, Lifetime::Timeframe}, mOneDIDCs.first); - output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPNWeights(), subSpec, Lifetime::Timeframe}, mOneDIDCs.second); + output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPN(), subSpec}, mOneDIDCs.first); + output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPNWeights(), subSpec}, mOneDIDCs.second); } void sendOutput(DataAllocator& output, const uint32_t cru) { const header::DataHeader::SubSpecificationType subSpec{cru << 7}; - output.adoptContainer(Output{gDataOriginTPC, getDataDescriptionIDCGroup(CRU(cru).side()), subSpec, Lifetime::Timeframe}, std::move(mIDCs[cru])); + output.adoptContainer(Output{gDataOriginTPC, getDataDescriptionIDCGroup(CRU(cru).side()), subSpec}, std::move(mIDCs[cru])); } void fill1DIDCs(const uint32_t cru) diff --git 
a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h index 7fd4c923feb19..e52234b3bb9ef 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h @@ -120,7 +120,7 @@ class TPCFourierTransformEPNSpec : public o2::framework::Task void sendOutput(DataAllocator& output, const Side side) { - output.snapshot(Output{gDataOriginTPC, getDataDescription(), header::DataHeader::SubSpecificationType{side}, Lifetime::Timeframe}, mIDCFourierTransform.getFourierCoefficients().getFourierCoefficients()); + output.snapshot(Output{gDataOriginTPC, getDataDescription(), header::DataHeader::SubSpecificationType{side}}, mIDCFourierTransform.getFourierCoefficients().getFourierCoefficients()); } }; diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h index 99d64220a2238..3ce0553f8dd7f 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h @@ -92,14 +92,14 @@ class TPCIntegrateIDCDevice : public o2::framework::Task for (const auto& idcs : mIDCs[sector].get()) { const header::DataHeader::SubSpecificationType subSpec{cru << 7}; if (mIDCFormat == IDCFormat::Sim) { - output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec, Lifetime::Timeframe}, idcs); + output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec}, idcs); } else { // TODO // convert to format from thorsten here // send....... // DUMMY FOR NOW // const TPCCRUHeader cruheader{cru, mIntegrationIntervalsPerTF}; - output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec, Lifetime::Timeframe}, idcs); + output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec}, idcs); } ++cru; } diff --git a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h index 531bf635a1e5b..d73f80adf8b4b 100644 --- a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h +++ b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h @@ -62,22 +62,21 @@ framework::DataProcessorSpec getPublisherSpec(PublisherConf const& config, bool // a creator callback for the actual reader instance auto creator = [dto, mco, propagateMC](const char* treename, const char* filename, int nofEvents, Reader::PublishingMode publishingMode, o2::header::DataHeader::SubSpecificationType subSpec, const char* branchname, const char* mcbranchname, Reader::SpecialPublishHook* publishhook = nullptr) { - constexpr auto persistency = o2::framework::Lifetime::Timeframe; if (propagateMC) { return std::make_shared(treename, filename, nofEvents, publishingMode, - Output{mco.origin, mco.description, subSpec, persistency}, + Output{mco.origin, mco.description, subSpec}, mcbranchname, - Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec, persistency}, branchname}, + Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec}, branchname}, publishhook); } else { return std::make_shared(treename, filename, nofEvents, publishingMode, - Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec, persistency}, branchname}, + Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec}, 
branchname}, publishhook); } }; diff --git a/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx b/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx index e02a78b3dca3a..7abd744440517 100644 --- a/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx +++ b/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx @@ -204,9 +204,9 @@ DataProcessorSpec createPublisherSpec(PublisherConf const& config, bool propagat header.sectorBits = 0; header.activeSectors = processAttributes->activeSectors; for (auto const& subSpec : processAttributes->zeroLengthOutputs) { - pc.outputs().make({dto.origin, dto.description, subSpec, Lifetime::Timeframe, {header}}); + pc.outputs().make({dto.origin, dto.description, subSpec, {header}}); if (pc.outputs().isAllowed({mco.origin, mco.description, subSpec})) { - pc.outputs().make({mco.origin, mco.description, subSpec, Lifetime::Timeframe, {header}}); + pc.outputs().make({mco.origin, mco.description, subSpec, {header}}); } } } diff --git a/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx b/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx index 66575dd843458..bfe1644bd8b5d 100644 --- a/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx +++ b/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx @@ -68,10 +68,10 @@ void TrackReader::run(ProcessingContext& pc) } } - pc.outputs().snapshot(Output{"TPC", "TRACKS", 0, Lifetime::Timeframe}, mTracksOut); - pc.outputs().snapshot(Output{"TPC", "CLUSREFS", 0, Lifetime::Timeframe}, mCluRefVecOut); + pc.outputs().snapshot(Output{"TPC", "TRACKS", 0}, mTracksOut); + pc.outputs().snapshot(Output{"TPC", "CLUSREFS", 0}, mCluRefVecOut); if (mUseMC) { - pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBL", 0, Lifetime::Timeframe}, mMCTruthOut); + pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBL", 0}, mMCTruthOut); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx b/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx index 2c363f4f7ed93..5dd3c16b3b1a8 100644 --- a/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx +++ b/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx @@ -36,7 +36,7 @@ void TriggerReader::run(ProcessingContext& pc) auto ent = mTree->GetReadEntry() + 1; mTree->GetEntry(ent); - pc.outputs().snapshot(Output{"TPC", "TRIGGERWORDS", 0, Lifetime::Timeframe}, *mTrig); + pc.outputs().snapshot(Output{"TPC", "TRIGGERWORDS", 0}, *mTrig); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx b/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx index cf11e3c7b39ce..adf0cba944c03 100644 --- a/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx +++ b/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx @@ -111,8 +111,7 @@ auto makePublishBuffer(framework::ProcessingContext& pc, int sector, uint64_t ac o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = activeSectors; - return &pc.outputs().make(Output{"TPC", "DIGITS", static_cast(sector), Lifetime::Timeframe, - header}); + return &pc.outputs().make(Output{"TPC", "DIGITS", static_cast(sector), header}); } template <> @@ -138,7 +137,7 @@ void publishBuffer(framework::ProcessingContext& pc, int secto LabelType* sharedlabels; #pragma omp critical sharedlabels = &pc.outputs().make>( - Output{"TPC", "DIGITSMCTR", static_cast(sector), Lifetime::Timeframe, header}); + Output{"TPC", 
"DIGITSMCTR", static_cast(sector), header}); accum->flatten_to(*sharedlabels); delete accum; diff --git a/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx b/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx index bb20bf86d531a..9e2e70e8dde46 100644 --- a/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx +++ b/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx @@ -88,7 +88,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) o2::header::Stack actual{*sectorHeaderMC}; std::swap(mcHeaderStack, actual); if (sectorHeaderMC->sector() < 0) { - pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, Lifetime::Timeframe, std::move(mcHeaderStack)}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, std::move(mcHeaderStack)}, fanSpec); } } } @@ -97,7 +97,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) o2::header::Stack actual{*sectorHeader}; std::swap(rawHeaderStack, actual); if (sectorHeader->sector() < 0) { - pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, Lifetime::Timeframe, std::move(rawHeaderStack)}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, std::move(rawHeaderStack)}, fanSpec); return; } } @@ -167,7 +167,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) // containers are created for clusters and MC labels per (sector,globalPadRow) address char* outputBuffer = nullptr; auto outputAllocator = [&pc, &fanSpec, &outputBuffer, &rawHeaderStack](size_t size) -> char* { - outputBuffer = pc.outputs().newChunk(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, Lifetime::Timeframe, std::move(rawHeaderStack)}, size).data(); + outputBuffer = pc.outputs().newChunk(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, std::move(rawHeaderStack)}, size).data(); return outputBuffer; }; MCLabelContainer mcout; @@ -188,7 +188,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) // serialize the complete list of MC label containers ConstMCLabelContainer labelsFlat; mcout.flatten_to(labelsFlat); - pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, Lifetime::Timeframe, std::move(mcHeaderStack)}, labelsFlat); + pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, std::move(mcHeaderStack)}, labelsFlat); } }; diff --git a/Detectors/TPC/workflow/src/ClustererSpec.cxx b/Detectors/TPC/workflow/src/ClustererSpec.cxx index 71043db3935be..310551a67753e 100644 --- a/Detectors/TPC/workflow/src/ClustererSpec.cxx +++ b/Detectors/TPC/workflow/src/ClustererSpec.cxx @@ -81,9 +81,9 @@ DataProcessorSpec getClustererSpec(bool sendMC) // forward the control information // FIXME define and use flags in TPCSectorHeader o2::tpc::TPCSectorHeader header{sector}; - pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, Lifetime::Timeframe, {header}}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, {header}}, fanSpec); if (DataRefUtils::isValid(mclabelref)) { - pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, Lifetime::Timeframe, {header}}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, {header}}, fanSpec); } return; } @@ -131,12 +131,12 @@ DataProcessorSpec getClustererSpec(bool sendMC) } // FIXME: that should be a case for pmr, want to send the content of the vector as a binary // block by using move semantics - auto 
outputPages = pc.outputs().make(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, Lifetime::Timeframe, {*sectorHeader}}, clusterArray.size()); + auto outputPages = pc.outputs().make(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, {*sectorHeader}}, clusterArray.size()); std::copy(clusterArray.begin(), clusterArray.end(), outputPages.begin()); if (DataRefUtils::isValid(mclabelref)) { ConstMCLabelContainer mcflat; mctruthArray.flatten_to(mcflat); - pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, Lifetime::Timeframe, {*sectorHeader}}, mcflat); + pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, {*sectorHeader}}, mcflat); } }; diff --git a/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx b/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx index 7e03748da0bc4..75fedb4a0effe 100644 --- a/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx @@ -133,7 +133,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) auto triggers = pc.inputs().get>("trigger"); auto cput = mTimer.CpuTime(); mTimer.Start(false); - auto& buffer = pc.outputs().make>(Output{"TPC", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"TPC", "CTFDATA", 0}); std::vector rejectHits, rejectTracks, rejectTrackHits, rejectTrackHitsReduced; CompressedClusters clustersFiltered = clusters; std::vector, std::vector>> tmpBuffer(std::max(mNThreads, 1)); diff --git a/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx b/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx index d0119f719193c..e946488073bf9 100644 --- a/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx +++ b/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx @@ -75,7 +75,7 @@ class KrBoxClusterFinderDevice : public o2::framework::Task { o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = (0x1 << sector); - output.snapshot(Output{gDataOriginTPC, "KRCLUSTERS", static_cast(sector), Lifetime::Timeframe, header}, clusters); + output.snapshot(Output{gDataOriginTPC, "KRCLUSTERS", static_cast(sector), header}, clusters); } }; diff --git a/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx b/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx index 36dfa5edc65f7..d968876eefccb 100644 --- a/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx +++ b/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx @@ -223,7 +223,7 @@ class KrRawFilterDevice : public o2::framework::Task { o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = (0x1 << sector); - output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), Lifetime::Timeframe, header}, digits); + output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), header}, digits); } }; diff --git a/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx b/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx index c7758d9f2bb4e..c42a93f7a7f04 100644 --- a/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx +++ b/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx @@ -110,7 +110,7 @@ o2::framework::DataProcessorSpec getLinkZSToDigitsSpec(int channel, const std::s header.activeSectors = processAttributes->activeSectors; // digit for now are transported per sector, not per lane // pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(channel), Lifetime::Timeframe, header}, - pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(sector), Lifetime::Timeframe, header}, + pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(sector), header}, const_cast&>(digits)); }; diff --git 
a/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx b/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx index d4291552ec151..2e60ba0bc72f6 100644 --- a/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx +++ b/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx @@ -141,7 +141,7 @@ void MIPTrackFilterDevice::run(ProcessingContext& pc) mMIPTracks.clear(); } -void MIPTrackFilterDevice::sendOutput(DataAllocator& output) { output.snapshot(Output{header::gDataOriginTPC, "MIPS", 0, Lifetime::Timeframe}, mMIPTracks); } +void MIPTrackFilterDevice::sendOutput(DataAllocator& output) { output.snapshot(Output{header::gDataOriginTPC, "MIPS", 0}, mMIPTracks); } void MIPTrackFilterDevice::endOfStream(EndOfStreamContext& eos) { diff --git a/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx b/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx index 5c465be53b8c8..658a60fb75fbf 100644 --- a/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx +++ b/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx @@ -226,10 +226,10 @@ class TPCDigitDumpDevice : public o2::framework::Task o2::tpc::TPCSectorHeader header{isector}; header.activeSectors = mActiveSectors; // digit for now are transported per sector, not per lane - output.snapshot(Output{"TPC", "DIGITS", static_cast(isector), Lifetime::Timeframe, header}, + output.snapshot(Output{"TPC", "DIGITS", static_cast(isector), header}, mDigitDump.getDigits(isector)); if (mSendCEdigits) { - output.snapshot(Output{"TPC", "CEDIGITS", static_cast(isector), Lifetime::Timeframe, header}, + output.snapshot(Output{"TPC", "CEDIGITS", static_cast(isector), header}, ceDigits[isector]); } } diff --git a/Detectors/TPC/workflow/src/SACProcessorSpec.cxx b/Detectors/TPC/workflow/src/SACProcessorSpec.cxx index 226bd8a410dc9..e69533a0bb6d3 100644 --- a/Detectors/TPC/workflow/src/SACProcessorSpec.cxx +++ b/Detectors/TPC/workflow/src/SACProcessorSpec.cxx @@ -129,8 +129,8 @@ class SACProcessorDevice : public Task void sendData(DataAllocator& output) { - output.snapshot(Output{"TPC", "REFTIMESAC", 0, Lifetime::Timeframe}, mDecoder.getDecodedData().referenceTime); - output.snapshot(Output{"TPC", "DECODEDSAC", 0, Lifetime::Timeframe}, mDecoder.getDecodedData().getGoodData()); + output.snapshot(Output{"TPC", "REFTIMESAC", 0}, mDecoder.getDecodedData().referenceTime); + output.snapshot(Output{"TPC", "DECODEDSAC", 0}, mDecoder.getDecodedData().getGoodData()); mDecoder.clearDecodedData(); } diff --git a/Detectors/TPC/workflow/src/ZSSpec.cxx b/Detectors/TPC/workflow/src/ZSSpec.cxx index c73d2ae406ebd..00c78ba463507 100644 --- a/Detectors/TPC/workflow/src/ZSSpec.cxx +++ b/Detectors/TPC/workflow/src/ZSSpec.cxx @@ -133,8 +133,8 @@ DataProcessorSpec getZSEncoderSpec(std::vector const& tpcSectors, bool outR } o2::tpc::TPCSectorHeader sh{0}; gsl::span outp(&page[0], offset); - pc.outputs().snapshot(Output{gDataOriginTPC, "TPCZS", 0, Lifetime::Timeframe, sh}, outp); - pc.outputs().snapshot(Output{gDataOriginTPC, "ZSSIZES", 0, Lifetime::Timeframe, sh}, sizes); + pc.outputs().snapshot(Output{gDataOriginTPC, "TPCZS", 0, sh}, outp); + pc.outputs().snapshot(Output{gDataOriginTPC, "ZSSIZES", 0, sh}, sizes); if (outRaw) { // ===| set up raw writer |=================================================== @@ -301,7 +301,7 @@ DataProcessorSpec getZStoDigitsSpec(std::vector const& tpcSectors) for (int i = 0; i < NSectors; i++) { LOG(info) << "digits in sector " << i << " : " << outDigits[i].size(); o2::tpc::TPCSectorHeader sh{i}; - pc.outputs().snapshot(Output{gDataOriginTPC, "DIGITS", (unsigned int)i, Lifetime::Timeframe, sh}, 
outDigits[i]); + pc.outputs().snapshot(Output{gDataOriginTPC, "DIGITS", (unsigned int)i, sh}, outDigits[i]); } }; return processingFct; diff --git a/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx b/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx index ed4be20a22569..e8255f98eff6c 100644 --- a/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx +++ b/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx @@ -243,7 +243,7 @@ DataProcessorSpec generateIDCsCRU(int lane, const unsigned int maxTFs, const std } } } - ctx.outputs().adoptContainer(Output{gDataOriginTPC, TPCIntegrateIDCDevice::getDataDescription(TPCIntegrateIDCDevice::IDCFormat::Sim), o2::header::DataHeader::SubSpecificationType{icru << 7}, Lifetime::Timeframe}, std::move(idcs)); + ctx.outputs().adoptContainer(Output{gDataOriginTPC, TPCIntegrateIDCDevice::getDataDescription(TPCIntegrateIDCDevice::IDCFormat::Sim), o2::header::DataHeader::SubSpecificationType{icru << 7}}, std::move(idcs)); } if (delay) { diff --git a/Detectors/TRD/reconstruction/src/EventRecord.cxx b/Detectors/TRD/reconstruction/src/EventRecord.cxx index 147b052a8ca3f..0e1a134e45b8f 100644 --- a/Detectors/TRD/reconstruction/src/EventRecord.cxx +++ b/Detectors/TRD/reconstruction/src/EventRecord.cxx @@ -77,16 +77,16 @@ void EventRecordContainer::sendData(o2::framework::ProcessingContext& pc, bool g counters.push_back(event.getCounters()); } - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "DIGITS", 0, o2::framework::Lifetime::Timeframe}, digits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRACKLETS", 0, o2::framework::Lifetime::Timeframe}, tracklets); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0, o2::framework::Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "DIGITS", 0}, digits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRACKLETS", 0}, tracklets); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0}, triggers); if (generatestats) { accumulateStats(); o2::base::TFIDInfoHelper::fillTFIDInfo(pc, mTFStats.mTFIDInfo); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "RAWSTATS", 0, o2::framework::Lifetime::Timeframe}, mTFStats); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "RAWSTATS", 0}, mTFStats); } if (sendLinkStats) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "LINKSTATS", 0, o2::framework::Lifetime::Timeframe}, counters); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "LINKSTATS", 0}, counters); } std::chrono::duration dataReadTime = std::chrono::high_resolution_clock::now() - dataReadStart; diff --git a/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h b/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h index 26be31d51c084..8f50c1af91183 100644 --- a/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h +++ b/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h @@ -67,8 +67,8 @@ void TRDKrClustererDevice::run(ProcessingContext& pc) LOGP(info, "Found {} Kr clusters in {} input trigger records. 
Timing: CPU: {}, Real: {}", mKrClFinder.getKrClusters().size(), triggerRecords.size(), timer.CpuTime(), timer.RealTime()); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "KRCLUSTER", 0, Lifetime::Timeframe}, mKrClFinder.getKrClusters()); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGKRCLS", 0, Lifetime::Timeframe}, mKrClFinder.getKrTrigRecs()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "KRCLUSTER", 0}, mKrClFinder.getKrClusters()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGKRCLS", 0}, mKrClFinder.getKrTrigRecs()); } void TRDKrClustererDevice::endOfStream(EndOfStreamContext& ec) diff --git a/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h b/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h index 375b36c54af17..a3c5b28aa8b63 100644 --- a/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h +++ b/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h @@ -63,7 +63,7 @@ class TRDGlobalTrackingQC : public Task mQC.reset(); mQC.setInput(recoData); mQC.run(); - pc.outputs().snapshot(Output{"TRD", "TRACKINGQC", 0, Lifetime::Timeframe}, mQC.getTrackQC()); + pc.outputs().snapshot(Output{"TRD", "TRACKINGQC", 0}, mQC.getTrackQC()); } void endOfStream(framework::EndOfStreamContext& ec) final {} void finaliseCCDB(framework::ConcreteDataMatcher& matcher, void* obj) final diff --git a/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h b/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h index 6ba185ee28ccb..a1e35f3a3482e 100644 --- a/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h +++ b/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h @@ -52,7 +52,7 @@ class PuseHeightDevice : public o2::framework::Task } if (mRunStopRequested) { std::vector mPHValues{}; // the calibration expects data at every TF, so inject dummy - pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0, Lifetime::Timeframe}, mPHValues); + pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0}, mPHValues); return; } RecoContainer recoData; @@ -61,7 +61,7 @@ class PuseHeightDevice : public o2::framework::Task mPulseHeight->setInput(recoData, &digits); mPulseHeight->reset(); mPulseHeight->process(); - pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0, Lifetime::Timeframe}, mPulseHeight->getPHData()); + pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0}, mPulseHeight->getPHData()); if (pc.transitionState() == TransitionHandlingState::Requested) { LOG(info) << "Run stop requested, finalizing"; mRunStopRequested = true; diff --git a/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx index d3a565b4c9804..cb76bb8bbad69 100644 --- a/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx @@ -53,7 +53,7 @@ void TRDCalibReader::run(ProcessingContext& pc) assert(currEntry < mTree->GetEntries()); // this should not happen mTree->GetEntry(currEntry); LOG(info) << "Pushing angular residual histograms filled with " << mAngResids.getNEntries() << " entries at tree entry " << currEntry; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0, Lifetime::Timeframe}, mAngResids); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0}, mAngResids); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx 
b/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx index 4ca6fd46e4492..92d001bc5120a 100644 --- a/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx @@ -58,13 +58,13 @@ void TRDDigitReaderSpec::run(ProcessingContext& pc) assert(currEntry < mTreeDigits->GetEntries()); // this should not happen mTreeDigits->GetEntry(currEntry); LOGP(info, "Pushing {} digits for tree entry {}", mDigits.size(), currEntry); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "DIGITS", mSubSpec, Lifetime::Timeframe}, mDigits); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "DIGITS", mSubSpec}, mDigits); if (mUseTriggerRecords) { LOGP(info, "Pushing {} trigger records for tree entry {}", mTriggerRecords.size(), currEntry); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", mSubSpec, Lifetime::Timeframe}, mTriggerRecords); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", mSubSpec}, mTriggerRecords); } if (mUseMC) { - auto& sharedlabels = pc.outputs().make>(Output{o2::header::gDataOriginTRD, "LABELS", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{o2::header::gDataOriginTRD, "LABELS", 0}); mLabels->copyandflatten(sharedlabels); } if (mTreeDigits->GetReadEntry() + 1 >= mTreeDigits->GetEntries()) { diff --git a/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx index 235c58c2ad302..7f873beb2ece1 100644 --- a/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx @@ -54,7 +54,7 @@ void TRDPHReader::run(ProcessingContext& pc) mTree->GetEntry(currEntry); LOG(info) << "Pushing vector of PH values filled with " << mPHValues.size() << " entries at tree entry " << currEntry; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "PULSEHEIGHT", 0, Lifetime::Timeframe}, mPHValues); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "PULSEHEIGHT", 0}, mPHValues); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx index 20a7b5d5ac234..cd9702a3d2385 100644 --- a/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx @@ -50,18 +50,18 @@ void TRDTrackReader::run(ProcessingContext& pc) if (mMode == Mode::TPCTRD) { uint32_t ss = o2::globaltracking::getSubSpec(mSubSpecStrict ? 
o2::globaltracking::MatchingType::Strict : o2::globaltracking::MatchingType::Standard); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss, Lifetime::Timeframe}, mTrigRec); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss}, mTracks); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss}, mTrigRec); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss, Lifetime::Timeframe}, mLabelsMatch); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss, Lifetime::Timeframe}, mLabelsTrd); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss}, mLabelsMatch); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss}, mLabelsTrd); } } else if (mMode == Mode::ITSTPCTRD) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0, Lifetime::Timeframe}, mTrigRec); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0}, mTracks); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0}, mTrigRec); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0, Lifetime::Timeframe}, mLabelsMatch); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0, Lifetime::Timeframe}, mLabelsTrd); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0}, mLabelsMatch); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0}, mLabelsTrd); } } diff --git a/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx index c588da696e321..864cc3d665af5 100644 --- a/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx @@ -72,20 +72,20 @@ void TRDTrackletReader::run(ProcessingContext& pc) mTreeTrklt->GetEntry(currEntry); LOG(info) << "Pushing " << mTriggerRecords.size() << " TRD trigger records at entry " << currEntry; LOG(info) << "Pushing " << mTracklets.size() << " uncalibrated TRD tracklets for these trigger records"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRACKLETS", 0, Lifetime::Timeframe}, mTracklets); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRACKLETS", 0}, mTracklets); if (mUseTrackletTransform) { assert(mTreeTrklt->GetEntries() == mTreeCTrklt->GetEntries()); mTreeCTrklt->GetEntry(currEntry); LOG(info) << "Pushing " << mTrackletsCal.size() << " calibrated TRD tracklets for these trigger records"; LOG(info) << "Pushing " << mTrigRecMask.size() << " flags for the given TRD trigger records"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "CTRACKLETS", 0, Lifetime::Timeframe}, mTrackletsCal); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRIGRECMASK", 0, Lifetime::Timeframe}, mTrigRecMask); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "CTRACKLETS", 0}, mTrackletsCal); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRIGRECMASK", 0}, mTrigRecMask); } - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0, Lifetime::Timeframe}, mTriggerRecords); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0}, mTriggerRecords); if (mUseMC) { 
LOG(info) << "Pushing " << mLabels.getNElements() << " TRD tracklet labels"; - pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0}, mLabels); } if (mTreeTrklt->GetReadEntry() + 1 >= mTreeTrklt->GetEntries()) { diff --git a/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx b/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx index 07243d2b316cb..d345dd74141ed 100644 --- a/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx @@ -76,7 +76,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"TRD", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"TRD", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, tracklets, digits); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx b/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx index c0a4d85acd1bc..1cd145a6c2e36 100644 --- a/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx @@ -177,17 +177,17 @@ class TRDDPLDigitizerTask : public o2::base::BaseDPLDigitizer LOGF(info, "TRD digitization timing: Cpu: %.3e Real: %.3e s", timer.CpuTime(), timer.RealTime()); LOG(info) << "TRD: Sending " << digitsAccum.size() << " digits"; - pc.outputs().snapshot(Output{"TRD", "DIGITS", 1, Lifetime::Timeframe}, digitsAccum); + pc.outputs().snapshot(Output{"TRD", "DIGITS", 1}, digitsAccum); if (mctruth) { LOG(info) << "TRD: Sending " << labelsAccum.getNElements() << " labels"; // we are flattening the labels and write to managed shared memory container for further communication - auto& sharedlabels = pc.outputs().make>(Output{"TRD", "LABELS", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"TRD", "LABELS", 0}); labelsAccum.flatten_to(sharedlabels); } LOG(info) << "TRD: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"TRD", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"TRD", "ROMode", 0}, mROMode); LOG(info) << "TRD: Sending trigger records"; - pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 1, Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 1}, triggers); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); finished = true; diff --git a/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx b/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx index 1be328b4bcbe9..abe3a6f45ec70 100644 --- a/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx @@ -496,19 +496,19 @@ void TRDGlobalTracking::run(ProcessingContext& pc) uint32_t ss = o2::globaltracking::getSubSpec(mStrict ? 
o2::globaltracking::MatchingType::Strict : o2::globaltracking::MatchingType::Standard); if (GTrackID::includesSource(GTrackID::Source::ITSTPC, mTrkMask)) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0, Lifetime::Timeframe}, tracksOutITSTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0, Lifetime::Timeframe}, trackTrigRecITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0}, tracksOutITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0}, trackTrigRecITSTPC); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0, Lifetime::Timeframe}, matchLabelsITSTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0, Lifetime::Timeframe}, trdLabelsITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0}, matchLabelsITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0}, trdLabelsITSTPC); } } if (GTrackID::includesSource(GTrackID::Source::TPC, mTrkMask)) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss, Lifetime::Timeframe}, tracksOutTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss, Lifetime::Timeframe}, trackTrigRecTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss}, tracksOutTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss}, trackTrigRecTPC); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss, Lifetime::Timeframe}, matchLabelsTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss, Lifetime::Timeframe}, trdLabelsTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss}, matchLabelsTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss}, trdLabelsTPC); } } diff --git a/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx b/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx index bb6d552cf3fee..0ac920c6e49c6 100644 --- a/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx @@ -113,8 +113,8 @@ void TRDTrackletTransformerSpec::run(o2::framework::ProcessingContext& pc) LOGF(info, "Found %lu tracklets in %lu trigger records. Applied filter for ITS IR frames: %i. 
Transformed %i tracklets.", tracklets.size(), trigRecs.size(), mTrigRecFilterActive, nTrackletsTransformed); - pc.outputs().snapshot(Output{"TRD", "CTRACKLETS", 0, Lifetime::Timeframe}, calibratedTracklets); - pc.outputs().snapshot(Output{"TRD", "TRIGRECMASK", 0, Lifetime::Timeframe}, trigRecBitfield); + pc.outputs().snapshot(Output{"TRD", "CTRACKLETS", 0}, calibratedTracklets); + pc.outputs().snapshot(Output{"TRD", "TRIGRECMASK", 0}, trigRecBitfield); } void TRDTrackletTransformerSpec::updateTimeDependentParams(ProcessingContext& pc) diff --git a/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx b/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx index 4c6ac24a0dc43..20d0b719b9bf7 100644 --- a/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx @@ -328,11 +328,11 @@ void TRDDPLTrapSimulatorTask::run(o2::framework::ProcessingContext& pc) LOG(info) << "Digit Sorting took: " << std::chrono::duration_cast(sortTime).count() << "ms"; LOG(info) << "Processing time for parallel region: " << std::chrono::duration_cast(parallelTime).count() << "ms"; - pc.outputs().snapshot(Output{"TRD", "TRACKLETS", 0, Lifetime::Timeframe}, tracklets); - pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 0, Lifetime::Timeframe}, triggerRecords); - pc.outputs().snapshot(Output{"TRD", "DIGITS", 0, Lifetime::Timeframe}, digitsOut); + pc.outputs().snapshot(Output{"TRD", "TRACKLETS", 0}, tracklets); + pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 0}, triggerRecords); + pc.outputs().snapshot(Output{"TRD", "DIGITS", 0}, digitsOut); if (mUseMC) { - pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0, Lifetime::Timeframe}, lblTracklets); + pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0}, lblTracklets); } LOG(debug) << "TRD Trap Simulator Device exiting"; diff --git a/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx b/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx index 6011e194685ad..4418b9af61300 100644 --- a/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx +++ b/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx @@ -75,12 +75,12 @@ void TRDTrackBasedCalibDevice::run(ProcessingContext& pc) if (mDoVdExBCalib) { mCalibrator.calculateAngResHistos(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0, Lifetime::Timeframe}, mCalibrator.getAngResHistos()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0}, mCalibrator.getAngResHistos()); } if (mDoGainCalib) { mCalibrator.calculateGainCalibObjs(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "GAINCALIBHISTS", 0, Lifetime::Timeframe}, mCalibrator.getGainCalibHistos()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "GAINCALIBHISTS", 0}, mCalibrator.getGainCalibHistos()); } mCalibrator.reset(); diff --git a/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx index e6c9b5bc4b910..5512964eec950 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx @@ -109,17 +109,17 @@ void ClustererDPL::run(ProcessingContext& pc) clusterLabels = std::make_unique>(); } mClusterer->process(mNThreads, reader, &clusCompVec, mPatterns ? 
&clusPattVec : nullptr, &clusROFVec, clusterLabels.get()); - pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); if (mUseMC) { - pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, *clusterLabels.get()); // at the moment requires snapshot + pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0}, *clusterLabels.get()); // at the moment requires snapshot std::vector clusterMC2ROframes(mc2rofs.size()); for (int i = mc2rofs.size(); i--;) { clusterMC2ROframes[i] = mc2rofs[i]; // Simply, replicate it from digits ? } - pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, clusterMC2ROframes); + pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0}, clusterMC2ROframes); } // TODO: in principle, after masking "overflow" pixels the MC2ROFRecord maxROF supposed to change, nominally to minROF diff --git a/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx index a679beed2c530..c2380be77f956 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx @@ -71,17 +71,17 @@ void DigitReader::run(ProcessingContext& pc) // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name. // To be fixed once the names-definition class is ready - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mDigROFRec); - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mDigROFRec); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); if (mUseCalib) { - pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0, Lifetime::Timeframe}, mCalib); + pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0}, mCalib); } if (mUseMC) { - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); plabels->copyandflatten(sharedlabels); delete plabels; - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mDigMC2ROFs); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mDigMC2ROFs); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx index 3d048bf981ea3..54d8951b5a0d0 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx @@ -44,14 +44,14 @@ void TrackReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " track in " << mROFRec.size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "IT3TrackROF", 0, Lifetime::Timeframe}, mROFRec); - pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0, Lifetime::Timeframe}, 
mClusInd); - pc.outputs().snapshot(Output{"IT3", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{mOrigin, "IT3TrackROF", 0}, mROFRec); + pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0}, mTracks); + pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0}, mClusInd); + pc.outputs().snapshot(Output{"IT3", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0}, mVerticesROFRec); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0, Lifetime::Timeframe}, mMCTruth); - pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0, Lifetime::Timeframe}, mMCVertTruth); + pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0}, mMCVertTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx index e2078046d1966..e4240401928b6 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx @@ -171,9 +171,9 @@ void TrackerDPL::run(ProcessingContext& pc) // the output vector however is created directly inside the message memory thus avoiding copy by // snapshot auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"IT3", "IT3TrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); + auto& rofs = pc.outputs().make>(Output{"IT3", "IT3TrackROF", 0}, rofsinput.begin(), rofsinput.end()); - auto& irFrames = pc.outputs().make>(Output{"IT3", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& irFrames = pc.outputs().make>(Output{"IT3", "IRFRAMES", 0}); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB int nBCPerTF = alpParams.roFrameLengthInBC; @@ -189,15 +189,15 @@ void TrackerDPL::run(ProcessingContext& pc) LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"IT3", "TRACKCLSID", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"IT3", "TRACKCLSID", 0}); std::vector trackLabels; std::vector verticesLabels; - auto& allTracks = pc.outputs().make>(Output{"IT3", "TRACKS", 0, Lifetime::Timeframe}); + auto& allTracks = pc.outputs().make>(Output{"IT3", "TRACKS", 0}); std::vector allTrackLabels; std::vector allVerticesLabels; - auto& vertROFvec = pc.outputs().make>(Output{"IT3", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"IT3", "VERTICES", 0, Lifetime::Timeframe}); + auto& vertROFvec = pc.outputs().make>(Output{"IT3", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"IT3", "VERTICES", 0}); TimeFrame* timeFrame = mChainITS->GetITSTimeframe(); timeFrame->resizeVectors(mNLayers); @@ -314,9 +314,9 @@ void TrackerDPL::run(ProcessingContext& pc) LOGP(info, "ITS3Tracker pushed {} track labels", allTrackLabels.size()); LOGP(info, "ITS3Tracker pushed {} vertex labels", allVerticesLabels.size()); - pc.outputs().snapshot(Output{"IT3", "TRACKSMCTR", 0, Lifetime::Timeframe}, allTrackLabels); - pc.outputs().snapshot(Output{"IT3", "VERTICESMCTR", 0, Lifetime::Timeframe}, allVerticesLabels); - pc.outputs().snapshot(Output{"IT3", "IT3TrackMC2ROF", 0, Lifetime::Timeframe}, mc2rofs); + pc.outputs().snapshot(Output{"IT3", "TRACKSMCTR", 0}, 
allTrackLabels); + pc.outputs().snapshot(Output{"IT3", "VERTICESMCTR", 0}, allVerticesLabels); + pc.outputs().snapshot(Output{"IT3", "IT3TrackMC2ROF", 0}, mc2rofs); } } mTimer.Stop(); diff --git a/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx index 015aa1a78977b..25fd3e530161a 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx @@ -41,8 +41,8 @@ void VertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mVerticesPtr->size() << " vertices in " << mVerticesROFRecPtr->size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{"IT3", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{"IT3", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0}, mVerticesROFRec); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/Vertexing/src/SVertexer.cxx b/Detectors/Vertexing/src/SVertexer.cxx index de167af74526e..120d49d44b4e5 100644 --- a/Detectors/Vertexing/src/SVertexer.cxx +++ b/Detectors/Vertexing/src/SVertexer.cxx @@ -117,15 +117,15 @@ void SVertexer::produceOutput(o2::framework::ProcessingContext& pc) std::sort(nbodySortID.begin(), nbodySortID.end(), [](const vid& a, const vid& b) { return a.vtxID < b.vtxID; }); // dpl output - auto& v0sIdx = pc.outputs().make>(o2f::Output{"GLO", "V0S_IDX", 0, o2f::Lifetime::Timeframe}); - auto& cascsIdx = pc.outputs().make>(o2f::Output{"GLO", "CASCS_IDX", 0, o2f::Lifetime::Timeframe}); - auto& body3Idx = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY_IDX", 0, o2f::Lifetime::Timeframe}); - auto& fullv0s = pc.outputs().make>(o2f::Output{"GLO", "V0S", 0, o2f::Lifetime::Timeframe}); - auto& fullcascs = pc.outputs().make>(o2f::Output{"GLO", "CASCS", 0, o2f::Lifetime::Timeframe}); - auto& full3body = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY", 0, o2f::Lifetime::Timeframe}); - auto& v0Refs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_V0REFS", 0, o2f::Lifetime::Timeframe}); - auto& cascRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_CASCREFS", 0, o2f::Lifetime::Timeframe}); - auto& vtx3bodyRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_3BODYREFS", 0, o2f::Lifetime::Timeframe}); + auto& v0sIdx = pc.outputs().make>(o2f::Output{"GLO", "V0S_IDX", 0}); + auto& cascsIdx = pc.outputs().make>(o2f::Output{"GLO", "CASCS_IDX", 0}); + auto& body3Idx = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY_IDX", 0}); + auto& fullv0s = pc.outputs().make>(o2f::Output{"GLO", "V0S", 0}); + auto& fullcascs = pc.outputs().make>(o2f::Output{"GLO", "CASCS", 0}); + auto& full3body = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY", 0}); + auto& v0Refs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_V0REFS", 0}); + auto& cascRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_CASCREFS", 0}); + auto& vtx3bodyRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_3BODYREFS", 0}); // sorted V0s v0sIdx.reserve(mNV0s); @@ -214,8 +214,8 @@ void SVertexer::produceOutput(o2::framework::ProcessingContext& pc) } } - auto& strTracksOut = pc.outputs().make>(o2f::Output{"GLO", "STRANGETRACKS", 0, o2f::Lifetime::Timeframe}); - auto& strClustOut = pc.outputs().make>(o2f::Output{"GLO", "CLUSUPDATES", 0, o2f::Lifetime::Timeframe}); + auto& strTracksOut = pc.outputs().make>(o2f::Output{"GLO", 
"STRANGETRACKS", 0}); + auto& strClustOut = pc.outputs().make>(o2f::Output{"GLO", "CLUSUPDATES", 0}); o2::pmr::vector mcLabsOut; strTracksOut.resize(mNStrangeTracks); strClustOut.resize(mNStrangeTracks); @@ -239,7 +239,7 @@ void SVertexer::produceOutput(o2::framework::ProcessingContext& pc) } if (mStrTracker->getMCTruthOn()) { - auto& strTrMCLableOut = pc.outputs().make>(o2f::Output{"GLO", "STRANGETRACKS_MC", 0, o2f::Lifetime::Timeframe}); + auto& strTrMCLableOut = pc.outputs().make>(o2f::Output{"GLO", "STRANGETRACKS_MC", 0}); strTrMCLableOut.swap(mcLabsOut); } } diff --git a/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx index 80b23c696b5a4..a1a83aada50f0 100644 --- a/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx @@ -101,16 +101,16 @@ void InterCalibEPNSpec::run(ProcessingContext& pc) mWorker.process(bcrec, energy, tdc, info); // Send intermediate calibration data and histograms - o2::framework::Output output("ZDC", "INTERCALIBDATA", 0, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "INTERCALIBDATA", 0); pc.outputs().snapshot(output, mWorker.mData); for (int ih = 0; ih < (2 * InterCalibData::NH); ih++) { if (mWorker.mH[ih] != nullptr) { - o2::framework::Output output("ZDC", "INTER_1DH", ih, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "INTER_1DH", ih); pc.outputs().snapshot(output, mWorker.mH[ih]->getBase()); } } for (int ih = 0; ih < InterCalibData::NH; ih++) { - o2::framework::Output output("ZDC", "INTER_2DH", ih, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "INTER_2DH", ih); pc.outputs().snapshot(output, mWorker.mC[ih]->getBase()); } } diff --git a/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx index 8b90ee3b0af36..6125baaa197f8 100644 --- a/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx @@ -114,10 +114,10 @@ void TDCCalibEPNSpec::run(ProcessingContext& pc) if (mVerbosity > DbgMedium && mModTF > 0) { LOG(info) << "Send intermediate calibration data mProcessed=" << mProcessed << " >= mModTF=" << mModTF; } - o2::framework::Output output("ZDC", "TDCCALIBDATA", 0, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "TDCCALIBDATA", 0); pc.outputs().snapshot(output, mWorker.mData); for (int ih = 0; ih < TDCCalibData::NTDC; ih++) { - o2::framework::Output output("ZDC", "TDC_1DH", ih, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "TDC_1DH", ih); pc.outputs().snapshot(output, mWorker.mTDC[ih]->getBase()); } mWorker.clear(); diff --git a/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx index 62ee862905524..72f606cc4f6ed 100644 --- a/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx @@ -107,7 +107,7 @@ void WaveformCalibEPNSpec::run(ProcessingContext& pc) mWorker.process(bcrec, energy, tdc, info, wave); // Send intermediate calibration data - o2::framework::Output output("ZDC", "WAVECALIBDATA", 0, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "WAVECALIBDATA", 0); pc.outputs().snapshot(output, mWorker.mData); } diff --git a/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h b/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h index b27f7e34353c0..70f7ce30647f5 100644 --- a/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h +++ b/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h @@ -95,9 +95,9 @@ class RawReaderZDC void 
makeSnapshot(o2::framework::ProcessingContext& pc) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSPD", 0, o2::framework::Lifetime::Timeframe}, mOrbitData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSPD", 0}, mOrbitData); } }; } // namespace zdc diff --git a/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx b/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx index 59b0e34d16ecd..e952111e0c6c3 100644 --- a/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx @@ -69,11 +69,11 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "ZDCDigitReader pushed " << zdcOrbitData.size() << " orbits with " << zdcBCData.size() << " bcs and " << zdcChData.size() << " digits"; - pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0, Lifetime::Timeframe}, zdcOrbitData); - pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0, Lifetime::Timeframe}, zdcBCData); - pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0, Lifetime::Timeframe}, zdcChData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0}, zdcOrbitData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0}, zdcBCData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0}, zdcChData); if (mUseMC) { - pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0}, labels); } uint64_t nextEntry = mTree->GetReadEntry() + 1; if (nextEntry >= mTree->GetEntries() || (mLastEntry >= 0 && nextEntry > mLastEntry)) { diff --git a/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx b/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx index d504d33483081..e9b63c5b49d5b 100644 --- a/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx +++ b/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx @@ -264,11 +264,11 @@ void DigitRecoSpec::run(ProcessingContext& pc) } // TODO: rate information for all channels // TODO: summary of reconstruction to be collected by DQM? 
- pc.outputs().snapshot(Output{"ZDC", "BCREC", 0, Lifetime::Timeframe}, recEvent.mRecBC); - pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0, Lifetime::Timeframe}, recEvent.mEnergy); - pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0, Lifetime::Timeframe}, recEvent.mTDCData); - pc.outputs().snapshot(Output{"ZDC", "INFO", 0, Lifetime::Timeframe}, recEvent.mInfo); - pc.outputs().snapshot(Output{"ZDC", "WAVE", 0, Lifetime::Timeframe}, recEvent.mWaveform); + pc.outputs().snapshot(Output{"ZDC", "BCREC", 0}, recEvent.mRecBC); + pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0}, recEvent.mEnergy); + pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0}, recEvent.mTDCData); + pc.outputs().snapshot(Output{"ZDC", "INFO", 0}, recEvent.mInfo); + pc.outputs().snapshot(Output{"ZDC", "WAVE", 0}, recEvent.mWaveform); mTimer.Stop(); } diff --git a/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx b/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx index 7c30a0f34d151..abbd821fcb749 100644 --- a/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx @@ -58,7 +58,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"ZDC", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"ZDC", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, bcdata, chans, peds); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx b/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx index acb0cef3e29be..18c620e427569 100644 --- a/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx +++ b/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx @@ -49,10 +49,10 @@ void RecEventReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "ZDC RecEventReader pushes " << mBCRecData->size() << " events with " << mBCRecData->size() << " energy, " << mZDCTDCData->size() << " TDC and " << mZDCInfo->size() << " info records at entry " << ent; - pc.outputs().snapshot(Output{"ZDC", "BCREC", 0, Lifetime::Timeframe}, *mBCRecData); - pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0, Lifetime::Timeframe}, *mZDCEnergy); - pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0, Lifetime::Timeframe}, *mZDCTDCData); - pc.outputs().snapshot(Output{"ZDC", "INFO", 0, Lifetime::Timeframe}, *mZDCInfo); + pc.outputs().snapshot(Output{"ZDC", "BCREC", 0}, *mBCRecData); + pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0}, *mZDCEnergy); + pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0}, *mZDCTDCData); + pc.outputs().snapshot(Output{"ZDC", "INFO", 0}, *mZDCInfo); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx b/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx index 20b9510243be4..ebea51c932c5c 100644 --- a/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx +++ b/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx @@ -64,10 +64,10 @@ void RecoReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "ZDCRecoReader pushed " << RecBC.size() << " b.c. 
" << Energy.size() << " Energies " << TDCData.size() << " TDCs " << Info.size() << " Infos"; - pc.outputs().snapshot(Output{"ZDC", "BCREC", 0, Lifetime::Timeframe}, RecBC); - pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0, Lifetime::Timeframe}, Energy); - pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0, Lifetime::Timeframe}, TDCData); - pc.outputs().snapshot(Output{"ZDC", "INFO", 0, Lifetime::Timeframe}, Info); + pc.outputs().snapshot(Output{"ZDC", "BCREC", 0}, RecBC); + pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0}, Energy); + pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0}, TDCData); + pc.outputs().snapshot(Output{"ZDC", "INFO", 0}, Info); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/GPU/Workflow/src/GPUWorkflowITS.cxx b/GPU/Workflow/src/GPUWorkflowITS.cxx index e82f2348255d7..1785ddd05f39c 100644 --- a/GPU/Workflow/src/GPUWorkflowITS.cxx +++ b/GPU/Workflow/src/GPUWorkflowITS.cxx @@ -134,8 +134,8 @@ int GPURecoWorkflowSpec::runITSTracking(o2::framework::ProcessingContext& pc) auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); - auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0}, rofsinput.begin(), rofsinput.end()); + auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0}); irFrames.reserve(rofs.size()); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB @@ -148,19 +148,19 @@ int GPURecoWorkflowSpec::runITSTracking(o2::framework::ProcessingContext& pc) if (mSpecConfig.processMC) { labels = pc.inputs().get*>("itsmclabels").release(); // get the array as read-only span, a snapshot is sent forward - pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0, Lifetime::Timeframe}, pc.inputs().get>("ITSMC2ROframes")); + pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0}, pc.inputs().get>("ITSMC2ROframes")); LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}); - auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}); - auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0}); + auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0}); + auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0}); // MC static pmr::vector dummyMCLabTracks, dummyMCLabVerts; - auto& allTrackLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}) : dummyMCLabTracks; - auto& allVerticesLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0, Lifetime::Timeframe}) : dummyMCLabVerts; + auto& allTrackLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0}) : dummyMCLabTracks; + auto& allVerticesLabels = mSpecConfig.processMC ? 
pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0}) : dummyMCLabVerts; std::uint32_t roFrame = 0; diff --git a/GPU/Workflow/src/GPUWorkflowPipeline.cxx b/GPU/Workflow/src/GPUWorkflowPipeline.cxx index c50f8f67cbd32..0352151b342c6 100644 --- a/GPU/Workflow/src/GPUWorkflowPipeline.cxx +++ b/GPU/Workflow/src/GPUWorkflowPipeline.cxx @@ -177,7 +177,7 @@ int GPURecoWorkflowSpec::handlePipeline(ProcessingContext& pc, GPUTrackingInOutP ptrs.tpcZS = &tpcZS; } if (mSpecConfig.enableDoublePipeline == 2) { - auto prepareBuffer = pc.outputs().make>(Output{gDataOriginGPU, "PIPELINEPREPARE", 0, Lifetime::Timeframe}, 0u); + auto prepareBuffer = pc.outputs().make>(Output{gDataOriginGPU, "PIPELINEPREPARE", 0}, 0u); size_t ptrsTotal = 0; const void* firstPtr = nullptr; diff --git a/GPU/Workflow/src/GPUWorkflowSpec.cxx b/GPU/Workflow/src/GPUWorkflowSpec.cxx index 40b6c13495a66..fb482df5b11c0 100644 --- a/GPU/Workflow/src/GPUWorkflowSpec.cxx +++ b/GPU/Workflow/src/GPUWorkflowSpec.cxx @@ -734,7 +734,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) }; setOutputAllocator("COMPCLUSTERSFLAT", mSpecConfig.outputCompClustersFlat, outputRegions.compressedClusters, std::make_tuple(gDataOriginTPC, (DataDescription) "COMPCLUSTERSFLAT", 0)); - setOutputAllocator("CLUSTERNATIVE", mClusterOutputIds.size() > 0, outputRegions.clustersNative, std::make_tuple(gDataOriginTPC, mSpecConfig.sendClustersPerSector ? (DataDescription) "CLUSTERNATIVETMP" : (DataDescription) "CLUSTERNATIVE", NSectors, Lifetime::Timeframe, clusterOutputSectorHeader), sizeof(o2::tpc::ClusterCountIndex)); + setOutputAllocator("CLUSTERNATIVE", mClusterOutputIds.size() > 0, outputRegions.clustersNative, std::make_tuple(gDataOriginTPC, mSpecConfig.sendClustersPerSector ? (DataDescription) "CLUSTERNATIVETMP" : (DataDescription) "CLUSTERNATIVE", NSectors, clusterOutputSectorHeader), sizeof(o2::tpc::ClusterCountIndex)); setOutputAllocator("CLSHAREDMAP", mSpecConfig.outputSharedClusterMap, outputRegions.sharedClusterMap, std::make_tuple(gDataOriginTPC, (DataDescription) "CLSHAREDMAP", 0)); setOutputAllocator("TRACKS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2, std::make_tuple(gDataOriginTPC, (DataDescription) "TRACKS", 0)); setOutputAllocator("CLUSREFS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2ClusRefs, std::make_tuple(gDataOriginTPC, (DataDescription) "CLUSREFS", 0)); @@ -909,7 +909,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) if (mTPCSectorMask & (1ul << i)) { DataHeader::SubSpecificationType subspec = i; clusterOutputSectorHeader.sectorBits = (1ul << i); - char* buffer = pc.outputs().make({gDataOriginTPC, "CLUSTERNATIVE", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, accessIndex.nClustersSector[i] * sizeof(*accessIndex.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)).data(); + char* buffer = pc.outputs().make({gDataOriginTPC, "CLUSTERNATIVE", subspec, {clusterOutputSectorHeader}}, accessIndex.nClustersSector[i] * sizeof(*accessIndex.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)).data(); o2::tpc::ClusterCountIndex* outIndex = reinterpret_cast(buffer); memset(outIndex, 0, sizeof(*outIndex)); for (int j = 0; j < o2::tpc::constants::MAXGLOBALPADROW; j++) { @@ -926,7 +926,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) } ConstMCLabelContainer contflat; cont.flatten_to(contflat); - pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, contflat); + pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, 
{clusterOutputSectorHeader}}, contflat); } } } @@ -937,7 +937,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) static_assert(sizeof(o2::tpc::ClusterCountIndex) == sizeof(accessIndex.nClusters)); memcpy(outIndex, &accessIndex.nClusters[0][0], sizeof(o2::tpc::ClusterCountIndex)); if (mSpecConfig.processMC && mSpecConfig.caClusterer && accessIndex.clustersMCTruth) { - pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, clustersMCBuffer.first); + pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, {clusterOutputSectorHeader}}, clustersMCBuffer.first); } } } @@ -952,17 +952,17 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) if (sendQAOutput) { mQA->postprocessExternal(copy1, copy2, copy3, copy4, out, mQATaskMask ? mQATaskMask : -1); } - pc.outputs().snapshot({gDataOriginTPC, "TRACKINGQA", 0, Lifetime::Timeframe}, out); + pc.outputs().snapshot({gDataOriginTPC, "TRACKINGQA", 0}, out); if (sendQAOutput) { mQA->cleanup(); } } if (mSpecConfig.outputErrorQA) { - pc.outputs().snapshot({gDataOriginGPU, "ERRORQA", 0, Lifetime::Timeframe}, mErrorQA); + pc.outputs().snapshot({gDataOriginGPU, "ERRORQA", 0}, mErrorQA); mErrorQA.clear(); // FIXME: This is a race condition once we run multi-threaded! } if (mSpecConfig.tpcTriggerHandling && !(mSpecConfig.zsOnTheFly || mSpecConfig.zsDecoder)) { - pc.outputs().make>(Output{gDataOriginTPC, "TRIGGERWORDS", 0, Lifetime::Timeframe}, 0u); + pc.outputs().make>(Output{gDataOriginTPC, "TRIGGERWORDS", 0}, 0u); } mTimer->Stop(); LOG(info) << "GPU Reconstruction time for this TF " << mTimer->CpuTime() - cput << " s (cpu), " << mTimer->RealTime() - realt << " s (wall)"; diff --git a/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx index 57fb11249b312..fc64106e39d79 100644 --- a/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx @@ -171,15 +171,15 @@ void DigitizerSpec::run(framework::ProcessingContext& pc) } LOG(debug) << "Have " << mLabels.getNElements() << " CPV labels "; // here we have all digits and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{"CPV", "DIGITS", 0, Lifetime::Timeframe}, mDigitsOut); - pc.outputs().snapshot(Output{"CPV", "DIGITTRIGREC", 0, Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(Output{"CPV", "DIGITS", 0}, mDigitsOut); + pc.outputs().snapshot(Output{"CPV", "DIGITTRIGREC", 0}, triggers); if (pc.outputs().isAllowed({"CPV", "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{"CPV", "DIGITSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"CPV", "DIGITSMCTR", 0}, mLabels); } // CPV is always a triggered detector const o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::PRESENT; LOG(debug) << "CPV: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"CPV", "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{"CPV", "ROMode", 0}, roMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx index 412e6d6129a76..a72b9e26581eb 100644 --- a/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx @@ -79,9 +79,9 @@ class CTPDPLDigitizerTask : public o2::base::BaseDPLDigitizer auto digits = mDigitizer.process(ginputs); // send out to next 
stage LOG(info) << "CTP DIGITS being sent."; - pc.outputs().snapshot(Output{"CTP", "DIGITS", 0, Lifetime::Timeframe}, digits); + pc.outputs().snapshot(Output{"CTP", "DIGITS", 0}, digits); LOG(info) << "CTP PRESENT being sent."; - pc.outputs().snapshot(Output{"CTP", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"CTP", "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "CTP Digitization took " << timer.CpuTime() << "s"; } diff --git a/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx index 99150a3cb83fb..901c7e709d265 100644 --- a/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx @@ -120,17 +120,17 @@ class FDDDPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitizer.flush(mDigitsBC, mDigitsCh, mDigitsTrig, labels); // send out to next stage - pc.outputs().snapshot(Output{"FDD", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"FDD", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"FDD", "TRIGGERINPUT", 0, Lifetime::Timeframe}, mDigitsTrig); + pc.outputs().snapshot(Output{"FDD", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"FDD", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"FDD", "TRIGGERINPUT", 0}, mDigitsTrig); if (pc.outputs().isAllowed({"FDD", "DIGITLBL", 0})) { - auto& sharedlabels = pc.outputs().make>(Output{"FDD", "DIGITLBL", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"FDD", "DIGITLBL", 0}); labels.flatten_to(sharedlabels); labels.clear_andfreememory(); } LOG(info) << "FDD: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"FDD", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"FDD", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx index 4eda984358634..9098240d17627 100644 --- a/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx @@ -138,14 +138,14 @@ class FT0DPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitizer.flush_all(mDigitsBC, mDigitsCh, mDigitsTrig, labels); // send out to next stage - pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, mDigitsTrig); + pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0}, mDigitsTrig); if (pc.outputs().isAllowed({"FT0", "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0}, labels); } LOG(info) << "FT0: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"FT0", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"FT0", "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx index 151175e0c0758..bb5174dfcbb2c 100644 
--- a/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx @@ -102,14 +102,14 @@ class FV0DPLDigitizerTask : public o2::base::BaseDPLDigitizer LOG(info) << "FV0: Sending " << mDigitsBC.size() << " digitsBC and " << mDigitsCh.size() << " digitsCh."; // send out to next stage - pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, mDigitsTrig); + pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0}, mDigitsTrig); if (pc.outputs().isAllowed({"FV0", "DIGITLBL", 0})) { - pc.outputs().snapshot(Output{"FV0", "DIGITLBL", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"FV0", "DIGITLBL", 0}, mLabels); } LOG(info) << "FV0: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"FV0", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"FV0", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx index 22ab3e5d7ef84..0adb9eb0a90df 100644 --- a/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx @@ -132,13 +132,13 @@ class HMPIDDPLDigitizerTask : public o2::base::BaseDPLDigitizer flushDigitsAndLabels(); // send out to next stage - pc.outputs().snapshot(Output{"HMP", "DIGITS", 0, Lifetime::Timeframe}, digitsAccum); - pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0, Lifetime::Timeframe}, mIntRecord); + pc.outputs().snapshot(Output{"HMP", "DIGITS", 0}, digitsAccum); + pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0}, mIntRecord); if (pc.outputs().isAllowed({"HMP", "DIGITLBL", 0})) { - pc.outputs().snapshot(Output{"HMP", "DIGITLBL", 0, Lifetime::Timeframe}, labelAccum); + pc.outputs().snapshot(Output{"HMP", "DIGITLBL", 0}, labelAccum); } LOG(info) << "HMP: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"HMP", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"HMP", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx index 9f03bd6733729..21f808f1e1ed2 100644 --- a/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx @@ -143,7 +143,7 @@ class ITS3DPLDigitizerTask : BaseDPLDigitizer mDigitizer.setMCLabels(&mLabels); // digits are directly put into DPL owned resource - auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}); + auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0}); auto accumulate = [this, &digitsAccum]() { // accumulate result of single event processing, called after processing every event supplied @@ -216,17 +216,17 @@ class ITS3DPLDigitizerTask : BaseDPLDigitizer // here we have all digits and labels and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{mOrigin, 
"DIGITSROF", 0, Lifetime::Timeframe}, mROFRecordsAccum); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mROFRecordsAccum); if (mWithMCTruth) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mMC2ROFRecordsAccum); - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mMC2ROFRecordsAccum); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); mLabelsAccum.flatten_to(sharedlabels); // free space of existing label containers mLabels.clear_andfreememory(); mLabelsAccum.clear_andfreememory(); } LOG(info) << mID.getName() << ": Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{mOrigin, "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{mOrigin, "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx index e1291fe0cb0d8..2a8c28bb4dbbd 100644 --- a/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx @@ -82,7 +82,7 @@ class ITSMFTDPLDigitizerTask : BaseDPLDigitizer mDigitizer.setMCLabels(&mLabels); // digits are directly put into DPL owned resource - auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}); + auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0}); auto accumulate = [this, &digitsAccum]() { // accumulate result of single event processing, called after processing every event supplied @@ -160,17 +160,17 @@ class ITSMFTDPLDigitizerTask : BaseDPLDigitizer // here we have all digits and labels and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mROFRecordsAccum); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mROFRecordsAccum); if (mWithMCTruth) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mMC2ROFRecordsAccum); - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mMC2ROFRecordsAccum); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); mLabelsAccum.flatten_to(sharedlabels); // free space of existing label containers mLabels.clear_andfreememory(); mLabelsAccum.clear_andfreememory(); } LOG(info) << mID.getName() << ": Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{mOrigin, "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{mOrigin, "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx index fc92020f25b21..187c24f962c1f 100644 --- a/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx @@ -111,12 +111,12 @@ class MCHDPLDigitizerTask : public o2::base::BaseDPLDigitizer dataformats::MCLabelContainer labels{}; auto nPileup = mDigitizer->digitize(rofs, digits, labels); - pc.outputs().snapshot(Output{"MCH", "DIGITS", 0, Lifetime::Timeframe}, digits); - pc.outputs().snapshot(Output{"MCH", "DIGITROFS", 0, Lifetime::Timeframe}, rofs); + pc.outputs().snapshot(Output{"MCH", "DIGITS", 0}, 
digits); + pc.outputs().snapshot(Output{"MCH", "DIGITROFS", 0}, rofs); if (pc.outputs().isAllowed({"MCH", "DIGITSLABELS", 0})) { - pc.outputs().snapshot(Output{"MCH", "DIGITSLABELS", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"MCH", "DIGITSLABELS", 0}, labels); } - pc.outputs().snapshot(Output{"MCH", "ROMode", 0, Lifetime::Timeframe}, + pc.outputs().snapshot(Output{"MCH", "ROMode", 0}, DigitizerParam::Instance().continuous ? o2::parameters::GRPObject::CONTINUOUS : o2::parameters::GRPObject::TRIGGERING); // we should be only called once; tell DPL that this process is ready to exit diff --git a/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h b/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h index e3359af01b1e5..8204f4e437cc3 100644 --- a/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h +++ b/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h @@ -61,7 +61,7 @@ class MCTruthReaderTask : public o2::framework::Task br->GetEntry(0); // publish the labels in a const shared memory container - auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS2", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS2", 0}); iocontainer->copyandflatten(sharedlabels); } else { @@ -74,7 +74,7 @@ class MCTruthReaderTask : public o2::framework::Task LOG(info) << "MCCONTAINER CHECK" << mccontainer->getNElements(); // publish the original labels - pc.outputs().snapshot(Output{"TST", "LABELS2", 0, Lifetime::Timeframe}, *mccontainer); + pc.outputs().snapshot(Output{"TST", "LABELS2", 0}, *mccontainer); } // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx b/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx index 1b0f325cba9d6..6888b118fb06c 100644 --- a/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx @@ -54,12 +54,12 @@ class MCTruthSourceTask : public o2::framework::Task if (mNew) { LOG(info) << "New serialization"; // we need to flatten it and write to managed shared memory container - auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS", 0}); container.flatten_to(sharedlabels); sleep(1); } else { LOG(info) << "Old serialization"; - pc.outputs().snapshot({"TST", "LABELS", 0, Lifetime::Timeframe}, container); + pc.outputs().snapshot({"TST", "LABELS", 0}, container); sleep(1); } diff --git a/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx b/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx index 9ce78d97418c3..78fb8988a2419 100644 --- a/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx @@ -84,7 +84,7 @@ class MCTruthWriterTask : public o2::framework::Task } if (mIO) { // this triggers the reader process - pc.outputs().snapshot({"TST", "TRIGGERREAD", 0, Lifetime::Timeframe}, labelfilename); + pc.outputs().snapshot({"TST", "TRIGGERREAD", 0}, labelfilename); } // we should be only called once; tell DPL that this process is ready to exit diff --git a/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx index c0a3be730c411..1dbd392fb9f73 100644 --- a/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx @@ -117,13 +117,13 @@ class MIDDPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitsMerger.process(digitsAccum, 
labelsAccum, rofRecords); LOG(debug) << "MID: Sending " << digitsAccum.size() << " digits."; - pc.outputs().snapshot(Output{"MID", "DIGITS", 0, Lifetime::Timeframe}, mDigitsMerger.getColumnData()); - pc.outputs().snapshot(Output{"MID", "DIGITSROF", 0, Lifetime::Timeframe}, mDigitsMerger.getROFRecords()); + pc.outputs().snapshot(Output{"MID", "DIGITS", 0}, mDigitsMerger.getColumnData()); + pc.outputs().snapshot(Output{"MID", "DIGITSROF", 0}, mDigitsMerger.getROFRecords()); if (pc.outputs().isAllowed({"MID", "DIGITLABELS", 0})) { - pc.outputs().snapshot(Output{"MID", "DIGITLABELS", 0, Lifetime::Timeframe}, mDigitsMerger.getMCContainer()); + pc.outputs().snapshot(Output{"MID", "DIGITLABELS", 0}, mDigitsMerger.getMCContainer()); } LOG(debug) << "MID: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"MID", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"MID", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx index db20e811941c2..0b7b968d22ff6 100644 --- a/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx @@ -160,16 +160,16 @@ void DigitizerSpec::run(framework::ProcessingContext& pc) } LOG(debug) << "Have " << mLabels.getNElements() << " PHOS labels "; // here we have all digits and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{"PHS", "DIGITS", 0, Lifetime::Timeframe}, mDigitsOut); - pc.outputs().snapshot(Output{"PHS", "DIGITTRIGREC", 0, Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(Output{"PHS", "DIGITS", 0}, mDigitsOut); + pc.outputs().snapshot(Output{"PHS", "DIGITTRIGREC", 0}, triggers); if (pc.outputs().isAllowed({"PHS", "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{"PHS", "DIGITSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"PHS", "DIGITSMCTR", 0}, mLabels); } // PHOS is always a triggering detector const o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::TRIGGERING; LOG(debug) << "PHOS: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"PHS", "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{"PHS", "ROMode", 0}, roMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx index c74e5ca6327fc..e512659686c86 100644 --- a/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx @@ -259,21 +259,21 @@ class TOFDPLDigitizerTask : public o2::base::BaseDPLDigitizer LOG(info) << "Post " << digitsVector->size() << " digits in " << readoutwindow->size() << " RO windows"; // here we have all digits and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0, Lifetime::Timeframe}, *digitsVector); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0}, *digitsVector); if (pc.outputs().isAllowed({o2::header::gDataOriginTOF, "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0, Lifetime::Timeframe}, *mcLabVecOfVec); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0}, *mcLabVecOfVec); 
} - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0, Lifetime::Timeframe}, *readoutwindow); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0}, *readoutwindow); // send empty pattern from digitizer (it may change in future) std::vector& patterns = mDigitizer->getPatterns(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0, Lifetime::Timeframe}, patterns); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0}, patterns); DigitHeader& digitH = mDigitizer->getDigitHeader(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0, Lifetime::Timeframe}, digitH); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0}, digitH); LOG(info) << "TOF: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0}, roMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx index 59f8589a56449..3f24bb1a9ee12 100644 --- a/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx @@ -282,7 +282,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer auto roMode = isContinuous ? o2::parameters::GRPObject::CONTINUOUS : o2::parameters::GRPObject::PRESENT; LOG(info) << "TPC: Sending ROMode= " << (mDigitizer.isContinuousReadout() ? "Continuous" : "Triggered") << " to GRPUpdater from channel " << dh->subSpecification; - pc.outputs().snapshot(Output{"TPC", "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{"TPC", "ROMode", 0}, roMode); } mWriteGRP = false; @@ -309,7 +309,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer return ContainerType(nullptr); } else { // default case - return &pc.outputs().make>(Output{"TPC", "DIGITS", static_cast(dh->subSpecification), Lifetime::Timeframe, header}); + return &pc.outputs().make>(Output{"TPC", "DIGITS", static_cast(dh->subSpecification), header}); } }; // lambda that snapshots the common mode vector to be sent out; prepares and attaches header with sector information @@ -318,8 +318,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer header.activeSectors = activeSectors; if (!mInternalWriter) { // note that snapshoting only works with non-const references (to be fixed?) 
- pc.outputs().snapshot(Output{"TPC", "COMMONMODE", static_cast(dh->subSpecification), Lifetime::Timeframe, - header}, + pc.outputs().snapshot(Output{"TPC", "COMMONMODE", static_cast(dh->subSpecification), header}, const_cast&>(commonMode)); } }; @@ -329,7 +328,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer header.activeSectors = activeSectors; if (mWithMCTruth) { if (!mInternalWriter) { - auto& sharedlabels = pc.outputs().make>(Output{"TPC", "DIGITSMCTR", static_cast(dh->subSpecification), Lifetime::Timeframe, header}); + auto& sharedlabels = pc.outputs().make>(Output{"TPC", "DIGITSMCTR", static_cast(dh->subSpecification), header}); labels.flatten_to(sharedlabels); } } @@ -340,8 +339,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer header.activeSectors = activeSectors; if (!mInternalWriter) { LOG(info) << "TPC: Send TRIGGERS for sector " << sector << " channel " << dh->subSpecification << " | size " << events.size(); - pc.outputs().snapshot(Output{"TPC", "DIGTRIGGERS", static_cast(dh->subSpecification), Lifetime::Timeframe, - header}, + pc.outputs().snapshot(Output{"TPC", "DIGTRIGGERS", static_cast(dh->subSpecification), header}, const_cast&>(events)); } }; diff --git a/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx index d616df2fadb35..cd07f6389c43f 100644 --- a/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx @@ -147,15 +147,15 @@ class ZDCDPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitizer.Finalize(mDigitsBC, mOrbitData); // send out to next stage - pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0, Lifetime::Timeframe}, mOrbitData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0}, mOrbitData); if (pc.outputs().isAllowed({"ZDC", "DIGITSLBL", 0})) { - pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0}, mLabels); } LOG(info) << "ZDC: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"ZDC", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"ZDC", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Utilities/DataSampling/src/DataSamplingPolicy.cxx b/Utilities/DataSampling/src/DataSamplingPolicy.cxx index de7aa5d8f6865..44a7a63efcf51 100644 --- a/Utilities/DataSampling/src/DataSamplingPolicy.cxx +++ b/Utilities/DataSampling/src/DataSamplingPolicy.cxx @@ -128,7 +128,7 @@ Output DataSamplingPolicy::prepareOutput(const ConcreteDataMatcher& input, Lifet auto result = mPaths.find(input); if (result != mPaths.end()) { auto dataType = DataSpecUtils::asConcreteDataTypeMatcher(result->second); - return Output{dataType.origin, dataType.description, input.subSpec, lifetime}; + return Output{dataType.origin, dataType.description, input.subSpec}; } else { return Output{header::gDataOriginInvalid, header::gDataDescriptionInvalid}; } diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index b32218ea43e58..de78993ff083e 100644 --- 
a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -114,7 +114,6 @@ void Dispatcher::run(ProcessingContext& ctx) routeAsConcreteDataType.origin, routeAsConcreteDataType.description, partInputHeader->subSpecification, - part.spec->lifetime, std::move(headerStack)}; send(ctx.outputs(), part, output); } diff --git a/run/dpl_eventgen.cxx b/run/dpl_eventgen.cxx index 9700ac32c6938..3090944031ad3 100644 --- a/run/dpl_eventgen.cxx +++ b/run/dpl_eventgen.cxx @@ -69,8 +69,8 @@ struct GeneratorTask { for (auto i = 0; i < std::min((int)aggregate, nEvents - eventCounter); ++i) { mctracks.clear(); genservice.generateEvent_MCTracks(mctracks, mcheader); - pc.outputs().snapshot(Output{"MC", "MCHEADER", 0, Lifetime::Timeframe}, mcheader); - pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0, Lifetime::Timeframe}, mctracks); + pc.outputs().snapshot(Output{"MC", "MCHEADER", 0}, mcheader); + pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0}, mctracks); ++eventCounter; } // report number of TFs injected for the rate limiter to work diff --git a/run/o2sim_hepmc_publisher.cxx b/run/o2sim_hepmc_publisher.cxx index 76fbddc1993f3..7172248ca68ac 100644 --- a/run/o2sim_hepmc_publisher.cxx +++ b/run/o2sim_hepmc_publisher.cxx @@ -135,8 +135,8 @@ struct O2simHepmcPublisher { } // add to the message - pc.outputs().snapshot(Output{"MC", "MCHEADER", 0, Lifetime::Timeframe}, mcHeader); - pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0, Lifetime::Timeframe}, mcTracks); + pc.outputs().snapshot(Output{"MC", "MCHEADER", 0}, mcHeader); + pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0}, mcTracks); mcTracks.clear(); ++eventCounter; } diff --git a/run/o2sim_kine_publisher.cxx b/run/o2sim_kine_publisher.cxx index 35fd48af199e0..83f82fd69cca3 100644 --- a/run/o2sim_kine_publisher.cxx +++ b/run/o2sim_kine_publisher.cxx @@ -44,8 +44,8 @@ struct O2simKinePublisher { for (auto i = 0; i < std::min((int)aggregate, nEvents - eventCounter); ++i) { auto mcevent = mcKinReader->getMCEventHeader(0, eventCounter); auto mctracks = mcKinReader->getTracks(0, eventCounter); - pc.outputs().snapshot(Output{"MC", "MCHEADER", 0, Lifetime::Timeframe}, mcevent); - pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0, Lifetime::Timeframe}, mctracks); + pc.outputs().snapshot(Output{"MC", "MCHEADER", 0}, mcevent); + pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0}, mctracks); ++eventCounter; } // report number of TFs injected for the rate limiter to work diff --git a/run/o2sim_mctracks_to_aod.cxx b/run/o2sim_mctracks_to_aod.cxx index e5460e346c3f9..ca64ce0705f1a 100644 --- a/run/o2sim_mctracks_to_aod.cxx +++ b/run/o2sim_mctracks_to_aod.cxx @@ -171,8 +171,8 @@ struct MctracksToAod { } } ++timeframe; - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, timeframe); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, timeframe); } }; From 5654c48597898d436a8504946c6b497832594a48 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:55:54 +0100 Subject: [PATCH 150/726] Remove Lifetime specification from Output The .lifetime field in Output is to be removed. This includes cases in which the value is not the default. 
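As a sketch of the pattern applied throughout this commit (the lines are taken from the hunks below), a call site that used to repeat a non-default lifetime on every message now constructs the Output from origin, description and subSpec only; the effective lifetime is meant to come from the OutputSpec declared for that route, since (as the follow-up commit puts it) Lifetime is a static property of the data flow:

  // before: lifetime restated per message, even when non-default
  output.snapshot(Output{"GLO", "FWDASSESSMENT", 0, Lifetime::Sporadic}, objar);
  // after: Lifetime::Sporadic is no longer part of the Output itself
  output.snapshot(Output{"GLO", "FWDASSESSMENT", 0}, objar);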
--- .../src/GlobalFwdMatchingAssessmentSpec.cxx | 2 +- Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx | 2 +- Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx | 2 +- Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx | 4 ++-- .../workflow/include/TPCWorkflow/TPCDistributeSACSpec.h | 2 +- Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx | 2 +- Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx | 4 ++-- Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx | 2 +- Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx | 8 ++++---- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx index e4c045d2a7d93..e896c097cd03d 100644 --- a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx @@ -109,7 +109,7 @@ void GlobalFwdAssessmentSpec::sendOutput(DataAllocator& output) TObjArray objar; mGloFwdAssessment->getHistos(objar); - output.snapshot(Output{"GLO", "FWDASSESSMENT", 0, Lifetime::Sporadic}, objar); + output.snapshot(Output{"GLO", "FWDASSESSMENT", 0}, objar); TFile* f = new TFile(Form("GlobalForwardAssessment.root"), "RECREATE"); objar.Write(); diff --git a/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx index 9778120a75389..d3e4c33457ae0 100644 --- a/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx +++ b/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx @@ -90,7 +90,7 @@ void MFTAssessmentSpec::sendOutput(DataAllocator& output) TObjArray objar; mMFTAssessment->getHistos(objar); - output.snapshot(Output{"MFT", "MFTASSESSMENT", 0, Lifetime::Sporadic}, objar); + output.snapshot(Output{"MFT", "MFTASSESSMENT", 0}, objar); TFile* f = new TFile(Form("MFTAssessment.root"), "RECREATE"); objar.Write(); diff --git a/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx index 1ed04baa77892..9ce61d0af38cb 100644 --- a/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx @@ -71,7 +71,7 @@ void PHOSL1phaseCalibDevice::endOfStream(o2::framework::EndOfStreamContext& ec) ec.outputs().snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "PHOS_L1phase", 0}, info); // Send summary to QC LOG(info) << "Sending histos to QC "; - ec.outputs().snapshot(o2::framework::Output{"PHS", "L1PHASEHISTO", 0, o2::framework::Lifetime::Sporadic}, mCalibrator->getQcHistos()); + ec.outputs().snapshot(o2::framework::Output{"PHS", "L1PHASEHISTO", 0}, mCalibrator->getQcHistos()); } o2::framework::DataProcessorSpec o2::phos::getPHOSL1phaseCalibDeviceSpec() diff --git a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx index 292e083d4bb48..bc650dfaa54b8 100644 --- a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx +++ b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx @@ -115,7 +115,7 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { mOutputFitChi.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0}, mOutputFitChi); } return; // empty TF, nothing to 
process } @@ -294,7 +294,7 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId}, mOutputTriggerRecords); ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0}, mOutputFitChi); } } diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h index f7c207ba3a27b..ee05a58b5dcbe 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h @@ -102,7 +102,7 @@ class TPCDistributeSACSpec : public o2::framework::Task void sendOutput(o2::framework::ProcessingContext& pc) { LOGP(info, "Sending SACs on lane: {} for {} TFs", mCurrentOutLane, mProcessedTFs); - pc.outputs().snapshot(Output{gDataOriginTPC, getDataDescriptionSACCCDB(), 0, Lifetime::Sporadic}, mCCDBTimeStamp); + pc.outputs().snapshot(Output{gDataOriginTPC, getDataDescriptionSACCCDB(), 0}, mCCDBTimeStamp); for (unsigned int i = 0; i < o2::tpc::GEMSTACKS; ++i) { pc.outputs().snapshot(Output{gDataOriginTPC, mDataDescrOut[mCurrentOutLane], header::DataHeader::SubSpecificationType{i}}, mSACs[i]); } diff --git a/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx b/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx index 9891d80a39503..5ef2bdc7adc51 100644 --- a/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx +++ b/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx @@ -106,7 +106,7 @@ class OccupancyFilterDevice : public o2::framework::Task { o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = (0x1 << sector); - output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), Lifetime::Sporadic, header}, digits); + output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), header}, digits); } }; diff --git a/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx b/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx index d48dde325c614..ea50a32c6a906 100644 --- a/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx +++ b/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx @@ -104,8 +104,8 @@ void TPCVDriftTglCalibSpec::sendOutput(DataAllocator& output) auto image = o2::ccdb::CcdbApi::createObjectImage(&payloadVec[i], &w); LOG(info) << "Sending object " << w.getPath() << "/" << w.getFileName() << " of size " << image->size() << " bytes, valid for " << w.getStartValidityTimestamp() << " : " << w.getEndValidityTimestamp(); - output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBPayload, "TPCVDTGL", i, Lifetime::Sporadic}, *image.get()); // vector - output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "TPCVDTGL", i, Lifetime::Sporadic}, w); // root-serialized + output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBPayload, "TPCVDTGL", i}, *image.get()); // vector + output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "TPCVDTGL", i}, w); // root-serialized } if (payloadVec.size()) { mCalibrator->initOutput(); // reset the outputs once they are already sent diff --git a/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx index 422221157f877..328f824918cd4 100644 --- a/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx +++ 
b/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx @@ -127,7 +127,7 @@ void BaselineCalibEPNSpec::run(ProcessingContext& pc) } // Send intermediate calibration data auto& summary = mWorker.mData.getSummary(); - o2::framework::Output outputData("ZDC", "BASECALIBDATA", 0, Lifetime::Sporadic); + o2::framework::Output outputData("ZDC", "BASECALIBDATA", 0); pc.outputs().snapshot(outputData, summary); if (pc.transitionState() == TransitionHandlingState::Requested) { // End of processing for this run diff --git a/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx index 32fe2ee2eb1fb..186cec68517ea 100644 --- a/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx @@ -113,19 +113,19 @@ void NoiseCalibEPNSpec::run(ProcessingContext& pc) if (mVerbosity > DbgMedium && mModTF > 0) { LOG(info) << "Send intermediate calibration data mProcessed=" << mProcessed << " >= mModTF=" << mModTF; } - o2::framework::Output outputData("ZDC", "NOISECALIBDATA", 0, Lifetime::Sporadic); + o2::framework::Output outputData("ZDC", "NOISECALIBDATA", 0); pc.outputs().snapshot(outputData, summary); for (int ih = 0; ih < NChannels; ih++) { { - o2::framework::Output output("ZDC", "NOISE_1DH", ih, Lifetime::Sporadic); + o2::framework::Output output("ZDC", "NOISE_1DH", ih); pc.outputs().snapshot(output, mWorker.mH[0][ih]->getBase()); } { - o2::framework::Output output("ZDC", "NOISE_1DH_S", ih, Lifetime::Sporadic); + o2::framework::Output output("ZDC", "NOISE_1DH_S", ih); pc.outputs().snapshot(output, mWorker.mH[1][ih]->getBase()); } { - o2::framework::Output output("ZDC", "NOISE_1DH_D", ih, Lifetime::Sporadic); + o2::framework::Output output("ZDC", "NOISE_1DH_D", ih); pc.outputs().snapshot(output, mWorker.mH[2][ih]->getBase()); } } From 5d49fa20979e4532ebc7522c441fbf58acc1c224 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:53:34 +0100 Subject: [PATCH 151/726] DPL: drop lifetime member from Output This is apparently not really used for anything and in general not passed correctly. Lifetime is a static property of the data flow, so it does not make sense to have it on a per message basis. 
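As a rough sketch of what remains of the struct after this commit (reconstructed from the Output.h hunk below, not the complete header):

  struct Output {
    header::DataOrigin origin;
    header::DataDescription description;
    header::DataHeader::SubSpecificationType subSpec = 0;
    header::Stack metaHeader = {};
    // constructors taking a Lifetime are removed; a header stack can still be attached
    Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, header::Stack&& stack);
  };

Call sites that attach a header stack therefore shrink by one argument, e.g. Output{key.origin, key.description, key.subSpec, std::move(stackcreator())} in the RootTreeReader hunk below.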
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 4 ++-- Framework/Core/include/Framework/Output.h | 18 ++++------------- Framework/Core/src/CommonServices.cxx | 2 +- Framework/Core/src/DataAllocator.cxx | 4 ++-- Framework/Core/test/test_DataAllocator.cxx | 20 +++++++++---------- Framework/Core/test/test_ParallelPipeline.cxx | 8 ++++---- Framework/Core/test/test_ProcessorOptions.cxx | 2 +- .../Core/test/test_StaggeringWorkflow.cxx | 6 +++--- Framework/Core/test/test_TableBuilder.cxx | 2 +- .../Utils/include/DPLUtils/RootTreeReader.h | 8 ++++---- Framework/Utils/src/Utils.cxx | 2 +- Framework/Utils/test/test_RootTreeReader.cxx | 5 ++--- .../test/test_RootTreeWriterWorkflow.cxx | 2 +- 13 files changed, 36 insertions(+), 47 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 0e067dc2f2d52..e45e1c6996a98 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -185,7 +185,7 @@ auto populateCacheWith(std::shared_ptr const& helper, LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); - Output output{concrete.origin, concrete.description, concrete.subSpec, route.matcher.lifetime}; + Output output{concrete.origin, concrete.description, concrete.subSpec}; auto&& v = allocator.makeVector(output); std::map metadata; std::map headers; @@ -343,7 +343,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() checkValidity = true; // never skip check if the cache is empty } LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; - Output output{"CTP", "OrbitReset", 0, Lifetime::Condition}; + Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); const auto& api = helper->getAPI(path); diff --git a/Framework/Core/include/Framework/Output.h b/Framework/Core/include/Framework/Output.h index b7d26e8dfd7ee..a2abf5f7600a9 100644 --- a/Framework/Core/include/Framework/Output.h +++ b/Framework/Core/include/Framework/Output.h @@ -28,7 +28,6 @@ struct Output { header::DataOrigin origin; header::DataDescription description; header::DataHeader::SubSpecificationType subSpec = 0; - enum Lifetime lifetime = Lifetime::Timeframe; header::Stack metaHeader = {}; Output(header::DataOrigin o, header::DataDescription d) : origin(o), description(d) {} @@ -38,14 +37,8 @@ struct Output { { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l) - : origin(o), description(d), subSpec(s), lifetime(l) - { - } - - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l, - header::Stack&& stack) - : origin(o), description(d), subSpec(s), lifetime(l), metaHeader(std::move(stack)) + Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, header::Stack&& stack) + : origin(o), description(d), subSpec(s), metaHeader(std::move(stack)) { } @@ -60,7 +53,6 @@ struct Output { : origin(rhs.origin), description(rhs.description), subSpec(rhs.subSpec), - lifetime(rhs.lifetime), metaHeader(std::move(rhs.metaHeader)) { } @@ -72,16 +64,14 @@ struct Output { origin = rhs.origin; description = rhs.description; subSpec = rhs.subSpec; - lifetime = rhs.lifetime; metaHeader = std::move(rhs.metaHeader); return *this; } bool operator==(const Output& that) const { - return origin == 
that.origin && description == that.description && subSpec == that.subSpec && - lifetime == that.lifetime; - }; + return origin == that.origin && description == that.description && subSpec == that.subSpec; + } }; } // namespace o2 diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 7d502216bc509..05b8fba47e532 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -425,7 +425,7 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() if (concrete.subSpec == 0) { continue; } - auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec, output.matcher.lifetime}); + auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec}); stfDist.id = timingInfo.timeslice; stfDist.firstOrbit = timingInfo.firstTForbit; stfDist.runNumber = timingInfo.runNumber; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 296e094bffe26..1230d2e942f6e 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -294,7 +294,7 @@ Output DataAllocator::getOutputByBind(OutputRef&& ref) if (allowedOutputRoutes[ri].matcher.binding.value == ref.label) { auto spec = allowedOutputRoutes[ri].matcher; auto dataType = DataSpecUtils::asConcreteDataTypeMatcher(spec); - return Output{dataType.origin, dataType.description, ref.subSpec, spec.lifetime, std::move(ref.headerStack)}; + return Output{dataType.origin, dataType.description, ref.subSpec, std::move(ref.headerStack)}; } } std::string availableRoutes; @@ -342,7 +342,7 @@ void DataAllocator::cookDeadBeef(const Output& spec) // We get the output route from the original spec, but we send it // using the binding of the deadbeef subSpecification. 
RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); - auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef, Lifetime::Timeframe}; + auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef}; auto headerMessage = headerMessageFromOutput(deadBeefOutput, routeIndex, header::gSerializationMethodNone, 0); addPartToContext(proxy.createOutputMessage(routeIndex, 0), deadBeefOutput, header::gSerializationMethodNone); diff --git a/Framework/Core/test/test_DataAllocator.cxx b/Framework/Core/test/test_DataAllocator.cxx index d880da72d7cb4..894fb5f38368a 100644 --- a/Framework/Core/test/test_DataAllocator.cxx +++ b/Framework/Core/test/test_DataAllocator.cxx @@ -45,7 +45,7 @@ using namespace o2::framework; // this function is only used to do the static checks for API return types void doTypeChecks() { - const Output output{"TST", "DUMMY", 0, Lifetime::Timeframe}; + const Output output{"TST", "DUMMY", 0}; // we require references to objects owned by allocator context static_assert(std::is_lvalue_reference().make(output))>::value); static_assert(std::is_lvalue_reference().make(output, "test"))>::value); @@ -83,24 +83,24 @@ DataProcessorSpec getSourceSpec() // picked by the framework is no serialization test::MetaHeader meta1{42}; test::MetaHeader meta2{23}; - pc.outputs().snapshot(Output{"TST", "MESSAGEABLE", 0, Lifetime::Timeframe, {meta1, meta2}}, a); - pc.outputs().snapshot(Output{"TST", "MSGBLEROOTSRLZ", 0, Lifetime::Timeframe}, + pc.outputs().snapshot(Output{"TST", "MESSAGEABLE", 0, {meta1, meta2}}, a); + pc.outputs().snapshot(Output{"TST", "MSGBLEROOTSRLZ", 0}, o2::framework::ROOTSerialized(a)); // class Polymorphic is not messageable, so the serialization type is deduced // from the fact that the type has a dictionary and can be ROOT-serialized. 
- pc.outputs().snapshot(Output{"TST", "ROOTNONTOBJECT", 0, Lifetime::Timeframe}, b); + pc.outputs().snapshot(Output{"TST", "ROOTNONTOBJECT", 0}, b); // vector of ROOT serializable class - pc.outputs().snapshot(Output{"TST", "ROOTVECTOR", 0, Lifetime::Timeframe}, c); + pc.outputs().snapshot(Output{"TST", "ROOTVECTOR", 0}, c); // deque of simple types - pc.outputs().snapshot(Output{"TST", "DEQUE", 0, Lifetime::Timeframe}, testDequePayload); + pc.outputs().snapshot(Output{"TST", "DEQUE", 0}, testDequePayload); // likewise, passed anonymously with char type and class name o2::framework::ROOTSerialized d(*((char*)&c), "vector"); - pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC", 0, Lifetime::Timeframe}, d); + pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC", 0}, d); // vector of ROOT serializable class wrapped with TClass info as hint auto* cl = TClass::GetClass(typeid(decltype(c))); ASSERT_ERROR(cl != nullptr); o2::framework::ROOTSerialized e(*((char*)&c), cl); - pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC2", 0, Lifetime::Timeframe}, e); + pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC2", 0}, e); // test the 'make' methods pc.outputs().make(OutputRef{"makesingle", 0}) = a; auto& multi = pc.outputs().make(OutputRef{"makespan", 0}, 3); @@ -111,7 +111,7 @@ DataProcessorSpec getSourceSpec() // test the adopt method auto freefct = [](void* data, void* hint) {}; // simply ignore the cleanup for the test static std::string teststring = "adoptchunk"; - pc.outputs().adoptChunk(Output{"TST", "ADOPTCHUNK", 0, Lifetime::Timeframe}, teststring.data(), teststring.length(), freefct, nullptr); + pc.outputs().adoptChunk(Output{"TST", "ADOPTCHUNK", 0}, teststring.data(), teststring.length(), freefct, nullptr); // test resizable data chunk, initial size 0 and grow auto& growchunk = pc.outputs().newChunk(OutputRef{"growchunk", 0}, 0); growchunk.resize(sizeof(o2::test::TriviallyCopyable)); @@ -307,7 +307,7 @@ DataProcessorSpec getSinkSpec() ASSERT_ERROR((object12[0] == o2::test::TriviallyCopyable{42, 23, 0xdead})); ASSERT_ERROR((object12[1] == o2::test::TriviallyCopyable{10, 20, 0xacdc})); // forward the read-only span on a different route - pc.outputs().snapshot(Output{"TST", "MSGABLVECTORCPY", 0, Lifetime::Timeframe}, object12); + pc.outputs().snapshot(Output{"TST", "MSGABLVECTORCPY", 0}, object12); LOG(info) << "extracting TNamed object from input13"; auto object13 = pc.inputs().get("input13"); diff --git a/Framework/Core/test/test_ParallelPipeline.cxx b/Framework/Core/test/test_ParallelPipeline.cxx index e13869873fca1..47106855a8f74 100644 --- a/Framework/Core/test/test_ParallelPipeline.cxx +++ b/Framework/Core/test/test_ParallelPipeline.cxx @@ -64,7 +64,7 @@ std::vector defineDataProcessing(ConfigContext const&) << *input.spec << ": " << *((int*)input.payload); auto const* dataheader = DataRefUtils::getHeader(input); //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); - auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification, Lifetime::Timeframe}); + auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification}); ASSERT_ERROR(ctx.inputs().get(input.spec->binding.c_str()) == parallelContext.index1D()); data = parallelContext.index1D(); } @@ -85,10 +85,10 @@ std::vector defineDataProcessing(ConfigContext const&) // TODO: there is a bug in the API for using OutputRef, returns an rvalue which can not be bound to // lvalue reference //auto& data = ctx.outputs().make(OutputRef{"output", 
dataheader->subSpecification}); - auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification, Lifetime::Timeframe}); + auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification}); data = ctx.inputs().get(input.spec->binding.c_str()); //auto& meta = ctx.outputs().make(OutputRef{"metadt", dataheader->subSpecification}); - auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification, Lifetime::Timeframe}); + auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification}); meta = dataheader->subSpecification; } }}}, @@ -145,7 +145,7 @@ std::vector defineDataProcessing(ConfigContext const&) if (multiplicities[pipeline] == 0) { continue; } - ctx.outputs().make(Output{"TST", "TRIGGER", subspecs[index], Lifetime::Timeframe}) = pipeline; + ctx.outputs().make(Output{"TST", "TRIGGER", subspecs[index]}) = pipeline; multiplicities[pipeline++]--; if (pipeline >= nPipelines) { pipeline = 0; diff --git a/Framework/Core/test/test_ProcessorOptions.cxx b/Framework/Core/test/test_ProcessorOptions.cxx index 8de90e0e23441..1751176eb639e 100644 --- a/Framework/Core/test/test_ProcessorOptions.cxx +++ b/Framework/Core/test/test_ProcessorOptions.cxx @@ -45,7 +45,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const&) } // there is nothing to do, simply stop the workflow but we have to send at least one message // to make sure that the callback of the consumer is called - ctx.outputs().make(Output{"TST", "TEST", 0, Lifetime::Timeframe}) = 42; + ctx.outputs().make(Output{"TST", "TEST", 0}) = 42; ctx.services().get().endOfStream(); *isReady = true; }; diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index 0e98c4324bc76..edc07d598465b 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -85,9 +85,9 @@ std::vector defineDataProcessing(ConfigContext const&) // since the snapshot copy is ready for sending it is scheduled but held back // because of the CompletionPolicy trigger matcher. This message will be // sent together with the second message. 
- outputs.snapshot(Output{"PROD", "CHANNEL", subspec, Lifetime::Timeframe}, subspec); + outputs.snapshot(Output{"PROD", "CHANNEL", subspec}, subspec); device.waitFor(100); - outputs.snapshot(Output{"PROD", "TRIGGER", subspec, Lifetime::Timeframe}, subspec); + outputs.snapshot(Output{"PROD", "TRIGGER", subspec}, subspec); device.waitFor(100); } control.endOfStream(); @@ -106,7 +106,7 @@ std::vector defineDataProcessing(ConfigContext const&) LOG(info) << "processing " << input.spec->binding << " " << data; // check if the channel binding starts with 'trigger' if (input.spec->binding.find("trigger") == 0) { - pc.outputs().make(Output{"PROC", "CHANNEL", data, Lifetime::Timeframe}) = data; + pc.outputs().make(Output{"PROC", "CHANNEL", data}) = data; } nActiveInputs++; } diff --git a/Framework/Core/test/test_TableBuilder.cxx b/Framework/Core/test/test_TableBuilder.cxx index a7b4739054d03..26a0f5a930b73 100644 --- a/Framework/Core/test/test_TableBuilder.cxx +++ b/Framework/Core/test/test_TableBuilder.cxx @@ -240,7 +240,7 @@ TEST_CASE("TestSoAIntegration") TEST_CASE("TestDataAllocatorReturnType") { - const Output output{"TST", "DUMMY", 0, Lifetime::Timeframe}; + const Output output{"TST", "DUMMY", 0}; } TEST_CASE("TestPodInjestion") diff --git a/Framework/Utils/include/DPLUtils/RootTreeReader.h b/Framework/Utils/include/DPLUtils/RootTreeReader.h index 42a8c9d01d0d0..109d79f9fad78 100644 --- a/Framework/Utils/include/DPLUtils/RootTreeReader.h +++ b/Framework/Utils/include/DPLUtils/RootTreeReader.h @@ -51,11 +51,11 @@ struct DefaultKey { enum Lifetime lifetime = Lifetime::Timeframe; DefaultKey(const Output& desc) - : origin(desc.origin), description(desc.description), subSpec(desc.subSpec), lifetime(desc.lifetime) + : origin(desc.origin), description(desc.description), subSpec(desc.subSpec) { } - operator Output() const { return Output{origin, description, subSpec, lifetime}; } + operator Output() const { return Output{origin, description, subSpec}; } }; } // namespace rtr @@ -302,7 +302,7 @@ class GenericRootTreeReader } auto snapshot = [&context, &stackcreator](const KeyType& key, const auto& object) { - context.outputs().snapshot(Output{key.origin, key.description, key.subSpec, key.lifetime, std::move(stackcreator())}, object); + context.outputs().snapshot(Output{key.origin, key.description, key.subSpec, std::move(stackcreator())}, object); }; char* data = nullptr; @@ -310,7 +310,7 @@ class GenericRootTreeReader mBranch->GetEntry(entry); // execute hook if it was registered; if this return true do not proceed further - if (mPublishHook != nullptr && (*mPublishHook).hook(mName, context, Output{mKey.origin, mKey.description, mKey.subSpec, mKey.lifetime, std::move(stackcreator())}, data)) { + if (mPublishHook != nullptr && (*mPublishHook).hook(mName, context, Output{mKey.origin, mKey.description, mKey.subSpec, std::move(stackcreator())}, data)) { } // try to figureout when we need to do something special diff --git a/Framework/Utils/src/Utils.cxx b/Framework/Utils/src/Utils.cxx index 3a52c46e074af..1dc0882c2f55d 100644 --- a/Framework/Utils/src/Utils.cxx +++ b/Framework/Utils/src/Utils.cxx @@ -28,7 +28,7 @@ namespace workflows Output getOutput(const o2f::OutputSpec outputSpec) { auto concrete = DataSpecUtils::asConcreteDataMatcher(outputSpec); - return Output{concrete.origin, concrete.description, concrete.subSpec, outputSpec.lifetime}; + return Output{concrete.origin, concrete.description, concrete.subSpec}; } // This method can convert a vector of OutputSpec into a vector of Output. 
diff --git a/Framework/Utils/test/test_RootTreeReader.cxx b/Framework/Utils/test/test_RootTreeReader.cxx index 0390a29f807b1..bf652fca6f97a 100644 --- a/Framework/Utils/test/test_RootTreeReader.cxx +++ b/Framework/Utils/test/test_RootTreeReader.cxx @@ -67,11 +67,10 @@ DataProcessorSpec getSourceSpec() testFile->Close(); } - constexpr auto persistency = Lifetime::Transient; auto reader = std::make_shared("testtree", // tree name fileName.c_str(), // input file name - RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0, persistency}, "msgblarray"}, - Output{"TST", "ARRAYOFDATA", 0, persistency}, + RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0}, "msgblarray"}, + Output{"TST", "ARRAYOFDATA", 0}, "dataarray", RootTreeReader::PublishingMode::Single); diff --git a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx index 3213c1775c758..9a9b849d8d9d2 100644 --- a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx +++ b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx @@ -143,7 +143,7 @@ DataProcessorSpec getSourceSpec() o2::test::Polymorphic a(*counter); pc.outputs().snapshot(OutputRef{"output", 0}, a); pc.outputs().snapshot(OutputRef{"output", 1}, a); - int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0, Lifetime::Timeframe}); + int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0}); metadata = *counter; *counter = *counter + 1; if (*counter >= sTreeSize) { From 148366b7096871e052a7585f3ec321974807c84c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 15:25:58 +0100 Subject: [PATCH 152/726] Remove default Lifetime::Timeframe when instanciating an Output This lifetime field will be removed soon, and this is unambiguous, because it's in any case the default. 
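The rewrite in this commit is mechanical and behaviour-preserving, because Lifetime::Timeframe was already the default of the field being dropped; a representative pair of lines from the AOD producer hunk below:

  // before: explicit default
  pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0, Lifetime::Timeframe}, mMetaDataKeys);
  // after: same meaning, the redundant Lifetime::Timeframe is simply omitted
  pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0}, mMetaDataKeys);

The same one-argument drop applies where a header stack follows the subSpec, as in the TPC digitizer hunks of the earlier commits.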
--- .../AOD/src/AODMcProducerWorkflowSpec.cxx | 8 +-- Detectors/AOD/src/AODProducerWorkflowSpec.cxx | 8 +-- .../src/CPVBadMapCalibDevice.cxx | 2 +- .../src/CPVGainCalibDevice.cxx | 2 +- .../src/CPVPedestalCalibDevice.cxx | 2 +- .../CPV/workflow/src/ClusterReaderSpec.cxx | 6 +-- .../CPV/workflow/src/ClusterizerSpec.cxx | 16 +++--- .../CPV/workflow/src/DigitReaderSpec.cxx | 6 +-- .../CPV/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../workflow/src/RawToDigitConverterSpec.cxx | 18 +++---- .../CTP/workflow/src/EntropyEncoderSpec.cxx | 2 +- Detectors/CTP/workflow/src/RawDecoderSpec.cxx | 8 +-- .../CTP/workflowIO/src/DigitReaderSpec.cxx | 4 +- .../testWorkflow/src/DCSDataReplaySpec.cxx | 2 +- .../src/DCSRandomDataGeneratorSpec.cxx | 2 +- .../include/EMCALWorkflow/PublisherSpec.h | 11 ++-- .../workflow/src/AnalysisClusterSpec.cxx | 2 +- .../EMCAL/workflow/src/CellConverterSpec.cxx | 6 +-- .../workflow/src/CellRecalibratorSpec.cxx | 10 ++-- .../EMCAL/workflow/src/ClusterizerSpec.cxx | 8 +-- .../EMCAL/workflow/src/EMCALDigitizerSpec.cxx | 10 ++-- .../EMCAL/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../workflow/src/RawToCellConverterSpec.cxx | 6 +-- .../src/StandaloneAODProducerSpec.cxx | 4 +- .../src/emc-channel-data-producer.cxx | 6 +-- .../include/FDDWorkflow/RawReaderFDD.h | 4 +- .../FIT/FDD/workflow/src/DigitReaderSpec.cxx | 8 +-- .../FDD/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../FDD/workflow/src/RecPointReaderSpec.cxx | 4 +- .../FDD/workflow/src/ReconstructorSpec.cxx | 4 +- .../FT0CalibSlewingCollectorSpec.h | 4 +- .../FT0TimeSpectraProcessor-Workflow.cxx | 2 +- .../FIT/FT0/workflow/src/DigitReaderSpec.cxx | 8 +-- .../FT0/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../FT0/workflow/src/RecPointReaderSpec.cxx | 4 +- .../FT0/workflow/src/ReconstructionSpec.cxx | 4 +- .../FIT/FV0/workflow/src/DigitReaderSpec.cxx | 8 +-- .../FV0/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../FV0/workflow/src/RecPointReaderSpec.cxx | 4 +- .../FV0/workflow/src/ReconstructionSpec.cxx | 4 +- .../include/FITWorkflow/RawReaderFIT.h | 14 ++--- .../FOCAL/workflow/src/RawDecoderSpec.cxx | 10 ++-- .../Filtering/src/FilteredTFReaderSpec.cxx | 14 ++--- .../helpers/src/NoInpDummyOutSpec.cxx | 2 +- .../qc/src/ITSTPCMatchingQCSpec.cxx | 2 +- .../readers/src/GlobalFwdTrackReaderSpec.cxx | 4 +- .../readers/src/IRFrameReaderSpec.cxx | 2 +- .../readers/src/MatchedMFTMCHReaderSpec.cxx | 2 +- .../readers/src/PrimaryVertexReaderSpec.cxx | 8 +-- .../readers/src/SecondaryVertexReaderSpec.cxx | 18 +++---- .../src/StrangenessTrackingReaderSpec.cxx | 6 +-- .../readers/src/TrackCosmicsReaderSpec.cxx | 4 +- .../readers/src/TrackTPCITSReaderSpec.cxx | 10 ++-- .../src/CosmicsMatchingSpec.cxx | 4 +- .../src/GlobalFwdMatchingSpec.cxx | 12 ++--- .../src/HMPMatcherSpec.cxx | 4 +- .../src/PrimaryVertexingSpec.cxx | 8 +-- .../src/StrangenessTrackingSpec.cxx | 6 +-- .../src/TOFMatcherSpec.cxx | 54 +++++++++---------- .../src/TPCITSMatchingSpec.cxx | 12 ++--- .../src/VertexTrackMatcherSpec.cxx | 4 +- .../study/src/TPCDataFilter.cxx | 10 ++-- .../src/TPCInterpolationSpec.cxx | 12 ++--- .../src/TPCUnbinnedResidualReaderSpec.cxx | 6 +-- .../HMPID/workflow/src/ClustersReaderSpec.cxx | 4 +- .../HMPID/workflow/src/DataDecoderSpec.cxx | 4 +- .../HMPID/workflow/src/DataDecoderSpec2.cxx | 4 +- .../HMPID/workflow/src/DigitsReaderSpec.cxx | 4 +- .../workflow/src/DigitsToClustersSpec.cxx | 4 +- .../HMPID/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../workflow/src/HMPMatchedReaderSpec.cxx | 4 +- .../src/TestDataReader.cxx | 28 +++++----- 
.../ITSMFT/ITS/workflow/src/ClustererSpec.cxx | 10 ++-- .../ITS/workflow/src/CookedTrackerSpec.cxx | 16 +++--- .../ITS/workflow/src/DCSGeneratorSpec.cxx | 2 +- .../ITS/workflow/src/TrackReaderSpec.cxx | 14 ++--- .../ITSMFT/ITS/workflow/src/TrackerSpec.cxx | 18 +++---- .../ITS/workflow/src/VertexReaderSpec.cxx | 4 +- .../ITSMFT/MFT/workflow/src/ClustererSpec.cxx | 10 ++-- .../MFT/workflow/src/TrackReaderSpec.cxx | 8 +-- .../ITSMFT/MFT/workflow/src/TrackerSpec.cxx | 10 ++-- .../common/workflow/src/ClusterReaderSpec.cxx | 12 ++--- .../common/workflow/src/DigitReaderSpec.cxx | 12 ++--- .../workflow/src/EntropyEncoderSpec.cxx | 2 +- .../common/workflow/src/STFDecoderSpec.cxx | 20 +++---- Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx | 10 ++-- .../MCH/Workflow/src/TrackAtVertexSpec.cxx | 2 +- .../Workflow/src/entropy-encoder-workflow.cxx | 2 +- .../MUON/MID/Workflow/src/ClusterizerSpec.cxx | 8 +-- .../MID/Workflow/src/ColumnDataSpecsUtils.cxx | 4 +- .../src/DecodedDataAggregatorSpec.cxx | 4 +- .../MUON/MID/Workflow/src/DigitReaderSpec.cxx | 10 ++-- .../MID/Workflow/src/EntropyDecoderSpec.cxx | 4 +- .../MID/Workflow/src/EntropyEncoderSpec.cxx | 2 +- .../MUON/MID/Workflow/src/MaskMakerSpec.cxx | 4 +- .../MID/Workflow/src/RawGBTDecoderSpec.cxx | 4 +- .../MUON/MID/Workflow/src/TrackerSpec.cxx | 12 ++--- .../MID/Workflow/src/ZeroSuppressionSpec.cxx | 4 +- .../PHOS/calib/src/PHOSBadMapCalibDevice.cxx | 2 +- .../calib/src/PHOSHGLGRatioCalibDevice.cxx | 2 +- .../calib/src/PHOSPedestalCalibDevice.cxx | 2 +- .../PHOS/workflow/src/CellConverterSpec.cxx | 12 ++--- .../PHOS/workflow/src/CellReaderSpec.cxx | 6 +-- .../PHOS/workflow/src/ClusterizerSpec.cxx | 16 +++--- .../PHOS/workflow/src/DigitReaderSpec.cxx | 6 +-- .../PHOS/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../PHOS/workflow/src/EventBuilderSpec.cxx | 4 +- .../workflow/src/RawToCellConverterSpec.cxx | 12 ++--- Detectors/PHOS/workflow/src/ReaderSpec.cxx | 22 ++++---- .../src/StandaloneAODProducerSpec.cxx | 4 +- .../testWorkflow/TOFCalibCollectorSpec.h | 4 +- .../workflow/src/CompressedDecodingTask.cxx | 12 ++--- .../TOF/workflow/src/EntropyDecoderSpec.cxx | 2 +- .../TOF/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../TOF/workflow/src/TOFClusterizerSpec.cxx | 14 ++--- .../workflowIO/src/CalibClusReaderSpec.cxx | 8 +-- .../workflowIO/src/CalibInfoReaderSpec.cxx | 4 +- .../TOF/workflowIO/src/ClusterReaderSpec.cxx | 6 +-- .../TOF/workflowIO/src/DigitReaderSpec.cxx | 12 ++--- .../workflowIO/src/TOFMatchedReaderSpec.cxx | 6 +-- .../include/TPCWorkflow/TPCFLPIDCSpec.h | 6 +-- .../TPCWorkflow/TPCFourierTransformEPNSpec.h | 2 +- .../include/TPCWorkflow/TPCIntegrateIDCSpec.h | 4 +- .../include/TPCReaderWorkflow/PublisherSpec.h | 7 ++- .../workflow/readers/src/PublisherSpec.cxx | 4 +- .../workflow/readers/src/TrackReaderSpec.cxx | 6 +-- .../readers/src/TriggerReaderSpec.cxx | 2 +- .../workflow/src/ChunkedDigitPublisher.cxx | 5 +- .../workflow/src/ClusterDecoderRawSpec.cxx | 8 +-- Detectors/TPC/workflow/src/ClustererSpec.cxx | 8 +-- .../TPC/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../TPC/workflow/src/KryptonClustererSpec.cxx | 2 +- .../TPC/workflow/src/KryptonRawFilterSpec.cxx | 2 +- .../TPC/workflow/src/LinkZSToDigitsSpec.cxx | 2 +- .../TPC/workflow/src/MIPTrackFilterSpec.cxx | 2 +- .../TPC/workflow/src/RawToDigitsSpec.cxx | 4 +- .../TPC/workflow/src/SACProcessorSpec.cxx | 4 +- Detectors/TPC/workflow/src/ZSSpec.cxx | 6 +-- .../workflow/test/test_ft_EPN_Aggregator.cxx | 2 +- .../TRD/reconstruction/src/EventRecord.cxx | 10 ++-- 
.../include/TRDWorkflow/KrClustererSpec.h | 4 +- .../TRDWorkflow/TRDGlobalTrackingQCSpec.h | 2 +- .../include/TRDWorkflow/TRDPulseHeightSpec.h | 4 +- .../workflow/io/src/TRDCalibReaderSpec.cxx | 2 +- .../workflow/io/src/TRDDigitReaderSpec.cxx | 6 +-- .../TRD/workflow/io/src/TRDPHReaderSpec.cxx | 2 +- .../workflow/io/src/TRDTrackReaderSpec.cxx | 16 +++--- .../workflow/io/src/TRDTrackletReaderSpec.cxx | 10 ++-- .../TRD/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../TRD/workflow/src/TRDDigitizerSpec.cxx | 8 +-- .../workflow/src/TRDGlobalTrackingSpec.cxx | 16 +++--- .../src/TRDTrackletTransformerSpec.cxx | 4 +- .../TRD/workflow/src/TRDTrapSimulatorSpec.cxx | 8 +-- .../TRD/workflow/src/TrackBasedCalibSpec.cxx | 4 +- .../ITS3/workflow/src/ClustererSpec.cxx | 10 ++-- .../ITS3/workflow/src/DigitReaderSpec.cxx | 10 ++-- .../ITS3/workflow/src/TrackReaderSpec.cxx | 14 ++--- .../ITS3/workflow/src/TrackerSpec.cxx | 18 +++---- .../ITS3/workflow/src/VertexReaderSpec.cxx | 4 +- Detectors/Vertexing/src/SVertexer.cxx | 24 ++++----- Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx | 6 +-- Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx | 4 +- .../ZDC/calib/src/WaveformCalibEPNSpec.cxx | 2 +- .../ZDC/raw/include/ZDCRaw/RawReaderZDC.h | 6 +-- .../ZDC/workflow/src/DigitReaderSpec.cxx | 8 +-- Detectors/ZDC/workflow/src/DigitRecoSpec.cxx | 10 ++-- .../ZDC/workflow/src/EntropyEncoderSpec.cxx | 2 +- .../ZDC/workflow/src/RecEventReaderSpec.cxx | 8 +-- Detectors/ZDC/workflow/src/RecoReaderSpec.cxx | 8 +-- Framework/Core/include/Framework/Output.h | 5 ++ GPU/Workflow/src/GPUWorkflowITS.cxx | 18 +++---- GPU/Workflow/src/GPUWorkflowPipeline.cxx | 2 +- GPU/Workflow/src/GPUWorkflowSpec.cxx | 14 ++--- .../src/CPVDigitizerSpec.cxx | 8 +-- .../src/CTPDigitizerSpec.cxx | 4 +- .../src/FDDDigitizerSpec.cxx | 10 ++-- .../src/FT0DigitizerSpec.cxx | 10 ++-- .../src/FV0DigitizerSpec.cxx | 10 ++-- .../src/HMPIDDigitizerSpec.cxx | 8 +-- .../src/ITS3DigitizerSpec.cxx | 10 ++-- .../src/ITSMFTDigitizerSpec.cxx | 10 ++-- .../src/MCHDigitizerSpec.cxx | 8 +-- .../DigitizerWorkflow/src/MCTruthReaderSpec.h | 4 +- .../src/MCTruthSourceSpec.cxx | 4 +- .../src/MCTruthWriterSpec.cxx | 2 +- .../src/MIDDigitizerSpec.cxx | 8 +-- .../src/PHOSDigitizerSpec.cxx | 8 +-- .../src/TOFDigitizerSpec.cxx | 12 ++--- .../src/TPCDigitizerSpec.cxx | 12 ++--- .../src/ZDCDigitizerSpec.cxx | 10 ++-- .../DataSampling/src/DataSamplingPolicy.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 1 - run/dpl_eventgen.cxx | 4 +- run/o2sim_hepmc_publisher.cxx | 4 +- run/o2sim_kine_publisher.cxx | 4 +- run/o2sim_mctracks_to_aod.cxx | 4 +- 196 files changed, 685 insertions(+), 688 deletions(-) diff --git a/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx b/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx index 2e60b70c9855a..4c1623188e55c 100644 --- a/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx +++ b/Detectors/AOD/src/AODMcProducerWorkflowSpec.cxx @@ -296,13 +296,13 @@ void AODMcProducerWorkflowDPL::run(ProcessingContext& pc) TString ROOTVersion = ROOT_RELEASE; mMetaDataKeys = {"DataType", "Run", "O2Version", "ROOTVersion", "RecoPassName", "AnchorProduction", "AnchorPassName", "LPMProductionTag"}; mMetaDataVals = {dataType, "3", O2Version, ROOTVersion, mRecoPass, mAnchorProd, mAnchorPass, mLPMProdTag}; - pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0, Lifetime::Timeframe}, mMetaDataKeys); - pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0, Lifetime::Timeframe}, mMetaDataVals); + pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0}, mMetaDataKeys); + 
pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0}, mMetaDataVals); mIsMDSent = true; } - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx index 323d4b9f7f41a..e97c90f14f64f 100644 --- a/Detectors/AOD/src/AODProducerWorkflowSpec.cxx +++ b/Detectors/AOD/src/AODProducerWorkflowSpec.cxx @@ -2241,11 +2241,11 @@ void AODProducerWorkflowDPL::run(ProcessingContext& pc) TString ROOTVersion = ROOT_RELEASE; mMetaDataKeys = {"DataType", "Run", "O2Version", "ROOTVersion", "RecoPassName", "AnchorProduction", "AnchorPassName", "LPMProductionTag"}; mMetaDataVals = {dataType, "3", O2Version, ROOTVersion, mRecoPass, mAnchorProd, mAnchorPass, mLPMProdTag}; - pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0, Lifetime::Timeframe}, mMetaDataKeys); - pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0, Lifetime::Timeframe}, mMetaDataVals); + pc.outputs().snapshot(Output{"AMD", "AODMetadataKeys", 0}, mMetaDataKeys); + pc.outputs().snapshot(Output{"AMD", "AODMetadataVals", 0}, mMetaDataVals); - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); mTimer.Stop(); } diff --git a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx index b1c61d25da9c8..f2a08e280f954 100644 --- a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx +++ b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVBadMapCalibDevice.cxx @@ -174,7 +174,7 @@ void CPVBadMapCalibDevice::sendOutput(DataAllocator& output) output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "CPV_BadChanMap", subSpec}, info); } - output.snapshot(o2::framework::Output{"CPV", "BADMAPCHANGE", 0, o2::framework::Lifetime::Timeframe}, mMapDiff); + output.snapshot(o2::framework::Output{"CPV", "BADMAPCHANGE", 0}, mMapDiff); } bool CPVBadMapCalibDevice::differFromCurrent() diff --git a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx index b5e98f00d021f..d7f187e3a88cc 100644 --- a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx +++ b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVGainCalibDevice.cxx @@ -143,7 +143,7 @@ void CPVGainCalibDevice::sendOutput(DataAllocator& output) fout.Close(); } // Anyway send change to QC - output.snapshot(o2::framework::Output{"CPV", "GAINDIFF", 0, o2::framework::Lifetime::Timeframe}, mGainRatio); + output.snapshot(o2::framework::Output{"CPV", "GAINDIFF", 0}, mGainRatio); } void CPVGainCalibDevice::calculateGains() diff --git a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx index 125e8e1bc8c40..5dd414ab7aeb8 100644 --- a/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx +++ b/Detectors/CPV/calib/CPVCalibWorkflow/src/CPVPedestalCalibDevice.cxx @@ -118,7 +118,7 @@ 
void CPVPedestalCalibDevice::sendOutput(DataAllocator& output) } // Anyway send change to QC LOG(info) << "[CPVPedestalCalibDevice - run] Writing "; - output.snapshot(o2::framework::Output{"CPV", "PEDDIFF", 0, o2::framework::Lifetime::Timeframe}, mPedDiff); + output.snapshot(o2::framework::Output{"CPV", "PEDDIFF", 0}, mPedDiff); // Write pedestal distributions to calculate bad map std::string filename = mPath + "CPVPedestals.root"; diff --git a/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx b/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx index f9d82eb23bf68..f9d0817325c36 100644 --- a/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx +++ b/Detectors/CPV/workflow/src/ClusterReaderSpec.cxx @@ -44,10 +44,10 @@ void ClusterReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mClusters.size() << " Clusters in " << mTRs.size() << " TriggerRecords at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "CLUSTERS", 0, Lifetime::Timeframe}, mClusters); - pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRIGRECS", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERS", 0}, mClusters); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRIGRECS", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRUEMC", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERTRUEMC", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/CPV/workflow/src/ClusterizerSpec.cxx b/Detectors/CPV/workflow/src/ClusterizerSpec.cxx index 87d0b86a98f5c..e004c3cec8949 100644 --- a/Detectors/CPV/workflow/src/ClusterizerSpec.cxx +++ b/Detectors/CPV/workflow/src/ClusterizerSpec.cxx @@ -49,14 +49,14 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) if (!digits.size()) { // nothing to process LOG(info) << "ClusterizerSpec::run() : no digits; moving on"; mOutputClusters.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0}, mOutputClusters); mOutputClusterTrigRecs.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0}, mOutputClusterTrigRecs); mCalibDigits.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0, o2::framework::Lifetime::Timeframe}, mCalibDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0}, mCalibDigits); if (mPropagateMC) { mOutputTruthCont.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } return; } @@ -73,12 +73,12 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) LOG(debug) << "CPVClusterizer::run() : Received " << digitsTR.size() << " TR, calling clusterizer ..."; - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERS", 0}, mOutputClusters); + 
ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRIGRECS", 0}, mOutputClusterTrigRecs); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } - ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0, o2::framework::Lifetime::Timeframe}, mCalibDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "CALIBDIGITS", 0}, mCalibDigits); LOG(info) << "Finished, wrote " << mOutputClusters.size() << " clusters, " << mOutputClusterTrigRecs.size() << "TR and " << mOutputTruthCont.getIndexedSize() << " Labels"; } o2::framework::DataProcessorSpec o2::cpv::reco_workflow::getClusterizerSpec(bool propagateMC) diff --git a/Detectors/CPV/workflow/src/DigitReaderSpec.cxx b/Detectors/CPV/workflow/src/DigitReaderSpec.cxx index ba74cb88b10d2..20fe497eb5d0c 100644 --- a/Detectors/CPV/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/CPV/workflow/src/DigitReaderSpec.cxx @@ -44,10 +44,10 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mDigits.size() << " Digits in " << mTRs.size() << " TriggerRecords at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx b/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx index 97f69fe928342..31ed720e66335 100644 --- a/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/CPV/workflow/src/EntropyEncoderSpec.cxx @@ -54,7 +54,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"CPV", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"CPV", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, clusters); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx b/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx index fcdc75c38c67f..766902a2fdc95 100644 --- a/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx +++ b/Detectors/CPV/workflow/src/RawToDigitConverterSpec.cxx @@ -136,11 +136,11 @@ void RawToDigitConverterSpec::run(framework::ProcessingContext& ctx) contDeadBeef == maxWarn ? fmt::format(". 
{} such inputs in row received, stopping reporting", contDeadBeef) : ""); } mOutputDigits.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0}, mOutputDigits); mOutputTriggerRecords.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0}, mOutputTriggerRecords); mOutputHWErrors.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0}, mOutputHWErrors); return; // empty TF, nothing to process } } @@ -272,11 +272,11 @@ void RawToDigitConverterSpec::run(framework::ProcessingContext& ctx) if (skipTF) { // Send no digits mOutputDigits.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0}, mOutputDigits); mOutputTriggerRecords.clear(); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0}, mOutputTriggerRecords); // Send errors - ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0}, mOutputHWErrors); return; } } @@ -305,9 +305,9 @@ void RawToDigitConverterSpec::run(framework::ProcessingContext& ctx) digitBuffer.clear(); LOG(info) << "[CPVRawToDigitConverter - run] Sending " << mOutputDigits.size() << " digits in " << mOutputTriggerRecords.size() << "trigger records."; - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); - ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITS", 0}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "DIGITTRIGREC", 0}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"CPV", "RAWHWERRORS", 0}, mOutputHWErrors); } //_____________________________________________________________________________ o2::framework::DataProcessorSpec o2::cpv::reco_workflow::getRawToDigitConverterSpec(bool askDISTSTF, bool isPedestal, bool useBadChannelMap, bool useGainCalibration) diff --git a/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx b/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx index 7ad4f821c12a9..ca16d10087fa6 100644 --- a/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/CTP/workflow/src/EntropyEncoderSpec.cxx @@ -64,7 +64,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) lumi = lumiPrev; } } - auto& buffer = pc.outputs().make>(Output{"CTP", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"CTP", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, lumi); pc.outputs().snapshot({"ctfrep", 0}, iosize); mTimer.Stop(); diff --git 
a/Detectors/CTP/workflow/src/RawDecoderSpec.cxx b/Detectors/CTP/workflow/src/RawDecoderSpec.cxx index 674c2f4208d8f..5f0cb3b759c48 100644 --- a/Detectors/CTP/workflow/src/RawDecoderSpec.cxx +++ b/Detectors/CTP/workflow/src/RawDecoderSpec.cxx @@ -80,10 +80,10 @@ void RawDecoderSpec::run(framework::ProcessingContext& ctx) auto& inputs = ctx.inputs(); auto dummyOutput = [&ctx, this]() { if (this->mDoDigits) { - ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, this->mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0}, this->mOutputDigits); } if (this->mDoLumi) { - ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0, o2::framework::Lifetime::Timeframe}, this->mOutputLumiInfo); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0}, this->mOutputLumiInfo); } }; // if we see requested data type input with 0xDEADBEEF subspec and 0 payload this means that the "delayed message" @@ -117,7 +117,7 @@ void RawDecoderSpec::run(framework::ProcessingContext& ctx) } if (mDoDigits) { LOG(info) << "[CTPRawToDigitConverter - run] Writing " << mOutputDigits.size() << " digits. IR rejected:" << mDecoder.getIRRejected() << " TCR rejected:" << mDecoder.getTCRRejected(); - ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mOutputDigits); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "DIGITS", 0}, mOutputDigits); } if (mDoLumi) { uint32_t tfCountsT = 0; @@ -157,7 +157,7 @@ void RawDecoderSpec::run(framework::ProcessingContext& ctx) mOutputLumiInfo.printInputs(); LOGP(info, "Orbit {}: {}/{} counts inp1/inp2 in {}/{} HBFs -> lumi_inp1 = {:.3e}+-{:.3e} lumi_inp2 = {:.3e}+-{:.3e}", mOutputLumiInfo.orbit, mCountsT, mCountsV, mNHBIntegratedT, mNHBIntegratedV, mOutputLumiInfo.getLumi(), mOutputLumiInfo.getLumiError(), mOutputLumiInfo.getLumiFV0(), mOutputLumiInfo.getLumiFV0Error()); } - ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0, o2::framework::Lifetime::Timeframe}, mOutputLumiInfo); + ctx.outputs().snapshot(o2::framework::Output{"CTP", "LUMI", 0}, mOutputLumiInfo); } } o2::framework::DataProcessorSpec o2::ctp::reco_workflow::getRawDecoderSpec(bool askDISTSTF, bool digits, bool lumi) diff --git a/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx b/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx index 59f608bef2efb..ecf1c2e19b660 100644 --- a/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx +++ b/Detectors/CTP/workflowIO/src/DigitReaderSpec.cxx @@ -76,8 +76,8 @@ void DigitReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "DigitReader pushes " << mDigits.size() << " digits at entry " << ent; - pc.outputs().snapshot(Output{"CTP", "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{"CTP", "LUMI", 0, Lifetime::Timeframe}, mLumi); + pc.outputs().snapshot(Output{"CTP", "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{"CTP", "LUMI", 0}, mLumi); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx b/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx index 05daf522f372a..783f6ae76e707 100644 --- a/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx +++ b/Detectors/DCS/testWorkflow/src/DCSDataReplaySpec.cxx @@ -88,7 +88,7 @@ void DCSDataReplayer::run(o2::framework::ProcessingContext& pc) LOG(info) << "***************** TF " << tfid << " has generated 
" << dpcoms.size() << " DPs"; - pc.outputs().snapshot(Output{"DCS", mDataDescription, 0, Lifetime::Timeframe}, dpcoms); + pc.outputs().snapshot(Output{"DCS", mDataDescription, 0}, dpcoms); mTFs++; } } // namespace diff --git a/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx b/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx index a82a5b4164e2a..899a4e110aea8 100644 --- a/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx +++ b/Detectors/DCS/testWorkflow/src/DCSRandomDataGeneratorSpec.cxx @@ -153,7 +153,7 @@ void DCSRandomDataGenerator::run(o2::framework::ProcessingContext& pc) auto timeNow = std::chrono::system_clock::now(); timingInfo.creation = std::chrono::duration_cast(timeNow.time_since_epoch()).count(); // in ms - pc.outputs().snapshot(Output{"DCS", mDataDescription, 0, Lifetime::Timeframe}, dpcoms); + pc.outputs().snapshot(Output{"DCS", mDataDescription, 0}, dpcoms); mTFs++; } } // namespace diff --git a/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h b/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h index f583653f1c3cb..468e66c43a0ab 100644 --- a/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h +++ b/Detectors/EMCAL/workflow/include/EMCALWorkflow/PublisherSpec.h @@ -60,23 +60,22 @@ framework::DataProcessorSpec getPublisherSpec(PublisherConf const& config, uint3 // a creator callback for the actual reader instance auto creator = [dto, tro, mco, subspec, propagateMC](const char* treename, const char* filename, int nofEvents, Reader::PublishingMode publishingMode, const char* branchname, const char* triggerbranchname, const char* mcbranchname) { - constexpr auto persistency = o2::framework::Lifetime::Timeframe; if (propagateMC) { return std::make_shared(treename, filename, nofEvents, publishingMode, - Output{mco.origin, mco.description, subspec, persistency}, + Output{mco.origin, mco.description, subspec}, mcbranchname, - Reader::BranchDefinition{Output{dto.origin, dto.description, subspec, persistency}, branchname}, - Reader::BranchDefinition{Output{tro.origin, tro.description, subspec, persistency}, triggerbranchname}); + Reader::BranchDefinition{Output{dto.origin, dto.description, subspec}, branchname}, + Reader::BranchDefinition{Output{tro.origin, tro.description, subspec}, triggerbranchname}); } else { return std::make_shared(treename, filename, nofEvents, publishingMode, - Reader::BranchDefinition{Output{dto.origin, dto.description, subspec, persistency}, branchname}, - Reader::BranchDefinition{Output{tro.origin, tro.description, subspec, persistency}, triggerbranchname}); + Reader::BranchDefinition{Output{dto.origin, dto.description, subspec}, branchname}, + Reader::BranchDefinition{Output{tro.origin, tro.description, subspec}, triggerbranchname}); } }; diff --git a/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx b/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx index 2c98a100ac17a..7b81fec681c14 100644 --- a/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx +++ b/Detectors/EMCAL/workflow/src/AnalysisClusterSpec.cxx @@ -148,7 +148,7 @@ void AnalysisClusterSpec::run(framework::ProcessingContext& ctx) } LOG(debug) << "[EMCALClusterizer - run] Writing " << mOutputAnaClusters->size() << " clusters ..."; - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "ANALYSISCLUSTERS", 0, o2::framework::Lifetime::Timeframe}, *mOutputAnaClusters); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "ANALYSISCLUSTERS", 0}, *mOutputAnaClusters); } 
o2::framework::DataProcessorSpec o2::emcal::reco_workflow::getAnalysisClusterSpec(bool useDigits) diff --git a/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx b/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx index 489546a86eeaf..5fa7353e907e2 100644 --- a/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx +++ b/Detectors/EMCAL/workflow/src/CellConverterSpec.cxx @@ -161,10 +161,10 @@ void CellConverterSpec::run(framework::ProcessingContext& ctx) ncellsTrigger = 0; } LOG(debug) << "[EMCALCellConverter - run] Writing " << mOutputCells.size() << " cells ..."; - ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLS", mSubspecificationOut, o2::framework::Lifetime::Timeframe}, mOutputCells); - ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSTRGR", mSubspecificationOut, o2::framework::Lifetime::Timeframe}, mOutputTriggers); + ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLS", mSubspecificationOut}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSTRGR", mSubspecificationOut}, mOutputTriggers); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSMCTR", mSubspecificationOut, o2::framework::Lifetime::Timeframe}, mOutputLabels); + ctx.outputs().snapshot(o2::framework::Output{"EMC", "CELLSMCTR", mSubspecificationOut}, mOutputLabels); } } diff --git a/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx b/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx index 493c205223161..3f19b49cd73bd 100644 --- a/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx +++ b/Detectors/EMCAL/workflow/src/CellRecalibratorSpec.cxx @@ -113,15 +113,15 @@ void CellRecalibratorSpec::run(framework::ProcessingContext& ctx) } // send recalibrated objects - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", mOutputSubspec, o2::framework::Lifetime::Timeframe}, outputcells); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", mOutputSubspec, o2::framework::Lifetime::Timeframe}, outputtriggers); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", mOutputSubspec}, outputcells); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", mOutputSubspec}, outputtriggers); if (outputMCLabels.has_value()) { LOG(info) << "Timeframe: " << inputMCLabels->getIndexedSize() << " label entries read, " << outputMCLabels->getIndexedSize() << " label entries kept"; - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSMCTR", mOutputSubspec, o2::framework::Lifetime::Timeframe}, outputMCLabels.value()); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSMCTR", mOutputSubspec}, outputMCLabels.value()); } if (mLEDsettings == LEDEventSettings::REDIRECT) { - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", 10, o2::framework::Lifetime::Timeframe}, ledcells); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", 10, o2::framework::Lifetime::Timeframe}, ledtriggers); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLS", 10}, ledcells); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CELLSTRGR", 10}, ledtriggers); } } diff --git a/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx b/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx index eaa00d6d3d0b6..f938d02ce7e3f 100644 --- a/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx +++ 
b/Detectors/EMCAL/workflow/src/ClusterizerSpec.cxx @@ -107,11 +107,11 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) currentStartIndices = mOutputCellDigitIndices->size(); } LOG(debug) << "[EMCALClusterizer - run] Writing " << mOutputClusters->size() << " clusters ..."; - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, *mOutputClusters); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICES", 0, o2::framework::Lifetime::Timeframe}, *mOutputCellDigitIndices); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERS", 0}, *mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICES", 0}, *mOutputCellDigitIndices); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERSTRGR", 0, o2::framework::Lifetime::Timeframe}, *mOutputTriggerRecord); - ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICESTRGR", 0, o2::framework::Lifetime::Timeframe}, *mOutputTriggerRecordIndices); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "CLUSTERSTRGR", 0}, *mOutputTriggerRecord); + ctx.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginEMC, "INDICESTRGR", 0}, *mOutputTriggerRecordIndices); mTimer.Stop(); } diff --git a/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx b/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx index 052e65af00bda..becf3c6cdcfc3 100644 --- a/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx +++ b/Detectors/EMCAL/workflow/src/EMCALDigitizerSpec.cxx @@ -140,15 +140,15 @@ void DigitizerSpec::run(framework::ProcessingContext& ctx) mDigitizer.finish(); // here we have all digits and we can send them to consumer (aka snapshot it onto output) - ctx.outputs().snapshot(Output{"EMC", "DIGITS", 0, Lifetime::Timeframe}, mDigitizer.getDigits()); - ctx.outputs().snapshot(Output{"EMC", "TRGRDIG", 0, Lifetime::Timeframe}, mDigitizer.getTriggerRecords()); + ctx.outputs().snapshot(Output{"EMC", "DIGITS", 0}, mDigitizer.getDigits()); + ctx.outputs().snapshot(Output{"EMC", "TRGRDIG", 0}, mDigitizer.getTriggerRecords()); if (ctx.outputs().isAllowed({"EMC", "DIGITSMCTR", 0})) { - ctx.outputs().snapshot(Output{"EMC", "DIGITSMCTR", 0, Lifetime::Timeframe}, mDigitizer.getMCLabels()); + ctx.outputs().snapshot(Output{"EMC", "DIGITSMCTR", 0}, mDigitizer.getMCLabels()); } // EMCAL is always a triggering detector const o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::TRIGGERING; LOG(info) << "EMCAL: Sending ROMode= " << roMode << " to GRPUpdater"; - ctx.outputs().snapshot(Output{"EMC", "ROMode", 0, Lifetime::Timeframe}, roMode); + ctx.outputs().snapshot(Output{"EMC", "ROMode", 0}, roMode); // Create CTP digits std::vector triggerinputs; for (auto& trg : mDigitizer.getTriggerRecords()) { @@ -161,7 +161,7 @@ void DigitizerSpec::run(framework::ProcessingContext& ctx) nextdigit.inputsMask.set(0); triggerinputs.push_back(nextdigit); } - ctx.outputs().snapshot(Output{"EMC", "TRIGGERINPUT", 0, Lifetime::Timeframe}, triggerinputs); + ctx.outputs().snapshot(Output{"EMC", "TRIGGERINPUT", 0}, triggerinputs); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx b/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx index da34fcfd8c534..773c4c65fc9fe 100644 --- a/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx +++ 
b/Detectors/EMCAL/workflow/src/EntropyEncoderSpec.cxx @@ -52,7 +52,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) auto triggers = pc.inputs().get>("triggers"); auto cells = pc.inputs().get>("cells"); - auto& buffer = pc.outputs().make>(Output{"EMC", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"EMC", "CTFDATA", 0}); if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } diff --git a/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx b/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx index 178f4c4418b40..ac09b842d9ab8 100644 --- a/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx +++ b/Detectors/EMCAL/workflow/src/RawToCellConverterSpec.cxx @@ -722,11 +722,11 @@ void RawToCellConverterSpec::handleMinorPageError(const RawReaderMemory::MinorEr void RawToCellConverterSpec::sendData(framework::ProcessingContext& ctx, const std::vector& cells, const std::vector& triggers, const std::vector& decodingErrors) const { constexpr auto originEMC = o2::header::gDataOriginEMC; - ctx.outputs().snapshot(framework::Output{originEMC, "CELLS", mSubspecification, framework::Lifetime::Timeframe}, cells); - ctx.outputs().snapshot(framework::Output{originEMC, "CELLSTRGR", mSubspecification, framework::Lifetime::Timeframe}, triggers); + ctx.outputs().snapshot(framework::Output{originEMC, "CELLS", mSubspecification}, cells); + ctx.outputs().snapshot(framework::Output{originEMC, "CELLSTRGR", mSubspecification}, triggers); if (mCreateRawDataErrors) { LOG(debug) << "Sending " << decodingErrors.size() << " decoding errors"; - ctx.outputs().snapshot(framework::Output{originEMC, "DECODERERR", mSubspecification, framework::Lifetime::Timeframe}, decodingErrors); + ctx.outputs().snapshot(framework::Output{originEMC, "DECODERERR", mSubspecification}, decodingErrors); } } diff --git a/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx b/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx index 152e9dd54119d..73987ce6d1c1b 100644 --- a/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx +++ b/Detectors/EMCAL/workflow/src/StandaloneAODProducerSpec.cxx @@ -136,8 +136,8 @@ void StandaloneAODProducerSpec::run(ProcessingContext& pc) } // end of event loop // std::cout << "Finished cell loop" << std::endl; - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); mTimer.Stop(); } diff --git a/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx b/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx index f878a933b2d8d..75e2cdbd9fce3 100644 --- a/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx +++ b/Detectors/EMCAL/workflow/src/emc-channel-data-producer.cxx @@ -160,7 +160,7 @@ DataProcessorSpec generateData(const std::string nameRootFile, const std::string o2::pmr::vector TriggerOutput; TriggerOutput.emplace_back(0, 0, 0, CellOutput.size()); - ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLS", 0, Lifetime::Timeframe}, std::move(CellOutput)); - ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLSTRGR", 0, Lifetime::Timeframe}, std::move(TriggerOutput)); + ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLS", 0}, std::move(CellOutput)); + ctx.outputs().adoptContainer(Output{o2::header::gDataOriginEMC, "CELLSTRGR", 
0}, std::move(TriggerOutput)); }}}; -} \ No newline at end of file +} diff --git a/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h b/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h index e9612ae8c8bda..2d92c907c1d79 100644 --- a/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h +++ b/Detectors/FIT/FDD/workflow/include/FDDWorkflow/RawReaderFDD.h @@ -65,8 +65,8 @@ class RawReaderFDD : public RawReaderFDDBaseNorm } void makeSnapshot(o2::framework::ProcessingContext& pc) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mVecDigits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mVecChannelData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSBC", 0}, mVecDigits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFDD, "DIGITSCH", 0}, mVecChannelData); } bool mDumpData; std::vector mVecDigits; diff --git a/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx b/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx index 441c679a327e3..628a2160c6d0c 100644 --- a/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/DigitReaderSpec.cxx @@ -81,18 +81,18 @@ void DigitReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "FDD DigitReader pushes " << digitsBC->size() << " digits"; - pc.outputs().snapshot(Output{mOrigin, "DIGITSBC", 0, Lifetime::Timeframe}, *digitsBC); - pc.outputs().snapshot(Output{mOrigin, "DIGITSCH", 0, Lifetime::Timeframe}, *digitsCh); + pc.outputs().snapshot(Output{mOrigin, "DIGITSBC", 0}, *digitsBC); + pc.outputs().snapshot(Output{mOrigin, "DIGITSCH", 0}, *digitsCh); if (mUseMC) { // TODO: To be replaced with sending ConstMCTruthContainer as soon as reco workflow supports it - pc.outputs().snapshot(Output{mOrigin, "TRIGGERINPUT", 0, Lifetime::Timeframe}, *digitsTrig); + pc.outputs().snapshot(Output{mOrigin, "TRIGGERINPUT", 0}, *digitsTrig); std::vector flatbuffer; mcTruthRootBuffer->copyandflatten(flatbuffer); o2::dataformats::MCTruthContainer mcTruth; mcTruth.restore_from(flatbuffer.data(), flatbuffer.size()); - pc.outputs().snapshot(Output{mOrigin, "DIGITLBL", 0, Lifetime::Timeframe}, mcTruth); + pc.outputs().snapshot(Output{mOrigin, "DIGITLBL", 0}, mcTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx b/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx index b79c033771743..abb2518e5ae0b 100644 --- a/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/EntropyEncoderSpec.cxx @@ -54,7 +54,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"FDD", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"FDD", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, channels); if (mSelIR) { mCTFCoder.getIRFramesSelector().clear(); diff --git a/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx b/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx index 3da258a672611..9b612c31d28e6 100644 --- a/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/RecPointReaderSpec.cxx @@ -51,8 +51,8 @@ void RecPointReader::run(ProcessingContext& pc) 
mTree->GetEntry(ent); LOG(info) << "FDD RecPointReader pushes " << mRecPoints->size() << " recpoints with " << mChannelData->size() << " channels at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, *mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, *mChannelData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, *mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, *mChannelData); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx b/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx index 8d76bc4ee45b5..b7a0b9876a2ee 100644 --- a/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx +++ b/Detectors/FIT/FDD/workflow/src/ReconstructorSpec.cxx @@ -54,8 +54,8 @@ void FDDReconstructorDPL::run(ProcessingContext& pc) } // do we ignore MC in this task? LOG(debug) << "FDD reconstruction pushes " << mRecPoints.size() << " RecPoints"; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, mRecChData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, mRecChData); } DataProcessorSpec getFDDReconstructorSpec(bool useMC) diff --git a/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h b/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h index e01fed6f20588..8a1a071a0a043 100644 --- a/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h +++ b/Detectors/FIT/FT0/calibration/testWorkflow/FT0CalibSlewingCollectorSpec.h @@ -88,8 +88,8 @@ class FT0CalibCollectorDevice : public o2::framework::Task auto entries = collectedInfo.size(); // this means that we are ready to send the output auto entriesPerChannel = mCollector->getEntriesPerChannel(); - output.snapshot(Output{o2::header::gDataOriginFT0, "COLLECTEDINFO", 0, Lifetime::Timeframe}, collectedInfo); - output.snapshot(Output{o2::header::gDataOriginFT0, "ENTRIESCH", 0, Lifetime::Timeframe}, entriesPerChannel); + output.snapshot(Output{o2::header::gDataOriginFT0, "COLLECTEDINFO", 0}, collectedInfo); + output.snapshot(Output{o2::header::gDataOriginFT0, "ENTRIESCH", 0}, entriesPerChannel); mCollector->initOutput(); // reset the output for the next round } } diff --git a/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx b/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx index 541f5a9827a4a..f1dd64a250a3b 100644 --- a/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx +++ b/Detectors/FIT/FT0/calibration/testWorkflow/FT0TimeSpectraProcessor-Workflow.cxx @@ -101,7 +101,7 @@ class FT0TimeSpectraProcessor final : public o2::framework::Task } } - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TIME_SPECTRA", 0, o2::framework::Lifetime::Timeframe}, timeSpectraInfoObject.getBase()); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TIME_SPECTRA", 0}, timeSpectraInfoObject.getBase()); } }; diff --git a/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx b/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx index e4f11d3f5bd1e..09586d778ac15 100644 --- a/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/DigitReaderSpec.cxx @@ -64,13 
+64,13 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "FT0DigitReader pushed " << channels.size() << " channels in " << digits.size() << " digits"; - pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0, Lifetime::Timeframe}, digits); - pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0, Lifetime::Timeframe}, channels); + pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0}, digits); + pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0}, channels); if (mUseMC) { - pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0}, labels); } if (mUseTrgInput) { - pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, trgInput); + pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0}, trgInput); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx b/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx index edf86b3d7da0d..81bdc2e729bb4 100644 --- a/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/EntropyEncoderSpec.cxx @@ -55,7 +55,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"FT0", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"FT0", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, channels); if (mSelIR) { mCTFCoder.getIRFramesSelector().clear(); diff --git a/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx b/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx index 69f21abc369c6..ba5ae4aa1356c 100644 --- a/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/RecPointReaderSpec.cxx @@ -49,8 +49,8 @@ void RecPointReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(debug) << "FT0 RecPointReader pushes " << mRecPoints->size() << " recpoints with " << mChannelData->size() << " channels at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, *mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, *mChannelData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, *mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, *mChannelData); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx b/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx index da5281d7e2ba1..40bc96ebca58e 100644 --- a/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/ReconstructionSpec.cxx @@ -74,8 +74,8 @@ void ReconstructionDPL::run(ProcessingContext& pc) mReco.processTF(digits, channels, mRecPoints, mRecChData); // do we ignore MC in this task? 
LOG(debug) << "FT0 reconstruction pushes " << mRecPoints.size() << " RecPoints"; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, mRecChData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, mRecChData); mTimer.Stop(); } diff --git a/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx b/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx index 08a6c4cbb2e24..a49bda2cec18b 100644 --- a/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/FIT/FV0/workflow/src/DigitReaderSpec.cxx @@ -65,13 +65,13 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "FV0DigitReader pushed " << channels.size() << " channels in " << digits.size() << " digits"; - pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0, Lifetime::Timeframe}, digits); - pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0, Lifetime::Timeframe}, channels); + pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0}, digits); + pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0}, channels); if (mUseMC) { - pc.outputs().snapshot(Output{"FV0", "DIGITSMCTR", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"FV0", "DIGITSMCTR", 0}, labels); } if (mUseTrgInput) { - pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, trgInput); + pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0}, trgInput); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx b/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx index a115712a0c980..a25c16a5d697c 100644 --- a/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/FIT/FV0/workflow/src/EntropyEncoderSpec.cxx @@ -55,7 +55,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"FV0", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"FV0", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, digits, channels); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx b/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx index 163beacf0c140..5997cac500ee6 100644 --- a/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx +++ b/Detectors/FIT/FV0/workflow/src/RecPointReaderSpec.cxx @@ -49,8 +49,8 @@ void RecPointReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(debug) << "FV0 RecPointReader pushes " << mRecPoints->size() << " recpoints with " << mChannelData->size() << " channels at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, *mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, *mChannelData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, *mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, *mChannelData); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx b/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx index d48b96fd4e626..520ac4dbaa563 100644 --- a/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx +++ 
b/Detectors/FIT/FV0/workflow/src/ReconstructionSpec.cxx @@ -68,8 +68,8 @@ void ReconstructionDPL::run(ProcessingContext& pc) } LOG(debug) << "FV0 reconstruction pushes " << mRecPoints.size() << " RecPoints"; - pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0, Lifetime::Timeframe}, mRecPoints); - pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0, Lifetime::Timeframe}, mRecChData); + pc.outputs().snapshot(Output{mOrigin, "RECPOINTS", 0}, mRecPoints); + pc.outputs().snapshot(Output{mOrigin, "RECCHDATA", 0}, mRecChData); mTimer.Stop(); } diff --git a/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h b/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h index 5c2cfce0a620c..cfe41e4bb474f 100644 --- a/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h +++ b/Detectors/FIT/workflow/include/FITWorkflow/RawReaderFIT.h @@ -165,28 +165,28 @@ class RawReaderFIT : public RawReaderType } void makeSnapshot(o2::framework::ProcessingContext& pc) const { - pc.outputs().snapshot(o2::framework::Output{mDataOrigin, Digit_t::sChannelNameDPL, 0, o2::framework::Lifetime::Timeframe}, mVecDigit); + pc.outputs().snapshot(o2::framework::Output{mDataOrigin, Digit_t::sChannelNameDPL, 0}, mVecDigit); if constexpr (sSubDigitExists) { std::apply([&](const auto&... subDigit) { - ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0, o2::framework::Lifetime::Timeframe}, subDigit)), ...); + ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0}, subDigit)), ...); }, mVecSubDigit); } if constexpr (sSingleSubDigitExists) { std::apply([&](const auto&... singleSubDigit) { - ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0, o2::framework::Lifetime::Timeframe}, singleSubDigit)), ...); + ((pc.outputs().snapshot(o2::framework::Output{mDataOrigin, (std::decay::type::value_type::sChannelNameDPL), 0}, singleSubDigit)), ...); }, mVecSingleSubDigit); } if constexpr (sUseTrgInput) { - pc.outputs().snapshot(o2::framework::Output{mDataOrigin, DetTrigInput_t::sChannelNameDPL, 0, o2::framework::Lifetime::Timeframe}, mVecTrgInput); + pc.outputs().snapshot(o2::framework::Output{mDataOrigin, DetTrigInput_t::sChannelNameDPL, 0}, mVecTrgInput); } - pc.outputs().snapshot(o2::framework::Output{mDataOrigin, "RawDataMetric", 0, o2::framework::Lifetime::Timeframe}, mVecRawDataMetric); + pc.outputs().snapshot(o2::framework::Output{mDataOrigin, "RawDataMetric", 0}, mVecRawDataMetric); } template auto& getRefVec(o2::framework::ProcessingContext& pc) { - auto& refVec = pc.outputs().make(o2::framework::Output{mDataOrigin, VecDigitType::value_type::sChannelNameDPL, 0, o2::framework::Lifetime::Timeframe}); + auto& refVec = pc.outputs().make(o2::framework::Output{mDataOrigin, VecDigitType::value_type::sChannelNameDPL, 0}); return refVec; } void enableEmptyTFprotection() @@ -204,4 +204,4 @@ class RawReaderFIT : public RawReaderType } // namespace fit } // namespace o2 -#endif \ No newline at end of file +#endif diff --git a/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx b/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx index d6004561f852d..c7260ae3c9815 100644 --- a/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx +++ b/Detectors/FOCAL/workflow/src/RawDecoderSpec.cxx @@ -274,10 +274,10 @@ void RawDecoderSpec::endOfStream(o2::framework::EndOfStreamContext& ec) void RawDecoderSpec::sendOutput(framework::ProcessingContext& ctx) { - 
ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PADLAYERS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputPadLayers); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELHITS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputPixelHits); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELCHIPS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputPixelChips); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "TRIGGERS", mOutputSubspec, framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PADLAYERS", mOutputSubspec}, mOutputPadLayers); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELHITS", mOutputSubspec}, mOutputPixelHits); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "PIXELCHIPS", mOutputSubspec}, mOutputPixelChips); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginFOC, "TRIGGERS", mOutputSubspec}, mOutputTriggerRecords); } void RawDecoderSpec::resetContainers() @@ -689,4 +689,4 @@ o2::framework::DataProcessorSpec o2::focal::reco_workflow::getRawDecoderSpec(boo {"displayInconsistent", o2::framework::VariantType::Bool, false, {"Display information about inconsistent timeframes"}}, {"pixeltype", o2::framework::VariantType::String, "OB", {"Pixel mapping type"}}, {"pixelmapping", o2::framework::VariantType::String, "default", {"File with pixel mapping"}}}}; -} \ No newline at end of file +} diff --git a/Detectors/Filtering/src/FilteredTFReaderSpec.cxx b/Detectors/Filtering/src/FilteredTFReaderSpec.cxx index 3165208aef246..22fe1370040db 100644 --- a/Detectors/Filtering/src/FilteredTFReaderSpec.cxx +++ b/Detectors/Filtering/src/FilteredTFReaderSpec.cxx @@ -45,15 +45,15 @@ void FilteredTFReader::run(ProcessingContext& pc) LOG(info) << "Pushing filtered TF: " << mFiltTF.header.asString(); // ITS - pc.outputs().snapshot(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, mFiltTF.ITSTrackROFs); - pc.outputs().snapshot(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}, mFiltTF.ITSTracks); - pc.outputs().snapshot(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}, mFiltTF.ITSClusterIndices); + pc.outputs().snapshot(Output{"ITS", "ITSTrackROF", 0}, mFiltTF.ITSTrackROFs); + pc.outputs().snapshot(Output{"ITS", "TRACKS", 0}, mFiltTF.ITSTracks); + pc.outputs().snapshot(Output{"ITS", "TRACKCLSID", 0}, mFiltTF.ITSClusterIndices); if (mUseMC) { - pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}, mFiltTF.ITSTrackMCTruth); + pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0}, mFiltTF.ITSTrackMCTruth); } - pc.outputs().snapshot(Output{"ITS", "CLUSTERSROF", 0, Lifetime::Timeframe}, mFiltTF.ITSClusterROFs); - pc.outputs().snapshot(Output{"ITS", "COMPCLUSTERS", 0, Lifetime::Timeframe}, mFiltTF.ITSClusters); - pc.outputs().snapshot(Output{"ITS", "PATTERNS", 0, Lifetime::Timeframe}, mFiltTF.ITSClusterPatterns); + pc.outputs().snapshot(Output{"ITS", "CLUSTERSROF", 0}, mFiltTF.ITSClusterROFs); + pc.outputs().snapshot(Output{"ITS", "COMPCLUSTERS", 0}, mFiltTF.ITSClusters); + pc.outputs().snapshot(Output{"ITS", "PATTERNS", 0}, mFiltTF.ITSClusterPatterns); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx b/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx index 34f445ed003da..17ad5f09027e1 100644 
--- a/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/helpers/src/NoInpDummyOutSpec.cxx @@ -47,7 +47,7 @@ void NoInpDummyOut::run(ProcessingContext& pc) { static int counter = 0; // send just once dummy output to trigger the ccdb-fetcher - pc.outputs().make>(Output{"GLO", "DUMMY_OUT", 0, Lifetime::Timeframe}); + pc.outputs().make>(Output{"GLO", "DUMMY_OUT", 0}); if (mLoops >= 0 && ++counter >= mLoops) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(framework::QuitRequest::Me); diff --git a/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx b/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx index 3653fd8bcc90b..c8a402a48d659 100644 --- a/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/qc/src/ITSTPCMatchingQCSpec.cxx @@ -71,7 +71,7 @@ void ITSTPCMatchingQCDevice::sendOutput(DataAllocator& output) TObjArray objar; mMatchITSTPCQC->getHistos(objar); - output.snapshot(Output{"GLO", "ITSTPCMATCHQC", 0, Lifetime::Sporadic}, objar); + output.snapshot(Output{"GLO", "ITSTPCMATCHQC", 0}, objar); TFile* f = new TFile(Form("outITSTPCmatchingQC.root"), "RECREATE"); objar.Write("ObjArray", TObject::kSingleKey); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx index 1fdc7b49616c3..11fa58333f89b 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/GlobalFwdTrackReaderSpec.cxx @@ -65,9 +65,9 @@ void GlobalFwdTrackReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " Global Forward tracks at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "GLFWD", 0, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{"GLO", "GLFWD", 0}, mTracks); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0}, mLabels); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx index 012542e448d19..c1810a1deb743 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/IRFrameReaderSpec.cxx @@ -63,7 +63,7 @@ void IRFrameReaderSpec::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "Pushing " << mIRF.size() << " IR-frames in at entry " << ent; - pc.outputs().snapshot(Output{mDataOrigin, "IRFRAMES", mSubSpec, Lifetime::Timeframe}, mIRF); + pc.outputs().snapshot(Output{mDataOrigin, "IRFRAMES", mSubSpec}, mIRF); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx index 08c5c5871db84..5f02beebd1746 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/MatchedMFTMCHReaderSpec.cxx @@ -65,7 +65,7 @@ void MatchMFTMCHReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " MFTMCH matches at entry " << ent; - 
pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0}, mTracks); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx index 06bdf8ff95a45..6e1aba8b2e1f3 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/PrimaryVertexReaderSpec.cxx @@ -84,12 +84,12 @@ void PrimaryVertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mVerticesPtr->size() << " vertices at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "PVTX", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0, Lifetime::Timeframe}, mPV2MatchIdx); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0, Lifetime::Timeframe}, mPV2MatchIdxRef); + pc.outputs().snapshot(Output{"GLO", "PVTX", 0}, mVertices); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0}, mPV2MatchIdx); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0}, mPV2MatchIdxRef); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0}, mLabels); } if (mVerbose) { diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx index e440a075d2558..4dd6324c83675 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/SecondaryVertexReaderSpec.cxx @@ -93,15 +93,15 @@ void SecondaryVertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mV0s.size() << " V0s and " << mCascs.size() << " cascades at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "V0S_IDX", 0, Lifetime::Timeframe}, mV0sIdx); - pc.outputs().snapshot(Output{"GLO", "V0S", 0, Lifetime::Timeframe}, mV0s); - pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0, Lifetime::Timeframe}, mPV2V0Ref); - pc.outputs().snapshot(Output{"GLO", "CASCS_IDX", 0, Lifetime::Timeframe}, mCascsIdx); - pc.outputs().snapshot(Output{"GLO", "CASCS", 0, Lifetime::Timeframe}, mCascs); - pc.outputs().snapshot(Output{"GLO", "PVTX_CASCREFS", 0, Lifetime::Timeframe}, mPV2CascRef); - pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY_IDX", 0, Lifetime::Timeframe}, m3BodysIdx); - pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY", 0, Lifetime::Timeframe}, m3Bodys); - pc.outputs().snapshot(Output{"GLO", "PVTX_3BODYREFS", 0, Lifetime::Timeframe}, mPV23BodyRef); + pc.outputs().snapshot(Output{"GLO", "V0S_IDX", 0}, mV0sIdx); + pc.outputs().snapshot(Output{"GLO", "V0S", 0}, mV0s); + pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0}, mPV2V0Ref); + pc.outputs().snapshot(Output{"GLO", "CASCS_IDX", 0}, mCascsIdx); + pc.outputs().snapshot(Output{"GLO", "CASCS", 0}, mCascs); + pc.outputs().snapshot(Output{"GLO", "PVTX_CASCREFS", 0}, mPV2CascRef); + pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY_IDX", 0}, m3BodysIdx); + pc.outputs().snapshot(Output{"GLO", "DECAYS3BODY", 0}, m3Bodys); + pc.outputs().snapshot(Output{"GLO", "PVTX_3BODYREFS", 0}, mPV23BodyRef); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git 
a/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx index 23eff571105e6..8c7f87a720925 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/StrangenessTrackingReaderSpec.cxx @@ -76,14 +76,14 @@ void StrangenessTrackingReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mStrangeTrack.size() << " strange tracks at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0, Lifetime::Timeframe}, mStrangeTrack); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0}, mStrangeTrack); if (mUseMC) { LOG(info) << "Pushing " << mStrangeTrackMC.size() << " strange tracks MC labels at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0, Lifetime::Timeframe}, mStrangeTrackMC); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0}, mStrangeTrackMC); } - // pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0, Lifetime::Timeframe}, mPV2V0Ref); + // pc.outputs().snapshot(Output{"GLO", "PVTX_V0REFS", 0}, mPV2V0Ref); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx index 90db5d08acc58..7e3cdffd84a6d 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/TrackCosmicsReaderSpec.cxx @@ -41,9 +41,9 @@ void TrackCosmicsReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " Cosmic Tracks at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0}, mTracks); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0}, mLabels); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx index 8ae55e478603e..c7fd0d543ecf6 100644 --- a/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/readers/src/TrackTPCITSReaderSpec.cxx @@ -68,12 +68,12 @@ void TrackTPCITSReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " TPC-ITS matches at entry " << ent; - pc.outputs().snapshot(Output{"GLO", "TPCITS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{"GLO", "TPCITSAB_REFS", 0, Lifetime::Timeframe}, mABTrkClusRefs); - pc.outputs().snapshot(Output{"GLO", "TPCITSAB_CLID", 0, Lifetime::Timeframe}, mABTrkClIDs); + pc.outputs().snapshot(Output{"GLO", "TPCITS", 0}, mTracks); + pc.outputs().snapshot(Output{"GLO", "TPCITSAB_REFS", 0}, mABTrkClusRefs); + pc.outputs().snapshot(Output{"GLO", "TPCITSAB_CLID", 0}, mABTrkClIDs); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "TPCITS_MC", 0, Lifetime::Timeframe}, mLabels); - pc.outputs().snapshot(Output{"GLO", "TPCITSAB_MC", 0, Lifetime::Timeframe}, mLabelsAB); + pc.outputs().snapshot(Output{"GLO", "TPCITS_MC", 0}, mLabels); + 
pc.outputs().snapshot(Output{"GLO", "TPCITSAB_MC", 0}, mLabelsAB); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx index d8242fc2ccd79..a4c9d302019a5 100644 --- a/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/CosmicsMatchingSpec.cxx @@ -99,9 +99,9 @@ void CosmicsMatchingSpec::run(ProcessingContext& pc) updateTimeDependentParams(pc); // Make sure this is called after recoData.collectData, which may load some conditions mMatching.process(recoData); - pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0, Lifetime::Timeframe}, mMatching.getCosmicTracks()); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC", 0}, mMatching.getCosmicTracks()); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0, Lifetime::Timeframe}, mMatching.getCosmicTracksLbl()); + pc.outputs().snapshot(Output{"GLO", "COSMICTRC_MC", 0}, mMatching.getCosmicTracksLbl()); } mTimer.Stop(); } diff --git a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx index 582f2be3c5f84..03dc823c62c42 100644 --- a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingSpec.cxx @@ -92,18 +92,18 @@ void GlobalFwdMatchingDPL::run(ProcessingContext& pc) const auto& matchingParam = GlobalFwdMatchingParam::Instance(); if (matchingParam.saveMode == kSaveTrainingData) { - pc.outputs().snapshot(Output{"GLO", "GLFWDMFT", 0, Lifetime::Timeframe}, mMatching.getMFTMatchingPlaneParams()); - pc.outputs().snapshot(Output{"GLO", "GLFWDMCH", 0, Lifetime::Timeframe}, mMatching.getMCHMatchingPlaneParams()); - pc.outputs().snapshot(Output{"GLO", "GLFWDINF", 0, Lifetime::Timeframe}, mMatching.getMFTMCHMatchInfo()); + pc.outputs().snapshot(Output{"GLO", "GLFWDMFT", 0}, mMatching.getMFTMatchingPlaneParams()); + pc.outputs().snapshot(Output{"GLO", "GLFWDMCH", 0}, mMatching.getMCHMatchingPlaneParams()); + pc.outputs().snapshot(Output{"GLO", "GLFWDINF", 0}, mMatching.getMFTMCHMatchInfo()); } else { - pc.outputs().snapshot(Output{"GLO", "GLFWD", 0, Lifetime::Timeframe}, mMatching.getMatchedFwdTracks()); + pc.outputs().snapshot(Output{"GLO", "GLFWD", 0}, mMatching.getMatchedFwdTracks()); } if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0, Lifetime::Timeframe}, mMatching.getMatchLabels()); + pc.outputs().snapshot(Output{"GLO", "GLFWD_MC", 0}, mMatching.getMatchLabels()); } if (mMatchRootOutput) { - pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0, Lifetime::Timeframe}, mMatching.getMFTMCHMatchInfo()); + pc.outputs().snapshot(Output{"GLO", "MTC_MFTMCH", 0}, mMatching.getMFTMCHMatchInfo()); } mTimer.Stop(); } diff --git a/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx index f8666b5561610..b0562fd4f3a1e 100644 --- a/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/HMPMatcherSpec.cxx @@ -128,9 +128,9 @@ void HMPMatcherSpec::run(ProcessingContext& pc) mMatcher.run(recoData); - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); + pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0}, 
mMatcher.getMatchedTrackVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0, Lifetime::Timeframe}, mMatcher.getMatchedHMPLabelsVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); + pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0}, mMatcher.getMatchedHMPLabelsVector(o2::globaltracking::MatchHMP::trackType::CONSTR)); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx index e70122e7a01b5..ff45790a95921 100644 --- a/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/PrimaryVertexingSpec.cxx @@ -152,12 +152,12 @@ void PrimaryVertexingSpec::run(ProcessingContext& pc) mVertexer.process(tracks, gids, ft0Data, vertices, vertexTrackIDs, v2tRefs, tracksMCInfo, lblVtx); } - pc.outputs().snapshot(Output{"GLO", "PVTX", 0, Lifetime::Timeframe}, vertices); - pc.outputs().snapshot(Output{"GLO", "PVTX_CONTIDREFS", 0, Lifetime::Timeframe}, v2tRefs); - pc.outputs().snapshot(Output{"GLO", "PVTX_CONTID", 0, Lifetime::Timeframe}, vertexTrackIDs); + pc.outputs().snapshot(Output{"GLO", "PVTX", 0}, vertices); + pc.outputs().snapshot(Output{"GLO", "PVTX_CONTIDREFS", 0}, v2tRefs); + pc.outputs().snapshot(Output{"GLO", "PVTX_CONTID", 0}, vertexTrackIDs); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0, Lifetime::Timeframe}, lblVtx); + pc.outputs().snapshot(Output{"GLO", "PVTX_MCTR", 0}, lblVtx); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx index 46136ac6e2397..37b35df06ea72 100644 --- a/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/StrangenessTrackingSpec.cxx @@ -75,11 +75,11 @@ void StrangenessTrackerSpec::run(framework::ProcessingContext& pc) mTracker.loadData(recoData); mTracker.prepareITStracks(); mTracker.process(); - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0, Lifetime::Timeframe}, mTracker.getStrangeTrackVec()); - pc.outputs().snapshot(Output{"GLO", "CLUSUPDATES", 0, Lifetime::Timeframe}, mTracker.getClusAttachments()); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS", 0}, mTracker.getStrangeTrackVec()); + pc.outputs().snapshot(Output{"GLO", "CLUSUPDATES", 0}, mTracker.getClusAttachments()); if (mUseMC) { - pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0, Lifetime::Timeframe}, mTracker.getStrangeTrackLabels()); + pc.outputs().snapshot(Output{"GLO", "STRANGETRACKS_MC", 0}, mTracker.getStrangeTrackLabels()); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx index 134ffbb75f341..ca4d3e2fc5dc8 100644 --- a/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/TOFMatcherSpec.cxx @@ -171,8 +171,8 @@ void TOFMatcherSpec::run(ProcessingContext& pc) static pmr::vector dummyMCLab; if (isTPCused) { - auto& mtcInfo = pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MTC_TPC", ss, Lifetime::Timeframe}); - auto& mclabels = mUseMC ? pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MCMTC_TPC", ss, Lifetime::Timeframe}) : dummyMCLab; + auto& mtcInfo = pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MTC_TPC", ss}); + auto& mclabels = mUseMC ?
pc.outputs().make>(Output{o2::header::gDataOriginTOF, "MCMTC_TPC", ss}) : dummyMCLab; auto& tracksTPCTOF = pc.outputs().make>(OutputRef{"tpctofTracks", ss}); auto nmatch = mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPC).size(); LOG(debug) << (mDoTPCRefit ? "Refitting " : "Shifting Z for ") << nmatch << " matched TPC tracks with TOF time info"; @@ -180,48 +180,48 @@ void TOFMatcherSpec::run(ProcessingContext& pc) } if (isITSTPCused) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPC", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPC", 0}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPC", 0, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPC", 0}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPC)); } } if (isTPCTRDused) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_TPCTRD", ss, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_TPCTRD", ss}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_TPCTRD", ss, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_TPCTRD", ss}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::TPCTRD)); } } if (isITSTPCTRDused) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPCTRD", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPCTRD", 0}, mMatcher.getMatchedTrackVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPCTRD", 0, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMTC_ITSTPCTRD", 0}, mMatcher.getMatchedTOFLabelsVector(o2::dataformats::MatchInfoTOFReco::TrackType::ITSTPCTRD)); } } // TODO: TRD-matched tracks - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CALIBDATA", 0, Lifetime::Timeframe}, mMatcher.getCalibVector()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CALIBDATA", 0}, mMatcher.getCalibVector()); if (mPushMatchable) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_0", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(0)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_1", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(1)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_2", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(2)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_3", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(3)); - 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_4", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(4)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_5", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(5)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_6", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(6)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_7", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(7)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_8", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(8)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_9", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(9)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_10", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(10)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_11", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(11)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_12", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(12)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_13", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(13)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_14", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(14)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_15", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(15)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_16", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(16)); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_17", 0, Lifetime::Timeframe}, mMatcher.getMatchedTracksPair(17)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_0", 0}, mMatcher.getMatchedTracksPair(0)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_1", 0}, mMatcher.getMatchedTracksPair(1)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_2", 0}, mMatcher.getMatchedTracksPair(2)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_3", 0}, mMatcher.getMatchedTracksPair(3)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_4", 0}, mMatcher.getMatchedTracksPair(4)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_5", 0}, mMatcher.getMatchedTracksPair(5)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_6", 0}, mMatcher.getMatchedTracksPair(6)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_7", 0}, mMatcher.getMatchedTracksPair(7)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_8", 0}, mMatcher.getMatchedTracksPair(8)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_9", 0}, mMatcher.getMatchedTracksPair(9)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_10", 0}, mMatcher.getMatchedTracksPair(10)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_11", 0}, mMatcher.getMatchedTracksPair(11)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_12", 0}, mMatcher.getMatchedTracksPair(12)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_13", 0}, mMatcher.getMatchedTracksPair(13)); + 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_14", 0}, mMatcher.getMatchedTracksPair(14)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_15", 0}, mMatcher.getMatchedTracksPair(15)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_16", 0}, mMatcher.getMatchedTracksPair(16)); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MATCHABLES_17", 0}, mMatcher.getMatchedTracksPair(17)); } mTimer.Stop(); diff --git a/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx index 1a061ee9707f5..aebcd96c4faa7 100644 --- a/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/TPCITSMatchingSpec.cxx @@ -103,12 +103,12 @@ void TPCITSMatchingDPL::run(ProcessingContext& pc) static pmr::vector dummyMCLab, dummyMCLabAB; static pmr::vector> dummyCalib; - auto& matchedTracks = pc.outputs().make>(Output{"GLO", "TPCITS", 0, Lifetime::Timeframe}); - auto& ABTrackletRefs = pc.outputs().make>(Output{"GLO", "TPCITSAB_REFS", 0, Lifetime::Timeframe}); - auto& ABTrackletClusterIDs = pc.outputs().make>(Output{"GLO", "TPCITSAB_CLID", 0, Lifetime::Timeframe}); - auto& matchLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITS_MC", 0, Lifetime::Timeframe}) : dummyMCLab; - auto& ABTrackletLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITSAB_MC", 0, Lifetime::Timeframe}) : dummyMCLabAB; - auto& calib = mCalibMode ? pc.outputs().make>>(Output{"GLO", "TPCITS_VDTGL", 0, Lifetime::Timeframe}) : dummyCalib; + auto& matchedTracks = pc.outputs().make>(Output{"GLO", "TPCITS", 0}); + auto& ABTrackletRefs = pc.outputs().make>(Output{"GLO", "TPCITSAB_REFS", 0}); + auto& ABTrackletClusterIDs = pc.outputs().make>(Output{"GLO", "TPCITSAB_CLID", 0}); + auto& matchLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITS_MC", 0}) : dummyMCLab; + auto& ABTrackletLabels = mUseMC ? pc.outputs().make>(Output{"GLO", "TPCITSAB_MC", 0}) : dummyMCLabAB; + auto& calib = mCalibMode ? 
pc.outputs().make>>(Output{"GLO", "TPCITS_VDTGL", 0}) : dummyCalib; mMatching.run(recoData, matchedTracks, ABTrackletRefs, ABTrackletClusterIDs, matchLabels, ABTrackletLabels, calib); diff --git a/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx index 281fd06dea7eb..e5d2afb1ee700 100644 --- a/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/VertexTrackMatcherSpec.cxx @@ -78,8 +78,8 @@ void VertexTrackMatcherSpec::run(ProcessingContext& pc) mMatcher.process(recoData, trackIndex, vtxRefs); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0, Lifetime::Timeframe}, trackIndex); - pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0, Lifetime::Timeframe}, vtxRefs); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTC", 0}, trackIndex); + pc.outputs().snapshot(Output{"GLO", "PVTX_TRMTCREFS", 0}, vtxRefs); mTimer.Stop(); LOG(info) << "Made " << trackIndex.size() << " track associations for " << recoData.getPrimaryVertices().size() diff --git a/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx b/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx index ed06e014184b6..8a962686d743e 100644 --- a/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx +++ b/Detectors/GlobalTrackingWorkflow/study/src/TPCDataFilter.cxx @@ -107,10 +107,10 @@ void TPCDataFilter::run(ProcessingContext& pc) void TPCDataFilter::sendOutput(ProcessingContext& pc) { - pc.outputs().snapshot(Output{"TPC", "TRACKSF", 0, Lifetime::Timeframe}, mTracksFiltered); - pc.outputs().snapshot(Output{"TPC", "CLUSREFSF", 0, Lifetime::Timeframe}, mTrackClusIdxFiltered); + pc.outputs().snapshot(Output{"TPC", "TRACKSF", 0}, mTracksFiltered); + pc.outputs().snapshot(Output{"TPC", "CLUSREFSF", 0}, mTrackClusIdxFiltered); if (mUseMC) { - pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBLF", 0, Lifetime::Timeframe}, mTPCTrkLabelsFiltered); + pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBLF", 0}, mTPCTrkLabelsFiltered); } o2::tpc::TPCSectorHeader clusterOutputSectorHeader{0}; @@ -118,7 +118,7 @@ void TPCDataFilter::sendOutput(ProcessingContext& pc) for (int i = 0; i < o2::tpc::constants::MAXSECTOR; i++) { clusterOutputSectorHeader.sectorBits = (1ul << i); o2::header::DataHeader::SubSpecificationType subspec = i; - char* buffer = pc.outputs().make({o2::header::gDataOriginTPC, "CLUSTERNATIVEF", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, + char* buffer = pc.outputs().make({o2::header::gDataOriginTPC, "CLUSTERNATIVEF", subspec, {clusterOutputSectorHeader}}, mClusFiltered.nClustersSector[i] * sizeof(*mClusFiltered.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)) .data(); o2::tpc::ClusterCountIndex* outIndex = reinterpret_cast(buffer); @@ -138,7 +138,7 @@ void TPCDataFilter::sendOutput(ProcessingContext& pc) } o2::dataformats::ConstMCLabelContainer contflat; cont.flatten_to(contflat); - pc.outputs().snapshot({o2::header::gDataOriginTPC, "CLNATIVEMCLBLF", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, contflat); + pc.outputs().snapshot({o2::header::gDataOriginTPC, "CLNATIVEMCLBLF", subspec, {clusterOutputSectorHeader}}, contflat); } } } diff --git a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx index 69554d3d495a4..89e3ae19cf65b 100644 --- a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx +++ 
b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCInterpolationSpec.cxx @@ -133,18 +133,18 @@ void TPCInterpolationDPL::run(ProcessingContext& pc) LOGF(info, "TPC interpolation timing: Cpu: %.3e Real: %.3e s", mTimer.CpuTime(), mTimer.RealTime()); if (SpacePointsCalibConfParam::Instance().writeUnfiltered) { // these are the residuals and tracks before outlier rejection; they are not used in production - pc.outputs().snapshot(Output{"GLO", "TPCINT_RES", 0, Lifetime::Timeframe}, mInterpolation.getClusterResidualsUnfiltered()); + pc.outputs().snapshot(Output{"GLO", "TPCINT_RES", 0}, mInterpolation.getClusterResidualsUnfiltered()); if (mSendTrackData) { - pc.outputs().snapshot(Output{"GLO", "TPCINT_TRK", 0, Lifetime::Timeframe}, mInterpolation.getReferenceTracksUnfiltered()); + pc.outputs().snapshot(Output{"GLO", "TPCINT_TRK", 0}, mInterpolation.getReferenceTracksUnfiltered()); } } - pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0, Lifetime::Timeframe}, mInterpolation.getClusterResiduals()); - pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0, Lifetime::Timeframe}, mInterpolation.getTrackDataCompact()); + pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0}, mInterpolation.getClusterResiduals()); + pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0}, mInterpolation.getTrackDataCompact()); if (mSendTrackData) { - pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0, Lifetime::Timeframe}, mInterpolation.getReferenceTracks()); + pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0}, mInterpolation.getReferenceTracks()); } if (mDebugOutput) { - pc.outputs().snapshot(Output{"GLO", "TRKDATAEXT", 0, Lifetime::Timeframe}, mInterpolation.getTrackDataExtended()); + pc.outputs().snapshot(Output{"GLO", "TRKDATAEXT", 0}, mInterpolation.getTrackDataExtended()); } mInterpolation.reset(); } diff --git a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx index af50b32bc8bba..55da5a5e71e44 100644 --- a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCUnbinnedResidualReaderSpec.cxx @@ -56,11 +56,11 @@ void TPCUnbinnedResidualReader::run(ProcessingContext& pc) assert(currEntry < mTreeIn->GetEntries()); // this should not happen mTreeIn->GetEntry(currEntry); LOG(info) << "Pushing " << mUnbinnedResid.size() << " unbinned residuals at entry " << currEntry; - pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0, Lifetime::Timeframe}, mUnbinnedResid); - pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0, Lifetime::Timeframe}, mTrackDataCompact); + pc.outputs().snapshot(Output{"GLO", "UNBINNEDRES", 0}, mUnbinnedResid); + pc.outputs().snapshot(Output{"GLO", "TRKREFS", 0}, mTrackDataCompact); if (mTrackInput) { LOG(info) << "Pushing " << mTrackData.size() << " reference tracks for these residuals"; - pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0, Lifetime::Timeframe}, mTrackData); + pc.outputs().snapshot(Output{"GLO", "TRKDATA", 0}, mTrackData); } if (mTreeIn->GetReadEntry() + 1 >= mTreeIn->GetEntries()) { diff --git a/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx b/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx index e71cd42845dd4..9ac5074acb505 100644 --- a/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx +++ b/Detectors/HMPID/workflow/src/ClustersReaderSpec.cxx @@ -71,8 +71,8 @@ void ClusterReaderTask::run(ProcessingContext& pc) 
assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); - pc.outputs().snapshot(Output{"HMP", "CLUSTERS", 0, Lifetime::Timeframe}, mClustersFromFile); - pc.outputs().snapshot(Output{"HMP", "INTRECORDS1", 0, Lifetime::Timeframe}, mClusterTriggersFromFile); + pc.outputs().snapshot(Output{"HMP", "CLUSTERS", 0}, mClustersFromFile); + pc.outputs().snapshot(Output{"HMP", "INTRECORDS1", 0}, mClusterTriggersFromFile); mClustersReceived += mClustersFromFile.size(); LOG(info) << "[HMPID ClusterReader - run() ] clusters = " << mClustersFromFile.size(); diff --git a/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx b/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx index 3a3b5a6338d81..1a6509fc75654 100644 --- a/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx +++ b/Detectors/HMPID/workflow/src/DataDecoderSpec.cxx @@ -87,8 +87,8 @@ void DataDecoderTask::run(framework::ProcessingContext& pc) // decodeReadout(pc); // decodeRawFile(pc); - pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mDeco->mDigits); - pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0, o2::framework::Lifetime::Timeframe}, mDeco->mIntReco); + pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0}, mDeco->mDigits); + pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0}, mDeco->mIntReco); LOG(debug) << "Writing Digitis=" << mDeco->mDigits.size() << "/" << mTotalDigits << " Frame=" << mTotalFrames << " IntRec " << mDeco->mIntReco; mExTimer.elapseMes("Decoding... Digits decoded = " + std::to_string(mTotalDigits) + " Frames received = " + std::to_string(mTotalFrames)); diff --git a/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx b/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx index 61051ab48577e..92a966938786a 100644 --- a/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx +++ b/Detectors/HMPID/workflow/src/DataDecoderSpec2.cxx @@ -93,8 +93,8 @@ void DataDecoderTask2::run(framework::ProcessingContext& pc) // Output the Digits/Triggers vector orderTriggers(); - pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0, o2::framework::Lifetime::Timeframe}, mDeco->mDigits); - pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0, o2::framework::Lifetime::Timeframe}, mTriggers); + pc.outputs().snapshot(o2::framework::Output{"HMP", "DIGITS", 0}, mDeco->mDigits); + pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS", 0}, mTriggers); mExTimer.elapseMes("Decoding... 
Digits decoded = " + std::to_string(mTotalDigits) + " Frames received = " + std::to_string(mTotalFrames)); return; diff --git a/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx b/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx index ed83db476b01a..88f6df2bce2e7 100644 --- a/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx +++ b/Detectors/HMPID/workflow/src/DigitsReaderSpec.cxx @@ -115,8 +115,8 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); - pc.outputs().snapshot(Output{"HMP", "DIGITS", 0, Lifetime::Timeframe}, mDigitsFromFile); - pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0, Lifetime::Timeframe}, mTriggersFromFile); + pc.outputs().snapshot(Output{"HMP", "DIGITS", 0}, mDigitsFromFile); + pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0}, mTriggersFromFile); mDigitsReceived += mDigitsFromFile.size(); LOG(info) << "[HMPID DigitsReader - run() ] digits = " << mDigitsFromFile.size(); diff --git a/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx b/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx index 377bc0f7af45e..81c04a9875c51 100644 --- a/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx +++ b/Detectors/HMPID/workflow/src/DigitsToClustersSpec.cxx @@ -116,8 +116,8 @@ void DigitsToClustersTask::run(framework::ProcessingContext& pc) mDigitsReceived += digits.size(); mClustersReceived += clusters.size(); - pc.outputs().snapshot(o2::framework::Output{"HMP", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, clusters); - pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS1", 0, o2::framework::Lifetime::Timeframe}, clusterTriggers); + pc.outputs().snapshot(o2::framework::Output{"HMP", "CLUSTERS", 0}, clusters); + pc.outputs().snapshot(o2::framework::Output{"HMP", "INTRECORDS1", 0}, clusterTriggers); mExTimer.elapseMes("Clusterization of Digits received = " + std::to_string(mDigitsReceived)); mExTimer.elapseMes("Clusterization of Clusters received = " + std::to_string(mClustersReceived)); diff --git a/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx b/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx index 15bd0987a0ccc..95723f42d0fd6 100644 --- a/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/HMPID/workflow/src/EntropyEncoderSpec.cxx @@ -72,7 +72,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"HMP", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"HMP", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, digits); pc.outputs().snapshot({"ctfrep", 0}, iosize); diff --git a/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx b/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx index e466a8a4e00c4..3ddb71e282a96 100644 --- a/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx +++ b/Detectors/HMPID/workflow/src/HMPMatchedReaderSpec.cxx @@ -62,9 +62,9 @@ void HMPMatchedReader::run(ProcessingContext& pc) mTree->GetEntry(currEntry); LOG(debug) << "Pushing " << mMatches.size() << " HMP matchings at entry " << currEntry; - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0, Lifetime::Timeframe}, mMatches); + pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MATCHES", 0}, mMatches); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0, Lifetime::Timeframe}, mLabelHMP); + 
pc.outputs().snapshot(Output{o2::header::gDataOriginHMP, "MCLABELS", 0}, mLabelHMP); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx b/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx index d6ef8c947ed49..964f342c58b15 100644 --- a/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx +++ b/Detectors/ITSMFT/ITS/QC/TestDataReaderWorkflow/src/TestDataReader.cxx @@ -139,14 +139,14 @@ void TestDataReader::run(ProcessingContext& pc) if (mDiffFolderName.size() == 0) { cout << "No New Run -- No Need to Reset" << endl; mResetCommand = 0; - pc.outputs().snapshot(Output{"ITS", "TEST", 0, Lifetime::Timeframe}, mResetCommand); + pc.outputs().snapshot(Output{"ITS", "TEST", 0}, mResetCommand); } // New folders found, send the reset signal and reload configuration if (mDiffFolderName.size() > 0) { cout << "New Run Started -- Reset All Histograms" << endl; mResetCommand = 1; - pc.outputs().snapshot(Output{"ITS", "TEST", 0, Lifetime::Timeframe}, mResetCommand); + pc.outputs().snapshot(Output{"ITS", "TEST", 0}, mResetCommand); for (int i = 0; i < sNError; i++) { mErrors[i] = 0; } @@ -264,11 +264,11 @@ void TestDataReader::run(ProcessingContext& pc) mErrorsVecTest.push_back(mErrors); mFileDone = 1; mFileInfo = mFileDone + mFileRemain * 10; - pc.outputs().snapshot(Output{"ITS", "Run", 0, Lifetime::Timeframe}, mRunNumber); - pc.outputs().snapshot(Output{"ITS", "File", 0, Lifetime::Timeframe}, mFileID); - pc.outputs().snapshot(Output{"ITS", "Error", 0, Lifetime::Timeframe}, mErrorsVecTest[0]); - pc.outputs().snapshot(Output{"ITS", "Finish", 0, Lifetime::Timeframe}, mFileInfo); - pc.outputs().snapshot(Output{"ITS", "DIGITS", 0, Lifetime::Timeframe}, mMultiDigitsTest); + pc.outputs().snapshot(Output{"ITS", "Run", 0}, mRunNumber); + pc.outputs().snapshot(Output{"ITS", "File", 0}, mFileID); + pc.outputs().snapshot(Output{"ITS", "Error", 0}, mErrorsVecTest[0]); + pc.outputs().snapshot(Output{"ITS", "Finish", 0}, mFileInfo); + pc.outputs().snapshot(Output{"ITS", "DIGITS", 0}, mMultiDigitsTest); mNewFileInj = 0; mErrorsVecTest.clear(); mDigitsTest.clear(); @@ -422,10 +422,10 @@ void TestDataReader::run(ProcessingContext& pc) cout << "RunIDS = " << mRunNumber << " FileIDS = " << mFileID << endl; - pc.outputs().snapshot(Output{"ITS", "Run", 0, Lifetime::Timeframe}, mRunNumber); - pc.outputs().snapshot(Output{"ITS", "File", 0, Lifetime::Timeframe}, mFileID); + pc.outputs().snapshot(Output{"ITS", "Run", 0}, mRunNumber); + pc.outputs().snapshot(Output{"ITS", "File", 0}, mFileID); - pc.outputs().snapshot(Output{"ITS", "Error", 0, Lifetime::Timeframe}, mErrorsVec[j]); + pc.outputs().snapshot(Output{"ITS", "Error", 0}, mErrorsVec[j]); mIndexPushEx = mIndexPush + mNDigits[j]; LOG(debug) << "IndexPushEx = " << mIndexPushEx << " mDigits.size() " << mDigits.size(); if (mIndexPushEx > mDigits.size() - 5) { @@ -436,11 +436,11 @@ void TestDataReader::run(ProcessingContext& pc) mFileInfo = mFileDone + mFileRemain * 10; - pc.outputs().snapshot(Output{"ITS", "Finish", 0, Lifetime::Timeframe}, mFileInfo); + pc.outputs().snapshot(Output{"ITS", "Finish", 0}, mFileInfo); LOG(debug) << "mIndexPush = " << mIndexPush << " Chip ID Pushing " << mDigits[mIndexPush].getChipIndex(); - pc.outputs().snapshot(Output{"ITS", "DIGITS", 0, Lifetime::Timeframe}, mMultiDigits); + pc.outputs().snapshot(Output{"ITS", "DIGITS", 0}, mMultiDigits); mMultiDigits.clear(); mIndexPush = mIndexPush + mNDigits[j]; @@ -453,7 +453,7 @@ void 
TestDataReader::run(ProcessingContext& pc) // << "mIndexPush = " << mIndexPush << " mDigits.size() = " << mDigits.size(); // while (mIndexPush < mDigits.size()) { // LOG(debug) << "mDigits.size() = " << mDigits.size(); - // pc.outputs().snapshot(Output{"ITS", "DIGITS", 0, Lifetime::Timeframe}, mDigits[mIndexPush++]); + // pc.outputs().snapshot(Output{"ITS", "DIGITS", 0}, mDigits[mIndexPush++]); // if (mIndexPush % 100000 == 0) // LOG(debug) << "mIndexPush = " << mIndexPush << " Chip ID Pushing " << mDigits[mIndexPush].getChipIndex(); // } @@ -483,7 +483,7 @@ void TestDataReader::run(ProcessingContext& pc) j = 0; mNDigits.clear(); mFileDone = 1; - pc.outputs().snapshot(Output{"TST", "Finish", 0, Lifetime::Timeframe}, mFileDone); + pc.outputs().snapshot(Output{"TST", "Finish", 0}, mFileDone); PercentDone = 0; mErrorsVec.clear(); } diff --git a/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx index 89596e8357b74..71987d0480566 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/ClustererSpec.cxx @@ -88,17 +88,17 @@ void ClustererDPL::run(ProcessingContext& pc) clusterLabels = std::make_unique>(); } mClusterer->process(mNThreads, reader, &clusCompVec, &clusPattVec, &clusROFVec, clusterLabels.get()); - pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); if (mUseMC) { - pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, *clusterLabels.get()); // at the moment requires snapshot + pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0}, *clusterLabels.get()); // at the moment requires snapshot std::vector clusterMC2ROframes(mc2rofs.size()); for (int i = mc2rofs.size(); i--;) { clusterMC2ROframes[i] = mc2rofs[i]; // Simply, replicate it from digits ? 
} - pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, clusterMC2ROframes); + pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0}, clusterMC2ROframes); } // TODO: in principle, after masking "overflow" pixels the MC2ROFRecord maxROF supposed to change, nominally to minROF diff --git a/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx index e3f585c81cd97..e4c9d43541e1f 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/CookedTrackerSpec.cxx @@ -95,7 +95,7 @@ void CookedTrackerDPL::run(ProcessingContext& pc) physTriggers = pc.inputs().get>("phystrig"); } - auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); + auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0}, rofsinput.begin(), rofsinput.end()); std::unique_ptr> labels; gsl::span mc2rofs; @@ -116,11 +116,11 @@ void CookedTrackerDPL::run(ProcessingContext& pc) o2::its::ROframe event(0, 7); mVertexerPtr->adoptTimeFrame(mTimeFrame); - auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}); - auto& tracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}); - auto& clusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}); - auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0}); + auto& tracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0}); + auto& clusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0}); + auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0}); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB int nBCPerTF = mTracker.getContinuousMode() ? 
alpParams.roFrameLengthInBC : alpParams.roFrameLengthTrig; @@ -201,8 +201,8 @@ void CookedTrackerDPL::run(ProcessingContext& pc) LOG(info) << "ITSCookedTracker pushed " << tracks.size() << " tracks and " << vertices.size() << " vertices"; if (mUseMC) { - pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}, trackLabels); - pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0, Lifetime::Timeframe}, mc2rofs); + pc.outputs().snapshot(Output{"ITS", "TRACKSMCTR", 0}, trackLabels); + pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0}, mc2rofs); } mTimer.Stop(); } diff --git a/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx index 48bbaed171a85..2ea0c833f3871 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/DCSGeneratorSpec.cxx @@ -117,7 +117,7 @@ void ITSDCSDataGenerator::run(o2::framework::ProcessingContext& pc) auto timeNow = std::chrono::system_clock::now(); timingInfo.creation = std::chrono::duration_cast(timeNow.time_since_epoch()).count(); // in ms - pc.outputs().snapshot(Output{"ITS", mDataDescription, 0, Lifetime::Timeframe}, dpcoms); + pc.outputs().snapshot(Output{"ITS", mDataDescription, 0}, dpcoms); mTFs++; } } // namespace diff --git a/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx index 416ba5b3b07cc..8e72faae9fd37 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/TrackReaderSpec.cxx @@ -44,14 +44,14 @@ void TrackReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " track in " << mROFRec.size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "ITSTrackROF", 0, Lifetime::Timeframe}, mROFRec); - pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0, Lifetime::Timeframe}, mClusInd); - pc.outputs().snapshot(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{mOrigin, "ITSTrackROF", 0}, mROFRec); + pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0}, mTracks); + pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0}, mClusInd); + pc.outputs().snapshot(Output{"ITS", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0}, mVerticesROFRec); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0, Lifetime::Timeframe}, mMCTruth); - pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0, Lifetime::Timeframe}, mMCVertTruth); + pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0}, mMCVertTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx index 35671f646bd04..331e42907530a 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/TrackerSpec.cxx @@ -150,8 +150,8 @@ void TrackerDPL::run(ProcessingContext& pc) } auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); - auto& irFrames 
= pc.outputs().make>(Output{"ITS", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0}, rofsinput.begin(), rofsinput.end()); + auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0}); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB irFrames.reserve(rofs.size()); @@ -164,19 +164,19 @@ void TrackerDPL::run(ProcessingContext& pc) if (mIsMC) { labels = pc.inputs().get*>("itsmclabels").release(); // get the array as read-only span, a snapshot is sent forward - pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0, Lifetime::Timeframe}, pc.inputs().get>("ITSMC2ROframes")); + pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0}, pc.inputs().get>("ITSMC2ROframes")); LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}); - auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}); - auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0}); + auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0}); + auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0}); // MC static pmr::vector dummyMCLabTracks, dummyMCLabVerts; - auto& allTrackLabels = mIsMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}) : dummyMCLabTracks; - auto& allVerticesLabels = mIsMC ? pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0, Lifetime::Timeframe}) : dummyMCLabVerts; + auto& allTrackLabels = mIsMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0}) : dummyMCLabTracks; + auto& allVerticesLabels = mIsMC ? 
pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0}) : dummyMCLabVerts; std::uint32_t roFrame = 0; diff --git a/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx b/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx index bbf033a31abee..e92f08af23c0d 100644 --- a/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx +++ b/Detectors/ITSMFT/ITS/workflow/src/VertexReaderSpec.cxx @@ -41,8 +41,8 @@ void VertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mVerticesPtr->size() << " vertices in " << mVerticesROFRecPtr->size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{"ITS", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"ITS", "VERTICESROF", 0}, mVerticesROFRec); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx index f6279be58cfd9..bc8568112e447 100644 --- a/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx +++ b/Detectors/ITSMFT/MFT/workflow/src/ClustererSpec.cxx @@ -89,17 +89,17 @@ void ClustererDPL::run(ProcessingContext& pc) clusterLabels = std::make_unique>(); } mClusterer->process(mNThreads, reader, &clusCompVec, &clusPattVec, &clusROFVec, clusterLabels.get()); - pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); if (mUseMC) { - pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, *clusterLabels.get()); // at the moment requires snapshot + pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0}, *clusterLabels.get()); // at the moment requires snapshot std::vector clusterMC2ROframes(mc2rofs.size()); for (int i = mc2rofs.size(); i--;) { clusterMC2ROframes[i] = mc2rofs[i]; // Simply, replicate it from digits ? 
} - pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, clusterMC2ROframes); + pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0}, clusterMC2ROframes); } // TODO: in principle, after masking "overflow" pixels the MC2ROFRecord maxROF supposed to change, nominally to minROF diff --git a/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx index 8d9b889d4d2aa..1a2ae573af536 100644 --- a/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx +++ b/Detectors/ITSMFT/MFT/workflow/src/TrackReaderSpec.cxx @@ -45,11 +45,11 @@ void TrackReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " track in " << mROFRec.size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "MFTTrackROF", 0, Lifetime::Timeframe}, mROFRec); - pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0, Lifetime::Timeframe}, mClusInd); + pc.outputs().snapshot(Output{mOrigin, "MFTTrackROF", 0}, mROFRec); + pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0}, mTracks); + pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0}, mClusInd); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx index 7bf7d27a69e4c..5d22a6f2d2306 100644 --- a/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx +++ b/Detectors/ITSMFT/MFT/workflow/src/TrackerSpec.cxx @@ -72,7 +72,7 @@ void TrackerDPL::run(ProcessingContext& pc) // the output vector however is created directly inside the message memory thus avoiding copy by // snapshot auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"MFT", "MFTTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); + auto& rofs = pc.outputs().make>(Output{"MFT", "MFTTrackROF", 0}, rofsinput.begin(), rofsinput.end()); ROFFilter filter = [](const o2::itsmft::ROFRecord& r) { return true; }; @@ -104,12 +104,12 @@ void TrackerDPL::run(ProcessingContext& pc) LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"MFT", "TRACKCLSID", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"MFT", "TRACKCLSID", 0}); std::vector trackLabels; std::vector allTrackLabels; std::vector tracks; std::vector tracksL; - auto& allTracksMFT = pc.outputs().make>(Output{"MFT", "TRACKS", 0, Lifetime::Timeframe}); + auto& allTracksMFT = pc.outputs().make>(Output{"MFT", "TRACKS", 0}); std::uint32_t roFrameId = 0; int nROFs = rofs.size(); @@ -327,8 +327,8 @@ void TrackerDPL::run(ProcessingContext& pc) LOG(info) << "MFTTracker pushed " << allTracksMFT.size() << " tracks"; if (mUseMC) { - pc.outputs().snapshot(Output{"MFT", "TRACKSMCTR", 0, Lifetime::Timeframe}, allTrackLabels); - pc.outputs().snapshot(Output{"MFT", "TRACKSMC2ROF", 0, Lifetime::Timeframe}, mc2rofs); + pc.outputs().snapshot(Output{"MFT", "TRACKSMCTR", 0}, allTrackLabels); + pc.outputs().snapshot(Output{"MFT", "TRACKSMC2ROF", 0}, mc2rofs); } mTimer[SWTot].Stop(); diff --git a/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx 
b/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx index ad7f26b1b4680..ea906056c7898 100644 --- a/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx +++ b/Detectors/ITSMFT/common/workflow/src/ClusterReaderSpec.cxx @@ -58,18 +58,18 @@ void ClusterReader::run(ProcessingContext& pc) // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name. // To be fixed once the names-definition class is ready - pc.outputs().snapshot(Output{mOrigin, "CLUSTERSROF", 0, Lifetime::Timeframe}, mClusROFRec); - pc.outputs().snapshot(Output{mOrigin, "COMPCLUSTERS", 0, Lifetime::Timeframe}, mClusterCompArray); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERSROF", 0}, mClusROFRec); + pc.outputs().snapshot(Output{mOrigin, "COMPCLUSTERS", 0}, mClusterCompArray); if (mUsePatterns) { - pc.outputs().snapshot(Output{mOrigin, "PATTERNS", 0, Lifetime::Timeframe}, mPatternsArray); + pc.outputs().snapshot(Output{mOrigin, "PATTERNS", 0}, mPatternsArray); } if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mClusterMCTruth); - pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, mClusMC2ROFs); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMCTR", 0}, mClusterMCTruth); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMC2ROF", 0}, mClusMC2ROFs); } if (mTriggerOut) { std::vector dummyTrig; - pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0, Lifetime::Timeframe}, dummyTrig); + pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0}, dummyTrig); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx index fbdd63c73d782..ef9494c85bf4b 100644 --- a/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/ITSMFT/common/workflow/src/DigitReaderSpec.cxx @@ -74,20 +74,20 @@ void DigitReader::run(ProcessingContext& pc) // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name. 
// To be fixed once the names-definition class is ready - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mDigROFRec); - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mDigROFRec); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); if (mUseCalib) { - pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0, Lifetime::Timeframe}, mCalib); + pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0}, mCalib); } if (mTriggerOut) { std::vector dummyTrig; - pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0, Lifetime::Timeframe}, dummyTrig); + pc.outputs().snapshot(Output{mOrigin, "PHYSTRIG", 0}, dummyTrig); } if (mUseMC) { - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); plabels->copyandflatten(sharedlabels); delete plabels; - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mDigMC2ROFs); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mDigMC2ROFs); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx index ccb6f0539909e..4b35f6cc44e39 100644 --- a/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/ITSMFT/common/workflow/src/EntropyEncoderSpec.cxx @@ -56,7 +56,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{mOrigin, "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{mOrigin, "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, rofs, compClusters, pspan, mPattIdConverter, mStrobeLength); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx b/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx index 73132163d102c..a31ec0f1a4af2 100644 --- a/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx +++ b/Detectors/ITSMFT/common/workflow/src/STFDecoderSpec.cxx @@ -139,7 +139,7 @@ void STFDecoder::run(ProcessingContext& pc) std::vector digVec; std::vector calVec; std::vector digROFVec; - auto& chipStatus = pc.outputs().make>(Output{orig, "CHIPSSTATUS", 0, Lifetime::Timeframe}, (size_t)Mapping::getNChips()); + auto& chipStatus = pc.outputs().make>(Output{orig, "CHIPSSTATUS", 0}, (size_t)Mapping::getNChips()); try { mDecoder->startNewTF(pc.inputs()); @@ -202,29 +202,29 @@ void STFDecoder::run(ProcessingContext& pc) } } if (mDoDigits) { - pc.outputs().snapshot(Output{orig, "DIGITS", 0, Lifetime::Timeframe}, digVec); - pc.outputs().snapshot(Output{orig, "DIGITSROF", 0, Lifetime::Timeframe}, digROFVec); + pc.outputs().snapshot(Output{orig, "DIGITS", 0}, digVec); + pc.outputs().snapshot(Output{orig, "DIGITSROF", 0}, digROFVec); mEstNDig = std::max(mEstNDig, size_t(digVec.size() * 1.2)); mEstNROF = std::max(mEstNROF, size_t(digROFVec.size() * 1.2)); if (mDoCalibData) { - pc.outputs().snapshot(Output{orig, "GBTCALIB", 0, Lifetime::Timeframe}, calVec); + pc.outputs().snapshot(Output{orig, "GBTCALIB", 0}, calVec); mEstNCalib = std::max(mEstNCalib, size_t(calVec.size() * 1.2)); } } if (mDoClusters) { // we are not obliged to create vectors which are not requested, but other devices might not know the options of this one - 
pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); mEstNClus = std::max(mEstNClus, size_t(clusCompVec.size() * 1.2)); mEstNClusPatt = std::max(mEstNClusPatt, size_t(clusPattVec.size() * 1.2)); mEstNROF = std::max(mEstNROF, size_t(clusROFVec.size() * 1.2)); } - auto& linkErrors = pc.outputs().make>(Output{orig, "LinkErrors", 0, Lifetime::Timeframe}); - auto& decErrors = pc.outputs().make>(Output{orig, "ChipErrors", 0, Lifetime::Timeframe}); + auto& linkErrors = pc.outputs().make>(Output{orig, "LinkErrors", 0}); + auto& decErrors = pc.outputs().make>(Output{orig, "ChipErrors", 0}); mDecoder->collectDecodingErrors(linkErrors, decErrors); - pc.outputs().snapshot(Output{orig, "PHYSTRIG", 0, Lifetime::Timeframe}, mDecoder->getExternalTriggers()); + pc.outputs().snapshot(Output{orig, "PHYSTRIG", 0}, mDecoder->getExternalTriggers()); if (mDumpOnError != int(GBTLink::RawDataDumps::DUMP_NONE)) { mDecoder->produceRawDataDumps(mDumpOnError, pc.services().get()); diff --git a/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx b/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx index da4c37035c82b..541e6c5fa26c0 100644 --- a/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx +++ b/Detectors/MUON/MCH/IO/src/DigitReaderSpec.cxx @@ -54,18 +54,18 @@ class DigitsReaderDeviceDPL mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[0], 0, Lifetime::Timeframe}, "MCHDigit"}, + Output{header::gDataOriginMCH, mDescriptions[0], 0}, "MCHDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[1], 0, Lifetime::Timeframe}, "MCHROFRecords"}, + Output{header::gDataOriginMCH, mDescriptions[1], 0}, "MCHROFRecords"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[2], 0, Lifetime::Timeframe}, "MCHMCLabels"}); + Output{header::gDataOriginMCH, mDescriptions[2], 0}, "MCHMCLabels"}); } else { mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[0], 0, Lifetime::Timeframe}, "MCHDigit"}, + Output{header::gDataOriginMCH, mDescriptions[0], 0}, "MCHDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMCH, mDescriptions[1], 0, Lifetime::Timeframe}, "MCHROFRecords"}); + Output{header::gDataOriginMCH, mDescriptions[1], 0}, "MCHROFRecords"}); } } diff --git a/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx b/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx index 080c36706a0eb..8b981d4ee4abd 100644 --- a/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx +++ b/Detectors/MUON/MCH/Workflow/src/TrackAtVertexSpec.cxx @@ -139,7 +139,7 @@ class TrackAtVertexTask } // create the output message - auto msgOut = pc.outputs().make(Output{"MCH", "TRACKSATVERTEX", 0, Lifetime::Timeframe}, + auto msgOut = pc.outputs().make(Output{"MCH", "TRACKSATVERTEX", 0}, mTracksAtVtx.size() * sizeof(int) + nTracksTot * sizeof(TrackAtVtxStruct)); // write the tracks diff --git 
a/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx b/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx index 50e085e238d55..058202dfb802b 100644 --- a/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx +++ b/Detectors/MUON/MCH/Workflow/src/entropy-encoder-workflow.cxx @@ -72,7 +72,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"MCH", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"MCH", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, rofs, digits); pc.outputs().snapshot({"ctfrep", 0}, iosize); mTimer.Stop(); diff --git a/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx b/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx index 5657fd375f118..c544ce19fcdea 100644 --- a/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/ClusterizerSpec.cxx @@ -99,13 +99,13 @@ class ClusterizerDeviceDPL // Clear the index correlations that will be used in the next cluster processing mCorrelation.clear(); - pc.outputs().snapshot(of::Output{"MID", "CLUSTERSLABELS", 0, of::Lifetime::Timeframe}, mClusterLabeler.getContainer()); + pc.outputs().snapshot(of::Output{"MID", "CLUSTERSLABELS", 0}, mClusterLabeler.getContainer()); LOG(debug) << "Sent " << mClusterLabeler.getContainer().getIndexedSize() << " indexed clusters"; } - pc.outputs().snapshot(of::Output{"MID", "CLUSTERS", 0, of::Lifetime::Timeframe}, mClusterizer.getClusters()); + pc.outputs().snapshot(of::Output{"MID", "CLUSTERS", 0}, mClusterizer.getClusters()); LOG(debug) << "Sent " << mClusterizer.getClusters().size() << " clusters"; - pc.outputs().snapshot(of::Output{"MID", "CLUSTERSROF", 0, of::Lifetime::Timeframe}, mClusterizer.getROFRecords()); + pc.outputs().snapshot(of::Output{"MID", "CLUSTERSROF", 0}, mClusterizer.getROFRecords()); LOG(debug) << "Sent " << mClusterizer.getROFRecords().size() << " ROF"; mTimer += std::chrono::high_resolution_clock::now() - tStart; @@ -141,4 +141,4 @@ framework::DataProcessorSpec getClusterizerSpec(bool isMC, std::string_view inDa of::AlgorithmSpec{of::adaptFromTask(isMC)}}; } } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx b/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx index 98f9b8c08c654..e0d41cd8d91d2 100644 --- a/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx +++ b/Detectors/MUON/MID/Workflow/src/ColumnDataSpecsUtils.cxx @@ -127,7 +127,7 @@ std::vector buildOutputs(std::vector o std::vector outputs; for (auto& outSpec : outputSpecs) { auto matcher = framework::DataSpecUtils::asConcreteDataMatcher(outSpec); - outputs.emplace_back(framework::Output{matcher.origin, matcher.description, matcher.subSpec, framework::Lifetime::Timeframe}); + outputs.emplace_back(framework::Output{matcher.origin, matcher.description, matcher.subSpec}); } return outputs; } @@ -161,4 +161,4 @@ std::unique_ptr> getLabels(fram } // namespace specs } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx b/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx index f03eb6715798a..192b4c52be9cc 100644 --- a/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/DecodedDataAggregatorSpec.cxx @@ -60,8 +60,8 @@ class 
DecodedDataAggregatorDeviceDPL for (o2::header::DataHeader::SubSpecificationType subSpec = 0; subSpec < 3; ++subSpec) { EventType evtType = static_cast(subSpec); - pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATA", subSpec, of::Lifetime::Timeframe}, mAggregator.getData(evtType)); - pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATAROF", subSpec, of::Lifetime::Timeframe}, mAggregator.getROFRecords(evtType)); + pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATA", subSpec}, mAggregator.getData(evtType)); + pc.outputs().snapshot(of::Output{o2::header::gDataOriginMID, "DATAROF", subSpec}, mAggregator.getROFRecords(evtType)); } mTimer += std::chrono::high_resolution_clock::now() - tStart; diff --git a/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx b/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx index 3445fad66642e..7f380d1afe13a 100644 --- a/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/DigitReaderSpec.cxx @@ -55,19 +55,19 @@ class DigitsReaderDeviceDPL mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[0], 0, Lifetime::Timeframe}, "MIDDigit"}, + Output{header::gDataOriginMID, mDescriptions[0], 0}, "MIDDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[1], 0, Lifetime::Timeframe}, "MIDROFRecords"}, + Output{header::gDataOriginMID, mDescriptions[1], 0}, "MIDROFRecords"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[2], 0, Lifetime::Timeframe}, "MIDDigitMCLabels"}, + Output{header::gDataOriginMID, mDescriptions[2], 0}, "MIDDigitMCLabels"}, &mPublishDigits); } else { mReader = std::make_unique("o2sim", filename.c_str(), -1, RootTreeReader::PublishingMode::Single, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[0], 0, Lifetime::Timeframe}, "MIDDigit"}, + Output{header::gDataOriginMID, mDescriptions[0], 0}, "MIDDigit"}, RootTreeReader::BranchDefinition>{ - Output{header::gDataOriginMID, mDescriptions[1], 0, Lifetime::Timeframe}, "MIDROFRecords"}, + Output{header::gDataOriginMID, mDescriptions[1], 0}, "MIDROFRecords"}, &mPublishDigits); } } diff --git a/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx b/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx index e0030eb102c8d..5a8df6f8e81cb 100644 --- a/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/EntropyDecoderSpec.cxx @@ -66,9 +66,9 @@ void EntropyDecoderSpec::run(ProcessingContext& pc) size_t insize = 0; for (uint32_t it = 0; it < NEvTypes; it++) { insize += cols[it].size() * sizeof(o2::mid::ColumnData); - pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATA", it, Lifetime::Timeframe}, cols[it]); + pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATA", it}, cols[it]); insize += rofs[it].size() * sizeof(o2::mid::ROFRecord); - pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATAROF", it, Lifetime::Timeframe}, rofs[it]); + pc.outputs().snapshot(Output{o2::header::gDataOriginMID, "DATAROF", it}, rofs[it]); } iosize.rawIn = insize; pc.outputs().snapshot({"ctfrep", 0}, iosize); diff --git a/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx b/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx index 44bf3dcc0ae6f..a472d6e28ff16 100644 --- a/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx +++ 
b/Detectors/MUON/MID/Workflow/src/EntropyEncoderSpec.cxx @@ -83,7 +83,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) // build references for looping over the data in BC increasing direction tfData.buildReferences(mCTFCoder.getIRFramesSelector()); - auto& buffer = pc.outputs().make>(Output{header::gDataOriginMID, "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{header::gDataOriginMID, "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, tfData); pc.outputs().snapshot({"ctfrep", 0}, iosize); iosize.rawIn = insize; diff --git a/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx b/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx index 99acaa08ac5f0..28d2ff953ea23 100644 --- a/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/MaskMakerSpec.cxx @@ -117,7 +117,7 @@ class MaskMakerDeviceDPL if (mCounterSinceReset >= mNReset) { for (size_t itype = 0; itype < 2; ++itype) { auto masks = o2::mid::makeMasks(mScalers[itype], mCounterSinceReset, mThreshold, mRefMasks); - pc.outputs().snapshot(of::Output{header::gDataOriginMID, "MASKS", static_cast(itype + 1), of::Lifetime::Timeframe}, masks); + pc.outputs().snapshot(of::Output{header::gDataOriginMID, "MASKS", static_cast(itype + 1)}, masks); } mCounterSinceReset = 0; for (auto& scaler : mScalers) { @@ -160,4 +160,4 @@ framework::DataProcessorSpec getMaskMakerSpec(const FEEIdConfig& feeIdConfig, co of::Options{{"mid-mask-threshold", of::VariantType::Double, 0.9, {"Tolerated occupancy before producing a map"}}, {"mid-mask-reset", of::VariantType::Int, 100, {"Number of calibration events to be checked before resetting the scalers"}}}}; } } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx b/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx index 6d23ce9cdab1f..ba15644e2b17a 100644 --- a/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/RawGBTDecoderSpec.cxx @@ -82,8 +82,8 @@ class RawGBTDecoderDeviceDPL mTimerAlgo += std::chrono::high_resolution_clock::now() - tAlgoStart; - pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODED", dh->subSpecification, o2::framework::Lifetime::Timeframe}, data); - pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODEDROF", dh->subSpecification, o2::framework::Lifetime::Timeframe}, rofRecords); + pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODED", dh->subSpecification}, data); + pc.outputs().snapshot(o2::framework::Output{header::gDataOriginMID, "DECODEDROF", dh->subSpecification}, rofRecords); mTimer += std::chrono::high_resolution_clock::now() - tStart; mNROFs += rofRecords.size(); diff --git a/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx b/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx index be1dd9124315c..e1044768e8ea6 100644 --- a/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/TrackerSpec.cxx @@ -80,20 +80,20 @@ class TrackerDeviceDPL if (mIsMC) { std::unique_ptr> labels = pc.inputs().get*>("mid_clusterlabels"); mTrackLabeler.process(mTracker->getClusters(), tracks, *labels); - pc.outputs().snapshot(of::Output{"MID", "TRACKLABELS", 0, of::Lifetime::Timeframe}, mTrackLabeler.getTracksLabels()); + pc.outputs().snapshot(of::Output{"MID", "TRACKLABELS", 0}, mTrackLabeler.getTracksLabels()); LOG(debug) << "Sent " << mTrackLabeler.getTracksLabels().size() << " indexed tracks."; - 
pc.outputs().snapshot(of::Output{"MID", "TRCLUSLABELS", 0, of::Lifetime::Timeframe}, mTrackLabeler.getTrackClustersLabels()); + pc.outputs().snapshot(of::Output{"MID", "TRCLUSLABELS", 0}, mTrackLabeler.getTrackClustersLabels()); LOG(debug) << "Sent " << mTrackLabeler.getTrackClustersLabels().getIndexedSize() << " indexed track clusters."; } - pc.outputs().snapshot(of::Output{"MID", "TRACKS", 0, of::Lifetime::Timeframe}, tracks); + pc.outputs().snapshot(of::Output{"MID", "TRACKS", 0}, tracks); LOG(debug) << "Sent " << tracks.size() << " tracks."; - pc.outputs().snapshot(of::Output{"MID", "TRACKCLUSTERS", 0, of::Lifetime::Timeframe}, mTracker->getClusters()); + pc.outputs().snapshot(of::Output{"MID", "TRACKCLUSTERS", 0}, mTracker->getClusters()); LOG(debug) << "Sent " << mTracker->getClusters().size() << " track clusters."; - pc.outputs().snapshot(of::Output{"MID", "TRACKROFS", 0, of::Lifetime::Timeframe}, mTracker->getTrackROFRecords()); + pc.outputs().snapshot(of::Output{"MID", "TRACKROFS", 0}, mTracker->getTrackROFRecords()); LOG(debug) << "Sent " << mTracker->getTrackROFRecords().size() << " ROFs."; - pc.outputs().snapshot(of::Output{"MID", "TRCLUSROFS", 0, of::Lifetime::Timeframe}, mTracker->getClusterROFRecords()); + pc.outputs().snapshot(of::Output{"MID", "TRCLUSROFS", 0}, mTracker->getClusterROFRecords()); LOG(debug) << "Sent " << mTracker->getClusterROFRecords().size() << " ROFs."; mTimer += std::chrono::high_resolution_clock::now() - tStart; diff --git a/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx b/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx index ccfe1641b87b6..5d89eee81c629 100644 --- a/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx +++ b/Detectors/MUON/MID/Workflow/src/ZeroSuppressionSpec.cxx @@ -90,7 +90,7 @@ class ZeroSuppressionDeviceDPL } if (mUseMC) { - pc.outputs().snapshot(of::Output{header::gDataOriginMID, "DATALABELS", 0, of::Lifetime::Timeframe}, outMCContainer); + pc.outputs().snapshot(of::Output{header::gDataOriginMID, "DATALABELS", 0}, outMCContainer); } } @@ -113,4 +113,4 @@ framework::DataProcessorSpec getZeroSuppressionSpec(bool useMC, std::string_view of::AlgorithmSpec{of::adaptFromTask(useMC)}}; } } // namespace mid -} // namespace o2 \ No newline at end of file +} // namespace o2 diff --git a/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx index 226630a8b1d81..4f81300431934 100644 --- a/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSBadMapCalibDevice.cxx @@ -177,7 +177,7 @@ void PHOSBadMapCalibDevice::sendOutput(DataAllocator& output) // Send change to QC LOG(info) << "[PHOSBadMapCalibDevice - run] Sending QC "; - output.snapshot(o2::framework::Output{"PHS", "BADMAPDIFF", 0, o2::framework::Lifetime::Timeframe}, mBadMapDiff); + output.snapshot(o2::framework::Output{"PHS", "BADMAPDIFF", 0}, mBadMapDiff); } bool PHOSBadMapCalibDevice::calculateBadMap() diff --git a/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx index 0a37589d3abe7..5c07c6fee8493 100644 --- a/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSHGLGRatioCalibDevice.cxx @@ -229,7 +229,7 @@ void PHOSHGLGRatioCalibDevice::sendOutput(DataAllocator& output) } // Anyway send change to QC LOG(info) << "[PHOSHGLGRatioCalibDevice - sendOutput] Sending QC "; - output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0, o2::framework::Lifetime::Timeframe}, mRatioDiff); + 
output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0}, mRatioDiff); } DataProcessorSpec o2::phos::getHGLGRatioCalibSpec(bool useCCDB, bool forceUpdate) diff --git a/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx index 96f680f710fb0..42289a910debb 100644 --- a/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSPedestalCalibDevice.cxx @@ -135,7 +135,7 @@ void PHOSPedestalCalibDevice::sendOutput(DataAllocator& output) } // Anyway send change to QC LOG(info) << "[PHOSPedestalCalibDevice - run] Sending QC "; - output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0, o2::framework::Lifetime::Timeframe}, mPedDiff); + output.snapshot(o2::framework::Output{"PHS", "CALIBDIFF", 0}, mPedDiff); } void PHOSPedestalCalibDevice::calculatePedestals() diff --git a/Detectors/PHOS/workflow/src/CellConverterSpec.cxx b/Detectors/PHOS/workflow/src/CellConverterSpec.cxx index 330dd27c45251..370df48a8fc40 100644 --- a/Detectors/PHOS/workflow/src/CellConverterSpec.cxx +++ b/Detectors/PHOS/workflow/src/CellConverterSpec.cxx @@ -44,12 +44,12 @@ void CellConverterSpec::run(framework::ProcessingContext& ctx) auto digitsTR = ctx.inputs().get>("digitTriggerRecords"); if (!digitsTR.size()) { // nothing to process mOutputCells.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0, o2::framework::Lifetime::Timeframe}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0}, mOutputCells); mOutputCellTrigRecs.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputCellTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0}, mOutputCellTrigRecs); if (mPropagateMC) { mOutputTruthCont.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0}, mOutputTruthCont); } return; } @@ -128,10 +128,10 @@ void CellConverterSpec::run(framework::ProcessingContext& ctx) } LOG(info) << "[PHOSCellConverter - run] Writing " << mOutputCells.size() << " cells, " << mOutputCellTrigRecs.size() << " Trig Records " << mOutputTruthCont.getNElements() << " PHOS labels "; - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0, o2::framework::Lifetime::Timeframe}, mOutputCells); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputCellTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", 0}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", 0}, mOutputCellTrigRecs); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLSMCTR", 0}, mOutputTruthCont); } } diff --git a/Detectors/PHOS/workflow/src/CellReaderSpec.cxx b/Detectors/PHOS/workflow/src/CellReaderSpec.cxx index 64f54c1baf189..c7d93fc20301f 100644 --- a/Detectors/PHOS/workflow/src/CellReaderSpec.cxx +++ b/Detectors/PHOS/workflow/src/CellReaderSpec.cxx @@ -44,10 +44,10 @@ void CellReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mCells.size() << " Cells in " << mTRs.size() << " TriggerRecords at entry " << ent; - 
pc.outputs().snapshot(Output{mOrigin, "CELLS", 0, Lifetime::Timeframe}, mCells); - pc.outputs().snapshot(Output{mOrigin, "CELLTRIGREC", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "CELLS", 0}, mCells); + pc.outputs().snapshot(Output{mOrigin, "CELLTRIGREC", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "CELLSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "CELLSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx b/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx index 27bce0f8745f4..13aa21258125d 100644 --- a/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx +++ b/Detectors/PHOS/workflow/src/ClusterizerSpec.cxx @@ -78,16 +78,16 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) auto digitsTR = ctx.inputs().get>("digitTriggerRecords"); if (!digitsTR.size()) { // nothing to process mOutputClusters.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0}, mOutputClusters); if (mFullCluOutput) { mOutputCluElements.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0, o2::framework::Lifetime::Timeframe}, mOutputCluElements); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0}, mOutputCluElements); } mOutputClusterTrigRecs.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0}, mOutputClusterTrigRecs); if (mPropagateMC) { mOutputTruthCont.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } return; } @@ -120,13 +120,13 @@ void ClusterizerSpec::run(framework::ProcessingContext& ctx) } else { LOG(debug) << "[PHOSClusterizer - run] Writing " << mOutputClusters.size() << " clusters and " << mOutputClusterTrigRecs.size() << " TR"; } - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0, o2::framework::Lifetime::Timeframe}, mOutputClusters); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERS", 0}, mOutputClusters); if (mFullCluOutput) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0, o2::framework::Lifetime::Timeframe}, mOutputCluElements); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUELEMENTS", 0}, mOutputCluElements); } - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0, o2::framework::Lifetime::Timeframe}, mOutputClusterTrigRecs); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRIGREC", 0}, mOutputClusterTrigRecs); if (mPropagateMC) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0, o2::framework::Lifetime::Timeframe}, mOutputTruthCont); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CLUSTERTRUEMC", 0}, mOutputTruthCont); } } diff --git a/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx b/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx index 737856051af0c..70f5077b2f0c9 100644 --- a/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/PHOS/workflow/src/DigitReaderSpec.cxx @@ -44,10 +44,10 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < 
mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mDigits.size() << " Digits in " << mTRs.size() << " TriggerRecords at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0, Lifetime::Timeframe}, mTRs); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITTRIGREC", 0}, mTRs); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMCTR", 0}, mMCTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx b/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx index 226184c1179c6..a932a45f1bb53 100644 --- a/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/PHOS/workflow/src/EntropyEncoderSpec.cxx @@ -54,7 +54,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"PHS", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"PHS", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, cells); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx b/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx index d6562fdadfee6..9a1289889424e 100644 --- a/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx +++ b/Detectors/PHOS/workflow/src/EventBuilderSpec.cxx @@ -178,8 +178,8 @@ void EventBuilderSpec::run(framework::ProcessingContext& ctx) } } - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLS", 0, framework::Lifetime::Timeframe}, outputCells); - ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLTRIGREC", 0, framework::Lifetime::Timeframe}, outputTriggers); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLS", 0}, outputCells); + ctx.outputs().snapshot(framework::Output{o2::header::gDataOriginPHS, "CELLTRIGREC", 0}, outputTriggers); } o2::framework::DataProcessorSpec o2::phos::getEventBuilderSpec() diff --git a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx index ea0336f0ab19a..292e083d4bb48 100644 --- a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx +++ b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx @@ -108,11 +108,11 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) contDeadBeef == maxWarn ? fmt::format(". 
{} such inputs in row received, stopping reporting", contDeadBeef) : ""); } mOutputCells.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId, o2::framework::Lifetime::Timeframe}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId}, mOutputCells); mOutputTriggerRecords.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId}, mOutputTriggerRecords); mOutputHWErrors.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { mOutputFitChi.clear(); ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); @@ -290,9 +290,9 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) mLastSize = 1.1 * mOutputCells.size(); LOG(debug) << "[PHOSRawToCellConverter - run] Writing " << mOutputCells.size() << " cells ..."; - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId, o2::framework::Lifetime::Timeframe}, mOutputCells); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId, o2::framework::Lifetime::Timeframe}, mOutputTriggerRecords); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0, o2::framework::Lifetime::Timeframe}, mOutputHWErrors); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLS", mflpId}, mOutputCells); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId}, mOutputTriggerRecords); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); } diff --git a/Detectors/PHOS/workflow/src/ReaderSpec.cxx b/Detectors/PHOS/workflow/src/ReaderSpec.cxx index b68046b47c156..9a1fb23a553df 100644 --- a/Detectors/PHOS/workflow/src/ReaderSpec.cxx +++ b/Detectors/PHOS/workflow/src/ReaderSpec.cxx @@ -58,7 +58,6 @@ DataProcessorSpec getDigitsReaderSpec(bool propagateMC) processAttributes->terminateOnEod = ic.options().get("terminate-on-eod"); processAttributes->finished = false; processAttributes->datatype = "PHOSDigit"; - constexpr auto persistency = Lifetime::Timeframe; o2::header::DataHeader::SubSpecificationType subSpec = 0; if (propagateMC) { processAttributes->reader = std::make_shared(treename.c_str(), // tree name @@ -66,19 +65,19 @@ DataProcessorSpec getDigitsReaderSpec(bool propagateMC) nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - Output{"PHS", "DIGITS", subSpec, persistency}, "PHOSDigit"}, + Output{"PHS", "DIGITS", subSpec}, "PHOSDigit"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "DIGITTRIGREC", subSpec, persistency}, "PHOSDigitTrigRecords"}, - Output{"PHS", "DIGITSMCTR", subSpec, persistency}, "PHOSDigitMCTruth"); // name of mc label branch + Output{"PHS", "DIGITTRIGREC", subSpec}, "PHOSDigitTrigRecords"}, + Output{"PHS", "DIGITSMCTR", subSpec}, "PHOSDigitMCTruth"); // name of mc label branch } else { processAttributes->reader = std::make_shared(treename.c_str(), // tree name filename.c_str(), // input file name nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - 
Output{"PHS", "DIGITS", subSpec, persistency}, "PHOSDigit"}, + Output{"PHS", "DIGITS", subSpec}, "PHOSDigit"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "DIGITTRIGREC", subSpec, persistency}, "PHOSDigitTrigRecords"}); + Output{"PHS", "DIGITTRIGREC", subSpec}, "PHOSDigitTrigRecords"}); } } @@ -156,7 +155,6 @@ DataProcessorSpec getCellReaderSpec(bool propagateMC) processAttributes->terminateOnEod = ic.options().get("terminate-on-eod"); processAttributes->finished = false; processAttributes->datatype = "PHOSCell"; - constexpr auto persistency = Lifetime::Timeframe; o2::header::DataHeader::SubSpecificationType subSpec = 0; if (propagateMC) { processAttributes->reader = std::make_shared(treename.c_str(), // tree name @@ -164,10 +162,10 @@ DataProcessorSpec getCellReaderSpec(bool propagateMC) nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLS", subSpec, persistency}, "PHOSCell"}, + Output{"PHS", "CELLS", subSpec}, "PHOSCell"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLTRIGREC", subSpec, persistency}, "PHOSCellTrigRec"}, - Output{"PHS", "CELLSMCTR", subSpec, persistency}, + Output{"PHS", "CELLTRIGREC", subSpec}, "PHOSCellTrigRec"}, + Output{"PHS", "CELLSMCTR", subSpec}, "PHOSCellTrueMC"); // name of mc label branch } else { processAttributes->reader = std::make_shared(treename.c_str(), // tree name @@ -175,9 +173,9 @@ DataProcessorSpec getCellReaderSpec(bool propagateMC) nofEvents, // number of entries to publish publishingMode, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLS", subSpec, persistency}, "PHOSCell"}, + Output{"PHS", "CELLS", subSpec}, "PHOSCell"}, RootTreeReader::BranchDefinition>{ - Output{"PHS", "CELLTRIGREC", subSpec, persistency}, "PHOSCellTrigRec"}); + Output{"PHS", "CELLTRIGREC", subSpec}, "PHOSCellTrigRec"}); } } diff --git a/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx b/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx index 736a569cacd34..454be7a5fcb83 100644 --- a/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx +++ b/Detectors/PHOS/workflow/src/StandaloneAODProducerSpec.cxx @@ -136,8 +136,8 @@ void StandaloneAODProducerSpec::run(ProcessingContext& pc) } // end of event loop // std::cout << "Finished cell loop" << std::endl; - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, tfNumber); - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, tfNumber); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); mTimer.Stop(); } diff --git a/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h b/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h index f42c5676d55e5..4beef9541eb55 100644 --- a/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h +++ b/Detectors/TOF/calibration/testWorkflow/TOFCalibCollectorSpec.h @@ -113,8 +113,8 @@ class TOFCalibCollectorDevice : public o2::framework::Task auto entries = collectedInfo.size(); // this means that we are ready to send the output auto entriesPerChannel = mCollector->getEntriesPerChannel(); - output.snapshot(Output{o2::header::gDataOriginTOF, "COLLECTEDINFO", 0, Lifetime::Timeframe}, collectedInfo); - output.snapshot(Output{o2::header::gDataOriginTOF, "ENTRIESCH", 0, Lifetime::Timeframe}, entriesPerChannel); + output.snapshot(Output{o2::header::gDataOriginTOF, "COLLECTEDINFO", 0}, collectedInfo); + output.snapshot(Output{o2::header::gDataOriginTOF, "ENTRIESCH", 0}, 
entriesPerChannel); mCollector->initOutput(); // reset the output for the next round } } diff --git a/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx b/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx index ebc61f05e85c0..fc0e2b9b2c30d 100644 --- a/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx +++ b/Detectors/TOF/workflow/src/CompressedDecodingTask.cxx @@ -109,18 +109,18 @@ void CompressedDecodingTask::postData(ProcessingContext& pc) // LOG(info) << "TOF: N tof window decoded = " << n_tof_window << "(orbits = " << n_orbits << ") with " << digit_size << " digits"; // add digits in the output snapshot - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0, Lifetime::Timeframe}, *alldigits); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0, Lifetime::Timeframe}, *row); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0}, *alldigits); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0}, *row); std::vector& patterns = mDecoder.getPatterns(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0, Lifetime::Timeframe}, patterns); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0}, patterns); std::vector& errors = mDecoder.getErrors(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ERRORS", 0, Lifetime::Timeframe}, errors); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ERRORS", 0}, errors); DigitHeader& digitH = mDecoder.getDigitHeader(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0, Lifetime::Timeframe}, digitH); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0}, digitH); auto diagnosticFrequency = mDecoder.getDiagnosticFrequency(); diagnosticFrequency.setTimeStamp(mCreationTime / 1000); @@ -130,7 +130,7 @@ void CompressedDecodingTask::postData(ProcessingContext& pc) diagnosticFrequency.setTFIDInfo(tfinfo); //diagnosticFrequency.print(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0, Lifetime::Timeframe}, diagnosticFrequency); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0}, diagnosticFrequency); mDecoder.clear(); diff --git a/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx b/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx index b2c735af87241..400914c64021f 100644 --- a/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx +++ b/Detectors/TOF/workflow/src/EntropyDecoderSpec.cxx @@ -81,7 +81,7 @@ void EntropyDecoderSpec::run(ProcessingContext& pc) o2::dataformats::TFIDInfo tfinfo; o2::base::TFIDInfoHelper::fillTFIDInfo(pc, tfinfo); diagnostic.setTFIDInfo(tfinfo); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0, Lifetime::Timeframe}, diagnostic); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0}, diagnostic); pc.outputs().snapshot({"ctfrep", 0}, iosize); mTimer.Stop(); LOG(info) << "Decoded " << digits.size() << " digits in " << row.size() << " ROF, (" << iosize.asString() << ") in " << mTimer.CpuTime() - cput << " s"; diff --git a/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx b/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx index 8f215a0572961..3fc47955f53c0 100644 --- a/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/TOF/workflow/src/EntropyEncoderSpec.cxx @@ -55,7 +55,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = 
pc.outputs().make>(Output{o2::header::gDataOriginTOF, "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{o2::header::gDataOriginTOF, "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, rofs, compDigits, pspan); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx b/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx index 4d3e0589c186d..48b1508692c24 100644 --- a/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx +++ b/Detectors/TOF/workflow/src/TOFClusterizerSpec.cxx @@ -224,25 +224,25 @@ class TOFDPLClustererTask } // send clusters - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0, Lifetime::Timeframe}, mClustersArray); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0, Lifetime::Timeframe}, mMultPerLongBC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0}, mClustersArray); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0}, mMultPerLongBC); // send labels if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mClsLabels); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0}, mClsLabels); } if (mIsCalib) { std::vector* clusterCalInfo = mClusterer.getInfoFromCluster(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0, Lifetime::Timeframe}, *clusterCalInfo); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0}, *clusterCalInfo); } if (mIsCosmic) { std::vector* cosmicInfo = mCosmicProcessor.getCosmicInfo(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0, Lifetime::Timeframe}, *cosmicInfo); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0}, *cosmicInfo); std::vector* cosmicTrack = mCosmicProcessor.getCosmicTrack(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0, Lifetime::Timeframe}, *cosmicTrack); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0}, *cosmicTrack); std::vector* cosmicTrackSize = mCosmicProcessor.getCosmicTrackSize(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0, Lifetime::Timeframe}, *cosmicTrackSize); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0}, *cosmicTrackSize); } mTimer.Stop(); diff --git a/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx b/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx index 9be38858250ca..116f93a06c208 100644 --- a/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/CalibClusReaderSpec.cxx @@ -39,13 +39,13 @@ void CalibClusReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(debug) << "Pushing " << mPclusInfos->size() << " TOF clusters calib info at entry " << ent; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0, Lifetime::Timeframe}, mClusInfos); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCALCLUS", 0}, mClusInfos); if (mIsCosmics) { LOG(debug) << "Pushing " << mPcosmicInfo->size() << " TOF cosmics info at entry " << ent; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0, Lifetime::Timeframe}, mCosmicInfo); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0, Lifetime::Timeframe}, mCosmicTrack); - 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0, Lifetime::Timeframe}, mCosmicTrackSize); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOCOSMICS", 0}, mCosmicInfo); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKCOS", 0}, mCosmicTrack); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "INFOTRACKSIZE", 0}, mCosmicTrackSize); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx b/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx index d81905fb38888..1854be2f1b88f 100644 --- a/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/CalibInfoReaderSpec.cxx @@ -90,9 +90,9 @@ void CalibInfoReader::run(ProcessingContext& pc) LOG(debug) << "Current entry " << mCurrentEntry; LOG(debug) << "Send " << mVect.size() << " calib infos"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, mTOFTPC ? ddCalib_tpc : ddCalib, 0, Lifetime::Timeframe}, mVect); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, mTOFTPC ? ddCalib_tpc : ddCalib, 0}, mVect); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddDia, 0, Lifetime::Timeframe}, mDia); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddDia, 0}, mDia); usleep(100); } mGlobalEntry++; diff --git a/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx b/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx index 70f10b109a70e..e2979a8fc0dbf 100644 --- a/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/ClusterReaderSpec.cxx @@ -44,10 +44,10 @@ void ClusterReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(debug) << "Pushing " << mClustersPtr->size() << " TOF clusters at entry " << ent; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0, Lifetime::Timeframe}, mClusters); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0, Lifetime::Timeframe}, mClustersMult); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERS", 0}, mClusters); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMULT", 0}, mClustersMult); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CLUSTERSMCTR", 0}, mLabels); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx b/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx index f9fd57cf6b58f..30ddea9d5b9e7 100644 --- a/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/DigitReaderSpec.cxx @@ -81,18 +81,18 @@ void DigitReader::run(ProcessingContext& pc) mDiagnostic = mFiller.getDiagnosticFrequency(); // add digits loaded in the output snapshot - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0, Lifetime::Timeframe}, mDigits); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0, Lifetime::Timeframe}, mRow); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0, Lifetime::Timeframe}, mPatterns); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0, Lifetime::Timeframe}, mDiagnostic); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0}, mDigits); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0}, mRow); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, 
"PATTERNS", 0}, mPatterns); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIAFREQ", 0}, mDiagnostic); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0}, mLabels); } static o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::CONTINUOUS; LOG(debug) << "TOF: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0}, roMode); } else { LOG(error) << "Cannot read the TOF digits !"; return; diff --git a/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx b/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx index 43264eb70cb58..48ccb83ca3cc3 100644 --- a/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx +++ b/Detectors/TOF/workflowIO/src/TOFMatchedReaderSpec.cxx @@ -74,12 +74,12 @@ void TOFMatchedReader::run(ProcessingContext& pc) LOG(debug) << "Pushing " << mMatches.size() << " TOF matchings at entry " << currEntry; uint32_t tpcMatchSS = o2::globaltracking::getSubSpec(mSubSpecStrict && (!mMode) ? o2::globaltracking::MatchingType::Strict : o2::globaltracking::MatchingType::Standard); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMatchInfo[mMode], tpcMatchSS, Lifetime::Timeframe}, mMatches); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMatchInfo[mMode], tpcMatchSS}, mMatches); if (mReadTracks && (!mMode)) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "TOFTRACKS_TPC", tpcMatchSS, Lifetime::Timeframe}, mTracks); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "TOFTRACKS_TPC", tpcMatchSS}, mTracks); } if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMCMatchTOF[mMode], tpcMatchSS, Lifetime::Timeframe}, mLabelTOF); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, ddMCMatchTOF[mMode], tpcMatchSS}, mLabelTOF); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h index 6316824966856..02857398f0e07 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFLPIDCSpec.h @@ -195,14 +195,14 @@ class TPCFLPIDCDevice : public o2::framework::Task fill1DIDCs(cru); LOGP(debug, "Sending 1D-IDCs to EPNs of size {} and weights of size {}", mOneDIDCs.first.size(), mOneDIDCs.second.size()); - output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPN(), subSpec, Lifetime::Timeframe}, mOneDIDCs.first); - output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPNWeights(), subSpec, Lifetime::Timeframe}, mOneDIDCs.second); + output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPN(), subSpec}, mOneDIDCs.first); + output.snapshot(Output{gDataOriginTPC, getDataDescription1DIDCEPNWeights(), subSpec}, mOneDIDCs.second); } void sendOutput(DataAllocator& output, const uint32_t cru) { const header::DataHeader::SubSpecificationType subSpec{cru << 7}; - output.adoptContainer(Output{gDataOriginTPC, getDataDescriptionIDCGroup(CRU(cru).side()), subSpec, Lifetime::Timeframe}, std::move(mIDCs[cru])); + output.adoptContainer(Output{gDataOriginTPC, getDataDescriptionIDCGroup(CRU(cru).side()), subSpec}, std::move(mIDCs[cru])); } void fill1DIDCs(const uint32_t cru) diff --git 
a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h index 7fd4c923feb19..e52234b3bb9ef 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCFourierTransformEPNSpec.h @@ -120,7 +120,7 @@ class TPCFourierTransformEPNSpec : public o2::framework::Task void sendOutput(DataAllocator& output, const Side side) { - output.snapshot(Output{gDataOriginTPC, getDataDescription(), header::DataHeader::SubSpecificationType{side}, Lifetime::Timeframe}, mIDCFourierTransform.getFourierCoefficients().getFourierCoefficients()); + output.snapshot(Output{gDataOriginTPC, getDataDescription(), header::DataHeader::SubSpecificationType{side}}, mIDCFourierTransform.getFourierCoefficients().getFourierCoefficients()); } }; diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h index 99d64220a2238..3ce0553f8dd7f 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCIntegrateIDCSpec.h @@ -92,14 +92,14 @@ class TPCIntegrateIDCDevice : public o2::framework::Task for (const auto& idcs : mIDCs[sector].get()) { const header::DataHeader::SubSpecificationType subSpec{cru << 7}; if (mIDCFormat == IDCFormat::Sim) { - output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec, Lifetime::Timeframe}, idcs); + output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec}, idcs); } else { // TODO // convert to format from thorsten here // send....... // DUMMY FOR NOW // const TPCCRUHeader cruheader{cru, mIntegrationIntervalsPerTF}; - output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec, Lifetime::Timeframe}, idcs); + output.snapshot(Output{gDataOriginTPC, getDataDescription(mIDCFormat), subSpec}, idcs); } ++cru; } diff --git a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h index 531bf635a1e5b..d73f80adf8b4b 100644 --- a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h +++ b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/PublisherSpec.h @@ -62,22 +62,21 @@ framework::DataProcessorSpec getPublisherSpec(PublisherConf const& config, bool // a creator callback for the actual reader instance auto creator = [dto, mco, propagateMC](const char* treename, const char* filename, int nofEvents, Reader::PublishingMode publishingMode, o2::header::DataHeader::SubSpecificationType subSpec, const char* branchname, const char* mcbranchname, Reader::SpecialPublishHook* publishhook = nullptr) { - constexpr auto persistency = o2::framework::Lifetime::Timeframe; if (propagateMC) { return std::make_shared(treename, filename, nofEvents, publishingMode, - Output{mco.origin, mco.description, subSpec, persistency}, + Output{mco.origin, mco.description, subSpec}, mcbranchname, - Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec, persistency}, branchname}, + Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec}, branchname}, publishhook); } else { return std::make_shared(treename, filename, nofEvents, publishingMode, - Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec, persistency}, branchname}, + Reader::BranchDefinition{Output{dto.origin, dto.description, subSpec}, 
branchname}, publishhook); } }; diff --git a/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx b/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx index e02a78b3dca3a..7abd744440517 100644 --- a/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx +++ b/Detectors/TPC/workflow/readers/src/PublisherSpec.cxx @@ -204,9 +204,9 @@ DataProcessorSpec createPublisherSpec(PublisherConf const& config, bool propagat header.sectorBits = 0; header.activeSectors = processAttributes->activeSectors; for (auto const& subSpec : processAttributes->zeroLengthOutputs) { - pc.outputs().make({dto.origin, dto.description, subSpec, Lifetime::Timeframe, {header}}); + pc.outputs().make({dto.origin, dto.description, subSpec, {header}}); if (pc.outputs().isAllowed({mco.origin, mco.description, subSpec})) { - pc.outputs().make({mco.origin, mco.description, subSpec, Lifetime::Timeframe, {header}}); + pc.outputs().make({mco.origin, mco.description, subSpec, {header}}); } } } diff --git a/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx b/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx index 66575dd843458..bfe1644bd8b5d 100644 --- a/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx +++ b/Detectors/TPC/workflow/readers/src/TrackReaderSpec.cxx @@ -68,10 +68,10 @@ void TrackReader::run(ProcessingContext& pc) } } - pc.outputs().snapshot(Output{"TPC", "TRACKS", 0, Lifetime::Timeframe}, mTracksOut); - pc.outputs().snapshot(Output{"TPC", "CLUSREFS", 0, Lifetime::Timeframe}, mCluRefVecOut); + pc.outputs().snapshot(Output{"TPC", "TRACKS", 0}, mTracksOut); + pc.outputs().snapshot(Output{"TPC", "CLUSREFS", 0}, mCluRefVecOut); if (mUseMC) { - pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBL", 0, Lifetime::Timeframe}, mMCTruthOut); + pc.outputs().snapshot(Output{"TPC", "TRACKSMCLBL", 0}, mMCTruthOut); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx b/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx index 2c363f4f7ed93..5dd3c16b3b1a8 100644 --- a/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx +++ b/Detectors/TPC/workflow/readers/src/TriggerReaderSpec.cxx @@ -36,7 +36,7 @@ void TriggerReader::run(ProcessingContext& pc) auto ent = mTree->GetReadEntry() + 1; mTree->GetEntry(ent); - pc.outputs().snapshot(Output{"TPC", "TRIGGERWORDS", 0, Lifetime::Timeframe}, *mTrig); + pc.outputs().snapshot(Output{"TPC", "TRIGGERWORDS", 0}, *mTrig); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx b/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx index cf11e3c7b39ce..adf0cba944c03 100644 --- a/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx +++ b/Detectors/TPC/workflow/src/ChunkedDigitPublisher.cxx @@ -111,8 +111,7 @@ auto makePublishBuffer(framework::ProcessingContext& pc, int sector, uint64_t ac o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = activeSectors; - return &pc.outputs().make(Output{"TPC", "DIGITS", static_cast(sector), Lifetime::Timeframe, - header}); + return &pc.outputs().make(Output{"TPC", "DIGITS", static_cast(sector), header}); } template <> @@ -138,7 +137,7 @@ void publishBuffer(framework::ProcessingContext& pc, int secto LabelType* sharedlabels; #pragma omp critical sharedlabels = &pc.outputs().make>( - Output{"TPC", "DIGITSMCTR", static_cast(sector), Lifetime::Timeframe, header}); + Output{"TPC", 
"DIGITSMCTR", static_cast(sector), header}); accum->flatten_to(*sharedlabels); delete accum; diff --git a/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx b/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx index bb20bf86d531a..9e2e70e8dde46 100644 --- a/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx +++ b/Detectors/TPC/workflow/src/ClusterDecoderRawSpec.cxx @@ -88,7 +88,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) o2::header::Stack actual{*sectorHeaderMC}; std::swap(mcHeaderStack, actual); if (sectorHeaderMC->sector() < 0) { - pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, Lifetime::Timeframe, std::move(mcHeaderStack)}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, std::move(mcHeaderStack)}, fanSpec); } } } @@ -97,7 +97,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) o2::header::Stack actual{*sectorHeader}; std::swap(rawHeaderStack, actual); if (sectorHeader->sector() < 0) { - pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, Lifetime::Timeframe, std::move(rawHeaderStack)}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, std::move(rawHeaderStack)}, fanSpec); return; } } @@ -167,7 +167,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) // containers are created for clusters and MC labels per (sector,globalPadRow) address char* outputBuffer = nullptr; auto outputAllocator = [&pc, &fanSpec, &outputBuffer, &rawHeaderStack](size_t size) -> char* { - outputBuffer = pc.outputs().newChunk(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, Lifetime::Timeframe, std::move(rawHeaderStack)}, size).data(); + outputBuffer = pc.outputs().newChunk(Output{gDataOriginTPC, DataDescription("CLUSTERNATIVE"), fanSpec, std::move(rawHeaderStack)}, size).data(); return outputBuffer; }; MCLabelContainer mcout; @@ -188,7 +188,7 @@ DataProcessorSpec getClusterDecoderRawSpec(bool sendMC) // serialize the complete list of MC label containers ConstMCLabelContainer labelsFlat; mcout.flatten_to(labelsFlat); - pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, Lifetime::Timeframe, std::move(mcHeaderStack)}, labelsFlat); + pc.outputs().snapshot(Output{gDataOriginTPC, DataDescription("CLNATIVEMCLBL"), fanSpec, std::move(mcHeaderStack)}, labelsFlat); } }; diff --git a/Detectors/TPC/workflow/src/ClustererSpec.cxx b/Detectors/TPC/workflow/src/ClustererSpec.cxx index 71043db3935be..310551a67753e 100644 --- a/Detectors/TPC/workflow/src/ClustererSpec.cxx +++ b/Detectors/TPC/workflow/src/ClustererSpec.cxx @@ -81,9 +81,9 @@ DataProcessorSpec getClustererSpec(bool sendMC) // forward the control information // FIXME define and use flags in TPCSectorHeader o2::tpc::TPCSectorHeader header{sector}; - pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, Lifetime::Timeframe, {header}}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, {header}}, fanSpec); if (DataRefUtils::isValid(mclabelref)) { - pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, Lifetime::Timeframe, {header}}, fanSpec); + pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, {header}}, fanSpec); } return; } @@ -131,12 +131,12 @@ DataProcessorSpec getClustererSpec(bool sendMC) } // FIXME: that should be a case for pmr, want to send the content of the vector as a binary // block by using move semantics - auto 
outputPages = pc.outputs().make(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, Lifetime::Timeframe, {*sectorHeader}}, clusterArray.size()); + auto outputPages = pc.outputs().make(Output{gDataOriginTPC, "CLUSTERHW", fanSpec, {*sectorHeader}}, clusterArray.size()); std::copy(clusterArray.begin(), clusterArray.end(), outputPages.begin()); if (DataRefUtils::isValid(mclabelref)) { ConstMCLabelContainer mcflat; mctruthArray.flatten_to(mcflat); - pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, Lifetime::Timeframe, {*sectorHeader}}, mcflat); + pc.outputs().snapshot(Output{gDataOriginTPC, "CLUSTERHWMCLBL", fanSpec, {*sectorHeader}}, mcflat); } }; diff --git a/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx b/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx index 7e03748da0bc4..75fedb4a0effe 100644 --- a/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/TPC/workflow/src/EntropyEncoderSpec.cxx @@ -133,7 +133,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) auto triggers = pc.inputs().get>("trigger"); auto cput = mTimer.CpuTime(); mTimer.Start(false); - auto& buffer = pc.outputs().make>(Output{"TPC", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"TPC", "CTFDATA", 0}); std::vector rejectHits, rejectTracks, rejectTrackHits, rejectTrackHitsReduced; CompressedClusters clustersFiltered = clusters; std::vector, std::vector>> tmpBuffer(std::max(mNThreads, 1)); diff --git a/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx b/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx index d0119f719193c..e946488073bf9 100644 --- a/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx +++ b/Detectors/TPC/workflow/src/KryptonClustererSpec.cxx @@ -75,7 +75,7 @@ class KrBoxClusterFinderDevice : public o2::framework::Task { o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = (0x1 << sector); - output.snapshot(Output{gDataOriginTPC, "KRCLUSTERS", static_cast(sector), Lifetime::Timeframe, header}, clusters); + output.snapshot(Output{gDataOriginTPC, "KRCLUSTERS", static_cast(sector), header}, clusters); } }; diff --git a/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx b/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx index 36dfa5edc65f7..d968876eefccb 100644 --- a/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx +++ b/Detectors/TPC/workflow/src/KryptonRawFilterSpec.cxx @@ -223,7 +223,7 @@ class KrRawFilterDevice : public o2::framework::Task { o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = (0x1 << sector); - output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), Lifetime::Timeframe, header}, digits); + output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), header}, digits); } }; diff --git a/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx b/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx index c7758d9f2bb4e..c42a93f7a7f04 100644 --- a/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx +++ b/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx @@ -110,7 +110,7 @@ o2::framework::DataProcessorSpec getLinkZSToDigitsSpec(int channel, const std::s header.activeSectors = processAttributes->activeSectors; // digit for now are transported per sector, not per lane // pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(channel), Lifetime::Timeframe, header}, - pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(sector), Lifetime::Timeframe, header}, + pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(sector), header}, const_cast&>(digits)); }; diff --git 
a/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx b/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx index d4291552ec151..2e60ba0bc72f6 100644 --- a/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx +++ b/Detectors/TPC/workflow/src/MIPTrackFilterSpec.cxx @@ -141,7 +141,7 @@ void MIPTrackFilterDevice::run(ProcessingContext& pc) mMIPTracks.clear(); } -void MIPTrackFilterDevice::sendOutput(DataAllocator& output) { output.snapshot(Output{header::gDataOriginTPC, "MIPS", 0, Lifetime::Timeframe}, mMIPTracks); } +void MIPTrackFilterDevice::sendOutput(DataAllocator& output) { output.snapshot(Output{header::gDataOriginTPC, "MIPS", 0}, mMIPTracks); } void MIPTrackFilterDevice::endOfStream(EndOfStreamContext& eos) { diff --git a/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx b/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx index 5c465be53b8c8..658a60fb75fbf 100644 --- a/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx +++ b/Detectors/TPC/workflow/src/RawToDigitsSpec.cxx @@ -226,10 +226,10 @@ class TPCDigitDumpDevice : public o2::framework::Task o2::tpc::TPCSectorHeader header{isector}; header.activeSectors = mActiveSectors; // digit for now are transported per sector, not per lane - output.snapshot(Output{"TPC", "DIGITS", static_cast(isector), Lifetime::Timeframe, header}, + output.snapshot(Output{"TPC", "DIGITS", static_cast(isector), header}, mDigitDump.getDigits(isector)); if (mSendCEdigits) { - output.snapshot(Output{"TPC", "CEDIGITS", static_cast(isector), Lifetime::Timeframe, header}, + output.snapshot(Output{"TPC", "CEDIGITS", static_cast(isector), header}, ceDigits[isector]); } } diff --git a/Detectors/TPC/workflow/src/SACProcessorSpec.cxx b/Detectors/TPC/workflow/src/SACProcessorSpec.cxx index 226bd8a410dc9..e69533a0bb6d3 100644 --- a/Detectors/TPC/workflow/src/SACProcessorSpec.cxx +++ b/Detectors/TPC/workflow/src/SACProcessorSpec.cxx @@ -129,8 +129,8 @@ class SACProcessorDevice : public Task void sendData(DataAllocator& output) { - output.snapshot(Output{"TPC", "REFTIMESAC", 0, Lifetime::Timeframe}, mDecoder.getDecodedData().referenceTime); - output.snapshot(Output{"TPC", "DECODEDSAC", 0, Lifetime::Timeframe}, mDecoder.getDecodedData().getGoodData()); + output.snapshot(Output{"TPC", "REFTIMESAC", 0}, mDecoder.getDecodedData().referenceTime); + output.snapshot(Output{"TPC", "DECODEDSAC", 0}, mDecoder.getDecodedData().getGoodData()); mDecoder.clearDecodedData(); } diff --git a/Detectors/TPC/workflow/src/ZSSpec.cxx b/Detectors/TPC/workflow/src/ZSSpec.cxx index c73d2ae406ebd..00c78ba463507 100644 --- a/Detectors/TPC/workflow/src/ZSSpec.cxx +++ b/Detectors/TPC/workflow/src/ZSSpec.cxx @@ -133,8 +133,8 @@ DataProcessorSpec getZSEncoderSpec(std::vector const& tpcSectors, bool outR } o2::tpc::TPCSectorHeader sh{0}; gsl::span outp(&page[0], offset); - pc.outputs().snapshot(Output{gDataOriginTPC, "TPCZS", 0, Lifetime::Timeframe, sh}, outp); - pc.outputs().snapshot(Output{gDataOriginTPC, "ZSSIZES", 0, Lifetime::Timeframe, sh}, sizes); + pc.outputs().snapshot(Output{gDataOriginTPC, "TPCZS", 0, sh}, outp); + pc.outputs().snapshot(Output{gDataOriginTPC, "ZSSIZES", 0, sh}, sizes); if (outRaw) { // ===| set up raw writer |=================================================== @@ -301,7 +301,7 @@ DataProcessorSpec getZStoDigitsSpec(std::vector const& tpcSectors) for (int i = 0; i < NSectors; i++) { LOG(info) << "digits in sector " << i << " : " << outDigits[i].size(); o2::tpc::TPCSectorHeader sh{i}; - pc.outputs().snapshot(Output{gDataOriginTPC, "DIGITS", (unsigned int)i, Lifetime::Timeframe, sh}, 
outDigits[i]); + pc.outputs().snapshot(Output{gDataOriginTPC, "DIGITS", (unsigned int)i, sh}, outDigits[i]); } }; return processingFct; diff --git a/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx b/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx index ed4be20a22569..e8255f98eff6c 100644 --- a/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx +++ b/Detectors/TPC/workflow/test/test_ft_EPN_Aggregator.cxx @@ -243,7 +243,7 @@ DataProcessorSpec generateIDCsCRU(int lane, const unsigned int maxTFs, const std } } } - ctx.outputs().adoptContainer(Output{gDataOriginTPC, TPCIntegrateIDCDevice::getDataDescription(TPCIntegrateIDCDevice::IDCFormat::Sim), o2::header::DataHeader::SubSpecificationType{icru << 7}, Lifetime::Timeframe}, std::move(idcs)); + ctx.outputs().adoptContainer(Output{gDataOriginTPC, TPCIntegrateIDCDevice::getDataDescription(TPCIntegrateIDCDevice::IDCFormat::Sim), o2::header::DataHeader::SubSpecificationType{icru << 7}}, std::move(idcs)); } if (delay) { diff --git a/Detectors/TRD/reconstruction/src/EventRecord.cxx b/Detectors/TRD/reconstruction/src/EventRecord.cxx index 147b052a8ca3f..0e1a134e45b8f 100644 --- a/Detectors/TRD/reconstruction/src/EventRecord.cxx +++ b/Detectors/TRD/reconstruction/src/EventRecord.cxx @@ -77,16 +77,16 @@ void EventRecordContainer::sendData(o2::framework::ProcessingContext& pc, bool g counters.push_back(event.getCounters()); } - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "DIGITS", 0, o2::framework::Lifetime::Timeframe}, digits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRACKLETS", 0, o2::framework::Lifetime::Timeframe}, tracklets); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0, o2::framework::Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "DIGITS", 0}, digits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRACKLETS", 0}, tracklets); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0}, triggers); if (generatestats) { accumulateStats(); o2::base::TFIDInfoHelper::fillTFIDInfo(pc, mTFStats.mTFIDInfo); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "RAWSTATS", 0, o2::framework::Lifetime::Timeframe}, mTFStats); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "RAWSTATS", 0}, mTFStats); } if (sendLinkStats) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "LINKSTATS", 0, o2::framework::Lifetime::Timeframe}, counters); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginTRD, "LINKSTATS", 0}, counters); } std::chrono::duration dataReadTime = std::chrono::high_resolution_clock::now() - dataReadStart; diff --git a/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h b/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h index 26be31d51c084..8f50c1af91183 100644 --- a/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h +++ b/Detectors/TRD/workflow/include/TRDWorkflow/KrClustererSpec.h @@ -67,8 +67,8 @@ void TRDKrClustererDevice::run(ProcessingContext& pc) LOGP(info, "Found {} Kr clusters in {} input trigger records. 
Timing: CPU: {}, Real: {}", mKrClFinder.getKrClusters().size(), triggerRecords.size(), timer.CpuTime(), timer.RealTime()); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "KRCLUSTER", 0, Lifetime::Timeframe}, mKrClFinder.getKrClusters()); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGKRCLS", 0, Lifetime::Timeframe}, mKrClFinder.getKrTrigRecs()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "KRCLUSTER", 0}, mKrClFinder.getKrClusters()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGKRCLS", 0}, mKrClFinder.getKrTrigRecs()); } void TRDKrClustererDevice::endOfStream(EndOfStreamContext& ec) diff --git a/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h b/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h index 375b36c54af17..a3c5b28aa8b63 100644 --- a/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h +++ b/Detectors/TRD/workflow/include/TRDWorkflow/TRDGlobalTrackingQCSpec.h @@ -63,7 +63,7 @@ class TRDGlobalTrackingQC : public Task mQC.reset(); mQC.setInput(recoData); mQC.run(); - pc.outputs().snapshot(Output{"TRD", "TRACKINGQC", 0, Lifetime::Timeframe}, mQC.getTrackQC()); + pc.outputs().snapshot(Output{"TRD", "TRACKINGQC", 0}, mQC.getTrackQC()); } void endOfStream(framework::EndOfStreamContext& ec) final {} void finaliseCCDB(framework::ConcreteDataMatcher& matcher, void* obj) final diff --git a/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h b/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h index 6ba185ee28ccb..a1e35f3a3482e 100644 --- a/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h +++ b/Detectors/TRD/workflow/include/TRDWorkflow/TRDPulseHeightSpec.h @@ -52,7 +52,7 @@ class PuseHeightDevice : public o2::framework::Task } if (mRunStopRequested) { std::vector mPHValues{}; // the calibration expects data at every TF, so inject dummy - pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0, Lifetime::Timeframe}, mPHValues); + pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0}, mPHValues); return; } RecoContainer recoData; @@ -61,7 +61,7 @@ class PuseHeightDevice : public o2::framework::Task mPulseHeight->setInput(recoData, &digits); mPulseHeight->reset(); mPulseHeight->process(); - pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0, Lifetime::Timeframe}, mPulseHeight->getPHData()); + pc.outputs().snapshot(Output{"TRD", "PULSEHEIGHT", 0}, mPulseHeight->getPHData()); if (pc.transitionState() == TransitionHandlingState::Requested) { LOG(info) << "Run stop requested, finalizing"; mRunStopRequested = true; diff --git a/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx index d3a565b4c9804..cb76bb8bbad69 100644 --- a/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDCalibReaderSpec.cxx @@ -53,7 +53,7 @@ void TRDCalibReader::run(ProcessingContext& pc) assert(currEntry < mTree->GetEntries()); // this should not happen mTree->GetEntry(currEntry); LOG(info) << "Pushing angular residual histograms filled with " << mAngResids.getNEntries() << " entries at tree entry " << currEntry; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0, Lifetime::Timeframe}, mAngResids); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0}, mAngResids); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx 
b/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx index 4ca6fd46e4492..92d001bc5120a 100644 --- a/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDDigitReaderSpec.cxx @@ -58,13 +58,13 @@ void TRDDigitReaderSpec::run(ProcessingContext& pc) assert(currEntry < mTreeDigits->GetEntries()); // this should not happen mTreeDigits->GetEntry(currEntry); LOGP(info, "Pushing {} digits for tree entry {}", mDigits.size(), currEntry); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "DIGITS", mSubSpec, Lifetime::Timeframe}, mDigits); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "DIGITS", mSubSpec}, mDigits); if (mUseTriggerRecords) { LOGP(info, "Pushing {} trigger records for tree entry {}", mTriggerRecords.size(), currEntry); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", mSubSpec, Lifetime::Timeframe}, mTriggerRecords); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", mSubSpec}, mTriggerRecords); } if (mUseMC) { - auto& sharedlabels = pc.outputs().make>(Output{o2::header::gDataOriginTRD, "LABELS", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{o2::header::gDataOriginTRD, "LABELS", 0}); mLabels->copyandflatten(sharedlabels); } if (mTreeDigits->GetReadEntry() + 1 >= mTreeDigits->GetEntries()) { diff --git a/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx index 235c58c2ad302..7f873beb2ece1 100644 --- a/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDPHReaderSpec.cxx @@ -54,7 +54,7 @@ void TRDPHReader::run(ProcessingContext& pc) mTree->GetEntry(currEntry); LOG(info) << "Pushing vector of PH values filled with " << mPHValues.size() << " entries at tree entry " << currEntry; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "PULSEHEIGHT", 0, Lifetime::Timeframe}, mPHValues); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "PULSEHEIGHT", 0}, mPHValues); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx index 20a7b5d5ac234..cd9702a3d2385 100644 --- a/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDTrackReaderSpec.cxx @@ -50,18 +50,18 @@ void TRDTrackReader::run(ProcessingContext& pc) if (mMode == Mode::TPCTRD) { uint32_t ss = o2::globaltracking::getSubSpec(mSubSpecStrict ? 
o2::globaltracking::MatchingType::Strict : o2::globaltracking::MatchingType::Standard); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss, Lifetime::Timeframe}, mTrigRec); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss}, mTracks); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss}, mTrigRec); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss, Lifetime::Timeframe}, mLabelsMatch); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss, Lifetime::Timeframe}, mLabelsTrd); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss}, mLabelsMatch); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss}, mLabelsTrd); } } else if (mMode == Mode::ITSTPCTRD) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0, Lifetime::Timeframe}, mTrigRec); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0}, mTracks); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0}, mTrigRec); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0, Lifetime::Timeframe}, mLabelsMatch); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0, Lifetime::Timeframe}, mLabelsTrd); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0}, mLabelsMatch); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0}, mLabelsTrd); } } diff --git a/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx b/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx index c588da696e321..864cc3d665af5 100644 --- a/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx +++ b/Detectors/TRD/workflow/io/src/TRDTrackletReaderSpec.cxx @@ -72,20 +72,20 @@ void TRDTrackletReader::run(ProcessingContext& pc) mTreeTrklt->GetEntry(currEntry); LOG(info) << "Pushing " << mTriggerRecords.size() << " TRD trigger records at entry " << currEntry; LOG(info) << "Pushing " << mTracklets.size() << " uncalibrated TRD tracklets for these trigger records"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRACKLETS", 0, Lifetime::Timeframe}, mTracklets); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRACKLETS", 0}, mTracklets); if (mUseTrackletTransform) { assert(mTreeTrklt->GetEntries() == mTreeCTrklt->GetEntries()); mTreeCTrklt->GetEntry(currEntry); LOG(info) << "Pushing " << mTrackletsCal.size() << " calibrated TRD tracklets for these trigger records"; LOG(info) << "Pushing " << mTrigRecMask.size() << " flags for the given TRD trigger records"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "CTRACKLETS", 0, Lifetime::Timeframe}, mTrackletsCal); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRIGRECMASK", 0, Lifetime::Timeframe}, mTrigRecMask); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "CTRACKLETS", 0}, mTrackletsCal); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRIGRECMASK", 0}, mTrigRecMask); } - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0, Lifetime::Timeframe}, mTriggerRecords); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRKTRGRD", 0}, mTriggerRecords); if (mUseMC) { 
LOG(info) << "Pushing " << mLabels.getNElements() << " TRD tracklet labels"; - pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0}, mLabels); } if (mTreeTrklt->GetReadEntry() + 1 >= mTreeTrklt->GetEntries()) { diff --git a/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx b/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx index 07243d2b316cb..d345dd74141ed 100644 --- a/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/TRD/workflow/src/EntropyEncoderSpec.cxx @@ -76,7 +76,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) if (mSelIR) { mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"TRD", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"TRD", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, triggers, tracklets, digits); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx b/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx index c0a4d85acd1bc..1cd145a6c2e36 100644 --- a/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDDigitizerSpec.cxx @@ -177,17 +177,17 @@ class TRDDPLDigitizerTask : public o2::base::BaseDPLDigitizer LOGF(info, "TRD digitization timing: Cpu: %.3e Real: %.3e s", timer.CpuTime(), timer.RealTime()); LOG(info) << "TRD: Sending " << digitsAccum.size() << " digits"; - pc.outputs().snapshot(Output{"TRD", "DIGITS", 1, Lifetime::Timeframe}, digitsAccum); + pc.outputs().snapshot(Output{"TRD", "DIGITS", 1}, digitsAccum); if (mctruth) { LOG(info) << "TRD: Sending " << labelsAccum.getNElements() << " labels"; // we are flattening the labels and write to managed shared memory container for further communication - auto& sharedlabels = pc.outputs().make>(Output{"TRD", "LABELS", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"TRD", "LABELS", 0}); labelsAccum.flatten_to(sharedlabels); } LOG(info) << "TRD: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"TRD", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"TRD", "ROMode", 0}, mROMode); LOG(info) << "TRD: Sending trigger records"; - pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 1, Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 1}, triggers); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); finished = true; diff --git a/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx b/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx index 1be328b4bcbe9..abe3a6f45ec70 100644 --- a/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDGlobalTrackingSpec.cxx @@ -496,19 +496,19 @@ void TRDGlobalTracking::run(ProcessingContext& pc) uint32_t ss = o2::globaltracking::getSubSpec(mStrict ? 
o2::globaltracking::MatchingType::Strict : o2::globaltracking::MatchingType::Standard); if (GTrackID::includesSource(GTrackID::Source::ITSTPC, mTrkMask)) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0, Lifetime::Timeframe}, tracksOutITSTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0, Lifetime::Timeframe}, trackTrigRecITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_ITSTPC", 0}, tracksOutITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_ITSTPC", 0}, trackTrigRecITSTPC); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0, Lifetime::Timeframe}, matchLabelsITSTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0, Lifetime::Timeframe}, trdLabelsITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC", 0}, matchLabelsITSTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_ITSTPC_TRD", 0}, trdLabelsITSTPC); } } if (GTrackID::includesSource(GTrackID::Source::TPC, mTrkMask)) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss, Lifetime::Timeframe}, tracksOutTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss, Lifetime::Timeframe}, trackTrigRecTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MATCH_TPC", ss}, tracksOutTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "TRGREC_TPC", ss}, trackTrigRecTPC); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss, Lifetime::Timeframe}, matchLabelsTPC); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss, Lifetime::Timeframe}, trdLabelsTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC", ss}, matchLabelsTPC); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "MCLB_TPC_TRD", ss}, trdLabelsTPC); } } diff --git a/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx b/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx index bb6d552cf3fee..0ac920c6e49c6 100644 --- a/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDTrackletTransformerSpec.cxx @@ -113,8 +113,8 @@ void TRDTrackletTransformerSpec::run(o2::framework::ProcessingContext& pc) LOGF(info, "Found %lu tracklets in %lu trigger records. Applied filter for ITS IR frames: %i. 
Transformed %i tracklets.", tracklets.size(), trigRecs.size(), mTrigRecFilterActive, nTrackletsTransformed); - pc.outputs().snapshot(Output{"TRD", "CTRACKLETS", 0, Lifetime::Timeframe}, calibratedTracklets); - pc.outputs().snapshot(Output{"TRD", "TRIGRECMASK", 0, Lifetime::Timeframe}, trigRecBitfield); + pc.outputs().snapshot(Output{"TRD", "CTRACKLETS", 0}, calibratedTracklets); + pc.outputs().snapshot(Output{"TRD", "TRIGRECMASK", 0}, trigRecBitfield); } void TRDTrackletTransformerSpec::updateTimeDependentParams(ProcessingContext& pc) diff --git a/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx b/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx index 4c6ac24a0dc43..20d0b719b9bf7 100644 --- a/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx +++ b/Detectors/TRD/workflow/src/TRDTrapSimulatorSpec.cxx @@ -328,11 +328,11 @@ void TRDDPLTrapSimulatorTask::run(o2::framework::ProcessingContext& pc) LOG(info) << "Digit Sorting took: " << std::chrono::duration_cast(sortTime).count() << "ms"; LOG(info) << "Processing time for parallel region: " << std::chrono::duration_cast(parallelTime).count() << "ms"; - pc.outputs().snapshot(Output{"TRD", "TRACKLETS", 0, Lifetime::Timeframe}, tracklets); - pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 0, Lifetime::Timeframe}, triggerRecords); - pc.outputs().snapshot(Output{"TRD", "DIGITS", 0, Lifetime::Timeframe}, digitsOut); + pc.outputs().snapshot(Output{"TRD", "TRACKLETS", 0}, tracklets); + pc.outputs().snapshot(Output{"TRD", "TRKTRGRD", 0}, triggerRecords); + pc.outputs().snapshot(Output{"TRD", "DIGITS", 0}, digitsOut); if (mUseMC) { - pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0, Lifetime::Timeframe}, lblTracklets); + pc.outputs().snapshot(Output{"TRD", "TRKLABELS", 0}, lblTracklets); } LOG(debug) << "TRD Trap Simulator Device exiting"; diff --git a/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx b/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx index 6011e194685ad..4418b9af61300 100644 --- a/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx +++ b/Detectors/TRD/workflow/src/TrackBasedCalibSpec.cxx @@ -75,12 +75,12 @@ void TRDTrackBasedCalibDevice::run(ProcessingContext& pc) if (mDoVdExBCalib) { mCalibrator.calculateAngResHistos(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0, Lifetime::Timeframe}, mCalibrator.getAngResHistos()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "ANGRESHISTS", 0}, mCalibrator.getAngResHistos()); } if (mDoGainCalib) { mCalibrator.calculateGainCalibObjs(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "GAINCALIBHISTS", 0, Lifetime::Timeframe}, mCalibrator.getGainCalibHistos()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTRD, "GAINCALIBHISTS", 0}, mCalibrator.getGainCalibHistos()); } mCalibrator.reset(); diff --git a/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx index e6c9b5bc4b910..5512964eec950 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/ClustererSpec.cxx @@ -109,17 +109,17 @@ void ClustererDPL::run(ProcessingContext& pc) clusterLabels = std::make_unique>(); } mClusterer->process(mNThreads, reader, &clusCompVec, mPatterns ? 
&clusPattVec : nullptr, &clusROFVec, clusterLabels.get()); - pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0, Lifetime::Timeframe}, clusCompVec); - pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0, Lifetime::Timeframe}, clusROFVec); - pc.outputs().snapshot(Output{orig, "PATTERNS", 0, Lifetime::Timeframe}, clusPattVec); + pc.outputs().snapshot(Output{orig, "COMPCLUSTERS", 0}, clusCompVec); + pc.outputs().snapshot(Output{orig, "CLUSTERSROF", 0}, clusROFVec); + pc.outputs().snapshot(Output{orig, "PATTERNS", 0}, clusPattVec); if (mUseMC) { - pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, *clusterLabels.get()); // at the moment requires snapshot + pc.outputs().snapshot(Output{orig, "CLUSTERSMCTR", 0}, *clusterLabels.get()); // at the moment requires snapshot std::vector clusterMC2ROframes(mc2rofs.size()); for (int i = mc2rofs.size(); i--;) { clusterMC2ROframes[i] = mc2rofs[i]; // Simply, replicate it from digits ? } - pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, clusterMC2ROframes); + pc.outputs().snapshot(Output{orig, "CLUSTERSMC2ROF", 0}, clusterMC2ROframes); } // TODO: in principle, after masking "overflow" pixels the MC2ROFRecord maxROF supposed to change, nominally to minROF diff --git a/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx index a679beed2c530..c2380be77f956 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/DigitReaderSpec.cxx @@ -71,17 +71,17 @@ void DigitReader::run(ProcessingContext& pc) // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name. // To be fixed once the names-definition class is ready - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mDigROFRec); - pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}, mDigits); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mDigROFRec); + pc.outputs().snapshot(Output{mOrigin, "DIGITS", 0}, mDigits); if (mUseCalib) { - pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0, Lifetime::Timeframe}, mCalib); + pc.outputs().snapshot(Output{mOrigin, "GBTCALIB", 0}, mCalib); } if (mUseMC) { - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); plabels->copyandflatten(sharedlabels); delete plabels; - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mDigMC2ROFs); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mDigMC2ROFs); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx index 3d048bf981ea3..54d8951b5a0d0 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/TrackReaderSpec.cxx @@ -44,14 +44,14 @@ void TrackReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "Pushing " << mTracks.size() << " track in " << mROFRec.size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{mOrigin, "IT3TrackROF", 0, Lifetime::Timeframe}, mROFRec); - pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0, Lifetime::Timeframe}, mTracks); - pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0, Lifetime::Timeframe}, 
mClusInd); - pc.outputs().snapshot(Output{"IT3", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{mOrigin, "IT3TrackROF", 0}, mROFRec); + pc.outputs().snapshot(Output{mOrigin, "TRACKS", 0}, mTracks); + pc.outputs().snapshot(Output{mOrigin, "TRACKCLSID", 0}, mClusInd); + pc.outputs().snapshot(Output{"IT3", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0}, mVerticesROFRec); if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0, Lifetime::Timeframe}, mMCTruth); - pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0, Lifetime::Timeframe}, mMCVertTruth); + pc.outputs().snapshot(Output{mOrigin, "TRACKSMCTR", 0}, mMCTruth); + pc.outputs().snapshot(Output{mOrigin, "VERTICESMCTR", 0}, mMCVertTruth); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { diff --git a/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx index e2078046d1966..e4240401928b6 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/TrackerSpec.cxx @@ -171,9 +171,9 @@ void TrackerDPL::run(ProcessingContext& pc) // the output vector however is created directly inside the message memory thus avoiding copy by // snapshot auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"IT3", "IT3TrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); + auto& rofs = pc.outputs().make>(Output{"IT3", "IT3TrackROF", 0}, rofsinput.begin(), rofsinput.end()); - auto& irFrames = pc.outputs().make>(Output{"IT3", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& irFrames = pc.outputs().make>(Output{"IT3", "IRFRAMES", 0}); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB int nBCPerTF = alpParams.roFrameLengthInBC; @@ -189,15 +189,15 @@ void TrackerDPL::run(ProcessingContext& pc) LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"IT3", "TRACKCLSID", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"IT3", "TRACKCLSID", 0}); std::vector trackLabels; std::vector verticesLabels; - auto& allTracks = pc.outputs().make>(Output{"IT3", "TRACKS", 0, Lifetime::Timeframe}); + auto& allTracks = pc.outputs().make>(Output{"IT3", "TRACKS", 0}); std::vector allTrackLabels; std::vector allVerticesLabels; - auto& vertROFvec = pc.outputs().make>(Output{"IT3", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"IT3", "VERTICES", 0, Lifetime::Timeframe}); + auto& vertROFvec = pc.outputs().make>(Output{"IT3", "VERTICESROF", 0}); + auto& vertices = pc.outputs().make>(Output{"IT3", "VERTICES", 0}); TimeFrame* timeFrame = mChainITS->GetITSTimeframe(); timeFrame->resizeVectors(mNLayers); @@ -314,9 +314,9 @@ void TrackerDPL::run(ProcessingContext& pc) LOGP(info, "ITS3Tracker pushed {} track labels", allTrackLabels.size()); LOGP(info, "ITS3Tracker pushed {} vertex labels", allVerticesLabels.size()); - pc.outputs().snapshot(Output{"IT3", "TRACKSMCTR", 0, Lifetime::Timeframe}, allTrackLabels); - pc.outputs().snapshot(Output{"IT3", "VERTICESMCTR", 0, Lifetime::Timeframe}, allVerticesLabels); - pc.outputs().snapshot(Output{"IT3", "IT3TrackMC2ROF", 0, Lifetime::Timeframe}, mc2rofs); + pc.outputs().snapshot(Output{"IT3", "TRACKSMCTR", 0}, 
allTrackLabels); + pc.outputs().snapshot(Output{"IT3", "VERTICESMCTR", 0}, allVerticesLabels); + pc.outputs().snapshot(Output{"IT3", "IT3TrackMC2ROF", 0}, mc2rofs); } } mTimer.Stop(); diff --git a/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx index 015aa1a78977b..25fd3e530161a 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/VertexReaderSpec.cxx @@ -41,8 +41,8 @@ void VertexReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "Pushing " << mVerticesPtr->size() << " vertices in " << mVerticesROFRecPtr->size() << " ROFs at entry " << ent; - pc.outputs().snapshot(Output{"IT3", "VERTICES", 0, Lifetime::Timeframe}, mVertices); - pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0, Lifetime::Timeframe}, mVerticesROFRec); + pc.outputs().snapshot(Output{"IT3", "VERTICES", 0}, mVertices); + pc.outputs().snapshot(Output{"IT3", "VERTICESROF", 0}, mVerticesROFRec); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/Vertexing/src/SVertexer.cxx b/Detectors/Vertexing/src/SVertexer.cxx index de167af74526e..120d49d44b4e5 100644 --- a/Detectors/Vertexing/src/SVertexer.cxx +++ b/Detectors/Vertexing/src/SVertexer.cxx @@ -117,15 +117,15 @@ void SVertexer::produceOutput(o2::framework::ProcessingContext& pc) std::sort(nbodySortID.begin(), nbodySortID.end(), [](const vid& a, const vid& b) { return a.vtxID < b.vtxID; }); // dpl output - auto& v0sIdx = pc.outputs().make>(o2f::Output{"GLO", "V0S_IDX", 0, o2f::Lifetime::Timeframe}); - auto& cascsIdx = pc.outputs().make>(o2f::Output{"GLO", "CASCS_IDX", 0, o2f::Lifetime::Timeframe}); - auto& body3Idx = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY_IDX", 0, o2f::Lifetime::Timeframe}); - auto& fullv0s = pc.outputs().make>(o2f::Output{"GLO", "V0S", 0, o2f::Lifetime::Timeframe}); - auto& fullcascs = pc.outputs().make>(o2f::Output{"GLO", "CASCS", 0, o2f::Lifetime::Timeframe}); - auto& full3body = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY", 0, o2f::Lifetime::Timeframe}); - auto& v0Refs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_V0REFS", 0, o2f::Lifetime::Timeframe}); - auto& cascRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_CASCREFS", 0, o2f::Lifetime::Timeframe}); - auto& vtx3bodyRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_3BODYREFS", 0, o2f::Lifetime::Timeframe}); + auto& v0sIdx = pc.outputs().make>(o2f::Output{"GLO", "V0S_IDX", 0}); + auto& cascsIdx = pc.outputs().make>(o2f::Output{"GLO", "CASCS_IDX", 0}); + auto& body3Idx = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY_IDX", 0}); + auto& fullv0s = pc.outputs().make>(o2f::Output{"GLO", "V0S", 0}); + auto& fullcascs = pc.outputs().make>(o2f::Output{"GLO", "CASCS", 0}); + auto& full3body = pc.outputs().make>(o2f::Output{"GLO", "DECAYS3BODY", 0}); + auto& v0Refs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_V0REFS", 0}); + auto& cascRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_CASCREFS", 0}); + auto& vtx3bodyRefs = pc.outputs().make>(o2f::Output{"GLO", "PVTX_3BODYREFS", 0}); // sorted V0s v0sIdx.reserve(mNV0s); @@ -214,8 +214,8 @@ void SVertexer::produceOutput(o2::framework::ProcessingContext& pc) } } - auto& strTracksOut = pc.outputs().make>(o2f::Output{"GLO", "STRANGETRACKS", 0, o2f::Lifetime::Timeframe}); - auto& strClustOut = pc.outputs().make>(o2f::Output{"GLO", "CLUSUPDATES", 0, o2f::Lifetime::Timeframe}); + auto& strTracksOut = pc.outputs().make>(o2f::Output{"GLO", 
"STRANGETRACKS", 0}); + auto& strClustOut = pc.outputs().make>(o2f::Output{"GLO", "CLUSUPDATES", 0}); o2::pmr::vector mcLabsOut; strTracksOut.resize(mNStrangeTracks); strClustOut.resize(mNStrangeTracks); @@ -239,7 +239,7 @@ void SVertexer::produceOutput(o2::framework::ProcessingContext& pc) } if (mStrTracker->getMCTruthOn()) { - auto& strTrMCLableOut = pc.outputs().make>(o2f::Output{"GLO", "STRANGETRACKS_MC", 0, o2f::Lifetime::Timeframe}); + auto& strTrMCLableOut = pc.outputs().make>(o2f::Output{"GLO", "STRANGETRACKS_MC", 0}); strTrMCLableOut.swap(mcLabsOut); } } diff --git a/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx index 80b23c696b5a4..a1a83aada50f0 100644 --- a/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/InterCalibEPNSpec.cxx @@ -101,16 +101,16 @@ void InterCalibEPNSpec::run(ProcessingContext& pc) mWorker.process(bcrec, energy, tdc, info); // Send intermediate calibration data and histograms - o2::framework::Output output("ZDC", "INTERCALIBDATA", 0, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "INTERCALIBDATA", 0); pc.outputs().snapshot(output, mWorker.mData); for (int ih = 0; ih < (2 * InterCalibData::NH); ih++) { if (mWorker.mH[ih] != nullptr) { - o2::framework::Output output("ZDC", "INTER_1DH", ih, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "INTER_1DH", ih); pc.outputs().snapshot(output, mWorker.mH[ih]->getBase()); } } for (int ih = 0; ih < InterCalibData::NH; ih++) { - o2::framework::Output output("ZDC", "INTER_2DH", ih, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "INTER_2DH", ih); pc.outputs().snapshot(output, mWorker.mC[ih]->getBase()); } } diff --git a/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx index 8b90ee3b0af36..6125baaa197f8 100644 --- a/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/TDCCalibEPNSpec.cxx @@ -114,10 +114,10 @@ void TDCCalibEPNSpec::run(ProcessingContext& pc) if (mVerbosity > DbgMedium && mModTF > 0) { LOG(info) << "Send intermediate calibration data mProcessed=" << mProcessed << " >= mModTF=" << mModTF; } - o2::framework::Output output("ZDC", "TDCCALIBDATA", 0, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "TDCCALIBDATA", 0); pc.outputs().snapshot(output, mWorker.mData); for (int ih = 0; ih < TDCCalibData::NTDC; ih++) { - o2::framework::Output output("ZDC", "TDC_1DH", ih, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "TDC_1DH", ih); pc.outputs().snapshot(output, mWorker.mTDC[ih]->getBase()); } mWorker.clear(); diff --git a/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx index 62ee862905524..72f606cc4f6ed 100644 --- a/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/WaveformCalibEPNSpec.cxx @@ -107,7 +107,7 @@ void WaveformCalibEPNSpec::run(ProcessingContext& pc) mWorker.process(bcrec, energy, tdc, info, wave); // Send intermediate calibration data - o2::framework::Output output("ZDC", "WAVECALIBDATA", 0, Lifetime::Timeframe); + o2::framework::Output output("ZDC", "WAVECALIBDATA", 0); pc.outputs().snapshot(output, mWorker.mData); } diff --git a/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h b/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h index b27f7e34353c0..70f7ce30647f5 100644 --- a/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h +++ b/Detectors/ZDC/raw/include/ZDCRaw/RawReaderZDC.h @@ -95,9 +95,9 @@ class RawReaderZDC void 
makeSnapshot(o2::framework::ProcessingContext& pc) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSPD", 0, o2::framework::Lifetime::Timeframe}, mOrbitData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginZDC, "DIGITSPD", 0}, mOrbitData); } }; } // namespace zdc diff --git a/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx b/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx index 59b0e34d16ecd..e952111e0c6c3 100644 --- a/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx +++ b/Detectors/ZDC/workflow/src/DigitReaderSpec.cxx @@ -69,11 +69,11 @@ void DigitReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "ZDCDigitReader pushed " << zdcOrbitData.size() << " orbits with " << zdcBCData.size() << " bcs and " << zdcChData.size() << " digits"; - pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0, Lifetime::Timeframe}, zdcOrbitData); - pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0, Lifetime::Timeframe}, zdcBCData); - pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0, Lifetime::Timeframe}, zdcChData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0}, zdcOrbitData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0}, zdcBCData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0}, zdcChData); if (mUseMC) { - pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0}, labels); } uint64_t nextEntry = mTree->GetReadEntry() + 1; if (nextEntry >= mTree->GetEntries() || (mLastEntry >= 0 && nextEntry > mLastEntry)) { diff --git a/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx b/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx index d504d33483081..e9b63c5b49d5b 100644 --- a/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx +++ b/Detectors/ZDC/workflow/src/DigitRecoSpec.cxx @@ -264,11 +264,11 @@ void DigitRecoSpec::run(ProcessingContext& pc) } // TODO: rate information for all channels // TODO: summary of reconstruction to be collected by DQM? 
- pc.outputs().snapshot(Output{"ZDC", "BCREC", 0, Lifetime::Timeframe}, recEvent.mRecBC); - pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0, Lifetime::Timeframe}, recEvent.mEnergy); - pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0, Lifetime::Timeframe}, recEvent.mTDCData); - pc.outputs().snapshot(Output{"ZDC", "INFO", 0, Lifetime::Timeframe}, recEvent.mInfo); - pc.outputs().snapshot(Output{"ZDC", "WAVE", 0, Lifetime::Timeframe}, recEvent.mWaveform); + pc.outputs().snapshot(Output{"ZDC", "BCREC", 0}, recEvent.mRecBC); + pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0}, recEvent.mEnergy); + pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0}, recEvent.mTDCData); + pc.outputs().snapshot(Output{"ZDC", "INFO", 0}, recEvent.mInfo); + pc.outputs().snapshot(Output{"ZDC", "WAVE", 0}, recEvent.mWaveform); mTimer.Stop(); } diff --git a/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx b/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx index 7c30a0f34d151..abbd821fcb749 100644 --- a/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx +++ b/Detectors/ZDC/workflow/src/EntropyEncoderSpec.cxx @@ -58,7 +58,7 @@ void EntropyEncoderSpec::run(ProcessingContext& pc) mCTFCoder.setSelectedIRFrames(pc.inputs().get>("selIRFrames")); } - auto& buffer = pc.outputs().make>(Output{"ZDC", "CTFDATA", 0, Lifetime::Timeframe}); + auto& buffer = pc.outputs().make>(Output{"ZDC", "CTFDATA", 0}); auto iosize = mCTFCoder.encode(buffer, bcdata, chans, peds); pc.outputs().snapshot({"ctfrep", 0}, iosize); if (mSelIR) { diff --git a/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx b/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx index acb0cef3e29be..18c620e427569 100644 --- a/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx +++ b/Detectors/ZDC/workflow/src/RecEventReaderSpec.cxx @@ -49,10 +49,10 @@ void RecEventReader::run(ProcessingContext& pc) mTree->GetEntry(ent); LOG(info) << "ZDC RecEventReader pushes " << mBCRecData->size() << " events with " << mBCRecData->size() << " energy, " << mZDCTDCData->size() << " TDC and " << mZDCInfo->size() << " info records at entry " << ent; - pc.outputs().snapshot(Output{"ZDC", "BCREC", 0, Lifetime::Timeframe}, *mBCRecData); - pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0, Lifetime::Timeframe}, *mZDCEnergy); - pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0, Lifetime::Timeframe}, *mZDCTDCData); - pc.outputs().snapshot(Output{"ZDC", "INFO", 0, Lifetime::Timeframe}, *mZDCInfo); + pc.outputs().snapshot(Output{"ZDC", "BCREC", 0}, *mBCRecData); + pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0}, *mZDCEnergy); + pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0}, *mZDCTDCData); + pc.outputs().snapshot(Output{"ZDC", "INFO", 0}, *mZDCInfo); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); diff --git a/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx b/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx index 20b9510243be4..ebea51c932c5c 100644 --- a/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx +++ b/Detectors/ZDC/workflow/src/RecoReaderSpec.cxx @@ -64,10 +64,10 @@ void RecoReader::run(ProcessingContext& pc) assert(ent < mTree->GetEntries()); // this should not happen mTree->GetEntry(ent); LOG(info) << "ZDCRecoReader pushed " << RecBC.size() << " b.c. 
" << Energy.size() << " Energies " << TDCData.size() << " TDCs " << Info.size() << " Infos"; - pc.outputs().snapshot(Output{"ZDC", "BCREC", 0, Lifetime::Timeframe}, RecBC); - pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0, Lifetime::Timeframe}, Energy); - pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0, Lifetime::Timeframe}, TDCData); - pc.outputs().snapshot(Output{"ZDC", "INFO", 0, Lifetime::Timeframe}, Info); + pc.outputs().snapshot(Output{"ZDC", "BCREC", 0}, RecBC); + pc.outputs().snapshot(Output{"ZDC", "ENERGY", 0}, Energy); + pc.outputs().snapshot(Output{"ZDC", "TDCDATA", 0}, TDCData); + pc.outputs().snapshot(Output{"ZDC", "INFO", 0}, Info); if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Framework/Core/include/Framework/Output.h b/Framework/Core/include/Framework/Output.h index b7d26e8dfd7ee..9f85d6e8edb92 100644 --- a/Framework/Core/include/Framework/Output.h +++ b/Framework/Core/include/Framework/Output.h @@ -43,6 +43,11 @@ struct Output { { } + Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, header::Stack&& stack) + : origin(o), description(d), subSpec(s), metaHeader(std::move(stack)) + { + } + Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l, header::Stack&& stack) : origin(o), description(d), subSpec(s), lifetime(l), metaHeader(std::move(stack)) diff --git a/GPU/Workflow/src/GPUWorkflowITS.cxx b/GPU/Workflow/src/GPUWorkflowITS.cxx index e82f2348255d7..1785ddd05f39c 100644 --- a/GPU/Workflow/src/GPUWorkflowITS.cxx +++ b/GPU/Workflow/src/GPUWorkflowITS.cxx @@ -134,8 +134,8 @@ int GPURecoWorkflowSpec::runITSTracking(o2::framework::ProcessingContext& pc) auto rofsinput = pc.inputs().get>("ROframes"); - auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0, Lifetime::Timeframe}, rofsinput.begin(), rofsinput.end()); - auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0, Lifetime::Timeframe}); + auto& rofs = pc.outputs().make>(Output{"ITS", "ITSTrackROF", 0}, rofsinput.begin(), rofsinput.end()); + auto& irFrames = pc.outputs().make>(Output{"ITS", "IRFRAMES", 0}); irFrames.reserve(rofs.size()); const auto& alpParams = o2::itsmft::DPLAlpideParam::Instance(); // RS: this should come from CCDB @@ -148,19 +148,19 @@ int GPURecoWorkflowSpec::runITSTracking(o2::framework::ProcessingContext& pc) if (mSpecConfig.processMC) { labels = pc.inputs().get*>("itsmclabels").release(); // get the array as read-only span, a snapshot is sent forward - pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0, Lifetime::Timeframe}, pc.inputs().get>("ITSMC2ROframes")); + pc.outputs().snapshot(Output{"ITS", "ITSTrackMC2ROF", 0}, pc.inputs().get>("ITSMC2ROframes")); LOG(info) << labels->getIndexedSize() << " MC label objects , in " << mc2rofs.size() << " MC events"; } - auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0, Lifetime::Timeframe}); - auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0, Lifetime::Timeframe}); - auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0, Lifetime::Timeframe}); - auto& vertices = pc.outputs().make>(Output{"ITS", "VERTICES", 0, Lifetime::Timeframe}); + auto& allClusIdx = pc.outputs().make>(Output{"ITS", "TRACKCLSID", 0}); + auto& allTracks = pc.outputs().make>(Output{"ITS", "TRACKS", 0}); + auto& vertROFvec = pc.outputs().make>(Output{"ITS", "VERTICESROF", 0}); + auto& vertices = 
pc.outputs().make>(Output{"ITS", "VERTICES", 0}); // MC static pmr::vector dummyMCLabTracks, dummyMCLabVerts; - auto& allTrackLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0, Lifetime::Timeframe}) : dummyMCLabTracks; - auto& allVerticesLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0, Lifetime::Timeframe}) : dummyMCLabVerts; + auto& allTrackLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "TRACKSMCTR", 0}) : dummyMCLabTracks; + auto& allVerticesLabels = mSpecConfig.processMC ? pc.outputs().make>(Output{"ITS", "VERTICESMCTR", 0}) : dummyMCLabVerts; std::uint32_t roFrame = 0; diff --git a/GPU/Workflow/src/GPUWorkflowPipeline.cxx b/GPU/Workflow/src/GPUWorkflowPipeline.cxx index c50f8f67cbd32..0352151b342c6 100644 --- a/GPU/Workflow/src/GPUWorkflowPipeline.cxx +++ b/GPU/Workflow/src/GPUWorkflowPipeline.cxx @@ -177,7 +177,7 @@ int GPURecoWorkflowSpec::handlePipeline(ProcessingContext& pc, GPUTrackingInOutP ptrs.tpcZS = &tpcZS; } if (mSpecConfig.enableDoublePipeline == 2) { - auto prepareBuffer = pc.outputs().make>(Output{gDataOriginGPU, "PIPELINEPREPARE", 0, Lifetime::Timeframe}, 0u); + auto prepareBuffer = pc.outputs().make>(Output{gDataOriginGPU, "PIPELINEPREPARE", 0}, 0u); size_t ptrsTotal = 0; const void* firstPtr = nullptr; diff --git a/GPU/Workflow/src/GPUWorkflowSpec.cxx b/GPU/Workflow/src/GPUWorkflowSpec.cxx index 40b6c13495a66..fb482df5b11c0 100644 --- a/GPU/Workflow/src/GPUWorkflowSpec.cxx +++ b/GPU/Workflow/src/GPUWorkflowSpec.cxx @@ -734,7 +734,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) }; setOutputAllocator("COMPCLUSTERSFLAT", mSpecConfig.outputCompClustersFlat, outputRegions.compressedClusters, std::make_tuple(gDataOriginTPC, (DataDescription) "COMPCLUSTERSFLAT", 0)); - setOutputAllocator("CLUSTERNATIVE", mClusterOutputIds.size() > 0, outputRegions.clustersNative, std::make_tuple(gDataOriginTPC, mSpecConfig.sendClustersPerSector ? (DataDescription) "CLUSTERNATIVETMP" : (DataDescription) "CLUSTERNATIVE", NSectors, Lifetime::Timeframe, clusterOutputSectorHeader), sizeof(o2::tpc::ClusterCountIndex)); + setOutputAllocator("CLUSTERNATIVE", mClusterOutputIds.size() > 0, outputRegions.clustersNative, std::make_tuple(gDataOriginTPC, mSpecConfig.sendClustersPerSector ? 
(DataDescription) "CLUSTERNATIVETMP" : (DataDescription) "CLUSTERNATIVE", NSectors, clusterOutputSectorHeader), sizeof(o2::tpc::ClusterCountIndex)); setOutputAllocator("CLSHAREDMAP", mSpecConfig.outputSharedClusterMap, outputRegions.sharedClusterMap, std::make_tuple(gDataOriginTPC, (DataDescription) "CLSHAREDMAP", 0)); setOutputAllocator("TRACKS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2, std::make_tuple(gDataOriginTPC, (DataDescription) "TRACKS", 0)); setOutputAllocator("CLUSREFS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2ClusRefs, std::make_tuple(gDataOriginTPC, (DataDescription) "CLUSREFS", 0)); @@ -909,7 +909,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) if (mTPCSectorMask & (1ul << i)) { DataHeader::SubSpecificationType subspec = i; clusterOutputSectorHeader.sectorBits = (1ul << i); - char* buffer = pc.outputs().make({gDataOriginTPC, "CLUSTERNATIVE", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, accessIndex.nClustersSector[i] * sizeof(*accessIndex.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)).data(); + char* buffer = pc.outputs().make({gDataOriginTPC, "CLUSTERNATIVE", subspec, {clusterOutputSectorHeader}}, accessIndex.nClustersSector[i] * sizeof(*accessIndex.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)).data(); o2::tpc::ClusterCountIndex* outIndex = reinterpret_cast(buffer); memset(outIndex, 0, sizeof(*outIndex)); for (int j = 0; j < o2::tpc::constants::MAXGLOBALPADROW; j++) { @@ -926,7 +926,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) } ConstMCLabelContainer contflat; cont.flatten_to(contflat); - pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, contflat); + pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, {clusterOutputSectorHeader}}, contflat); } } } @@ -937,7 +937,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) static_assert(sizeof(o2::tpc::ClusterCountIndex) == sizeof(accessIndex.nClusters)); memcpy(outIndex, &accessIndex.nClusters[0][0], sizeof(o2::tpc::ClusterCountIndex)); if (mSpecConfig.processMC && mSpecConfig.caClusterer && accessIndex.clustersMCTruth) { - pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, Lifetime::Timeframe, {clusterOutputSectorHeader}}, clustersMCBuffer.first); + pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, {clusterOutputSectorHeader}}, clustersMCBuffer.first); } } } @@ -952,17 +952,17 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) if (sendQAOutput) { mQA->postprocessExternal(copy1, copy2, copy3, copy4, out, mQATaskMask ? mQATaskMask : -1); } - pc.outputs().snapshot({gDataOriginTPC, "TRACKINGQA", 0, Lifetime::Timeframe}, out); + pc.outputs().snapshot({gDataOriginTPC, "TRACKINGQA", 0}, out); if (sendQAOutput) { mQA->cleanup(); } } if (mSpecConfig.outputErrorQA) { - pc.outputs().snapshot({gDataOriginGPU, "ERRORQA", 0, Lifetime::Timeframe}, mErrorQA); + pc.outputs().snapshot({gDataOriginGPU, "ERRORQA", 0}, mErrorQA); mErrorQA.clear(); // FIXME: This is a race condition once we run multi-threaded! 
} if (mSpecConfig.tpcTriggerHandling && !(mSpecConfig.zsOnTheFly || mSpecConfig.zsDecoder)) { - pc.outputs().make>(Output{gDataOriginTPC, "TRIGGERWORDS", 0, Lifetime::Timeframe}, 0u); + pc.outputs().make>(Output{gDataOriginTPC, "TRIGGERWORDS", 0}, 0u); } mTimer->Stop(); LOG(info) << "GPU Reconstruction time for this TF " << mTimer->CpuTime() - cput << " s (cpu), " << mTimer->RealTime() - realt << " s (wall)"; diff --git a/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx index 57fb11249b312..fc64106e39d79 100644 --- a/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/CPVDigitizerSpec.cxx @@ -171,15 +171,15 @@ void DigitizerSpec::run(framework::ProcessingContext& pc) } LOG(debug) << "Have " << mLabels.getNElements() << " CPV labels "; // here we have all digits and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{"CPV", "DIGITS", 0, Lifetime::Timeframe}, mDigitsOut); - pc.outputs().snapshot(Output{"CPV", "DIGITTRIGREC", 0, Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(Output{"CPV", "DIGITS", 0}, mDigitsOut); + pc.outputs().snapshot(Output{"CPV", "DIGITTRIGREC", 0}, triggers); if (pc.outputs().isAllowed({"CPV", "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{"CPV", "DIGITSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"CPV", "DIGITSMCTR", 0}, mLabels); } // CPV is always a triggered detector const o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::PRESENT; LOG(debug) << "CPV: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"CPV", "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{"CPV", "ROMode", 0}, roMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx index 412e6d6129a76..a72b9e26581eb 100644 --- a/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/CTPDigitizerSpec.cxx @@ -79,9 +79,9 @@ class CTPDPLDigitizerTask : public o2::base::BaseDPLDigitizer auto digits = mDigitizer.process(ginputs); // send out to next stage LOG(info) << "CTP DIGITS being sent."; - pc.outputs().snapshot(Output{"CTP", "DIGITS", 0, Lifetime::Timeframe}, digits); + pc.outputs().snapshot(Output{"CTP", "DIGITS", 0}, digits); LOG(info) << "CTP PRESENT being sent."; - pc.outputs().snapshot(Output{"CTP", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"CTP", "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "CTP Digitization took " << timer.CpuTime() << "s"; } diff --git a/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx index 99150a3cb83fb..901c7e709d265 100644 --- a/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/FDDDigitizerSpec.cxx @@ -120,17 +120,17 @@ class FDDDPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitizer.flush(mDigitsBC, mDigitsCh, mDigitsTrig, labels); // send out to next stage - pc.outputs().snapshot(Output{"FDD", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"FDD", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"FDD", "TRIGGERINPUT", 0, Lifetime::Timeframe}, mDigitsTrig); + pc.outputs().snapshot(Output{"FDD", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"FDD", "DIGITSCH", 0}, mDigitsCh); + 
pc.outputs().snapshot(Output{"FDD", "TRIGGERINPUT", 0}, mDigitsTrig); if (pc.outputs().isAllowed({"FDD", "DIGITLBL", 0})) { - auto& sharedlabels = pc.outputs().make>(Output{"FDD", "DIGITLBL", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"FDD", "DIGITLBL", 0}); labels.flatten_to(sharedlabels); labels.clear_andfreememory(); } LOG(info) << "FDD: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"FDD", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"FDD", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx index 4eda984358634..9098240d17627 100644 --- a/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/FT0DigitizerSpec.cxx @@ -138,14 +138,14 @@ class FT0DPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitizer.flush_all(mDigitsBC, mDigitsCh, mDigitsTrig, labels); // send out to next stage - pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, mDigitsTrig); + pc.outputs().snapshot(Output{"FT0", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"FT0", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"FT0", "TRIGGERINPUT", 0}, mDigitsTrig); if (pc.outputs().isAllowed({"FT0", "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"FT0", "DIGITSMCTR", 0}, labels); } LOG(info) << "FT0: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"FT0", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"FT0", "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx index 151175e0c0758..bb5174dfcbb2c 100644 --- a/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/FV0DigitizerSpec.cxx @@ -102,14 +102,14 @@ class FV0DPLDigitizerTask : public o2::base::BaseDPLDigitizer LOG(info) << "FV0: Sending " << mDigitsBC.size() << " digitsBC and " << mDigitsCh.size() << " digitsCh."; // send out to next stage - pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0, Lifetime::Timeframe}, mDigitsTrig); + pc.outputs().snapshot(Output{"FV0", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"FV0", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"FV0", "TRIGGERINPUT", 0}, mDigitsTrig); if (pc.outputs().isAllowed({"FV0", "DIGITLBL", 0})) { - pc.outputs().snapshot(Output{"FV0", "DIGITLBL", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"FV0", "DIGITLBL", 0}, mLabels); } LOG(info) << "FV0: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"FV0", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"FV0", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to 
exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx index 22ab3e5d7ef84..0adb9eb0a90df 100644 --- a/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/HMPIDDigitizerSpec.cxx @@ -132,13 +132,13 @@ class HMPIDDPLDigitizerTask : public o2::base::BaseDPLDigitizer flushDigitsAndLabels(); // send out to next stage - pc.outputs().snapshot(Output{"HMP", "DIGITS", 0, Lifetime::Timeframe}, digitsAccum); - pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0, Lifetime::Timeframe}, mIntRecord); + pc.outputs().snapshot(Output{"HMP", "DIGITS", 0}, digitsAccum); + pc.outputs().snapshot(Output{"HMP", "INTRECORDS", 0}, mIntRecord); if (pc.outputs().isAllowed({"HMP", "DIGITLBL", 0})) { - pc.outputs().snapshot(Output{"HMP", "DIGITLBL", 0, Lifetime::Timeframe}, labelAccum); + pc.outputs().snapshot(Output{"HMP", "DIGITLBL", 0}, labelAccum); } LOG(info) << "HMP: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"HMP", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"HMP", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx index 9f03bd6733729..21f808f1e1ed2 100644 --- a/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/ITS3DigitizerSpec.cxx @@ -143,7 +143,7 @@ class ITS3DPLDigitizerTask : BaseDPLDigitizer mDigitizer.setMCLabels(&mLabels); // digits are directly put into DPL owned resource - auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}); + auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0}); auto accumulate = [this, &digitsAccum]() { // accumulate result of single event processing, called after processing every event supplied @@ -216,17 +216,17 @@ class ITS3DPLDigitizerTask : BaseDPLDigitizer // here we have all digits and labels and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mROFRecordsAccum); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mROFRecordsAccum); if (mWithMCTruth) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mMC2ROFRecordsAccum); - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mMC2ROFRecordsAccum); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); mLabelsAccum.flatten_to(sharedlabels); // free space of existing label containers mLabels.clear_andfreememory(); mLabelsAccum.clear_andfreememory(); } LOG(info) << mID.getName() << ": Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{mOrigin, "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{mOrigin, "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx index e1291fe0cb0d8..2a8c28bb4dbbd 100644 --- a/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/ITSMFTDigitizerSpec.cxx @@ -82,7 +82,7 @@ class ITSMFTDPLDigitizerTask 
: BaseDPLDigitizer mDigitizer.setMCLabels(&mLabels); // digits are directly put into DPL owned resource - auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0, Lifetime::Timeframe}); + auto& digitsAccum = pc.outputs().make>(Output{mOrigin, "DIGITS", 0}); auto accumulate = [this, &digitsAccum]() { // accumulate result of single event processing, called after processing every event supplied @@ -160,17 +160,17 @@ class ITSMFTDPLDigitizerTask : BaseDPLDigitizer // here we have all digits and labels and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0, Lifetime::Timeframe}, mROFRecordsAccum); + pc.outputs().snapshot(Output{mOrigin, "DIGITSROF", 0}, mROFRecordsAccum); if (mWithMCTruth) { - pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0, Lifetime::Timeframe}, mMC2ROFRecordsAccum); - auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0, Lifetime::Timeframe}); + pc.outputs().snapshot(Output{mOrigin, "DIGITSMC2ROF", 0}, mMC2ROFRecordsAccum); + auto& sharedlabels = pc.outputs().make>(Output{mOrigin, "DIGITSMCTR", 0}); mLabelsAccum.flatten_to(sharedlabels); // free space of existing label containers mLabels.clear_andfreememory(); mLabelsAccum.clear_andfreememory(); } LOG(info) << mID.getName() << ": Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{mOrigin, "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{mOrigin, "ROMode", 0}, mROMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx index fc92020f25b21..187c24f962c1f 100644 --- a/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MCHDigitizerSpec.cxx @@ -111,12 +111,12 @@ class MCHDPLDigitizerTask : public o2::base::BaseDPLDigitizer dataformats::MCLabelContainer labels{}; auto nPileup = mDigitizer->digitize(rofs, digits, labels); - pc.outputs().snapshot(Output{"MCH", "DIGITS", 0, Lifetime::Timeframe}, digits); - pc.outputs().snapshot(Output{"MCH", "DIGITROFS", 0, Lifetime::Timeframe}, rofs); + pc.outputs().snapshot(Output{"MCH", "DIGITS", 0}, digits); + pc.outputs().snapshot(Output{"MCH", "DIGITROFS", 0}, rofs); if (pc.outputs().isAllowed({"MCH", "DIGITSLABELS", 0})) { - pc.outputs().snapshot(Output{"MCH", "DIGITSLABELS", 0, Lifetime::Timeframe}, labels); + pc.outputs().snapshot(Output{"MCH", "DIGITSLABELS", 0}, labels); } - pc.outputs().snapshot(Output{"MCH", "ROMode", 0, Lifetime::Timeframe}, + pc.outputs().snapshot(Output{"MCH", "ROMode", 0}, DigitizerParam::Instance().continuous ? 
o2::parameters::GRPObject::CONTINUOUS : o2::parameters::GRPObject::TRIGGERING); // we should be only called once; tell DPL that this process is ready to exit diff --git a/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h b/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h index e3359af01b1e5..8204f4e437cc3 100644 --- a/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h +++ b/Steer/DigitizerWorkflow/src/MCTruthReaderSpec.h @@ -61,7 +61,7 @@ class MCTruthReaderTask : public o2::framework::Task br->GetEntry(0); // publish the labels in a const shared memory container - auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS2", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS2", 0}); iocontainer->copyandflatten(sharedlabels); } else { @@ -74,7 +74,7 @@ class MCTruthReaderTask : public o2::framework::Task LOG(info) << "MCCONTAINER CHECK" << mccontainer->getNElements(); // publish the original labels - pc.outputs().snapshot(Output{"TST", "LABELS2", 0, Lifetime::Timeframe}, *mccontainer); + pc.outputs().snapshot(Output{"TST", "LABELS2", 0}, *mccontainer); } // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx b/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx index 1b0f325cba9d6..6888b118fb06c 100644 --- a/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MCTruthSourceSpec.cxx @@ -54,12 +54,12 @@ class MCTruthSourceTask : public o2::framework::Task if (mNew) { LOG(info) << "New serialization"; // we need to flatten it and write to managed shared memory container - auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS", 0, Lifetime::Timeframe}); + auto& sharedlabels = pc.outputs().make>(Output{"TST", "LABELS", 0}); container.flatten_to(sharedlabels); sleep(1); } else { LOG(info) << "Old serialization"; - pc.outputs().snapshot({"TST", "LABELS", 0, Lifetime::Timeframe}, container); + pc.outputs().snapshot({"TST", "LABELS", 0}, container); sleep(1); } diff --git a/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx b/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx index 9ce78d97418c3..78fb8988a2419 100644 --- a/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MCTruthWriterSpec.cxx @@ -84,7 +84,7 @@ class MCTruthWriterTask : public o2::framework::Task } if (mIO) { // this triggers the reader process - pc.outputs().snapshot({"TST", "TRIGGERREAD", 0, Lifetime::Timeframe}, labelfilename); + pc.outputs().snapshot({"TST", "TRIGGERREAD", 0}, labelfilename); } // we should be only called once; tell DPL that this process is ready to exit diff --git a/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx index c0a3be730c411..1dbd392fb9f73 100644 --- a/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/MIDDigitizerSpec.cxx @@ -117,13 +117,13 @@ class MIDDPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitsMerger.process(digitsAccum, labelsAccum, rofRecords); LOG(debug) << "MID: Sending " << digitsAccum.size() << " digits."; - pc.outputs().snapshot(Output{"MID", "DIGITS", 0, Lifetime::Timeframe}, mDigitsMerger.getColumnData()); - pc.outputs().snapshot(Output{"MID", "DIGITSROF", 0, Lifetime::Timeframe}, mDigitsMerger.getROFRecords()); + pc.outputs().snapshot(Output{"MID", "DIGITS", 0}, mDigitsMerger.getColumnData()); + pc.outputs().snapshot(Output{"MID", "DIGITSROF", 0}, 
mDigitsMerger.getROFRecords()); if (pc.outputs().isAllowed({"MID", "DIGITLABELS", 0})) { - pc.outputs().snapshot(Output{"MID", "DIGITLABELS", 0, Lifetime::Timeframe}, mDigitsMerger.getMCContainer()); + pc.outputs().snapshot(Output{"MID", "DIGITLABELS", 0}, mDigitsMerger.getMCContainer()); } LOG(debug) << "MID: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"MID", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"MID", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx index db20e811941c2..0b7b968d22ff6 100644 --- a/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/PHOSDigitizerSpec.cxx @@ -160,16 +160,16 @@ void DigitizerSpec::run(framework::ProcessingContext& pc) } LOG(debug) << "Have " << mLabels.getNElements() << " PHOS labels "; // here we have all digits and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{"PHS", "DIGITS", 0, Lifetime::Timeframe}, mDigitsOut); - pc.outputs().snapshot(Output{"PHS", "DIGITTRIGREC", 0, Lifetime::Timeframe}, triggers); + pc.outputs().snapshot(Output{"PHS", "DIGITS", 0}, mDigitsOut); + pc.outputs().snapshot(Output{"PHS", "DIGITTRIGREC", 0}, triggers); if (pc.outputs().isAllowed({"PHS", "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{"PHS", "DIGITSMCTR", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"PHS", "DIGITSMCTR", 0}, mLabels); } // PHOS is always a triggering detector const o2::parameters::GRPObject::ROMode roMode = o2::parameters::GRPObject::TRIGGERING; LOG(debug) << "PHOS: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"PHS", "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{"PHS", "ROMode", 0}, roMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx index c74e5ca6327fc..e512659686c86 100644 --- a/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/TOFDigitizerSpec.cxx @@ -259,21 +259,21 @@ class TOFDPLDigitizerTask : public o2::base::BaseDPLDigitizer LOG(info) << "Post " << digitsVector->size() << " digits in " << readoutwindow->size() << " RO windows"; // here we have all digits and we can send them to consumer (aka snapshot it onto output) - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0, Lifetime::Timeframe}, *digitsVector); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITS", 0}, *digitsVector); if (pc.outputs().isAllowed({o2::header::gDataOriginTOF, "DIGITSMCTR", 0})) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0, Lifetime::Timeframe}, *mcLabVecOfVec); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITSMCTR", 0}, *mcLabVecOfVec); } - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0, Lifetime::Timeframe}, *readoutwindow); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "READOUTWINDOW", 0}, *readoutwindow); // send empty pattern from digitizer (it may change in future) std::vector& patterns = mDigitizer->getPatterns(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0, Lifetime::Timeframe}, patterns); + 
pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "PATTERNS", 0}, patterns); DigitHeader& digitH = mDigitizer->getDigitHeader(); - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0, Lifetime::Timeframe}, digitH); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "DIGITHEADER", 0}, digitH); LOG(info) << "TOF: Sending ROMode= " << roMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "ROMode", 0}, roMode); timer.Stop(); LOG(info) << "Digitization took " << timer.CpuTime() << "s"; diff --git a/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx index 59f8589a56449..3f24bb1a9ee12 100644 --- a/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/TPCDigitizerSpec.cxx @@ -282,7 +282,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer auto roMode = isContinuous ? o2::parameters::GRPObject::CONTINUOUS : o2::parameters::GRPObject::PRESENT; LOG(info) << "TPC: Sending ROMode= " << (mDigitizer.isContinuousReadout() ? "Continuous" : "Triggered") << " to GRPUpdater from channel " << dh->subSpecification; - pc.outputs().snapshot(Output{"TPC", "ROMode", 0, Lifetime::Timeframe}, roMode); + pc.outputs().snapshot(Output{"TPC", "ROMode", 0}, roMode); } mWriteGRP = false; @@ -309,7 +309,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer return ContainerType(nullptr); } else { // default case - return &pc.outputs().make>(Output{"TPC", "DIGITS", static_cast(dh->subSpecification), Lifetime::Timeframe, header}); + return &pc.outputs().make>(Output{"TPC", "DIGITS", static_cast(dh->subSpecification), header}); } }; // lambda that snapshots the common mode vector to be sent out; prepares and attaches header with sector information @@ -318,8 +318,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer header.activeSectors = activeSectors; if (!mInternalWriter) { // note that snapshoting only works with non-const references (to be fixed?) 
- pc.outputs().snapshot(Output{"TPC", "COMMONMODE", static_cast(dh->subSpecification), Lifetime::Timeframe, - header}, + pc.outputs().snapshot(Output{"TPC", "COMMONMODE", static_cast(dh->subSpecification), header}, const_cast&>(commonMode)); } }; @@ -329,7 +328,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer header.activeSectors = activeSectors; if (mWithMCTruth) { if (!mInternalWriter) { - auto& sharedlabels = pc.outputs().make>(Output{"TPC", "DIGITSMCTR", static_cast(dh->subSpecification), Lifetime::Timeframe, header}); + auto& sharedlabels = pc.outputs().make>(Output{"TPC", "DIGITSMCTR", static_cast(dh->subSpecification), header}); labels.flatten_to(sharedlabels); } } @@ -340,8 +339,7 @@ class TPCDPLDigitizerTask : public BaseDPLDigitizer header.activeSectors = activeSectors; if (!mInternalWriter) { LOG(info) << "TPC: Send TRIGGERS for sector " << sector << " channel " << dh->subSpecification << " | size " << events.size(); - pc.outputs().snapshot(Output{"TPC", "DIGTRIGGERS", static_cast(dh->subSpecification), Lifetime::Timeframe, - header}, + pc.outputs().snapshot(Output{"TPC", "DIGTRIGGERS", static_cast(dh->subSpecification), header}, const_cast&>(events)); } }; diff --git a/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx b/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx index d616df2fadb35..cd07f6389c43f 100644 --- a/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx +++ b/Steer/DigitizerWorkflow/src/ZDCDigitizerSpec.cxx @@ -147,15 +147,15 @@ class ZDCDPLDigitizerTask : public o2::base::BaseDPLDigitizer mDigitizer.Finalize(mDigitsBC, mOrbitData); // send out to next stage - pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0, Lifetime::Timeframe}, mDigitsBC); - pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0, Lifetime::Timeframe}, mDigitsCh); - pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0, Lifetime::Timeframe}, mOrbitData); + pc.outputs().snapshot(Output{"ZDC", "DIGITSBC", 0}, mDigitsBC); + pc.outputs().snapshot(Output{"ZDC", "DIGITSCH", 0}, mDigitsCh); + pc.outputs().snapshot(Output{"ZDC", "DIGITSPD", 0}, mOrbitData); if (pc.outputs().isAllowed({"ZDC", "DIGITSLBL", 0})) { - pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0, Lifetime::Timeframe}, mLabels); + pc.outputs().snapshot(Output{"ZDC", "DIGITSLBL", 0}, mLabels); } LOG(info) << "ZDC: Sending ROMode= " << mROMode << " to GRPUpdater"; - pc.outputs().snapshot(Output{"ZDC", "ROMode", 0, Lifetime::Timeframe}, mROMode); + pc.outputs().snapshot(Output{"ZDC", "ROMode", 0}, mROMode); // we should be only called once; tell DPL that this process is ready to exit pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Utilities/DataSampling/src/DataSamplingPolicy.cxx b/Utilities/DataSampling/src/DataSamplingPolicy.cxx index de7aa5d8f6865..44a7a63efcf51 100644 --- a/Utilities/DataSampling/src/DataSamplingPolicy.cxx +++ b/Utilities/DataSampling/src/DataSamplingPolicy.cxx @@ -128,7 +128,7 @@ Output DataSamplingPolicy::prepareOutput(const ConcreteDataMatcher& input, Lifet auto result = mPaths.find(input); if (result != mPaths.end()) { auto dataType = DataSpecUtils::asConcreteDataTypeMatcher(result->second); - return Output{dataType.origin, dataType.description, input.subSpec, lifetime}; + return Output{dataType.origin, dataType.description, input.subSpec}; } else { return Output{header::gDataOriginInvalid, header::gDataDescriptionInvalid}; } diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index b32218ea43e58..de78993ff083e 100644 --- 
a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -114,7 +114,6 @@ void Dispatcher::run(ProcessingContext& ctx) routeAsConcreteDataType.origin, routeAsConcreteDataType.description, partInputHeader->subSpecification, - part.spec->lifetime, std::move(headerStack)}; send(ctx.outputs(), part, output); } diff --git a/run/dpl_eventgen.cxx b/run/dpl_eventgen.cxx index 9700ac32c6938..3090944031ad3 100644 --- a/run/dpl_eventgen.cxx +++ b/run/dpl_eventgen.cxx @@ -69,8 +69,8 @@ struct GeneratorTask { for (auto i = 0; i < std::min((int)aggregate, nEvents - eventCounter); ++i) { mctracks.clear(); genservice.generateEvent_MCTracks(mctracks, mcheader); - pc.outputs().snapshot(Output{"MC", "MCHEADER", 0, Lifetime::Timeframe}, mcheader); - pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0, Lifetime::Timeframe}, mctracks); + pc.outputs().snapshot(Output{"MC", "MCHEADER", 0}, mcheader); + pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0}, mctracks); ++eventCounter; } // report number of TFs injected for the rate limiter to work diff --git a/run/o2sim_hepmc_publisher.cxx b/run/o2sim_hepmc_publisher.cxx index 76fbddc1993f3..7172248ca68ac 100644 --- a/run/o2sim_hepmc_publisher.cxx +++ b/run/o2sim_hepmc_publisher.cxx @@ -135,8 +135,8 @@ struct O2simHepmcPublisher { } // add to the message - pc.outputs().snapshot(Output{"MC", "MCHEADER", 0, Lifetime::Timeframe}, mcHeader); - pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0, Lifetime::Timeframe}, mcTracks); + pc.outputs().snapshot(Output{"MC", "MCHEADER", 0}, mcHeader); + pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0}, mcTracks); mcTracks.clear(); ++eventCounter; } diff --git a/run/o2sim_kine_publisher.cxx b/run/o2sim_kine_publisher.cxx index 35fd48af199e0..83f82fd69cca3 100644 --- a/run/o2sim_kine_publisher.cxx +++ b/run/o2sim_kine_publisher.cxx @@ -44,8 +44,8 @@ struct O2simKinePublisher { for (auto i = 0; i < std::min((int)aggregate, nEvents - eventCounter); ++i) { auto mcevent = mcKinReader->getMCEventHeader(0, eventCounter); auto mctracks = mcKinReader->getTracks(0, eventCounter); - pc.outputs().snapshot(Output{"MC", "MCHEADER", 0, Lifetime::Timeframe}, mcevent); - pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0, Lifetime::Timeframe}, mctracks); + pc.outputs().snapshot(Output{"MC", "MCHEADER", 0}, mcevent); + pc.outputs().snapshot(Output{"MC", "MCTRACKS", 0}, mctracks); ++eventCounter; } // report number of TFs injected for the rate limiter to work diff --git a/run/o2sim_mctracks_to_aod.cxx b/run/o2sim_mctracks_to_aod.cxx index e5460e346c3f9..ca64ce0705f1a 100644 --- a/run/o2sim_mctracks_to_aod.cxx +++ b/run/o2sim_mctracks_to_aod.cxx @@ -171,8 +171,8 @@ struct MctracksToAod { } } ++timeframe; - pc.outputs().snapshot(Output{"TFF", "TFFilename", 0, Lifetime::Timeframe}, ""); - pc.outputs().snapshot(Output{"TFN", "TFNumber", 0, Lifetime::Timeframe}, timeframe); + pc.outputs().snapshot(Output{"TFF", "TFFilename", 0}, ""); + pc.outputs().snapshot(Output{"TFN", "TFNumber", 0}, timeframe); } }; From b0d60773d27fcbe609e426502ae3196f4e1cd8b4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:56:45 +0100 Subject: [PATCH 153/726] Remove Lifetime specification from Output The .lifetime field in Output is to be removed. This includes cases in which the value is not the default. 
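For illustration, call sites change along these lines (a minimal sketch; "GLO"/"FWDASSESSMENT" is just one of the outputs touched in the hunks below):

  // before: the lifetime was spelled out per message, here a non-default one
  output.snapshot(Output{"GLO", "FWDASSESSMENT", 0, Lifetime::Sporadic}, objar);
  // after: only origin, description and subSpec remain on the Output
  output.snapshot(Output{"GLO", "FWDASSESSMENT", 0}, objar);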
--- .../src/GlobalFwdMatchingAssessmentSpec.cxx | 2 +- Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx | 2 +- Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx | 2 +- Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx | 4 ++-- .../workflow/include/TPCWorkflow/TPCDistributeSACSpec.h | 2 +- Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx | 2 +- Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx | 4 ++-- Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx | 2 +- Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx | 8 ++++---- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx index e4c045d2a7d93..e896c097cd03d 100644 --- a/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/src/GlobalFwdMatchingAssessmentSpec.cxx @@ -109,7 +109,7 @@ void GlobalFwdAssessmentSpec::sendOutput(DataAllocator& output) TObjArray objar; mGloFwdAssessment->getHistos(objar); - output.snapshot(Output{"GLO", "FWDASSESSMENT", 0, Lifetime::Sporadic}, objar); + output.snapshot(Output{"GLO", "FWDASSESSMENT", 0}, objar); TFile* f = new TFile(Form("GlobalForwardAssessment.root"), "RECREATE"); objar.Write(); diff --git a/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx b/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx index 9778120a75389..d3e4c33457ae0 100644 --- a/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx +++ b/Detectors/ITSMFT/MFT/workflow/src/MFTAssessmentSpec.cxx @@ -90,7 +90,7 @@ void MFTAssessmentSpec::sendOutput(DataAllocator& output) TObjArray objar; mMFTAssessment->getHistos(objar); - output.snapshot(Output{"MFT", "MFTASSESSMENT", 0, Lifetime::Sporadic}, objar); + output.snapshot(Output{"MFT", "MFTASSESSMENT", 0}, objar); TFile* f = new TFile(Form("MFTAssessment.root"), "RECREATE"); objar.Write(); diff --git a/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx index 1ed04baa77892..9ce61d0af38cb 100644 --- a/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSL1phaseCalibDevice.cxx @@ -71,7 +71,7 @@ void PHOSL1phaseCalibDevice::endOfStream(o2::framework::EndOfStreamContext& ec) ec.outputs().snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "PHOS_L1phase", 0}, info); // Send summary to QC LOG(info) << "Sending histos to QC "; - ec.outputs().snapshot(o2::framework::Output{"PHS", "L1PHASEHISTO", 0, o2::framework::Lifetime::Sporadic}, mCalibrator->getQcHistos()); + ec.outputs().snapshot(o2::framework::Output{"PHS", "L1PHASEHISTO", 0}, mCalibrator->getQcHistos()); } o2::framework::DataProcessorSpec o2::phos::getPHOSL1phaseCalibDeviceSpec() diff --git a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx index 292e083d4bb48..bc650dfaa54b8 100644 --- a/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx +++ b/Detectors/PHOS/workflow/src/RawToCellConverterSpec.cxx @@ -115,7 +115,7 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { mOutputFitChi.clear(); - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0}, mOutputFitChi); } return; // empty TF, nothing to 
process } @@ -294,7 +294,7 @@ void RawToCellConverterSpec::run(framework::ProcessingContext& ctx) ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLTRIGREC", mflpId}, mOutputTriggerRecords); ctx.outputs().snapshot(o2::framework::Output{"PHS", "RAWHWERRORS", 0}, mOutputHWErrors); if (mFillChi2) { - ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0, o2::framework::Lifetime::QA}, mOutputFitChi); + ctx.outputs().snapshot(o2::framework::Output{"PHS", "CELLFITQA", 0}, mOutputFitChi); } } diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h b/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h index f7c207ba3a27b..ee05a58b5dcbe 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/TPCDistributeSACSpec.h @@ -102,7 +102,7 @@ class TPCDistributeSACSpec : public o2::framework::Task void sendOutput(o2::framework::ProcessingContext& pc) { LOGP(info, "Sending SACs on lane: {} for {} TFs", mCurrentOutLane, mProcessedTFs); - pc.outputs().snapshot(Output{gDataOriginTPC, getDataDescriptionSACCCDB(), 0, Lifetime::Sporadic}, mCCDBTimeStamp); + pc.outputs().snapshot(Output{gDataOriginTPC, getDataDescriptionSACCCDB(), 0}, mCCDBTimeStamp); for (unsigned int i = 0; i < o2::tpc::GEMSTACKS; ++i) { pc.outputs().snapshot(Output{gDataOriginTPC, mDataDescrOut[mCurrentOutLane], header::DataHeader::SubSpecificationType{i}}, mSACs[i]); } diff --git a/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx b/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx index 9891d80a39503..5ef2bdc7adc51 100644 --- a/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx +++ b/Detectors/TPC/workflow/src/OccupancyFilterSpec.cxx @@ -106,7 +106,7 @@ class OccupancyFilterDevice : public o2::framework::Task { o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = (0x1 << sector); - output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), Lifetime::Sporadic, header}, digits); + output.snapshot(Output{gDataOriginTPC, "FILTERDIG", static_cast(sector), header}, digits); } }; diff --git a/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx b/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx index d48dde325c614..ea50a32c6a906 100644 --- a/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx +++ b/Detectors/TPC/workflow/src/TPCVDriftTglCalibSpec.cxx @@ -104,8 +104,8 @@ void TPCVDriftTglCalibSpec::sendOutput(DataAllocator& output) auto image = o2::ccdb::CcdbApi::createObjectImage(&payloadVec[i], &w); LOG(info) << "Sending object " << w.getPath() << "/" << w.getFileName() << " of size " << image->size() << " bytes, valid for " << w.getStartValidityTimestamp() << " : " << w.getEndValidityTimestamp(); - output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBPayload, "TPCVDTGL", i, Lifetime::Sporadic}, *image.get()); // vector - output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "TPCVDTGL", i, Lifetime::Sporadic}, w); // root-serialized + output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBPayload, "TPCVDTGL", i}, *image.get()); // vector + output.snapshot(Output{o2::calibration::Utils::gDataOriginCDBWrapper, "TPCVDTGL", i}, w); // root-serialized } if (payloadVec.size()) { mCalibrator->initOutput(); // reset the outputs once they are already sent diff --git a/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx index 422221157f877..328f824918cd4 100644 --- a/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx +++ 
b/Detectors/ZDC/calib/src/BaselineCalibEPNSpec.cxx @@ -127,7 +127,7 @@ void BaselineCalibEPNSpec::run(ProcessingContext& pc) } // Send intermediate calibration data auto& summary = mWorker.mData.getSummary(); - o2::framework::Output outputData("ZDC", "BASECALIBDATA", 0, Lifetime::Sporadic); + o2::framework::Output outputData("ZDC", "BASECALIBDATA", 0); pc.outputs().snapshot(outputData, summary); if (pc.transitionState() == TransitionHandlingState::Requested) { // End of processing for this run diff --git a/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx b/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx index 32fe2ee2eb1fb..186cec68517ea 100644 --- a/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx +++ b/Detectors/ZDC/calib/src/NoiseCalibEPNSpec.cxx @@ -113,19 +113,19 @@ void NoiseCalibEPNSpec::run(ProcessingContext& pc) if (mVerbosity > DbgMedium && mModTF > 0) { LOG(info) << "Send intermediate calibration data mProcessed=" << mProcessed << " >= mModTF=" << mModTF; } - o2::framework::Output outputData("ZDC", "NOISECALIBDATA", 0, Lifetime::Sporadic); + o2::framework::Output outputData("ZDC", "NOISECALIBDATA", 0); pc.outputs().snapshot(outputData, summary); for (int ih = 0; ih < NChannels; ih++) { { - o2::framework::Output output("ZDC", "NOISE_1DH", ih, Lifetime::Sporadic); + o2::framework::Output output("ZDC", "NOISE_1DH", ih); pc.outputs().snapshot(output, mWorker.mH[0][ih]->getBase()); } { - o2::framework::Output output("ZDC", "NOISE_1DH_S", ih, Lifetime::Sporadic); + o2::framework::Output output("ZDC", "NOISE_1DH_S", ih); pc.outputs().snapshot(output, mWorker.mH[1][ih]->getBase()); } { - o2::framework::Output output("ZDC", "NOISE_1DH_D", ih, Lifetime::Sporadic); + o2::framework::Output output("ZDC", "NOISE_1DH_D", ih); pc.outputs().snapshot(output, mWorker.mH[2][ih]->getBase()); } } From 01b54b1b6334f3ddd758394d93409178ac92e585 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 15:27:13 +0100 Subject: [PATCH 154/726] DPL: drop lifetime member from Output This is apparently not really used for anything and in general not passed correctly. Lifetime is a static property of the data flow, so it does not make sense to have it on a per message basis. 
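After this change the per-message Output reduces to roughly the following (a sketch of the relevant data members only, mirroring the Framework/Core/include/Framework/Output.h hunk below; the constructors and operator== lose their Lifetime parts accordingly):

  struct Output {
    header::DataOrigin origin;
    header::DataDescription description;
    header::DataHeader::SubSpecificationType subSpec = 0;
    header::Stack metaHeader = {};
  };

The lifetime stays a static property of the data flow: it is still declared once per route on the workflow side (e.g. in the matching OutputSpec), and code that needs it reads it from the route rather than from the individual message.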
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 4 ++-- Framework/Core/include/Framework/Output.h | 19 ++---------------- Framework/Core/src/CommonServices.cxx | 2 +- Framework/Core/src/DataAllocator.cxx | 4 ++-- Framework/Core/test/test_DataAllocator.cxx | 20 +++++++++---------- Framework/Core/test/test_ParallelPipeline.cxx | 8 ++++---- Framework/Core/test/test_ProcessorOptions.cxx | 2 +- .../Core/test/test_StaggeringWorkflow.cxx | 6 +++--- Framework/Core/test/test_TableBuilder.cxx | 2 +- .../Utils/include/DPLUtils/RootTreeReader.h | 8 ++++---- Framework/Utils/src/Utils.cxx | 2 +- Framework/Utils/test/test_RootTreeReader.cxx | 5 ++--- .../test/test_RootTreeWriterWorkflow.cxx | 2 +- 13 files changed, 34 insertions(+), 50 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 0e067dc2f2d52..e45e1c6996a98 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -185,7 +185,7 @@ auto populateCacheWith(std::shared_ptr const& helper, LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); - Output output{concrete.origin, concrete.description, concrete.subSpec, route.matcher.lifetime}; + Output output{concrete.origin, concrete.description, concrete.subSpec}; auto&& v = allocator.makeVector(output); std::map metadata; std::map headers; @@ -343,7 +343,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() checkValidity = true; // never skip check if the cache is empty } LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; - Output output{"CTP", "OrbitReset", 0, Lifetime::Condition}; + Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); const auto& api = helper->getAPI(path); diff --git a/Framework/Core/include/Framework/Output.h b/Framework/Core/include/Framework/Output.h index 9f85d6e8edb92..a2abf5f7600a9 100644 --- a/Framework/Core/include/Framework/Output.h +++ b/Framework/Core/include/Framework/Output.h @@ -28,7 +28,6 @@ struct Output { header::DataOrigin origin; header::DataDescription description; header::DataHeader::SubSpecificationType subSpec = 0; - enum Lifetime lifetime = Lifetime::Timeframe; header::Stack metaHeader = {}; Output(header::DataOrigin o, header::DataDescription d) : origin(o), description(d) {} @@ -38,22 +37,11 @@ struct Output { { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l) - : origin(o), description(d), subSpec(s), lifetime(l) - { - } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, header::Stack&& stack) : origin(o), description(d), subSpec(s), metaHeader(std::move(stack)) { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l, - header::Stack&& stack) - : origin(o), description(d), subSpec(s), lifetime(l), metaHeader(std::move(stack)) - { - } - Output(header::DataHeader const& header) : origin(header.dataOrigin), description(header.dataDescription), subSpec(header.subSpecification) { @@ -65,7 +53,6 @@ struct Output { : origin(rhs.origin), description(rhs.description), subSpec(rhs.subSpec), - lifetime(rhs.lifetime), metaHeader(std::move(rhs.metaHeader)) { } @@ -77,16 +64,14 @@ struct Output { origin = rhs.origin; description = rhs.description; subSpec = rhs.subSpec; - 
lifetime = rhs.lifetime; metaHeader = std::move(rhs.metaHeader); return *this; } bool operator==(const Output& that) const { - return origin == that.origin && description == that.description && subSpec == that.subSpec && - lifetime == that.lifetime; - }; + return origin == that.origin && description == that.description && subSpec == that.subSpec; + } }; } // namespace o2 diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 7d502216bc509..05b8fba47e532 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -425,7 +425,7 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() if (concrete.subSpec == 0) { continue; } - auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec, output.matcher.lifetime}); + auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec}); stfDist.id = timingInfo.timeslice; stfDist.firstOrbit = timingInfo.firstTForbit; stfDist.runNumber = timingInfo.runNumber; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 296e094bffe26..1230d2e942f6e 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -294,7 +294,7 @@ Output DataAllocator::getOutputByBind(OutputRef&& ref) if (allowedOutputRoutes[ri].matcher.binding.value == ref.label) { auto spec = allowedOutputRoutes[ri].matcher; auto dataType = DataSpecUtils::asConcreteDataTypeMatcher(spec); - return Output{dataType.origin, dataType.description, ref.subSpec, spec.lifetime, std::move(ref.headerStack)}; + return Output{dataType.origin, dataType.description, ref.subSpec, std::move(ref.headerStack)}; } } std::string availableRoutes; @@ -342,7 +342,7 @@ void DataAllocator::cookDeadBeef(const Output& spec) // We get the output route from the original spec, but we send it // using the binding of the deadbeef subSpecification. 
RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); - auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef, Lifetime::Timeframe}; + auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef}; auto headerMessage = headerMessageFromOutput(deadBeefOutput, routeIndex, header::gSerializationMethodNone, 0); addPartToContext(proxy.createOutputMessage(routeIndex, 0), deadBeefOutput, header::gSerializationMethodNone); diff --git a/Framework/Core/test/test_DataAllocator.cxx b/Framework/Core/test/test_DataAllocator.cxx index d880da72d7cb4..894fb5f38368a 100644 --- a/Framework/Core/test/test_DataAllocator.cxx +++ b/Framework/Core/test/test_DataAllocator.cxx @@ -45,7 +45,7 @@ using namespace o2::framework; // this function is only used to do the static checks for API return types void doTypeChecks() { - const Output output{"TST", "DUMMY", 0, Lifetime::Timeframe}; + const Output output{"TST", "DUMMY", 0}; // we require references to objects owned by allocator context static_assert(std::is_lvalue_reference().make(output))>::value); static_assert(std::is_lvalue_reference().make(output, "test"))>::value); @@ -83,24 +83,24 @@ DataProcessorSpec getSourceSpec() // picked by the framework is no serialization test::MetaHeader meta1{42}; test::MetaHeader meta2{23}; - pc.outputs().snapshot(Output{"TST", "MESSAGEABLE", 0, Lifetime::Timeframe, {meta1, meta2}}, a); - pc.outputs().snapshot(Output{"TST", "MSGBLEROOTSRLZ", 0, Lifetime::Timeframe}, + pc.outputs().snapshot(Output{"TST", "MESSAGEABLE", 0, {meta1, meta2}}, a); + pc.outputs().snapshot(Output{"TST", "MSGBLEROOTSRLZ", 0}, o2::framework::ROOTSerialized(a)); // class Polymorphic is not messageable, so the serialization type is deduced // from the fact that the type has a dictionary and can be ROOT-serialized. 
- pc.outputs().snapshot(Output{"TST", "ROOTNONTOBJECT", 0, Lifetime::Timeframe}, b); + pc.outputs().snapshot(Output{"TST", "ROOTNONTOBJECT", 0}, b); // vector of ROOT serializable class - pc.outputs().snapshot(Output{"TST", "ROOTVECTOR", 0, Lifetime::Timeframe}, c); + pc.outputs().snapshot(Output{"TST", "ROOTVECTOR", 0}, c); // deque of simple types - pc.outputs().snapshot(Output{"TST", "DEQUE", 0, Lifetime::Timeframe}, testDequePayload); + pc.outputs().snapshot(Output{"TST", "DEQUE", 0}, testDequePayload); // likewise, passed anonymously with char type and class name o2::framework::ROOTSerialized d(*((char*)&c), "vector"); - pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC", 0, Lifetime::Timeframe}, d); + pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC", 0}, d); // vector of ROOT serializable class wrapped with TClass info as hint auto* cl = TClass::GetClass(typeid(decltype(c))); ASSERT_ERROR(cl != nullptr); o2::framework::ROOTSerialized e(*((char*)&c), cl); - pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC2", 0, Lifetime::Timeframe}, e); + pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC2", 0}, e); // test the 'make' methods pc.outputs().make(OutputRef{"makesingle", 0}) = a; auto& multi = pc.outputs().make(OutputRef{"makespan", 0}, 3); @@ -111,7 +111,7 @@ DataProcessorSpec getSourceSpec() // test the adopt method auto freefct = [](void* data, void* hint) {}; // simply ignore the cleanup for the test static std::string teststring = "adoptchunk"; - pc.outputs().adoptChunk(Output{"TST", "ADOPTCHUNK", 0, Lifetime::Timeframe}, teststring.data(), teststring.length(), freefct, nullptr); + pc.outputs().adoptChunk(Output{"TST", "ADOPTCHUNK", 0}, teststring.data(), teststring.length(), freefct, nullptr); // test resizable data chunk, initial size 0 and grow auto& growchunk = pc.outputs().newChunk(OutputRef{"growchunk", 0}, 0); growchunk.resize(sizeof(o2::test::TriviallyCopyable)); @@ -307,7 +307,7 @@ DataProcessorSpec getSinkSpec() ASSERT_ERROR((object12[0] == o2::test::TriviallyCopyable{42, 23, 0xdead})); ASSERT_ERROR((object12[1] == o2::test::TriviallyCopyable{10, 20, 0xacdc})); // forward the read-only span on a different route - pc.outputs().snapshot(Output{"TST", "MSGABLVECTORCPY", 0, Lifetime::Timeframe}, object12); + pc.outputs().snapshot(Output{"TST", "MSGABLVECTORCPY", 0}, object12); LOG(info) << "extracting TNamed object from input13"; auto object13 = pc.inputs().get("input13"); diff --git a/Framework/Core/test/test_ParallelPipeline.cxx b/Framework/Core/test/test_ParallelPipeline.cxx index e13869873fca1..47106855a8f74 100644 --- a/Framework/Core/test/test_ParallelPipeline.cxx +++ b/Framework/Core/test/test_ParallelPipeline.cxx @@ -64,7 +64,7 @@ std::vector defineDataProcessing(ConfigContext const&) << *input.spec << ": " << *((int*)input.payload); auto const* dataheader = DataRefUtils::getHeader(input); //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); - auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification, Lifetime::Timeframe}); + auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification}); ASSERT_ERROR(ctx.inputs().get(input.spec->binding.c_str()) == parallelContext.index1D()); data = parallelContext.index1D(); } @@ -85,10 +85,10 @@ std::vector defineDataProcessing(ConfigContext const&) // TODO: there is a bug in the API for using OutputRef, returns an rvalue which can not be bound to // lvalue reference //auto& data = ctx.outputs().make(OutputRef{"output", 
dataheader->subSpecification}); - auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification, Lifetime::Timeframe}); + auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification}); data = ctx.inputs().get(input.spec->binding.c_str()); //auto& meta = ctx.outputs().make(OutputRef{"metadt", dataheader->subSpecification}); - auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification, Lifetime::Timeframe}); + auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification}); meta = dataheader->subSpecification; } }}}, @@ -145,7 +145,7 @@ std::vector defineDataProcessing(ConfigContext const&) if (multiplicities[pipeline] == 0) { continue; } - ctx.outputs().make(Output{"TST", "TRIGGER", subspecs[index], Lifetime::Timeframe}) = pipeline; + ctx.outputs().make(Output{"TST", "TRIGGER", subspecs[index]}) = pipeline; multiplicities[pipeline++]--; if (pipeline >= nPipelines) { pipeline = 0; diff --git a/Framework/Core/test/test_ProcessorOptions.cxx b/Framework/Core/test/test_ProcessorOptions.cxx index 8de90e0e23441..1751176eb639e 100644 --- a/Framework/Core/test/test_ProcessorOptions.cxx +++ b/Framework/Core/test/test_ProcessorOptions.cxx @@ -45,7 +45,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const&) } // there is nothing to do, simply stop the workflow but we have to send at least one message // to make sure that the callback of the consumer is called - ctx.outputs().make(Output{"TST", "TEST", 0, Lifetime::Timeframe}) = 42; + ctx.outputs().make(Output{"TST", "TEST", 0}) = 42; ctx.services().get().endOfStream(); *isReady = true; }; diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index 0e98c4324bc76..edc07d598465b 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -85,9 +85,9 @@ std::vector defineDataProcessing(ConfigContext const&) // since the snapshot copy is ready for sending it is scheduled but held back // because of the CompletionPolicy trigger matcher. This message will be // sent together with the second message. 
- outputs.snapshot(Output{"PROD", "CHANNEL", subspec, Lifetime::Timeframe}, subspec); + outputs.snapshot(Output{"PROD", "CHANNEL", subspec}, subspec); device.waitFor(100); - outputs.snapshot(Output{"PROD", "TRIGGER", subspec, Lifetime::Timeframe}, subspec); + outputs.snapshot(Output{"PROD", "TRIGGER", subspec}, subspec); device.waitFor(100); } control.endOfStream(); @@ -106,7 +106,7 @@ std::vector defineDataProcessing(ConfigContext const&) LOG(info) << "processing " << input.spec->binding << " " << data; // check if the channel binding starts with 'trigger' if (input.spec->binding.find("trigger") == 0) { - pc.outputs().make(Output{"PROC", "CHANNEL", data, Lifetime::Timeframe}) = data; + pc.outputs().make(Output{"PROC", "CHANNEL", data}) = data; } nActiveInputs++; } diff --git a/Framework/Core/test/test_TableBuilder.cxx b/Framework/Core/test/test_TableBuilder.cxx index a7b4739054d03..26a0f5a930b73 100644 --- a/Framework/Core/test/test_TableBuilder.cxx +++ b/Framework/Core/test/test_TableBuilder.cxx @@ -240,7 +240,7 @@ TEST_CASE("TestSoAIntegration") TEST_CASE("TestDataAllocatorReturnType") { - const Output output{"TST", "DUMMY", 0, Lifetime::Timeframe}; + const Output output{"TST", "DUMMY", 0}; } TEST_CASE("TestPodInjestion") diff --git a/Framework/Utils/include/DPLUtils/RootTreeReader.h b/Framework/Utils/include/DPLUtils/RootTreeReader.h index 42a8c9d01d0d0..109d79f9fad78 100644 --- a/Framework/Utils/include/DPLUtils/RootTreeReader.h +++ b/Framework/Utils/include/DPLUtils/RootTreeReader.h @@ -51,11 +51,11 @@ struct DefaultKey { enum Lifetime lifetime = Lifetime::Timeframe; DefaultKey(const Output& desc) - : origin(desc.origin), description(desc.description), subSpec(desc.subSpec), lifetime(desc.lifetime) + : origin(desc.origin), description(desc.description), subSpec(desc.subSpec) { } - operator Output() const { return Output{origin, description, subSpec, lifetime}; } + operator Output() const { return Output{origin, description, subSpec}; } }; } // namespace rtr @@ -302,7 +302,7 @@ class GenericRootTreeReader } auto snapshot = [&context, &stackcreator](const KeyType& key, const auto& object) { - context.outputs().snapshot(Output{key.origin, key.description, key.subSpec, key.lifetime, std::move(stackcreator())}, object); + context.outputs().snapshot(Output{key.origin, key.description, key.subSpec, std::move(stackcreator())}, object); }; char* data = nullptr; @@ -310,7 +310,7 @@ class GenericRootTreeReader mBranch->GetEntry(entry); // execute hook if it was registered; if this return true do not proceed further - if (mPublishHook != nullptr && (*mPublishHook).hook(mName, context, Output{mKey.origin, mKey.description, mKey.subSpec, mKey.lifetime, std::move(stackcreator())}, data)) { + if (mPublishHook != nullptr && (*mPublishHook).hook(mName, context, Output{mKey.origin, mKey.description, mKey.subSpec, std::move(stackcreator())}, data)) { } // try to figureout when we need to do something special diff --git a/Framework/Utils/src/Utils.cxx b/Framework/Utils/src/Utils.cxx index 3a52c46e074af..1dc0882c2f55d 100644 --- a/Framework/Utils/src/Utils.cxx +++ b/Framework/Utils/src/Utils.cxx @@ -28,7 +28,7 @@ namespace workflows Output getOutput(const o2f::OutputSpec outputSpec) { auto concrete = DataSpecUtils::asConcreteDataMatcher(outputSpec); - return Output{concrete.origin, concrete.description, concrete.subSpec, outputSpec.lifetime}; + return Output{concrete.origin, concrete.description, concrete.subSpec}; } // This method can convert a vector of OutputSpec into a vector of Output. 
diff --git a/Framework/Utils/test/test_RootTreeReader.cxx b/Framework/Utils/test/test_RootTreeReader.cxx index 0390a29f807b1..bf652fca6f97a 100644 --- a/Framework/Utils/test/test_RootTreeReader.cxx +++ b/Framework/Utils/test/test_RootTreeReader.cxx @@ -67,11 +67,10 @@ DataProcessorSpec getSourceSpec() testFile->Close(); } - constexpr auto persistency = Lifetime::Transient; auto reader = std::make_shared("testtree", // tree name fileName.c_str(), // input file name - RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0, persistency}, "msgblarray"}, - Output{"TST", "ARRAYOFDATA", 0, persistency}, + RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0}, "msgblarray"}, + Output{"TST", "ARRAYOFDATA", 0}, "dataarray", RootTreeReader::PublishingMode::Single); diff --git a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx index 3213c1775c758..9a9b849d8d9d2 100644 --- a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx +++ b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx @@ -143,7 +143,7 @@ DataProcessorSpec getSourceSpec() o2::test::Polymorphic a(*counter); pc.outputs().snapshot(OutputRef{"output", 0}, a); pc.outputs().snapshot(OutputRef{"output", 1}, a); - int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0, Lifetime::Timeframe}); + int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0}); metadata = *counter; *counter = *counter + 1; if (*counter >= sTreeSize) { From 582c6d50c4d5cbbc79016f486b0f453cf5f6aa39 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Dec 2023 21:49:04 +0100 Subject: [PATCH 155/726] DPL: drop lifetime member from Output This is apparently not really used for anything and in general not passed correctly. Lifetime is a static property of the data flow, so it does not make sense to have it on a per message basis. 
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 4 ++-- Framework/Core/include/Framework/Output.h | 19 ++---------------- Framework/Core/src/CommonServices.cxx | 2 +- Framework/Core/src/DataAllocator.cxx | 4 ++-- Framework/Core/test/test_DataAllocator.cxx | 20 +++++++++---------- Framework/Core/test/test_ParallelPipeline.cxx | 8 ++++---- Framework/Core/test/test_ProcessorOptions.cxx | 2 +- .../Core/test/test_StaggeringWorkflow.cxx | 6 +++--- Framework/Core/test/test_TableBuilder.cxx | 2 +- .../Utils/include/DPLUtils/RootTreeReader.h | 8 ++++---- Framework/Utils/src/Utils.cxx | 2 +- Framework/Utils/test/test_RootTreeReader.cxx | 5 ++--- .../test/test_RootTreeWriterWorkflow.cxx | 2 +- 13 files changed, 34 insertions(+), 50 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 0e067dc2f2d52..e45e1c6996a98 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -185,7 +185,7 @@ auto populateCacheWith(std::shared_ptr const& helper, LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); - Output output{concrete.origin, concrete.description, concrete.subSpec, route.matcher.lifetime}; + Output output{concrete.origin, concrete.description, concrete.subSpec}; auto&& v = allocator.makeVector(output); std::map metadata; std::map headers; @@ -343,7 +343,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() checkValidity = true; // never skip check if the cache is empty } LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; - Output output{"CTP", "OrbitReset", 0, Lifetime::Condition}; + Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); const auto& api = helper->getAPI(path); diff --git a/Framework/Core/include/Framework/Output.h b/Framework/Core/include/Framework/Output.h index 9f85d6e8edb92..a2abf5f7600a9 100644 --- a/Framework/Core/include/Framework/Output.h +++ b/Framework/Core/include/Framework/Output.h @@ -28,7 +28,6 @@ struct Output { header::DataOrigin origin; header::DataDescription description; header::DataHeader::SubSpecificationType subSpec = 0; - enum Lifetime lifetime = Lifetime::Timeframe; header::Stack metaHeader = {}; Output(header::DataOrigin o, header::DataDescription d) : origin(o), description(d) {} @@ -38,22 +37,11 @@ struct Output { { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l) - : origin(o), description(d), subSpec(s), lifetime(l) - { - } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, header::Stack&& stack) : origin(o), description(d), subSpec(s), metaHeader(std::move(stack)) { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l, - header::Stack&& stack) - : origin(o), description(d), subSpec(s), lifetime(l), metaHeader(std::move(stack)) - { - } - Output(header::DataHeader const& header) : origin(header.dataOrigin), description(header.dataDescription), subSpec(header.subSpecification) { @@ -65,7 +53,6 @@ struct Output { : origin(rhs.origin), description(rhs.description), subSpec(rhs.subSpec), - lifetime(rhs.lifetime), metaHeader(std::move(rhs.metaHeader)) { } @@ -77,16 +64,14 @@ struct Output { origin = rhs.origin; description = rhs.description; subSpec = rhs.subSpec; - 
lifetime = rhs.lifetime; metaHeader = std::move(rhs.metaHeader); return *this; } bool operator==(const Output& that) const { - return origin == that.origin && description == that.description && subSpec == that.subSpec && - lifetime == that.lifetime; - }; + return origin == that.origin && description == that.description && subSpec == that.subSpec; + } }; } // namespace o2 diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 7d502216bc509..05b8fba47e532 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -425,7 +425,7 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() if (concrete.subSpec == 0) { continue; } - auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec, output.matcher.lifetime}); + auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec}); stfDist.id = timingInfo.timeslice; stfDist.firstOrbit = timingInfo.firstTForbit; stfDist.runNumber = timingInfo.runNumber; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 296e094bffe26..1230d2e942f6e 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -294,7 +294,7 @@ Output DataAllocator::getOutputByBind(OutputRef&& ref) if (allowedOutputRoutes[ri].matcher.binding.value == ref.label) { auto spec = allowedOutputRoutes[ri].matcher; auto dataType = DataSpecUtils::asConcreteDataTypeMatcher(spec); - return Output{dataType.origin, dataType.description, ref.subSpec, spec.lifetime, std::move(ref.headerStack)}; + return Output{dataType.origin, dataType.description, ref.subSpec, std::move(ref.headerStack)}; } } std::string availableRoutes; @@ -342,7 +342,7 @@ void DataAllocator::cookDeadBeef(const Output& spec) // We get the output route from the original spec, but we send it // using the binding of the deadbeef subSpecification. 
diff --git a/Framework/Utils/test/test_RootTreeReader.cxx b/Framework/Utils/test/test_RootTreeReader.cxx index 0390a29f807b1..bf652fca6f97a 100644 --- a/Framework/Utils/test/test_RootTreeReader.cxx +++ b/Framework/Utils/test/test_RootTreeReader.cxx @@ -67,11 +67,10 @@ DataProcessorSpec getSourceSpec() testFile->Close(); } - constexpr auto persistency = Lifetime::Transient; auto reader = std::make_shared("testtree", // tree name fileName.c_str(), // input file name - RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0, persistency}, "msgblarray"}, - Output{"TST", "ARRAYOFDATA", 0, persistency}, + RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0}, "msgblarray"}, + Output{"TST", "ARRAYOFDATA", 0}, "dataarray", RootTreeReader::PublishingMode::Single); diff --git a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx index 3213c1775c758..9a9b849d8d9d2 100644 --- a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx +++ b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx @@ -143,7 +143,7 @@ DataProcessorSpec getSourceSpec() o2::test::Polymorphic a(*counter); pc.outputs().snapshot(OutputRef{"output", 0}, a); pc.outputs().snapshot(OutputRef{"output", 1}, a); - int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0, Lifetime::Timeframe}); + int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0}); metadata = *counter; *counter = *counter + 1; if (*counter >= sTreeSize) { From 546a0985e104e43a0a2112ce0a009f8b5548b926 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 6 Dec 2023 12:03:33 +0100 Subject: [PATCH 156/726] DPL: improve crash reason message --- Framework/Core/src/runDataProcessing.cxx | 25 ++++++++---------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 54af8e9b4a25e..d42eeafb63006 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -578,31 +578,22 @@ void handle_crash(int sig) { // dump demangled stack trace void* array[1024]; - int size = backtrace(array, 1024); { - char const* msg = "*** Program crashed (Segmentation fault, FPE, BUS, ABRT, KILL, Unhandled Exception, ...)\nBacktrace by DPL:\n"; - auto retVal = write(STDERR_FILENO, msg, strlen(msg)); - msg = "UNKNOWN SIGNAL\n"; - if (sig == SIGSEGV) { - msg = "SEGMENTATION FAULT\n"; - } else if (sig == SIGABRT) { - msg = "ABRT\n"; - } else if (sig == SIGBUS) { - msg = "BUS ERROR\n"; - } else if (sig == SIGILL) { - msg = "ILLEGAL INSTRUCTION\n"; - } else if (sig == SIGFPE) { + char buffer[1024]; + char const* msg = "*** Program crashed (%s)\nBacktrace by DPL:\n"; + snprintf(buffer, 1024, msg, strsignal(sig)); + if (sig == SIGFPE) { if (std::fetestexcept(FE_DIVBYZERO)) { - msg = "FLOATING POINT EXCEPTION (DIVISION BY ZERO)\n"; + snprintf(buffer, 1024, msg, "FLOATING POINT EXCEPTION - DIVISION BY ZERO"); } else if (std::fetestexcept(FE_INVALID)) { - msg = "FLOATING POINT EXCEPTION (INVALID RESULT)\n"; + snprintf(buffer, 1024, msg, "FLOATING POINT EXCEPTION - INVALID RESULT"); } else { - msg = "FLOATING POINT EXCEPTION (UNKNOWN REASON)\n"; + snprintf(buffer, 1024, msg, "FLOATING POINT EXCEPTION - UNKNOWN REASON"); } } - retVal = write(STDERR_FILENO, msg, strlen(msg)); + auto retVal = write(STDERR_FILENO, buffer, strlen(buffer)); (void)retVal; } demangled_backtrace_symbols(array, size, STDERR_FILENO); From 
c1e355cfcfeab45548852b62bfe2f32b805ee775 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 7 Dec 2023 22:08:39 +0100 Subject: [PATCH 157/726] DPL: exit with 128 + signal number when dying due to a signal --- Framework/Core/src/runDataProcessing.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index d42eeafb63006..21bf8fba49a61 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -874,6 +874,7 @@ bool processSigChild(DeviceInfos& infos, DeviceSpecs& specs) } else { if (WIFSIGNALED(status)) { int exitSignal = WTERMSIG(status); + es = exitSignal + 128; LOGP(error, "Workflow crashed - pid {} ({}) was killed abnormally with {} and exited with {}", pid, id, strsignal(exitSignal), es); } else { LOGP(error, "pid {} ({}) crashed with or was killed with exit code {}", pid, id, es); From 1878f8738fea3ce850090e824d288d5d9036bdb4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 7 Dec 2023 22:08:39 +0100 Subject: [PATCH 158/726] DPL: exit with 128 + signal number when dying due to a signal --- Framework/Core/src/runDataProcessing.cxx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index d42eeafb63006..def8a76a12c49 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -874,9 +874,11 @@ bool processSigChild(DeviceInfos& infos, DeviceSpecs& specs) } else { if (WIFSIGNALED(status)) { int exitSignal = WTERMSIG(status); - LOGP(error, "Workflow crashed - pid {} ({}) was killed abnormally with {} and exited with {}", pid, id, strsignal(exitSignal), es); + es = exitSignal + 128; + LOGP(error, "Workflow crashed - PID {} ({}) was killed abnormally with {} and exited code was set to {}.", pid, id, strsignal(exitSignal), es); } else { - LOGP(error, "pid {} ({}) crashed with or was killed with exit code {}", pid, id, es); + es = 128; + LOGP(error, "PID {} ({}) did not exit correctly however it's not clear why. 
Exit code forced to {}.", pid, id, es); } } hasError |= true; From 9410a658a04f85f840512bffc15e6dfa67eabdd5 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 9 Dec 2023 17:29:50 +0100 Subject: [PATCH 159/726] Fix a few more Output with Lifetime::Timeframe --- .../include/CommonDataFormat/FlatHisto1D.h | 2 +- .../include/CommonDataFormat/FlatHisto2D.h | 2 +- .../workflow/include/FT0Workflow/RawReaderFT0.h | 16 ++++++++-------- .../FT0/workflow/src/FT0DataDecoderDPLSpec.cxx | 9 ++++----- .../tofworkflow/src/RecoWorkflowSpec.cxx | 6 +++--- .../src/TPCResidualReaderSpec.cxx | 2 +- .../PHOS/calib/src/PHOSTurnonCalibDevice.cxx | 4 ++-- .../TPC/workflow/src/LinkZSToDigitsSpec.cxx | 2 +- .../ITS3/workflow/src/ClusterReaderSpec.cxx | 10 +++++----- 9 files changed, 26 insertions(+), 27 deletions(-) diff --git a/DataFormats/common/include/CommonDataFormat/FlatHisto1D.h b/DataFormats/common/include/CommonDataFormat/FlatHisto1D.h index ac91eafc4d147..f8d49042dd8c5 100644 --- a/DataFormats/common/include/CommonDataFormat/FlatHisto1D.h +++ b/DataFormats/common/include/CommonDataFormat/FlatHisto1D.h @@ -34,7 +34,7 @@ namespace dataformats Fast 1D histo class which can be messages as FlatHisto1D histo(nbins, xmin, xmax); histo.fill(...); - pc.outputs().snapshot(Output{"Origin", "Desc", 0, Lifetime::Timeframe}, histo.getBase()); + pc.outputs().snapshot(Output{"Origin", "Desc", 0}, histo.getBase()); and received (read only!) as const auto hdata = pc.inputs().get>("histodata"); diff --git a/DataFormats/common/include/CommonDataFormat/FlatHisto2D.h b/DataFormats/common/include/CommonDataFormat/FlatHisto2D.h index 9102d53edeaaa..47793618c0649 100644 --- a/DataFormats/common/include/CommonDataFormat/FlatHisto2D.h +++ b/DataFormats/common/include/CommonDataFormat/FlatHisto2D.h @@ -35,7 +35,7 @@ namespace dataformats Fast 2D histo class which can be messages as FlatHisto2D histo(nbinsX, xmin, xmax, nbinsY, ymin, ymax); histo.fill(...); - pc.outputs().snapshot(Output{"Origin", "Desc", 0, Lifetime::Timeframe}, histo.getBase()); + pc.outputs().snapshot(Output{"Origin", "Desc", 0}, histo.getBase()); and received (read only!) 
as const auto hdata = pc.inputs().get>("histodata"); diff --git a/Detectors/FIT/FT0/workflow/include/FT0Workflow/RawReaderFT0.h b/Detectors/FIT/FT0/workflow/include/FT0Workflow/RawReaderFT0.h index f50b3d224b32c..f7729394db652 100644 --- a/Detectors/FIT/FT0/workflow/include/FT0Workflow/RawReaderFT0.h +++ b/Detectors/FIT/FT0/workflow/include/FT0Workflow/RawReaderFT0.h @@ -80,10 +80,10 @@ class RawReaderFT0 : public RawReaderFT0BaseNorm } void makeSnapshot(o2::framework::ProcessingContext& pc) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mVecDigits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mVecChannelData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0}, mVecDigits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0}, mVecChannelData); if constexpr (sUseTrgInput) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TRIGGERINPUT", 0, o2::framework::Lifetime::Timeframe}, mVecTriggerInput); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TRIGGERINPUT", 0}, mVecTriggerInput); } } bool mDumpData; @@ -136,11 +136,11 @@ class RawReaderFT0ext : public RawReaderFT0BaseExt } void makeSnapshot(o2::framework::ProcessingContext& pc) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mVecDigits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mVecChannelData); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSTRGEXT", 0, o2::framework::Lifetime::Timeframe}, mVecTrgExt); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0}, mVecDigits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0}, mVecChannelData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSTRGEXT", 0}, mVecTrgExt); if constexpr (sUseTrgInput) { - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TRIGGERINPUT", 0, o2::framework::Lifetime::Timeframe}, mVecTriggerInput); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "TRIGGERINPUT", 0}, mVecTriggerInput); } } bool mDumpData; @@ -153,4 +153,4 @@ class RawReaderFT0ext : public RawReaderFT0BaseExt } // namespace ft0 } // namespace o2 -#endif \ No newline at end of file +#endif diff --git a/Detectors/FIT/FT0/workflow/src/FT0DataDecoderDPLSpec.cxx b/Detectors/FIT/FT0/workflow/src/FT0DataDecoderDPLSpec.cxx index 945e7ad9d84d1..6d81740dd2173 100644 --- a/Detectors/FIT/FT0/workflow/src/FT0DataDecoderDPLSpec.cxx +++ b/Detectors/FIT/FT0/workflow/src/FT0DataDecoderDPLSpec.cxx @@ -28,12 +28,11 @@ namespace ft0 void FT0DataDecoderDPLSpec::run(ProcessingContext& pc) { auto t1 = std::chrono::high_resolution_clock::now(); - auto dummyOutput = [&pc, this]() { this->mVecDigits.resize(0); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mVecDigits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0}, mVecDigits); this->mVecChannelData.resize(0); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, 
mVecChannelData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0}, mVecChannelData); }; // if we see requested data type input with 0xDEADBEEF subspec and 0 payload this means that the "delayed message" @@ -529,8 +528,8 @@ void FT0DataDecoderDPLSpec::run(ProcessingContext& pc) // Due to empty Digit container this dummy object will never participate in any further tasks. mVecChannelData.emplace_back(); } - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0, o2::framework::Lifetime::Timeframe}, mVecDigits); - pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0, o2::framework::Lifetime::Timeframe}, mVecChannelData); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSBC", 0}, mVecDigits); + pc.outputs().snapshot(o2::framework::Output{o2::header::gDataOriginFT0, "DIGITSCH", 0}, mVecChannelData); auto t2 = std::chrono::high_resolution_clock::now(); auto delay = std::chrono::duration_cast(t2 - t1); LOG(debug) << "Decoder delay: " << delay.count(); diff --git a/Detectors/GlobalTrackingWorkflow/tofworkflow/src/RecoWorkflowSpec.cxx b/Detectors/GlobalTrackingWorkflow/tofworkflow/src/RecoWorkflowSpec.cxx index 18b26355ff5ec..ab4f90464b31b 100644 --- a/Detectors/GlobalTrackingWorkflow/tofworkflow/src/RecoWorkflowSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/tofworkflow/src/RecoWorkflowSpec.cxx @@ -97,11 +97,11 @@ class TOFDPLRecoWorkflowTask // << " DIGITS TO " << mClustersArray.size() << " CLUSTERS"; // send matching-info - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPC", 0, Lifetime::Timeframe}, mMatcher.getMatchedTrackVector()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MTC_ITSTPC", 0}, mMatcher.getMatchedTrackVector()); if (mUseMC) { - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMATCHTOF", 0, Lifetime::Timeframe}, mMatcher.getMatchedTOFLabelsVector()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "MCMATCHTOF", 0}, mMatcher.getMatchedTOFLabelsVector()); } - pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CALIBDATA", 0, Lifetime::Timeframe}, mMatcher.getCalibVector()); + pc.outputs().snapshot(Output{o2::header::gDataOriginTOF, "CALIBDATA", 0}, mMatcher.getCalibVector()); mTimer.Stop(); } diff --git a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCResidualReaderSpec.cxx b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCResidualReaderSpec.cxx index 23a6c8fd06694..b3040d99bc4f2 100644 --- a/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCResidualReaderSpec.cxx +++ b/Detectors/GlobalTrackingWorkflow/tpcinterpolationworkflow/src/TPCResidualReaderSpec.cxx @@ -251,7 +251,7 @@ void TPCResidualReader::run(ProcessingContext& pc) mTrackResiduals.closeOutputFile(); // FIXME remove when map output is handled properly // const auto& voxResArray = mTrackResiduals.getVoxelResults(); // array with one vector of results per sector - // pc.outputs().snapshot(Output{"GLO", "VOXELRESULTS", 0, Lifetime::Timeframe}, voxResArray); // send results as one large vector? + // pc.outputs().snapshot(Output{"GLO", "VOXELRESULTS", 0}, voxResArray); // send results as one large vector? 
pc.services().get().endOfStream(); pc.services().get().readyToQuit(QuitRequest::Me); diff --git a/Detectors/PHOS/calib/src/PHOSTurnonCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSTurnonCalibDevice.cxx index f4ff1a5c4042b..52ec8cef0b438 100644 --- a/Detectors/PHOS/calib/src/PHOSTurnonCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSTurnonCalibDevice.cxx @@ -80,8 +80,8 @@ void PHOSTurnonCalibDevice::endOfStream(o2::framework::EndOfStreamContext& ec) LOG(alarm) << "Incorrect fit results"; } // //Send result to QC - // ec.outputs().snapshot(o2::framework::Output{"PHS", "TRIGMAPDIFF", 0, o2::framework::Lifetime::Timeframe}, mTrigMapDiff); - // ec.outputs().snapshot(o2::framework::Output{"PHS", "TURNONDIFF", 0, o2::framework::Lifetime::Timeframe}, mTurnOnDiff); + // ec.outputs().snapshot(o2::framework::Output{"PHS", "TRIGMAPDIFF", 0}, mTrigMapDiff); + // ec.outputs().snapshot(o2::framework::Output{"PHS", "TURNONDIFF", 0}, mTurnOnDiff); } o2::framework::DataProcessorSpec o2::phos::getPHOSTurnonCalibDeviceSpec(bool useCCDB) diff --git a/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx b/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx index c42a93f7a7f04..a278809cbeccd 100644 --- a/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx +++ b/Detectors/TPC/workflow/src/LinkZSToDigitsSpec.cxx @@ -109,7 +109,7 @@ o2::framework::DataProcessorSpec getLinkZSToDigitsSpec(int channel, const std::s o2::tpc::TPCSectorHeader header{sector}; header.activeSectors = processAttributes->activeSectors; // digit for now are transported per sector, not per lane - // pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(channel), Lifetime::Timeframe, header}, + // pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(channel), header}, pc.outputs().snapshot(Output{"TPC", "DIGITS", static_cast(sector), header}, const_cast&>(digits)); }; diff --git a/Detectors/Upgrades/ITS3/workflow/src/ClusterReaderSpec.cxx b/Detectors/Upgrades/ITS3/workflow/src/ClusterReaderSpec.cxx index b50ab65d3720f..093e53c92eaf3 100644 --- a/Detectors/Upgrades/ITS3/workflow/src/ClusterReaderSpec.cxx +++ b/Detectors/Upgrades/ITS3/workflow/src/ClusterReaderSpec.cxx @@ -53,14 +53,14 @@ void ClusterReader::run(ProcessingContext& pc) // This is a very ugly way of providing DataDescription, which anyway does not need to contain detector name. 
// To be fixed once the names-definition class is ready - pc.outputs().snapshot(Output{mOrigin, "CLUSTERSROF", 0, Lifetime::Timeframe}, mClusROFRec); - pc.outputs().snapshot(Output{mOrigin, "COMPCLUSTERS", 0, Lifetime::Timeframe}, mClusterCompArray); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERSROF", 0}, mClusROFRec); + pc.outputs().snapshot(Output{mOrigin, "COMPCLUSTERS", 0}, mClusterCompArray); if (mUsePatterns) { - pc.outputs().snapshot(Output{mOrigin, "PATTERNS", 0, Lifetime::Timeframe}, mPatternsArray); + pc.outputs().snapshot(Output{mOrigin, "PATTERNS", 0}, mPatternsArray); } if (mUseMC) { - pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMCTR", 0, Lifetime::Timeframe}, mClusterMCTruth); - pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMC2ROF", 0, Lifetime::Timeframe}, mClusMC2ROFs); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMCTR", 0}, mClusterMCTruth); + pc.outputs().snapshot(Output{mOrigin, "CLUSTERSMC2ROF", 0}, mClusMC2ROFs); } if (mTree->GetReadEntry() + 1 >= mTree->GetEntries()) { From e89c48c00dddeeccc32892887a19e1f7169f46c0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 7 Dec 2023 22:22:43 +0100 Subject: [PATCH 160/726] DPL: drop lifetime member from Output This is apparently not really used for anything and in general not passed correctly. Lifetime is a static property of the data flow, so it does not make sense to have it on a per message basis. --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 4 ++-- Framework/Core/include/Framework/Output.h | 19 ++---------------- Framework/Core/src/CommonServices.cxx | 2 +- Framework/Core/src/DataAllocator.cxx | 4 ++-- Framework/Core/test/test_DataAllocator.cxx | 20 +++++++++---------- Framework/Core/test/test_ParallelPipeline.cxx | 8 ++++---- Framework/Core/test/test_ProcessorOptions.cxx | 2 +- .../Core/test/test_StaggeringWorkflow.cxx | 6 +++--- Framework/Core/test/test_TableBuilder.cxx | 2 +- .../Utils/include/DPLUtils/RootTreeReader.h | 8 ++++---- Framework/Utils/src/Utils.cxx | 2 +- Framework/Utils/test/test_RootTreeReader.cxx | 5 ++--- .../test/test_RootTreeWriterWorkflow.cxx | 2 +- 13 files changed, 34 insertions(+), 50 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 0e067dc2f2d52..e45e1c6996a98 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -185,7 +185,7 @@ auto populateCacheWith(std::shared_ptr const& helper, LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); - Output output{concrete.origin, concrete.description, concrete.subSpec, route.matcher.lifetime}; + Output output{concrete.origin, concrete.description, concrete.subSpec}; auto&& v = allocator.makeVector(output); std::map metadata; std::map headers; @@ -343,7 +343,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() checkValidity = true; // never skip check if the cache is empty } LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; - Output output{"CTP", "OrbitReset", 0, Lifetime::Condition}; + Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); const auto& api = helper->getAPI(path); diff --git a/Framework/Core/include/Framework/Output.h b/Framework/Core/include/Framework/Output.h index 9f85d6e8edb92..a2abf5f7600a9 100644 --- a/Framework/Core/include/Framework/Output.h +++ 
b/Framework/Core/include/Framework/Output.h @@ -28,7 +28,6 @@ struct Output { header::DataOrigin origin; header::DataDescription description; header::DataHeader::SubSpecificationType subSpec = 0; - enum Lifetime lifetime = Lifetime::Timeframe; header::Stack metaHeader = {}; Output(header::DataOrigin o, header::DataDescription d) : origin(o), description(d) {} @@ -38,22 +37,11 @@ struct Output { { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l) - : origin(o), description(d), subSpec(s), lifetime(l) - { - } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, header::Stack&& stack) : origin(o), description(d), subSpec(s), metaHeader(std::move(stack)) { } - Output(header::DataOrigin o, header::DataDescription d, header::DataHeader::SubSpecificationType s, Lifetime l, - header::Stack&& stack) - : origin(o), description(d), subSpec(s), lifetime(l), metaHeader(std::move(stack)) - { - } - Output(header::DataHeader const& header) : origin(header.dataOrigin), description(header.dataDescription), subSpec(header.subSpecification) { @@ -65,7 +53,6 @@ struct Output { : origin(rhs.origin), description(rhs.description), subSpec(rhs.subSpec), - lifetime(rhs.lifetime), metaHeader(std::move(rhs.metaHeader)) { } @@ -77,16 +64,14 @@ struct Output { origin = rhs.origin; description = rhs.description; subSpec = rhs.subSpec; - lifetime = rhs.lifetime; metaHeader = std::move(rhs.metaHeader); return *this; } bool operator==(const Output& that) const { - return origin == that.origin && description == that.description && subSpec == that.subSpec && - lifetime == that.lifetime; - }; + return origin == that.origin && description == that.description && subSpec == that.subSpec; + } }; } // namespace o2 diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 7d502216bc509..05b8fba47e532 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -425,7 +425,7 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() if (concrete.subSpec == 0) { continue; } - auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec, output.matcher.lifetime}); + auto& stfDist = pc.outputs().make(Output{concrete.origin, concrete.description, concrete.subSpec}); stfDist.id = timingInfo.timeslice; stfDist.firstOrbit = timingInfo.firstTForbit; stfDist.runNumber = timingInfo.runNumber; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 296e094bffe26..1230d2e942f6e 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -294,7 +294,7 @@ Output DataAllocator::getOutputByBind(OutputRef&& ref) if (allowedOutputRoutes[ri].matcher.binding.value == ref.label) { auto spec = allowedOutputRoutes[ri].matcher; auto dataType = DataSpecUtils::asConcreteDataTypeMatcher(spec); - return Output{dataType.origin, dataType.description, ref.subSpec, spec.lifetime, std::move(ref.headerStack)}; + return Output{dataType.origin, dataType.description, ref.subSpec, std::move(ref.headerStack)}; } } std::string availableRoutes; @@ -342,7 +342,7 @@ void DataAllocator::cookDeadBeef(const Output& spec) // We get the output route from the original spec, but we send it // using the binding of the deadbeef subSpecification. 
RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); - auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef, Lifetime::Timeframe}; + auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef}; auto headerMessage = headerMessageFromOutput(deadBeefOutput, routeIndex, header::gSerializationMethodNone, 0); addPartToContext(proxy.createOutputMessage(routeIndex, 0), deadBeefOutput, header::gSerializationMethodNone); diff --git a/Framework/Core/test/test_DataAllocator.cxx b/Framework/Core/test/test_DataAllocator.cxx index d880da72d7cb4..894fb5f38368a 100644 --- a/Framework/Core/test/test_DataAllocator.cxx +++ b/Framework/Core/test/test_DataAllocator.cxx @@ -45,7 +45,7 @@ using namespace o2::framework; // this function is only used to do the static checks for API return types void doTypeChecks() { - const Output output{"TST", "DUMMY", 0, Lifetime::Timeframe}; + const Output output{"TST", "DUMMY", 0}; // we require references to objects owned by allocator context static_assert(std::is_lvalue_reference().make(output))>::value); static_assert(std::is_lvalue_reference().make(output, "test"))>::value); @@ -83,24 +83,24 @@ DataProcessorSpec getSourceSpec() // picked by the framework is no serialization test::MetaHeader meta1{42}; test::MetaHeader meta2{23}; - pc.outputs().snapshot(Output{"TST", "MESSAGEABLE", 0, Lifetime::Timeframe, {meta1, meta2}}, a); - pc.outputs().snapshot(Output{"TST", "MSGBLEROOTSRLZ", 0, Lifetime::Timeframe}, + pc.outputs().snapshot(Output{"TST", "MESSAGEABLE", 0, {meta1, meta2}}, a); + pc.outputs().snapshot(Output{"TST", "MSGBLEROOTSRLZ", 0}, o2::framework::ROOTSerialized(a)); // class Polymorphic is not messageable, so the serialization type is deduced // from the fact that the type has a dictionary and can be ROOT-serialized. 
- pc.outputs().snapshot(Output{"TST", "ROOTNONTOBJECT", 0, Lifetime::Timeframe}, b); + pc.outputs().snapshot(Output{"TST", "ROOTNONTOBJECT", 0}, b); // vector of ROOT serializable class - pc.outputs().snapshot(Output{"TST", "ROOTVECTOR", 0, Lifetime::Timeframe}, c); + pc.outputs().snapshot(Output{"TST", "ROOTVECTOR", 0}, c); // deque of simple types - pc.outputs().snapshot(Output{"TST", "DEQUE", 0, Lifetime::Timeframe}, testDequePayload); + pc.outputs().snapshot(Output{"TST", "DEQUE", 0}, testDequePayload); // likewise, passed anonymously with char type and class name o2::framework::ROOTSerialized d(*((char*)&c), "vector"); - pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC", 0, Lifetime::Timeframe}, d); + pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC", 0}, d); // vector of ROOT serializable class wrapped with TClass info as hint auto* cl = TClass::GetClass(typeid(decltype(c))); ASSERT_ERROR(cl != nullptr); o2::framework::ROOTSerialized e(*((char*)&c), cl); - pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC2", 0, Lifetime::Timeframe}, e); + pc.outputs().snapshot(Output{"TST", "ROOTSERLZDVEC2", 0}, e); // test the 'make' methods pc.outputs().make(OutputRef{"makesingle", 0}) = a; auto& multi = pc.outputs().make(OutputRef{"makespan", 0}, 3); @@ -111,7 +111,7 @@ DataProcessorSpec getSourceSpec() // test the adopt method auto freefct = [](void* data, void* hint) {}; // simply ignore the cleanup for the test static std::string teststring = "adoptchunk"; - pc.outputs().adoptChunk(Output{"TST", "ADOPTCHUNK", 0, Lifetime::Timeframe}, teststring.data(), teststring.length(), freefct, nullptr); + pc.outputs().adoptChunk(Output{"TST", "ADOPTCHUNK", 0}, teststring.data(), teststring.length(), freefct, nullptr); // test resizable data chunk, initial size 0 and grow auto& growchunk = pc.outputs().newChunk(OutputRef{"growchunk", 0}, 0); growchunk.resize(sizeof(o2::test::TriviallyCopyable)); @@ -307,7 +307,7 @@ DataProcessorSpec getSinkSpec() ASSERT_ERROR((object12[0] == o2::test::TriviallyCopyable{42, 23, 0xdead})); ASSERT_ERROR((object12[1] == o2::test::TriviallyCopyable{10, 20, 0xacdc})); // forward the read-only span on a different route - pc.outputs().snapshot(Output{"TST", "MSGABLVECTORCPY", 0, Lifetime::Timeframe}, object12); + pc.outputs().snapshot(Output{"TST", "MSGABLVECTORCPY", 0}, object12); LOG(info) << "extracting TNamed object from input13"; auto object13 = pc.inputs().get("input13"); diff --git a/Framework/Core/test/test_ParallelPipeline.cxx b/Framework/Core/test/test_ParallelPipeline.cxx index e13869873fca1..47106855a8f74 100644 --- a/Framework/Core/test/test_ParallelPipeline.cxx +++ b/Framework/Core/test/test_ParallelPipeline.cxx @@ -64,7 +64,7 @@ std::vector defineDataProcessing(ConfigContext const&) << *input.spec << ": " << *((int*)input.payload); auto const* dataheader = DataRefUtils::getHeader(input); //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); - auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification, Lifetime::Timeframe}); + auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification}); ASSERT_ERROR(ctx.inputs().get(input.spec->binding.c_str()) == parallelContext.index1D()); data = parallelContext.index1D(); } @@ -85,10 +85,10 @@ std::vector defineDataProcessing(ConfigContext const&) // TODO: there is a bug in the API for using OutputRef, returns an rvalue which can not be bound to // lvalue reference //auto& data = ctx.outputs().make(OutputRef{"output", 
dataheader->subSpecification}); - auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification, Lifetime::Timeframe}); + auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification}); data = ctx.inputs().get(input.spec->binding.c_str()); //auto& meta = ctx.outputs().make(OutputRef{"metadt", dataheader->subSpecification}); - auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification, Lifetime::Timeframe}); + auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification}); meta = dataheader->subSpecification; } }}}, @@ -145,7 +145,7 @@ std::vector defineDataProcessing(ConfigContext const&) if (multiplicities[pipeline] == 0) { continue; } - ctx.outputs().make(Output{"TST", "TRIGGER", subspecs[index], Lifetime::Timeframe}) = pipeline; + ctx.outputs().make(Output{"TST", "TRIGGER", subspecs[index]}) = pipeline; multiplicities[pipeline++]--; if (pipeline >= nPipelines) { pipeline = 0; diff --git a/Framework/Core/test/test_ProcessorOptions.cxx b/Framework/Core/test/test_ProcessorOptions.cxx index 8de90e0e23441..1751176eb639e 100644 --- a/Framework/Core/test/test_ProcessorOptions.cxx +++ b/Framework/Core/test/test_ProcessorOptions.cxx @@ -45,7 +45,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const&) } // there is nothing to do, simply stop the workflow but we have to send at least one message // to make sure that the callback of the consumer is called - ctx.outputs().make(Output{"TST", "TEST", 0, Lifetime::Timeframe}) = 42; + ctx.outputs().make(Output{"TST", "TEST", 0}) = 42; ctx.services().get().endOfStream(); *isReady = true; }; diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index 0e98c4324bc76..edc07d598465b 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -85,9 +85,9 @@ std::vector defineDataProcessing(ConfigContext const&) // since the snapshot copy is ready for sending it is scheduled but held back // because of the CompletionPolicy trigger matcher. This message will be // sent together with the second message. 
- outputs.snapshot(Output{"PROD", "CHANNEL", subspec, Lifetime::Timeframe}, subspec); + outputs.snapshot(Output{"PROD", "CHANNEL", subspec}, subspec); device.waitFor(100); - outputs.snapshot(Output{"PROD", "TRIGGER", subspec, Lifetime::Timeframe}, subspec); + outputs.snapshot(Output{"PROD", "TRIGGER", subspec}, subspec); device.waitFor(100); } control.endOfStream(); @@ -106,7 +106,7 @@ std::vector defineDataProcessing(ConfigContext const&) LOG(info) << "processing " << input.spec->binding << " " << data; // check if the channel binding starts with 'trigger' if (input.spec->binding.find("trigger") == 0) { - pc.outputs().make(Output{"PROC", "CHANNEL", data, Lifetime::Timeframe}) = data; + pc.outputs().make(Output{"PROC", "CHANNEL", data}) = data; } nActiveInputs++; } diff --git a/Framework/Core/test/test_TableBuilder.cxx b/Framework/Core/test/test_TableBuilder.cxx index a7b4739054d03..26a0f5a930b73 100644 --- a/Framework/Core/test/test_TableBuilder.cxx +++ b/Framework/Core/test/test_TableBuilder.cxx @@ -240,7 +240,7 @@ TEST_CASE("TestSoAIntegration") TEST_CASE("TestDataAllocatorReturnType") { - const Output output{"TST", "DUMMY", 0, Lifetime::Timeframe}; + const Output output{"TST", "DUMMY", 0}; } TEST_CASE("TestPodInjestion") diff --git a/Framework/Utils/include/DPLUtils/RootTreeReader.h b/Framework/Utils/include/DPLUtils/RootTreeReader.h index 42a8c9d01d0d0..109d79f9fad78 100644 --- a/Framework/Utils/include/DPLUtils/RootTreeReader.h +++ b/Framework/Utils/include/DPLUtils/RootTreeReader.h @@ -51,11 +51,11 @@ struct DefaultKey { enum Lifetime lifetime = Lifetime::Timeframe; DefaultKey(const Output& desc) - : origin(desc.origin), description(desc.description), subSpec(desc.subSpec), lifetime(desc.lifetime) + : origin(desc.origin), description(desc.description), subSpec(desc.subSpec) { } - operator Output() const { return Output{origin, description, subSpec, lifetime}; } + operator Output() const { return Output{origin, description, subSpec}; } }; } // namespace rtr @@ -302,7 +302,7 @@ class GenericRootTreeReader } auto snapshot = [&context, &stackcreator](const KeyType& key, const auto& object) { - context.outputs().snapshot(Output{key.origin, key.description, key.subSpec, key.lifetime, std::move(stackcreator())}, object); + context.outputs().snapshot(Output{key.origin, key.description, key.subSpec, std::move(stackcreator())}, object); }; char* data = nullptr; @@ -310,7 +310,7 @@ class GenericRootTreeReader mBranch->GetEntry(entry); // execute hook if it was registered; if this return true do not proceed further - if (mPublishHook != nullptr && (*mPublishHook).hook(mName, context, Output{mKey.origin, mKey.description, mKey.subSpec, mKey.lifetime, std::move(stackcreator())}, data)) { + if (mPublishHook != nullptr && (*mPublishHook).hook(mName, context, Output{mKey.origin, mKey.description, mKey.subSpec, std::move(stackcreator())}, data)) { } // try to figureout when we need to do something special diff --git a/Framework/Utils/src/Utils.cxx b/Framework/Utils/src/Utils.cxx index 3a52c46e074af..1dc0882c2f55d 100644 --- a/Framework/Utils/src/Utils.cxx +++ b/Framework/Utils/src/Utils.cxx @@ -28,7 +28,7 @@ namespace workflows Output getOutput(const o2f::OutputSpec outputSpec) { auto concrete = DataSpecUtils::asConcreteDataMatcher(outputSpec); - return Output{concrete.origin, concrete.description, concrete.subSpec, outputSpec.lifetime}; + return Output{concrete.origin, concrete.description, concrete.subSpec}; } // This method can convert a vector of OutputSpec into a vector of Output. 
diff --git a/Framework/Utils/test/test_RootTreeReader.cxx b/Framework/Utils/test/test_RootTreeReader.cxx index 0390a29f807b1..bf652fca6f97a 100644 --- a/Framework/Utils/test/test_RootTreeReader.cxx +++ b/Framework/Utils/test/test_RootTreeReader.cxx @@ -67,11 +67,10 @@ DataProcessorSpec getSourceSpec() testFile->Close(); } - constexpr auto persistency = Lifetime::Transient; auto reader = std::make_shared("testtree", // tree name fileName.c_str(), // input file name - RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0, persistency}, "msgblarray"}, - Output{"TST", "ARRAYOFDATA", 0, persistency}, + RootTreeReader::BranchDefinition>{Output{"TST", "ARRAYOFMSGBL", 0}, "msgblarray"}, + Output{"TST", "ARRAYOFDATA", 0}, "dataarray", RootTreeReader::PublishingMode::Single); diff --git a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx index 3213c1775c758..9a9b849d8d9d2 100644 --- a/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx +++ b/Framework/Utils/test/test_RootTreeWriterWorkflow.cxx @@ -143,7 +143,7 @@ DataProcessorSpec getSourceSpec() o2::test::Polymorphic a(*counter); pc.outputs().snapshot(OutputRef{"output", 0}, a); pc.outputs().snapshot(OutputRef{"output", 1}, a); - int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0, Lifetime::Timeframe}); + int& metadata = pc.outputs().make(Output{"TST", "METADATA", 0}); metadata = *counter; *counter = *counter + 1; if (*counter >= sTreeSize) { From 97b5ea02a641dbe1cb0af59b6fd8180cf76d63ff Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 11 Dec 2023 08:45:47 +0100 Subject: [PATCH 161/726] Fix error message --- Detectors/Upgrades/ITS3/macros/test/CreateDictionariesITS3.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/Upgrades/ITS3/macros/test/CreateDictionariesITS3.C b/Detectors/Upgrades/ITS3/macros/test/CreateDictionariesITS3.C index 1c53390946c01..b466ebf222878 100644 --- a/Detectors/Upgrades/ITS3/macros/test/CreateDictionariesITS3.C +++ b/Detectors/Upgrades/ITS3/macros/test/CreateDictionariesITS3.C @@ -168,7 +168,7 @@ void CreateDictionariesITS3(bool saveDeltas = false, } clusTree->GetEntry(0); if (clusTree->GetEntries() > 1 && !hitfile.empty()) { - LOGP(error, "Hits are provided but the cluster tree containes {} entries, looks like real data"); + LOGP(error, "Hits are provided but the cluster tree containes {} entries, looks like real data", clusTree->GetEntries()); return; } From 3ec19bc77c2403b3014446ab9921bc648d18b2c1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 11 Dec 2023 23:05:54 +0100 Subject: [PATCH 162/726] DPL: add test to verify query builder with Lifetime::timeframe --- .../Core/test/test_DataDescriptorMatcher.cxx | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/Framework/Core/test/test_DataDescriptorMatcher.cxx b/Framework/Core/test/test_DataDescriptorMatcher.cxx index 8b74f7d9f6b6b..a76fddb925817 100644 --- a/Framework/Core/test/test_DataDescriptorMatcher.cxx +++ b/Framework/Core/test/test_DataDescriptorMatcher.cxx @@ -660,7 +660,7 @@ TEST_CASE("DataQuery") auto result5 = DataDescriptorQueryBuilder::parse("x:TST/A1/0?lifetime=sporadic&ccdb-path=GLO/Config/GRPECS&key3=value3"); REQUIRE(result5.size() == 1); - result5[0].lifetime = Lifetime::Sporadic; + REQUIRE(result5[0].lifetime == Lifetime::Sporadic); REQUIRE(result5[0].metadata.size() == 3); REQUIRE(result5[0].metadata[0].name == "lifetime"); 
REQUIRE(result5[0].metadata[0].defaultValue.get() == "sporadic"); @@ -668,6 +668,29 @@ TEST_CASE("DataQuery") REQUIRE(result5[0].metadata[1].defaultValue.get() == "GLO/Config/GRPECS"); REQUIRE(result5[0].metadata[2].name == "key3"); REQUIRE(result5[0].metadata[2].defaultValue.get() == "value3"); + +} + +TEST_CASE("DataQueryLifetime") { + auto result0 = DataDescriptorQueryBuilder::parse("x:TST/A1?lifetime=timeframe"); + REQUIRE(result0.size() == 1); + REQUIRE(result0[0].lifetime == Lifetime::Timeframe); + auto result1 = DataDescriptorQueryBuilder::parse("x:TST/A1?lifetime=sporadic"); + REQUIRE(result1.size() == 1); + REQUIRE(result1[0].lifetime == Lifetime::Sporadic); + auto result2 = DataDescriptorQueryBuilder::parse("x:TST/A1?lifetime=condition"); + REQUIRE(result2.size() == 1); + REQUIRE(result2[0].lifetime == Lifetime::Condition); + + auto result3 = DataDescriptorQueryBuilder::parse("x:TST/A1/1?lifetime=timeframe"); + REQUIRE(result3.size() == 1); + REQUIRE(result3[0].lifetime == Lifetime::Timeframe); + auto result4 = DataDescriptorQueryBuilder::parse("x:TST/A1/2?lifetime=sporadic"); + REQUIRE(result4.size() == 1); + REQUIRE(result4[0].lifetime == Lifetime::Sporadic); + auto result5 = DataDescriptorQueryBuilder::parse("x:TST/A1/3?lifetime=condition"); + REQUIRE(result5.size() == 1); + REQUIRE(result5[0].lifetime == Lifetime::Condition); } // Make sure that 10 and 1 subspect are matched differently From ced2ae84ecabcbfbf77a43da72aa0ede70ea2b98 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 11 Dec 2023 23:40:05 +0100 Subject: [PATCH 163/726] DPL: add test to verify query builder with Lifetime::timeframe --- .../Core/test/test_DataDescriptorMatcher.cxx | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/Framework/Core/test/test_DataDescriptorMatcher.cxx b/Framework/Core/test/test_DataDescriptorMatcher.cxx index 8b74f7d9f6b6b..e372ed06a8cb7 100644 --- a/Framework/Core/test/test_DataDescriptorMatcher.cxx +++ b/Framework/Core/test/test_DataDescriptorMatcher.cxx @@ -660,7 +660,7 @@ TEST_CASE("DataQuery") auto result5 = DataDescriptorQueryBuilder::parse("x:TST/A1/0?lifetime=sporadic&ccdb-path=GLO/Config/GRPECS&key3=value3"); REQUIRE(result5.size() == 1); - result5[0].lifetime = Lifetime::Sporadic; + REQUIRE(result5[0].lifetime == Lifetime::Sporadic); REQUIRE(result5[0].metadata.size() == 3); REQUIRE(result5[0].metadata[0].name == "lifetime"); REQUIRE(result5[0].metadata[0].defaultValue.get() == "sporadic"); @@ -670,6 +670,29 @@ TEST_CASE("DataQuery") REQUIRE(result5[0].metadata[2].defaultValue.get() == "value3"); } +TEST_CASE("DataQueryLifetime") +{ + auto result0 = DataDescriptorQueryBuilder::parse("x:TST/A1?lifetime=timeframe"); + REQUIRE(result0.size() == 1); + REQUIRE(result0[0].lifetime == Lifetime::Timeframe); + auto result1 = DataDescriptorQueryBuilder::parse("x:TST/A1?lifetime=sporadic"); + REQUIRE(result1.size() == 1); + REQUIRE(result1[0].lifetime == Lifetime::Sporadic); + auto result2 = DataDescriptorQueryBuilder::parse("x:TST/A1?lifetime=condition"); + REQUIRE(result2.size() == 1); + REQUIRE(result2[0].lifetime == Lifetime::Condition); + + auto result3 = DataDescriptorQueryBuilder::parse("x:TST/A1/1?lifetime=timeframe"); + REQUIRE(result3.size() == 1); + REQUIRE(result3[0].lifetime == Lifetime::Timeframe); + auto result4 = DataDescriptorQueryBuilder::parse("x:TST/A1/2?lifetime=sporadic"); + REQUIRE(result4.size() == 1); + REQUIRE(result4[0].lifetime == Lifetime::Sporadic); + auto result5 = 
DataDescriptorQueryBuilder::parse("x:TST/A1/3?lifetime=condition"); + REQUIRE(result5.size() == 1); + REQUIRE(result5[0].lifetime == Lifetime::Condition); +} + // Make sure that 10 and 1 subspect are matched differently TEST_CASE("MatchSubspec") From 7700c44dbca21d6e5ae89ddb3cc2ec84522c3ec0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 12 Dec 2023 13:06:16 +0100 Subject: [PATCH 164/726] Fix sign in AltroDecoder A 1 bit signed bitfield is -1, not 1. --- Detectors/PHOS/reconstruction/src/AltroDecoder.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/PHOS/reconstruction/src/AltroDecoder.cxx b/Detectors/PHOS/reconstruction/src/AltroDecoder.cxx index a1e205cbd2cbc..7202b27325c0e 100644 --- a/Detectors/PHOS/reconstruction/src/AltroDecoder.cxx +++ b/Detectors/PHOS/reconstruction/src/AltroDecoder.cxx @@ -284,7 +284,7 @@ void AltroDecoder::readTRUDigits(short absId, int payloadSize) currentsample += bunchlength + 2; } truDigitPack dp = {0}; - dp.mHeader = 1; + dp.mHeader = -1; dp.mAmp = maxAmp; dp.mTime = timeBin; int chId = (absId - Mapping::NCHANNELS - 1) % 224; From acfe390e6b8458ad29fc13da5332775fe7f64e37 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 12 Dec 2023 13:26:27 +0100 Subject: [PATCH 165/726] DPL: improve test to check for broken case in QC --- Framework/Core/test/test_DataDescriptorMatcher.cxx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Framework/Core/test/test_DataDescriptorMatcher.cxx b/Framework/Core/test/test_DataDescriptorMatcher.cxx index e372ed06a8cb7..9f83c2b5b5ac7 100644 --- a/Framework/Core/test/test_DataDescriptorMatcher.cxx +++ b/Framework/Core/test/test_DataDescriptorMatcher.cxx @@ -12,6 +12,7 @@ #include "Framework/DataDescriptorMatcher.h" #include "Framework/DataDescriptorQueryBuilder.h" #include "Framework/InputSpec.h" +#include "Framework/DataSpecUtils.h" #include "Headers/Stack.h" #include @@ -688,8 +689,11 @@ TEST_CASE("DataQueryLifetime") auto result4 = DataDescriptorQueryBuilder::parse("x:TST/A1/2?lifetime=sporadic"); REQUIRE(result4.size() == 1); REQUIRE(result4[0].lifetime == Lifetime::Sporadic); - auto result5 = DataDescriptorQueryBuilder::parse("x:TST/A1/3?lifetime=condition"); + auto result5 = DataDescriptorQueryBuilder::parse("noiseMap:TRD/NOISEMAP?lifetime=condition&ccdb-path=TRD/Calib/NoiseMapMCM"); REQUIRE(result5.size() == 1); + auto concrete = DataSpecUtils::asConcreteDataTypeMatcher(result5[0]); + REQUIRE(concrete.origin.as() == "TRD"); + REQUIRE(concrete.description.as() == "NOISEMAP"); REQUIRE(result5[0].lifetime == Lifetime::Condition); } From 022b9cc2f60c778c47cb24d0489423c4af12ee8b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 12 Dec 2023 15:35:24 +0100 Subject: [PATCH 166/726] DPL: get rid of simplified CompletionPolicy The full blown version has been there for a while now, and in any case it's required if one wants to access the oldest possible timeframe. 
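For illustration, a policy written against the extended callback (a minimal
sketch, not part of this patch: the policy name, the "processor" device match
and the exact include list are assumed) now looks like:

    #include "Framework/CompletionPolicy.h"
    #include "Framework/DeviceSpec.h"
    #include "Framework/InputSpan.h"
    #include "Framework/InputSpec.h"
    #include "Framework/ServiceRegistry.h"
    #include <vector>

    using namespace o2::framework;

    // "Consume when all inputs are present", expressed with the full signature.
    CompletionPolicy makeConsumeAllSketch()
    {
      // Select the devices this policy applies to (hypothetical name match).
      auto matcher = [](DeviceSpec const& device) -> bool {
        return device.name.find("processor") == 0;
      };
      // The extra arguments are the point of the full version: the positional
      // input specs and the service registry (e.g. the decongestion service
      // used by consumeWhenAllOrdered) are reachable from the callback.
      auto callback = [](InputSpan const& inputs, std::vector<InputSpec> const& specs,
                         ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp {
        for (auto const& input : inputs) {
          if (input.header == nullptr) {
            return CompletionPolicy::CompletionOp::Wait;
          }
        }
        return CompletionPolicy::CompletionOp::Consume;
      };
      return CompletionPolicy{"consume-all-sketch", matcher, callback};
    }

Policies that only need the InputSpan can simply ignore the two extra
parameters, as most helpers in CompletionPolicyHelpers.cxx now do.
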
--- .../TPCSectorCompletionPolicy.h | 2 +- .../Core/include/Framework/CompletionPolicy.h | 9 ++------- Framework/Core/src/CompletionPolicyHelpers.cxx | 17 +++++++---------- Framework/Core/src/DataRelayer.cxx | 13 +++++-------- Framework/Core/test/test_CompletionPolicy.cxx | 9 +++++++-- Framework/Core/test/test_StaggeringWorkflow.cxx | 2 +- 6 files changed, 23 insertions(+), 29 deletions(-) diff --git a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h index 33abded35624a..9f2a8b31dbdba 100644 --- a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h +++ b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h @@ -91,7 +91,7 @@ class TPCSectorCompletionPolicy return std::regex_match(device.name.begin(), device.name.end(), std::regex(expression.c_str())); }; - auto callback = [bRequireAll = mRequireAll, inputMatchers = mInputMatchers, externalInputMatchers = mExternalInputMatchers, pTpcSectorMask = mTpcSectorMask, orderCheck = mOrderCheck](framework::InputSpan const& inputs) -> framework::CompletionPolicy::CompletionOp { + auto callback = [bRequireAll = mRequireAll, inputMatchers = mInputMatchers, externalInputMatchers = mExternalInputMatchers, pTpcSectorMask = mTpcSectorMask, orderCheck = mOrderCheck](framework::InputSpan const& inputs, auto const&, auto&) -> framework::CompletionPolicy::CompletionOp { unsigned long tpcSectorMask = pTpcSectorMask ? *pTpcSectorMask : 0xFFFFFFFFF; std::bitset validSectors = 0; bool haveMatchedInput = false; diff --git a/Framework/Core/include/Framework/CompletionPolicy.h b/Framework/Core/include/Framework/CompletionPolicy.h index eda45bd315471..55d3014166956 100644 --- a/Framework/Core/include/Framework/CompletionPolicy.h +++ b/Framework/Core/include/Framework/CompletionPolicy.h @@ -64,26 +64,21 @@ struct CompletionPolicy { using Matcher = std::function; using InputSetElement = DataRef; - using Callback = std::function; using CallbackFull = std::function const&, ServiceRegistryRef&)>; using CallbackConfigureRelayer = std::function; /// Constructor CompletionPolicy() - : name{}, matcher{}, callback{} {} + : name{}, matcher{}, callbackFull{} {} /// Constructor for emplace_back - CompletionPolicy(std::string _name, Matcher _matcher, Callback _callback, bool _balanceChannels = true) - : name(std::move(_name)), matcher(std::move(_matcher)), callback(std::move(_callback)), callbackFull{nullptr}, balanceChannels{_balanceChannels} {} CompletionPolicy(std::string _name, Matcher _matcher, CallbackFull _callback, bool _balanceChannels = true) - : name(std::move(_name)), matcher(std::move(_matcher)), callback(nullptr), callbackFull{std::move(_callback)}, balanceChannels{_balanceChannels} {} + : name(std::move(_name)), matcher(std::move(_matcher)), callbackFull{std::move(_callback)}, balanceChannels{_balanceChannels} {} /// Name of the policy itself. std::string name = ""; /// Callback to be used to understand if the policy should apply /// to the given device. Matcher matcher = nullptr; - /// Actual policy which decides what to do with a partial InputRecord. 
- Callback callback = nullptr; /// Actual policy which decides what to do with a partial InputRecord, extended version CallbackFull callbackFull = nullptr; /// A callback which allows you to configure the behavior of the data relayer associated diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 34fc002428e12..5c38446fedaee 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -21,9 +21,6 @@ #include #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpedantic" - namespace o2::framework { @@ -35,7 +32,7 @@ CompletionPolicy CompletionPolicyHelpers::defineByNameOrigin(std::string const& auto originReceived = std::make_shared>(); - auto callback = [originReceived, origin, op](InputSpan const& inputRefs) -> CompletionPolicy::CompletionOp { + auto callback = [originReceived, origin, op](InputSpan const& inputRefs, std::vector const&, ServiceRegistryRef&) -> CompletionPolicy::CompletionOp { // update list of the start times of inputs with origin @origin for (auto& ref : inputRefs) { if (ref.header != nullptr) { @@ -77,7 +74,7 @@ CompletionPolicy CompletionPolicyHelpers::defineByName(std::string const& name, auto matcher = [name](DeviceSpec const& device) -> bool { return std::regex_match(device.name.begin(), device.name.end(), std::regex(name)); }; - auto callback = [op](InputSpan const&) -> CompletionPolicy::CompletionOp { + auto callback = [op](InputSpan const&, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { return op; }; switch (op) { @@ -108,7 +105,8 @@ CompletionPolicy CompletionPolicyHelpers::defineByName(std::string const& name, CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, CompletionPolicy::Matcher matcher) { - auto callback = [](InputSpan const& inputs) -> CompletionPolicy::CompletionOp { + auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { + assert(inputs.size() == spec.size()); for (auto& input : inputs) { if (input.header == nullptr) { return CompletionPolicy::CompletionOp::Wait; @@ -123,7 +121,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAllOrdered(const char* name { auto callbackFull = [](InputSpan const& inputs, std::vector const&, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { auto& decongestionService = ref.get(); - decongestionService.orderedCompletionPolicyActive = 1; + decongestionService.orderedCompletionPolicyActive = true; for (auto& input : inputs) { if (input.header == nullptr) { return CompletionPolicy::CompletionOp::Wait; @@ -199,7 +197,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeExistingWhenAny(const char* nam CompletionPolicy CompletionPolicyHelpers::consumeWhenAny(const char* name, CompletionPolicy::Matcher matcher) { - auto callback = [](InputSpan const& inputs) -> CompletionPolicy::CompletionOp { + auto callback = [](InputSpan const& inputs, std::vector const&, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { for (auto& input : inputs) { if (input.header != nullptr) { return CompletionPolicy::CompletionOp::Consume; @@ -289,7 +287,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAnyWithAllConditions(std::s CompletionPolicy CompletionPolicyHelpers::processWhenAny(const char* name, CompletionPolicy::Matcher matcher) { - auto callback = [](InputSpan const& inputs) -> CompletionPolicy::CompletionOp { + auto callback = 
[](InputSpan const& inputs, std::vector const&, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { size_t present = 0; for (auto& input : inputs) { if (input.header != nullptr) { @@ -307,4 +305,3 @@ CompletionPolicy CompletionPolicyHelpers::processWhenAny(const char* name, Compl } } // namespace o2::framework -#pragma GCC diagnostic pop diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index e9591a9cc76ea..b3d20e19852fa 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -675,6 +675,9 @@ void DataRelayer::getReadyToProcess(std::vector& comp notDirty++; continue; } + if (!mCompletionPolicy.callbackFull) { + throw runtime_error_f("Completion police %s has no callback set", mCompletionPolicy.name.c_str()); + } auto partial = getPartialRecord(li); // TODO: get the data ref from message model auto getter = [&partial](size_t idx, size_t part) { @@ -692,14 +695,8 @@ void DataRelayer::getReadyToProcess(std::vector& comp return partial[idx].size(); }; InputSpan span{getter, nPartsGetter, static_cast(partial.size())}; - CompletionPolicy::CompletionOp action; - if (mCompletionPolicy.callback) { - action = mCompletionPolicy.callback(span); - } else if (mCompletionPolicy.callbackFull) { - action = mCompletionPolicy.callbackFull(span, mInputs, mContext); - } else { - throw runtime_error_f("Completion police %s has no callback set", mCompletionPolicy.name.c_str()); - } + CompletionPolicy::CompletionOp action = mCompletionPolicy.callbackFull(span, mInputs, mContext); + auto& variables = mTimesliceIndex.getVariablesForSlot(slot); auto timeslice = std::get_if(&variables.get(0)); switch (action) { diff --git a/Framework/Core/test/test_CompletionPolicy.cxx b/Framework/Core/test/test_CompletionPolicy.cxx index 3bb2dc12caff0..059f20b352b3d 100644 --- a/Framework/Core/test/test_CompletionPolicy.cxx +++ b/Framework/Core/test/test_CompletionPolicy.cxx @@ -12,6 +12,7 @@ #include #include "Framework/CompletionPolicy.h" #include "Framework/CompletionPolicyHelpers.h" +#include "Framework/ServiceRegistry.h" #include "Headers/DataHeader.h" #include "Headers/NameHeader.h" #include "Framework/CompletionPolicy.h" @@ -39,7 +40,9 @@ TEST_CASE("TestCompletionPolicy_callback") return true; }; - auto callback = [&stack](InputSpan const& inputRefs) { + ServiceRegistry services; + + auto callback = [&stack](InputSpan const& inputRefs, std::vector const&, ServiceRegistryRef&) { for (auto const& ref : inputRefs) { auto const* header = CompletionPolicyHelpers::getHeader(ref); REQUIRE(header == reinterpret_cast(stack.data())); @@ -53,7 +56,9 @@ TEST_CASE("TestCompletionPolicy_callback") {"test", matcher, callback}}; CompletionPolicy::InputSetElement ref{nullptr, reinterpret_cast(stack.data()), nullptr}; InputSpan const& inputs{[&ref](size_t) { return ref; }, 1}; + std::vector specs; + ServiceRegistryRef servicesRef{services}; for (auto& policy : policies) { - policy.callback(inputs); + policy.callbackFull(inputs, specs, servicesRef); } } diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index edc07d598465b..590ce83cef467 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -53,7 +53,7 @@ void customize(std::vector& policies) // search for spec names starting with "processor" return spec.name.find("processor") == 0; }, - [](auto const&) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); + [](auto const&, auto 
const&, auto &) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); } #include "Framework/runDataProcessing.h" From bdecacebf99acbac801cdbc35e45973a9f32b600 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 12 Dec 2023 15:35:24 +0100 Subject: [PATCH 167/726] DPL: wait as long as possible for Sporadic inputs Right now if we have the standard consumeWhenAll policy and we have a sporadic input, it will wait indefinitely until all the inputs arrive or it will drop timeframes without that sporadic input. This changes the behavior and waits only until the oldest possible timeframe does not allow the Sporadic input to be there. At which point, it schedules the processing in any case, under the assumption that a task declaring a sporadic input knows what to do in case it's not there. --- .../Core/src/CompletionPolicyHelpers.cxx | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 5c38446fedaee..769692c1d6637 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -15,6 +15,7 @@ #include "Framework/DeviceSpec.h" #include "Framework/CompilerBuiltins.h" #include "Framework/Logger.h" +#include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" @@ -107,10 +108,32 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == spec.size()); + + size_t si = 0; + bool missingSporadic = false; + size_t currentTimeslice = -1; for (auto& input : inputs) { - if (input.header == nullptr) { + assert(si < specs.size()); + auto& spec = specs[si++]; + if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { return CompletionPolicy::CompletionOp::Wait; } + if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + missingSporadic |= true; + } + if (input.header != nullptr) { + auto const* dph = framework::DataRefUtils::getHeader(input); + if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { + currentTimeslice = dph->startTime; + break; + } + } + } + // If some sporadic inputs are missing, we wait for them util we are sure they will not come, + // i.e. until the oldest possible timeslice is beyond the timeslice of the input. 
+ auto& timesliceIndex = ref.get(); + if (missingSporadic && currentTimeslice >= timesliceIndex.getOldestPossibleInput().timeslice.value) { + return CompletionPolicy::CompletionOp::Wait; } return CompletionPolicy::CompletionOp::Consume; }; From 11347973a5173489725d27b02e9a32ad0299c55d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 12 Dec 2023 23:21:04 +0100 Subject: [PATCH 168/726] Fix missing argument in logging statement --- Detectors/ITSMFT/ITS/macros/test/CreateDictionaries.C | 2 +- Detectors/ITSMFT/MFT/macros/test/CreateDictionaries.C | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CreateDictionaries.C b/Detectors/ITSMFT/ITS/macros/test/CreateDictionaries.C index f0a2ef0de6398..3f906ebc04e17 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CreateDictionaries.C +++ b/Detectors/ITSMFT/ITS/macros/test/CreateDictionaries.C @@ -140,7 +140,7 @@ void CreateDictionaries(bool saveDeltas = false, } clusTree->GetEntry(0); if (clusTree->GetEntries() > 1 && !hitfile.empty()) { - LOGP(error, "Hits are provided but the cluster tree containes {} entries, looks like real data"); + LOGP(error, "Hits are provided but the cluster tree containes {} entries, looks like real data", clusTree->GetEntries()); return; } diff --git a/Detectors/ITSMFT/MFT/macros/test/CreateDictionaries.C b/Detectors/ITSMFT/MFT/macros/test/CreateDictionaries.C index 3d6fb3a7ef59f..b610156f0cc4c 100644 --- a/Detectors/ITSMFT/MFT/macros/test/CreateDictionaries.C +++ b/Detectors/ITSMFT/MFT/macros/test/CreateDictionaries.C @@ -140,7 +140,7 @@ void CreateDictionaries(bool saveDeltas = false, } clusTree->GetEntry(0); if (clusTree->GetEntries() > 1 && !hitfile.empty()) { - LOGP(error, "Hits are provided but the cluster tree containes {} entries, looks like real data"); + LOGP(error, "Hits are provided but the cluster tree containes {} entries, looks like real data", clusTree->GetEntries()); return; } From 1937f15524250f78b59e2ce1e66ede378648d298 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 13 Dec 2023 09:32:09 +0100 Subject: [PATCH 169/726] Add missing header --- .../ITSMFT/common/include/DataFormatsITSMFT/CompCluster.h | 1 + 1 file changed, 1 insertion(+) diff --git a/DataFormats/Detectors/ITSMFT/common/include/DataFormatsITSMFT/CompCluster.h b/DataFormats/Detectors/ITSMFT/common/include/DataFormatsITSMFT/CompCluster.h index 772a9ae12a81a..0cd5d0e11f19f 100644 --- a/DataFormats/Detectors/ITSMFT/common/include/DataFormatsITSMFT/CompCluster.h +++ b/DataFormats/Detectors/ITSMFT/common/include/DataFormatsITSMFT/CompCluster.h @@ -15,6 +15,7 @@ #define ALICEO2_ITSMFT_COMPCLUSTER_H #include +#include namespace o2 { From ac663ddbfab4ea63ca551243b48ed88d4eb58177 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 13 Dec 2023 11:26:50 +0100 Subject: [PATCH 170/726] Fix brown paperbag indexing --- .../GlobalTracking/src/MatchITSTPCQC.cxx | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx b/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx index be787d14196d9..ade9e4b4ec6ff 100644 --- a/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx +++ b/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx @@ -811,48 +811,48 @@ void MatchITSTPCQC::finalize() // first we use denominators and nominators to set the TEfficiency; later they are scaled // some checks - for (int i = 0; i < matchType::SIZE; ++i) 
{ - for (int i = 0; i < mPtDen[i]->GetNbinsX(); ++i) { + for (int ti = 0; ti < matchType::SIZE; ++ti) { + for (int i = 0; i < mPtDen[ti]->GetNbinsX(); ++i) { if (mPtDen[i]->GetBinContent(i + 1) < mPtNum[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mPtNum[i]->GetBinLowEdge(i + 1) << " , " << mPtNum[i]->GetBinLowEdge(i + 1) + mPtNum[i]->GetBinWidth(i + 1) << "]: mPtDen[i] = " << mPtDen[i]->GetBinContent(i + 1) << ", mPtNum[i] = " << mPtNum[i]->GetBinContent(i + 1); + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mPtNum[ti]->GetBinLowEdge(i + 1) << " , " << mPtNum[ti]->GetBinLowEdge(i + 1) + mPtNum[ti]->GetBinWidth(i + 1) << "]: mPtDen[i] = " << mPtDen[ti]->GetBinContent(i + 1) << ", mPtNum[i] = " << mPtNum[ti]->GetBinContent(i + 1); } } - for (int i = 0; i < mPtDen_noEta0[i]->GetNbinsX(); ++i) { - if (mPtDen_noEta0[i]->GetBinContent(i + 1) < mPtNum_noEta0[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mPtNum_noEta0[i]->GetBinLowEdge(i + 1) << " , " << mPtNum_noEta0[i]->GetBinLowEdge(i + 1) + mPtNum_noEta0[i]->GetBinWidth(i + 1) << "]: mPtDen_noEta0[i] = " << mPtDen_noEta0[i]->GetBinContent(i + 1) << ", mPtNum_noEta0[i] = " << mPtNum_noEta0[i]->GetBinContent(i + 1); + for (int i = 0; i < mPtDen_noEta0[ti]->GetNbinsX(); ++i) { + if (mPtDen_noEta0[ti]->GetBinContent(i + 1) < mPtNum_noEta0[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mPtNum_noEta0[ti]->GetBinLowEdge(i + 1) << " , " << mPtNum_noEta0[ti]->GetBinLowEdge(i + 1) + mPtNum_noEta0[ti]->GetBinWidth(i + 1) << "]: mPtDen_noEta0[i] = " << mPtDen_noEta0[ti]->GetBinContent(i + 1) << ", mPtNum_noEta0[i] = " << mPtNum_noEta0[ti]->GetBinContent(i + 1); } } - for (int i = 0; i < mPhiDen[i]->GetNbinsX(); ++i) { - if (mPhiDen[i]->GetBinContent(i + 1) < mPhiNum[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mPhiNum[i]->GetBinLowEdge(i + 1) << " , " << mPhiNum[i]->GetBinLowEdge(i + 1) + mPhiNum[i]->GetBinWidth(i + 1) << "]: mPhiDen[i] = " << mPhiDen[i]->GetBinContent(i + 1) << ", mPhiNum[i] = " << mPhiNum[i]->GetBinContent(i + 1); + for (int i = 0; i < mPhiDen[ti]->GetNbinsX(); ++i) { + if (mPhiDen[ti]->GetBinContent(i + 1) < mPhiNum[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mPhiNum[ti]->GetBinLowEdge(i + 1) << " , " << mPhiNum[ti]->GetBinLowEdge(i + 1) + mPhiNum[ti]->GetBinWidth(i + 1) << "]: mPhiDen[i] = " << mPhiDen[ti]->GetBinContent(i + 1) << ", mPhiNum[i] = " << mPhiNum[ti]->GetBinContent(i + 1); } } - for (int i = 0; i < mEtaDen[i]->GetNbinsX(); ++i) { - if (mEtaDen[i]->GetBinContent(i + 1) < mEtaNum[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mEtaNum[i]->GetBinLowEdge(i + 1) << " , " << mEtaNum[i]->GetBinLowEdge(i + 1) + mEtaNum[i]->GetBinWidth(i + 1) << "]: mEtaDen[i] = " << mEtaDen[i]->GetBinContent(i + 1) << ", mEtaNum[i] = " << mEtaNum[i]->GetBinContent(i + 1); + for (int i = 0; i < mEtaDen[ti]->GetNbinsX(); ++i) { + if (mEtaDen[ti]->GetBinContent(i + 1) < mEtaNum[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mEtaNum[ti]->GetBinLowEdge(i + 1) << " , " << mEtaNum[ti]->GetBinLowEdge(i + 1) + mEtaNum[ti]->GetBinWidth(i + 1) << "]: mEtaDen[i] = " << mEtaDen[ti]->GetBinContent(i + 1) << ", mEtaNum[i] = " << mEtaNum[ti]->GetBinContent(i + 1); } } // filling the efficiency - setEfficiency(mFractionITSTPCmatch[i], mPtNum[i], 
mPtDen[i]); - setEfficiency(mFractionITSTPCmatch_noEta0[i], mPtNum_noEta0[i], mPtDen_noEta0[i]); - setEfficiency(mFractionITSTPCmatchPhi[i], mPhiNum[i], mPhiDen[i]); - setEfficiency(mFractionITSTPCmatchEta[i], mEtaNum[i], mEtaDen[i]); - setEfficiency(mFractionITSTPCmatchPhiVsPt[i], mPhiVsPtNum[i], mPhiVsPtDen[i], true); - setEfficiency(mFractionITSTPCmatchEtaVsPt[i], mEtaVsPtNum[i], mEtaVsPtDen[i], true); - setEfficiency(mFractionITSTPCmatch1OverPt[i], m1OverPtNum[i], m1OverPtDen[i]); + setEfficiency(mFractionITSTPCmatch[ti], mPtNum[ti], mPtDen[ti]); + setEfficiency(mFractionITSTPCmatch_noEta0[ti], mPtNum_noEta0[ti], mPtDen_noEta0[ti]); + setEfficiency(mFractionITSTPCmatchPhi[ti], mPhiNum[ti], mPhiDen[ti]); + setEfficiency(mFractionITSTPCmatchEta[ti], mEtaNum[ti], mEtaDen[ti]); + setEfficiency(mFractionITSTPCmatchPhiVsPt[ti], mPhiVsPtNum[ti], mPhiVsPtDen[ti], true); + setEfficiency(mFractionITSTPCmatchEtaVsPt[ti], mEtaVsPtNum[ti], mEtaVsPtDen[ti], true); + setEfficiency(mFractionITSTPCmatch1OverPt[ti], m1OverPtNum[ti], m1OverPtDen[ti]); if (mUseTrkPID) { // Vs Tracking PID hypothesis for (int j = 0; j < o2::track::PID::NIDs; ++j) { - setEfficiency(mFractionITSTPCmatchPtVsTrkPID[i][j], mPtNumVsTrkPID[i][j], mPtDenVsTrkPID[i][j]); - setEfficiency(mFractionITSTPCmatchPhiVsTrkPID[i][j], mPhiNumVsTrkPID[i][j], mPhiDenVsTrkPID[i][j]); - setEfficiency(mFractionITSTPCmatchEtaVsTrkPID[i][j], mEtaNumVsTrkPID[i][j], mEtaDenVsTrkPID[i][j]); + setEfficiency(mFractionITSTPCmatchPtVsTrkPID[ti][j], mPtNumVsTrkPID[ti][j], mPtDenVsTrkPID[ti][j]); + setEfficiency(mFractionITSTPCmatchPhiVsTrkPID[ti][j], mPhiNumVsTrkPID[ti][j], mPhiDenVsTrkPID[ti][j]); + setEfficiency(mFractionITSTPCmatchEtaVsTrkPID[ti][j], mEtaNumVsTrkPID[ti][j], mEtaDenVsTrkPID[ti][j]); } } if (mUseMC) { - setEfficiency(mFractionITSTPCmatchPhysPrim[i], mPtPhysPrimNum[i], mPtPhysPrimDen[i]); - setEfficiency(mFractionITSTPCmatchPhiPhysPrim[i], mPhiPhysPrimNum[i], mPhiPhysPrimDen[i]); - setEfficiency(mFractionITSTPCmatchEtaPhysPrim[i], mEtaPhysPrimNum[i], mEtaPhysPrimDen[i]); - setEfficiency(mFractionITSTPCmatchPhysPrim1OverPt[i], m1OverPtPhysPrimNum[i], m1OverPtPhysPrimDen[i]); + setEfficiency(mFractionITSTPCmatchPhysPrim[ti], mPtPhysPrimNum[ti], mPtPhysPrimDen[ti]); + setEfficiency(mFractionITSTPCmatchPhiPhysPrim[ti], mPhiPhysPrimNum[ti], mPhiPhysPrimDen[ti]); + setEfficiency(mFractionITSTPCmatchEtaPhysPrim[ti], mEtaPhysPrimNum[ti], mEtaPhysPrimDen[ti]); + setEfficiency(mFractionITSTPCmatchPhysPrim1OverPt[ti], m1OverPtPhysPrimNum[ti], m1OverPtPhysPrimDen[ti]); } } /* From e88ed40161f331a31e47cdd0afdc64162ec28587 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 13 Dec 2023 11:30:38 +0100 Subject: [PATCH 171/726] Fix brown paperbag indexing --- .../GlobalTracking/src/MatchITSTPCQC.cxx | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx b/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx index be787d14196d9..fea0a2b2af3a3 100644 --- a/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx +++ b/Detectors/GlobalTracking/src/MatchITSTPCQC.cxx @@ -811,48 +811,48 @@ void MatchITSTPCQC::finalize() // first we use denominators and nominators to set the TEfficiency; later they are scaled // some checks - for (int i = 0; i < matchType::SIZE; ++i) { - for (int i = 0; i < mPtDen[i]->GetNbinsX(); ++i) { - if (mPtDen[i]->GetBinContent(i + 1) < mPtNum[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << 
mPtNum[i]->GetBinLowEdge(i + 1) << " , " << mPtNum[i]->GetBinLowEdge(i + 1) + mPtNum[i]->GetBinWidth(i + 1) << "]: mPtDen[i] = " << mPtDen[i]->GetBinContent(i + 1) << ", mPtNum[i] = " << mPtNum[i]->GetBinContent(i + 1); + for (int ti = 0; ti < matchType::SIZE; ++ti) { + for (int i = 0; i < mPtDen[ti]->GetNbinsX(); ++i) { + if (mPtDen[ti]->GetBinContent(i + 1) < mPtNum[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mPtNum[ti]->GetBinLowEdge(i + 1) << " , " << mPtNum[ti]->GetBinLowEdge(i + 1) + mPtNum[ti]->GetBinWidth(i + 1) << "]: mPtDen[i] = " << mPtDen[ti]->GetBinContent(i + 1) << ", mPtNum[i] = " << mPtNum[ti]->GetBinContent(i + 1); } } - for (int i = 0; i < mPtDen_noEta0[i]->GetNbinsX(); ++i) { - if (mPtDen_noEta0[i]->GetBinContent(i + 1) < mPtNum_noEta0[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mPtNum_noEta0[i]->GetBinLowEdge(i + 1) << " , " << mPtNum_noEta0[i]->GetBinLowEdge(i + 1) + mPtNum_noEta0[i]->GetBinWidth(i + 1) << "]: mPtDen_noEta0[i] = " << mPtDen_noEta0[i]->GetBinContent(i + 1) << ", mPtNum_noEta0[i] = " << mPtNum_noEta0[i]->GetBinContent(i + 1); + for (int i = 0; i < mPtDen_noEta0[ti]->GetNbinsX(); ++i) { + if (mPtDen_noEta0[ti]->GetBinContent(i + 1) < mPtNum_noEta0[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mPtNum_noEta0[ti]->GetBinLowEdge(i + 1) << " , " << mPtNum_noEta0[ti]->GetBinLowEdge(i + 1) + mPtNum_noEta0[ti]->GetBinWidth(i + 1) << "]: mPtDen_noEta0[i] = " << mPtDen_noEta0[ti]->GetBinContent(i + 1) << ", mPtNum_noEta0[i] = " << mPtNum_noEta0[ti]->GetBinContent(i + 1); } } - for (int i = 0; i < mPhiDen[i]->GetNbinsX(); ++i) { - if (mPhiDen[i]->GetBinContent(i + 1) < mPhiNum[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mPhiNum[i]->GetBinLowEdge(i + 1) << " , " << mPhiNum[i]->GetBinLowEdge(i + 1) + mPhiNum[i]->GetBinWidth(i + 1) << "]: mPhiDen[i] = " << mPhiDen[i]->GetBinContent(i + 1) << ", mPhiNum[i] = " << mPhiNum[i]->GetBinContent(i + 1); + for (int i = 0; i < mPhiDen[ti]->GetNbinsX(); ++i) { + if (mPhiDen[ti]->GetBinContent(i + 1) < mPhiNum[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mPhiNum[ti]->GetBinLowEdge(i + 1) << " , " << mPhiNum[ti]->GetBinLowEdge(i + 1) + mPhiNum[ti]->GetBinWidth(i + 1) << "]: mPhiDen[i] = " << mPhiDen[ti]->GetBinContent(i + 1) << ", mPhiNum[i] = " << mPhiNum[ti]->GetBinContent(i + 1); } } - for (int i = 0; i < mEtaDen[i]->GetNbinsX(); ++i) { - if (mEtaDen[i]->GetBinContent(i + 1) < mEtaNum[i]->GetBinContent(i + 1)) { - LOG(error) << title[i] << ": bin " << i + 1 << " in [" << mEtaNum[i]->GetBinLowEdge(i + 1) << " , " << mEtaNum[i]->GetBinLowEdge(i + 1) + mEtaNum[i]->GetBinWidth(i + 1) << "]: mEtaDen[i] = " << mEtaDen[i]->GetBinContent(i + 1) << ", mEtaNum[i] = " << mEtaNum[i]->GetBinContent(i + 1); + for (int i = 0; i < mEtaDen[ti]->GetNbinsX(); ++i) { + if (mEtaDen[ti]->GetBinContent(i + 1) < mEtaNum[ti]->GetBinContent(i + 1)) { + LOG(error) << title[ti] << ": bin " << i + 1 << " in [" << mEtaNum[ti]->GetBinLowEdge(i + 1) << " , " << mEtaNum[ti]->GetBinLowEdge(i + 1) + mEtaNum[ti]->GetBinWidth(i + 1) << "]: mEtaDen[i] = " << mEtaDen[ti]->GetBinContent(i + 1) << ", mEtaNum[i] = " << mEtaNum[ti]->GetBinContent(i + 1); } } // filling the efficiency - setEfficiency(mFractionITSTPCmatch[i], mPtNum[i], mPtDen[i]); - setEfficiency(mFractionITSTPCmatch_noEta0[i], mPtNum_noEta0[i], mPtDen_noEta0[i]); - 
setEfficiency(mFractionITSTPCmatchPhi[i], mPhiNum[i], mPhiDen[i]); - setEfficiency(mFractionITSTPCmatchEta[i], mEtaNum[i], mEtaDen[i]); - setEfficiency(mFractionITSTPCmatchPhiVsPt[i], mPhiVsPtNum[i], mPhiVsPtDen[i], true); - setEfficiency(mFractionITSTPCmatchEtaVsPt[i], mEtaVsPtNum[i], mEtaVsPtDen[i], true); - setEfficiency(mFractionITSTPCmatch1OverPt[i], m1OverPtNum[i], m1OverPtDen[i]); + setEfficiency(mFractionITSTPCmatch[ti], mPtNum[ti], mPtDen[ti]); + setEfficiency(mFractionITSTPCmatch_noEta0[ti], mPtNum_noEta0[ti], mPtDen_noEta0[ti]); + setEfficiency(mFractionITSTPCmatchPhi[ti], mPhiNum[ti], mPhiDen[ti]); + setEfficiency(mFractionITSTPCmatchEta[ti], mEtaNum[ti], mEtaDen[ti]); + setEfficiency(mFractionITSTPCmatchPhiVsPt[ti], mPhiVsPtNum[ti], mPhiVsPtDen[ti], true); + setEfficiency(mFractionITSTPCmatchEtaVsPt[ti], mEtaVsPtNum[ti], mEtaVsPtDen[ti], true); + setEfficiency(mFractionITSTPCmatch1OverPt[ti], m1OverPtNum[ti], m1OverPtDen[ti]); if (mUseTrkPID) { // Vs Tracking PID hypothesis for (int j = 0; j < o2::track::PID::NIDs; ++j) { - setEfficiency(mFractionITSTPCmatchPtVsTrkPID[i][j], mPtNumVsTrkPID[i][j], mPtDenVsTrkPID[i][j]); - setEfficiency(mFractionITSTPCmatchPhiVsTrkPID[i][j], mPhiNumVsTrkPID[i][j], mPhiDenVsTrkPID[i][j]); - setEfficiency(mFractionITSTPCmatchEtaVsTrkPID[i][j], mEtaNumVsTrkPID[i][j], mEtaDenVsTrkPID[i][j]); + setEfficiency(mFractionITSTPCmatchPtVsTrkPID[ti][j], mPtNumVsTrkPID[ti][j], mPtDenVsTrkPID[ti][j]); + setEfficiency(mFractionITSTPCmatchPhiVsTrkPID[ti][j], mPhiNumVsTrkPID[ti][j], mPhiDenVsTrkPID[ti][j]); + setEfficiency(mFractionITSTPCmatchEtaVsTrkPID[ti][j], mEtaNumVsTrkPID[ti][j], mEtaDenVsTrkPID[ti][j]); } } if (mUseMC) { - setEfficiency(mFractionITSTPCmatchPhysPrim[i], mPtPhysPrimNum[i], mPtPhysPrimDen[i]); - setEfficiency(mFractionITSTPCmatchPhiPhysPrim[i], mPhiPhysPrimNum[i], mPhiPhysPrimDen[i]); - setEfficiency(mFractionITSTPCmatchEtaPhysPrim[i], mEtaPhysPrimNum[i], mEtaPhysPrimDen[i]); - setEfficiency(mFractionITSTPCmatchPhysPrim1OverPt[i], m1OverPtPhysPrimNum[i], m1OverPtPhysPrimDen[i]); + setEfficiency(mFractionITSTPCmatchPhysPrim[ti], mPtPhysPrimNum[ti], mPtPhysPrimDen[ti]); + setEfficiency(mFractionITSTPCmatchPhiPhysPrim[ti], mPhiPhysPrimNum[ti], mPhiPhysPrimDen[ti]); + setEfficiency(mFractionITSTPCmatchEtaPhysPrim[ti], mEtaPhysPrimNum[ti], mEtaPhysPrimDen[ti]); + setEfficiency(mFractionITSTPCmatchPhysPrim1OverPt[ti], m1OverPtPhysPrimNum[ti], m1OverPtPhysPrimDen[ti]); } } /* From 0953e70583a59468ad741b08a174338f5badbf9b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 13 Dec 2023 21:45:27 +0100 Subject: [PATCH 172/726] DPL: get rid of simplified CompletionPolicy The full blown version has been there for a while now, and in any case it's required if one wants to access the oldest possible timeframe. 
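Note for downstream code that still used the single-argument Callback form: migrating means adding the two extra
parameters, even when a policy does not need them. The following is a minimal, illustrative sketch (not part of this
patch) of a policy written against the remaining full signature; it assumes the usual o2::framework types
(InputSpec, InputSpan, ServiceRegistryRef), and the policy name and device name are made up for the example.

```cpp
#include "Framework/CompletionPolicy.h"
#include "Framework/DeviceSpec.h"
#include <vector>

using namespace o2::framework;

// Illustrative sketch only: a consume-when-all style policy using CallbackFull.
// The specs / services arguments can be left unnamed when they are not needed.
CompletionPolicy makeExampleConsumeWhenAll()
{
  auto matcher = [](DeviceSpec const& device) -> bool {
    return device.name == "my-processor"; // hypothetical device name
  };
  auto callback = [](InputSpan const& inputs,
                     std::vector<InputSpec> const&,
                     ServiceRegistryRef&) -> CompletionPolicy::CompletionOp {
    for (auto& input : inputs) {
      if (input.header == nullptr) {
        return CompletionPolicy::CompletionOp::Wait; // some slot is still empty
      }
    }
    return CompletionPolicy::CompletionOp::Consume; // every input has arrived
  };
  return CompletionPolicy{"example-consume-when-all", matcher, callback};
}
```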
--- .../TPCSectorCompletionPolicy.h | 2 +- .../Core/include/Framework/CompletionPolicy.h | 9 ++------- Framework/Core/src/CompletionPolicyHelpers.cxx | 17 +++++++---------- Framework/Core/src/DataRelayer.cxx | 13 +++++-------- Framework/Core/test/test_CompletionPolicy.cxx | 9 +++++++-- Framework/Core/test/test_StaggeringWorkflow.cxx | 2 +- 6 files changed, 23 insertions(+), 29 deletions(-) diff --git a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h index 33abded35624a..9f2a8b31dbdba 100644 --- a/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h +++ b/Detectors/TPC/workflow/readers/include/TPCReaderWorkflow/TPCSectorCompletionPolicy.h @@ -91,7 +91,7 @@ class TPCSectorCompletionPolicy return std::regex_match(device.name.begin(), device.name.end(), std::regex(expression.c_str())); }; - auto callback = [bRequireAll = mRequireAll, inputMatchers = mInputMatchers, externalInputMatchers = mExternalInputMatchers, pTpcSectorMask = mTpcSectorMask, orderCheck = mOrderCheck](framework::InputSpan const& inputs) -> framework::CompletionPolicy::CompletionOp { + auto callback = [bRequireAll = mRequireAll, inputMatchers = mInputMatchers, externalInputMatchers = mExternalInputMatchers, pTpcSectorMask = mTpcSectorMask, orderCheck = mOrderCheck](framework::InputSpan const& inputs, auto const&, auto&) -> framework::CompletionPolicy::CompletionOp { unsigned long tpcSectorMask = pTpcSectorMask ? *pTpcSectorMask : 0xFFFFFFFFF; std::bitset validSectors = 0; bool haveMatchedInput = false; diff --git a/Framework/Core/include/Framework/CompletionPolicy.h b/Framework/Core/include/Framework/CompletionPolicy.h index eda45bd315471..55d3014166956 100644 --- a/Framework/Core/include/Framework/CompletionPolicy.h +++ b/Framework/Core/include/Framework/CompletionPolicy.h @@ -64,26 +64,21 @@ struct CompletionPolicy { using Matcher = std::function; using InputSetElement = DataRef; - using Callback = std::function; using CallbackFull = std::function const&, ServiceRegistryRef&)>; using CallbackConfigureRelayer = std::function; /// Constructor CompletionPolicy() - : name{}, matcher{}, callback{} {} + : name{}, matcher{}, callbackFull{} {} /// Constructor for emplace_back - CompletionPolicy(std::string _name, Matcher _matcher, Callback _callback, bool _balanceChannels = true) - : name(std::move(_name)), matcher(std::move(_matcher)), callback(std::move(_callback)), callbackFull{nullptr}, balanceChannels{_balanceChannels} {} CompletionPolicy(std::string _name, Matcher _matcher, CallbackFull _callback, bool _balanceChannels = true) - : name(std::move(_name)), matcher(std::move(_matcher)), callback(nullptr), callbackFull{std::move(_callback)}, balanceChannels{_balanceChannels} {} + : name(std::move(_name)), matcher(std::move(_matcher)), callbackFull{std::move(_callback)}, balanceChannels{_balanceChannels} {} /// Name of the policy itself. std::string name = ""; /// Callback to be used to understand if the policy should apply /// to the given device. Matcher matcher = nullptr; - /// Actual policy which decides what to do with a partial InputRecord. 
- Callback callback = nullptr; /// Actual policy which decides what to do with a partial InputRecord, extended version CallbackFull callbackFull = nullptr; /// A callback which allows you to configure the behavior of the data relayer associated diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 34fc002428e12..b3e0621bf984e 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -21,9 +21,6 @@ #include #include -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wpedantic" - namespace o2::framework { @@ -35,7 +32,7 @@ CompletionPolicy CompletionPolicyHelpers::defineByNameOrigin(std::string const& auto originReceived = std::make_shared>(); - auto callback = [originReceived, origin, op](InputSpan const& inputRefs) -> CompletionPolicy::CompletionOp { + auto callback = [originReceived, origin, op](InputSpan const& inputRefs, std::vector const&, ServiceRegistryRef&) -> CompletionPolicy::CompletionOp { // update list of the start times of inputs with origin @origin for (auto& ref : inputRefs) { if (ref.header != nullptr) { @@ -77,7 +74,7 @@ CompletionPolicy CompletionPolicyHelpers::defineByName(std::string const& name, auto matcher = [name](DeviceSpec const& device) -> bool { return std::regex_match(device.name.begin(), device.name.end(), std::regex(name)); }; - auto callback = [op](InputSpan const&) -> CompletionPolicy::CompletionOp { + auto callback = [op](InputSpan const&, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { return op; }; switch (op) { @@ -108,7 +105,8 @@ CompletionPolicy CompletionPolicyHelpers::defineByName(std::string const& name, CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, CompletionPolicy::Matcher matcher) { - auto callback = [](InputSpan const& inputs) -> CompletionPolicy::CompletionOp { + auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { + assert(inputs.size() == specs.size()); for (auto& input : inputs) { if (input.header == nullptr) { return CompletionPolicy::CompletionOp::Wait; @@ -123,7 +121,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAllOrdered(const char* name { auto callbackFull = [](InputSpan const& inputs, std::vector const&, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { auto& decongestionService = ref.get(); - decongestionService.orderedCompletionPolicyActive = 1; + decongestionService.orderedCompletionPolicyActive = true; for (auto& input : inputs) { if (input.header == nullptr) { return CompletionPolicy::CompletionOp::Wait; @@ -199,7 +197,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeExistingWhenAny(const char* nam CompletionPolicy CompletionPolicyHelpers::consumeWhenAny(const char* name, CompletionPolicy::Matcher matcher) { - auto callback = [](InputSpan const& inputs) -> CompletionPolicy::CompletionOp { + auto callback = [](InputSpan const& inputs, std::vector const&, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { for (auto& input : inputs) { if (input.header != nullptr) { return CompletionPolicy::CompletionOp::Consume; @@ -289,7 +287,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAnyWithAllConditions(std::s CompletionPolicy CompletionPolicyHelpers::processWhenAny(const char* name, CompletionPolicy::Matcher matcher) { - auto callback = [](InputSpan const& inputs) -> CompletionPolicy::CompletionOp { + auto callback = 
[](InputSpan const& inputs, std::vector const&, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { size_t present = 0; for (auto& input : inputs) { if (input.header != nullptr) { @@ -307,4 +305,3 @@ CompletionPolicy CompletionPolicyHelpers::processWhenAny(const char* name, Compl } } // namespace o2::framework -#pragma GCC diagnostic pop diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index e9591a9cc76ea..b3d20e19852fa 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -675,6 +675,9 @@ void DataRelayer::getReadyToProcess(std::vector& comp notDirty++; continue; } + if (!mCompletionPolicy.callbackFull) { + throw runtime_error_f("Completion police %s has no callback set", mCompletionPolicy.name.c_str()); + } auto partial = getPartialRecord(li); // TODO: get the data ref from message model auto getter = [&partial](size_t idx, size_t part) { @@ -692,14 +695,8 @@ void DataRelayer::getReadyToProcess(std::vector& comp return partial[idx].size(); }; InputSpan span{getter, nPartsGetter, static_cast(partial.size())}; - CompletionPolicy::CompletionOp action; - if (mCompletionPolicy.callback) { - action = mCompletionPolicy.callback(span); - } else if (mCompletionPolicy.callbackFull) { - action = mCompletionPolicy.callbackFull(span, mInputs, mContext); - } else { - throw runtime_error_f("Completion police %s has no callback set", mCompletionPolicy.name.c_str()); - } + CompletionPolicy::CompletionOp action = mCompletionPolicy.callbackFull(span, mInputs, mContext); + auto& variables = mTimesliceIndex.getVariablesForSlot(slot); auto timeslice = std::get_if(&variables.get(0)); switch (action) { diff --git a/Framework/Core/test/test_CompletionPolicy.cxx b/Framework/Core/test/test_CompletionPolicy.cxx index 3bb2dc12caff0..059f20b352b3d 100644 --- a/Framework/Core/test/test_CompletionPolicy.cxx +++ b/Framework/Core/test/test_CompletionPolicy.cxx @@ -12,6 +12,7 @@ #include #include "Framework/CompletionPolicy.h" #include "Framework/CompletionPolicyHelpers.h" +#include "Framework/ServiceRegistry.h" #include "Headers/DataHeader.h" #include "Headers/NameHeader.h" #include "Framework/CompletionPolicy.h" @@ -39,7 +40,9 @@ TEST_CASE("TestCompletionPolicy_callback") return true; }; - auto callback = [&stack](InputSpan const& inputRefs) { + ServiceRegistry services; + + auto callback = [&stack](InputSpan const& inputRefs, std::vector const&, ServiceRegistryRef&) { for (auto const& ref : inputRefs) { auto const* header = CompletionPolicyHelpers::getHeader(ref); REQUIRE(header == reinterpret_cast(stack.data())); @@ -53,7 +56,9 @@ TEST_CASE("TestCompletionPolicy_callback") {"test", matcher, callback}}; CompletionPolicy::InputSetElement ref{nullptr, reinterpret_cast(stack.data()), nullptr}; InputSpan const& inputs{[&ref](size_t) { return ref; }, 1}; + std::vector specs; + ServiceRegistryRef servicesRef{services}; for (auto& policy : policies) { - policy.callback(inputs); + policy.callbackFull(inputs, specs, servicesRef); } } diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index edc07d598465b..590ce83cef467 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -53,7 +53,7 @@ void customize(std::vector& policies) // search for spec names starting with "processor" return spec.name.find("processor") == 0; }, - [](auto const&) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); + [](auto const&, auto 
const&, auto &) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); } #include "Framework/runDataProcessing.h" From 8642aeeac6d26535a84e2dd101a0bd148e5f417c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 13 Dec 2023 21:45:27 +0100 Subject: [PATCH 173/726] DPL: wait as long as possible for Sporadic inputs Right now if we have the standard consumeWhenAll policy and we have a sporadic input, it will wait indefinitely until all the inputs arrive or it will drop timeframes without that sporadic input. This changes the behavior and waits only until the oldest possible timeframe does not allow the Sporadic input to be there. At which point, it schedules the processing in any case, under the assumption that a task declaring a sporadic input knows what to do in case it's not there. --- .../Core/src/CompletionPolicyHelpers.cxx | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index b3e0621bf984e..ed18d89eea013 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -15,6 +15,7 @@ #include "Framework/DeviceSpec.h" #include "Framework/CompilerBuiltins.h" #include "Framework/Logger.h" +#include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" @@ -107,10 +108,32 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + + size_t si = 0; + bool missingSporadic = false; + size_t currentTimeslice = -1; for (auto& input : inputs) { - if (input.header == nullptr) { + assert(si < specs.size()); + auto& spec = specs[si++]; + if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { return CompletionPolicy::CompletionOp::Wait; } + if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + missingSporadic |= true; + } + if (input.header != nullptr) { + auto const* dph = framework::DataRefUtils::getHeader(input); + if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { + currentTimeslice = dph->startTime; + break; + } + } + } + // If some sporadic inputs are missing, we wait for them util we are sure they will not come, + // i.e. until the oldest possible timeslice is beyond the timeslice of the input. + auto& timesliceIndex = ref.get(); + if (missingSporadic && currentTimeslice >= timesliceIndex.getOldestPossibleInput().timeslice.value) { + return CompletionPolicy::CompletionOp::Wait; } return CompletionPolicy::CompletionOp::Consume; }; From a1e0e758fa1e163c7301f981f55c103eb6393939 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sun, 31 Dec 2023 10:44:50 +0100 Subject: [PATCH 174/726] DPL: ignore SIGUSR1 until we are ready to handle it This should fix a race condition when the parent decides to quit too quickly. 
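For context, the race being closed is the usual fork/signal window: between fork() and the moment the child has its
real signal handling in place, a SIGUSR1/SIGUSR2 sent by the parent is delivered with the default disposition and
terminates the child. A standalone sketch of the pattern follows (not DPL code; the handler and the setup step are
hypothetical):

```cpp
#include <signal.h>
#include <unistd.h>

// Hypothetical handler; in the real code this is whatever the driver uses to
// step or interrupt the child device.
static void onUsr1(int) {}

int main()
{
  if (fork() == 0) {
    // Child: first thing after fork(), park the signals the parent may send,
    // so an early SIGUSR1/SIGUSR2 is ignored instead of terminating the child
    // via the default disposition.
    signal(SIGUSR1, SIG_IGN);
    signal(SIGUSR2, SIG_IGN);

    // ... potentially slow child setup happens here (device creation, channel
    // binding, ...) ...

    // Re-enable only once the child can actually service the signal.
    signal(SIGUSR1, onUsr1);
    pause(); // stand-in for the child's work / event loop
  }
  return 0; // parent: may now signal the child without racing its handlers
}
```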
--- Framework/Core/src/runDataProcessing.cxx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 66679d83dc4ec..e9d1f3d4ee314 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -646,6 +646,12 @@ void spawnDevice(uv_loop_t* loop, if (id == 0) { // We allow being debugged and do not terminate on SIGTRAP signal(SIGTRAP, SIG_IGN); + // We immediately ignore SIGUSR1 and SIGUSR2 so that we do not + // get killed by the parent trying to force stepping children. + // We will re-enable them later on, when it is actually safe to + // do so. + signal(SIGUSR1, SIG_IGN); + signal(SIGUSR2, SIG_IGN); // This is the child. // For stdout / stderr, we close the read part of the pipe, the From 5ee4098581dbac30c31d9a76e9dd38861684053f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 11:24:22 +0100 Subject: [PATCH 175/726] DPL: get rid of warning --- Framework/Core/src/FairMQDeviceProxy.cxx | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index a83057e9578be..06a404360d741 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -320,10 +320,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto mInputRoutes.emplace_back(RouteState{channelIndex, false}); ri++; } - for (auto& route : mInputRoutes) { - assert(route.channel.value != -1); - assert(route.channel.value < mInputChannels.size()); - } + assert(std::all_of(mInputRoutes.begin(), mInputRoutes.end(), [s=mInputChannels.size()](InputRoute const&route) {return route.channel.value != -1 && route.channel.value < s;})); LOGP(detail, "Total input channels found {}, total routes {}", mInputChannels.size(), mInputRoutes.size()); assert(mInputRoutes.size() == inputs.size()); } From 2f1c99314b819dc2a288f89a2e3d9539ac83b3c8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:29:24 +0100 Subject: [PATCH 176/726] Fix CheckClusterSize.C to work with C++20 --- Detectors/Upgrades/ITS3/macros/test/CheckClusterSize.C | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Detectors/Upgrades/ITS3/macros/test/CheckClusterSize.C b/Detectors/Upgrades/ITS3/macros/test/CheckClusterSize.C index cd69cb9c2cf3c..c433ab855b70d 100755 --- a/Detectors/Upgrades/ITS3/macros/test/CheckClusterSize.C +++ b/Detectors/Upgrades/ITS3/macros/test/CheckClusterSize.C @@ -43,8 +43,9 @@ #include "SimulationDataFormat/MCCompLabel.h" #include "SimulationDataFormat/MCEventHeader.h" #include "SimulationDataFormat/MCTrack.h" -#include "SimulationDataFormat/MCTruthContainer.h" #endif +#define ENABLE_UPGRADES +#include "SimulationDataFormat/MCTruthContainer.h" static constexpr int nLayers = 4; // 3 Layers + 1 combined outer layer From 865a73033cdde0a20b7a61fda20fd98122d20d2f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:29:41 +0100 Subject: [PATCH 177/726] Get readZDCDigits.C to work with C++20 --- Detectors/ZDC/macro/readZDCDigits.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/ZDC/macro/readZDCDigits.C b/Detectors/ZDC/macro/readZDCDigits.C index b5baa77d0ef99..d6987ecc98994 100644 --- a/Detectors/ZDC/macro/readZDCDigits.C +++ b/Detectors/ZDC/macro/readZDCDigits.C @@ -5,13 +5,13 @@ 
#include #include #include -#include "DataFormatsZDC/BCData.h" #include "DataFormatsZDC/ChannelData.h" #include "SimulationDataFormat/MCTruthContainer.h" #include "SimulationDataFormat/MCCompLabel.h" #endif +#include "DataFormatsZDC/BCData.h" #include "DataFormatsZDC/MCLabel.h" #include "Framework/Logger.h" From cc70cb60b0db5998c114e73dd17b546f6271275a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:29:41 +0100 Subject: [PATCH 178/726] Fix run_digi2rawVarPage_its.C to work with C++20 --- Detectors/ITSMFT/ITS/macros/test/run_digi2rawVarPage_its.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/run_digi2rawVarPage_its.C b/Detectors/ITSMFT/ITS/macros/test/run_digi2rawVarPage_its.C index 253726f9c90fe..1c682fc25fff1 100644 --- a/Detectors/ITSMFT/ITS/macros/test/run_digi2rawVarPage_its.C +++ b/Detectors/ITSMFT/ITS/macros/test/run_digi2rawVarPage_its.C @@ -12,9 +12,9 @@ #include "DataFormatsITSMFT/ROFRecord.h" #include "DataFormatsParameters/GRPObject.h" #include "DataFormatsITSMFT/Digit.h" -#include "ITSMFTSimulation/MC2RawEncoder.h" #endif +#include "ITSMFTSimulation/MC2RawEncoder.h" // demo macro the MC->raw conversion with new (variable page size) format void setupLinks(o2::itsmft::MC2RawEncoder& m2r, const std::string& outPrefix); From e0a802c422ef02ba198e454f5333deeaaca1da70 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:29:41 +0100 Subject: [PATCH 179/726] Fix findTOFclusterFromLabel.C for C++20 --- Detectors/TOF/prototyping/findTOFclusterFromLabel.C | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Detectors/TOF/prototyping/findTOFclusterFromLabel.C b/Detectors/TOF/prototyping/findTOFclusterFromLabel.C index 6dc6299f3d46b..67ed487ecebc1 100644 --- a/Detectors/TOF/prototyping/findTOFclusterFromLabel.C +++ b/Detectors/TOF/prototyping/findTOFclusterFromLabel.C @@ -1,16 +1,16 @@ #if !defined(__CLING__) || defined(__ROOTCLING__) #include "TFile.h" #include "TTree.h" -#include "DataFormatsTOF/Cluster.h" -#include "SimulationDataFormat/MCTruthContainer.h" #include "SimulationDataFormat/MCCompLabel.h" #include "SimulationDataFormat/MCTrack.h" -#include "DataFormatsTPC/TrackTPC.h" #include "ReconstructionDataFormats/TrackTPCITS.h" #include "CommonDataFormat/EvIndex.h" #include "TOFBase/Geo.h" +#include "DataFormatsTPC/TrackTPC.h" #include "TOFBase/Digit.h" #endif +#include "SimulationDataFormat/MCTruthContainer.h" +#include "DataFormatsTOF/Cluster.h" void findTOFclusterFromLabel(int trackID, int eventID = 0, int sourceID = 0) { From f915223c762b5e4d9151891c90c649d4f6e964dd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:30:53 +0100 Subject: [PATCH 180/726] Fix CheckTracks.C for C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckTracks.C | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckTracks.C b/Detectors/ITSMFT/ITS/macros/test/CheckTracks.C index 79b2f52ad29df..11b03531d4af3 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckTracks.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckTracks.C @@ -15,16 +15,16 @@ #include "TGeoGlobalMagField.h" #include "Field/MagneticField.h" -#include "DetectorsBase/Propagator.h" #include "ITSBase/GeometryTGeo.h" -#include "SimulationDataFormat/TrackReference.h" +#include "DataFormatsITS/TrackITS.h" #include "SimulationDataFormat/MCTrack.h" #include 
"SimulationDataFormat/MCCompLabel.h" -#include "SimulationDataFormat/MCTruthContainer.h" -#include "DataFormatsITSMFT/CompCluster.h" -#include "DataFormatsITS/TrackITS.h" +#include "SimulationDataFormat/TrackReference.h" +#include "DetectorsBase/Propagator.h" #endif +#include "DataFormatsITSMFT/CompCluster.h" +#include "SimulationDataFormat/MCTruthContainer.h" using namespace std; From 402b1eaf54aaa12711b90f8538df8118e33755ee Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:31:14 +0100 Subject: [PATCH 181/726] Fix CheckClusters.C for C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckClusters.C | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckClusters.C b/Detectors/ITSMFT/ITS/macros/test/CheckClusters.C index 3f17e42b3a712..d4707bd683a30 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckClusters.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckClusters.C @@ -1,6 +1,18 @@ /// \file CheckClusters.C /// \brief Simple macro to check ITSU clusters +#include +#include +#include +#include +#include +#include + +#include "ITSMFTBase/SegmentationAlpide.h" +#include "ITSBase/GeometryTGeo.h" +#include "DataFormatsITSMFT/CompCluster.h" +#include "DataFormatsITSMFT/TopologyDictionary.h" +#include "ITSMFTSimulation/Hit.h" #if !defined(__CLING__) || defined(__ROOTCLING__) #include #include From 065aeb939f41ccc61c91369ce3996a93eae316dd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:37:00 +0100 Subject: [PATCH 182/726] Fix CheckVertices.C for C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckVertices.C | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckVertices.C b/Detectors/ITSMFT/ITS/macros/test/CheckVertices.C index f348272425b77..2da3ed9b97a28 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckVertices.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckVertices.C @@ -29,6 +29,7 @@ #include "DataFormatsITSMFT/ROFRecord.h" #include "DataFormatsITS/TrackITS.h" #endif +#include "DataFormatsITSMFT/CompCluster.h" o2::MCCompLabel getMainLabel(std::vector& labs); @@ -335,4 +336,4 @@ void CheckVertices(const int dumprof = -1, std::string path = "tf1/", std::strin LOGP(info, "Summary:"); LOGP(info, "Found {} vertices in {} usable out of {} simulated", nvt, nevts, simVerts.size()); LOGP(info, "Average good vertexing efficiency: {}%", (addeff / (float)nroffilled) * 100); -} \ No newline at end of file +} From 9621b319d0fd99a98075bafd9c7aacec11b809f2 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:31:14 +0100 Subject: [PATCH 183/726] Fix convertTreeTo02object.C for C++20 --- Detectors/TOF/prototyping/convertTreeTo02object.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/TOF/prototyping/convertTreeTo02object.C b/Detectors/TOF/prototyping/convertTreeTo02object.C index 060072ef21e6a..2e46e9842bf33 100644 --- a/Detectors/TOF/prototyping/convertTreeTo02object.C +++ b/Detectors/TOF/prototyping/convertTreeTo02object.C @@ -3,8 +3,8 @@ #include "TTree.h" #include "TLeaf.h" #include -#include "DataFormatsTOF/CalibInfoTOF.h" #endif +#include "DataFormatsTOF/CalibInfoTOF.h" void convertTreeTo02object() { From adc9d104ce7bebacb6f87a9ec25d5f22c6720e02 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:31:14 +0100 Subject: [PATCH 184/726] Fix CheckHits.C for C++20 --- 
Detectors/Upgrades/ITS3/macros/test/CheckHits.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/Upgrades/ITS3/macros/test/CheckHits.C b/Detectors/Upgrades/ITS3/macros/test/CheckHits.C index bb1a8073fc646..5c0f945898471 100644 --- a/Detectors/Upgrades/ITS3/macros/test/CheckHits.C +++ b/Detectors/Upgrades/ITS3/macros/test/CheckHits.C @@ -31,12 +31,12 @@ #include #include +#endif #define ENABLE_UPGRADES #include "CommonConstants/MathConstants.h" #include "ITS3Base/SegmentationSuperAlpide.h" #include "ITSMFTSimulation/Hit.h" #include "SimulationDataFormat/MCTrack.h" -#endif using o2::itsmft::Hit; From b74c34495bd145e454e0455e0cab0e2714b3dc6f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:31:14 +0100 Subject: [PATCH 185/726] Fix CheckDuplicates.C for C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C b/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C index 2736a79beb528..e36f249ef70dc 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C @@ -23,11 +23,10 @@ #include #include -#include "DataFormatsITSMFT/ROFRecord.h" -#include "DataFormatsITS/TrackITS.h" -#include "Framework/Logger.h" +#include "DataFormatsITS/TrackITS.h" #endif +#include "DataFormatsITSMFT/ROFRecord.h" void CheckDuplicates(TString tracfile = "./o2trac_its.root", TString output = ".", TString rootSave = "", int rofStart = 0, int rofEnd = -1, int windowSize = 2, bool includeSame = 1) { From d50929303da5930023ad16168a5c30db888910d5 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:31:14 +0100 Subject: [PATCH 186/726] Fix CheckTracksITS3.C for C++20 --- .../ITS3/macros/test/CheckTracksITS3.C | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Detectors/Upgrades/ITS3/macros/test/CheckTracksITS3.C b/Detectors/Upgrades/ITS3/macros/test/CheckTracksITS3.C index 8b0383246e7fc..c04dae6309d7e 100644 --- a/Detectors/Upgrades/ITS3/macros/test/CheckTracksITS3.C +++ b/Detectors/Upgrades/ITS3/macros/test/CheckTracksITS3.C @@ -12,7 +12,17 @@ /// \file CheckTracksITS3.C /// \brief Simple macro to check ITS3 tracks +#include #if !defined(__CLING__) || defined(__ROOTCLING__) +#include + +#include "DataFormatsITS/TrackITS.h" +#include "TGeoGlobalMagField.h" +#include "Field/MagneticField.h" +#include "DetectorsBase/Propagator.h" +#include "ITSBase/GeometryTGeo.h" +#include "DataFormatsITS3/CompCluster.h" +#endif #include #include #include @@ -26,23 +36,13 @@ #include #include #include -#include - -#include "TGeoGlobalMagField.h" -#include "Field/MagneticField.h" -#include "DataFormatsITS3/CompCluster.h" #include "DataFormatsITSMFT/CompCluster.h" -#include "DataFormatsITS/TrackITS.h" -#include "DetectorsBase/Propagator.h" -#include "ITSBase/GeometryTGeo.h" #include "SimulationDataFormat/TrackReference.h" #include "SimulationDataFormat/MCTrack.h" #include "SimulationDataFormat/MCCompLabel.h" #include "SimulationDataFormat/MCTruthContainer.h" #include "SimulationDataFormat/MCEventHeader.h" -#endif - using namespace std; struct ParticleInfo { From a7db50078b637bad188eed12b300383fb0d37b8f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:31:14 +0100 Subject: [PATCH 187/726] Fix misAlign.C to work with C++20 --- 
Detectors/MUON/MCH/Geometry/Test/misAlign.C | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/Detectors/MUON/MCH/Geometry/Test/misAlign.C b/Detectors/MUON/MCH/Geometry/Test/misAlign.C index 38a4a7c32eaeb..c49fe5717a36b 100644 --- a/Detectors/MUON/MCH/Geometry/Test/misAlign.C +++ b/Detectors/MUON/MCH/Geometry/Test/misAlign.C @@ -1,21 +1,14 @@ +#include "DetectorsBase/MaterialManager.h" +#include "DetectorsCommonDataFormats/AlignParam.h" #if !defined(__CLING__) || defined(__ROOTCLING__) //#include "MCHGeometryTest/Helpers.h" -#include "DetectorsBase/GeometryManager.h" -#include "DetectorsBase/MaterialManager.h" -#include "DetectorsCommonDataFormats/AlignParam.h" #include "DetectorsCommonDataFormats/DetID.h" #include "DetectorsCommonDataFormats/DetectorNameConf.h" #include "MCHGeometryMisAligner/MisAligner.h" #include "MCHGeometryTransformer/Transformations.h" -#include "MCHGeometryTest/Helpers.h" -#include "MCHGeometryCreator/Geometry.h" -#include "CCDB/CcdbApi.h" - -#include "MathUtils/Cartesian.h" -#include "Math/GenVector/Cartesian3D.h" #include "TGLRnrCtx.h" #include "TGLViewer.h" #include "TGeoManager.h" @@ -26,7 +19,13 @@ #include #include +#include "MCHGeometryTest/Helpers.h" +#include "MCHGeometryCreator/Geometry.h" +#include "CCDB/CcdbApi.h" +#include "MathUtils/Cartesian.h" +#include "Math/GenVector/Cartesian3D.h" +#include "DetectorsBase/GeometryManager.h" #endif // void misAlign() From fe746e2806acae6e75370067b536769fa16880f0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:39:19 +0100 Subject: [PATCH 188/726] Fix CheckTracksCA.C to work with C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C b/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C index f1a3d2af4e469..7312a6845f1a3 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C @@ -21,10 +21,10 @@ #include "SimulationDataFormat/MCTrack.h" #include "SimulationDataFormat/MCCompLabel.h" #include "SimulationDataFormat/MCTruthContainer.h" -#include "DataFormatsITSMFT/CompCluster.h" #include "DataFormatsITS/TrackITS.h" #endif +#include "DataFormatsITSMFT/CompCluster.h" using namespace std; From 695718eab76b4eb7cf6f4284837c2b86cdd72cc8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:37:04 +0100 Subject: [PATCH 189/726] Fix toyCluster.C to work with C++20 --- Detectors/TPC/simulation/macro/toyCluster.C | 1 + 1 file changed, 1 insertion(+) diff --git a/Detectors/TPC/simulation/macro/toyCluster.C b/Detectors/TPC/simulation/macro/toyCluster.C index 2087a7773fe22..b7c5f74633cfc 100644 --- a/Detectors/TPC/simulation/macro/toyCluster.C +++ b/Detectors/TPC/simulation/macro/toyCluster.C @@ -59,6 +59,7 @@ #include "TPCSimulation/GEMAmplification.h" #endif #include +#include "DataFormatsTPC/Helpers.h" using namespace o2::tpc; void fillTPCHits(const float theta, const float phi, const float dedx, std::vector& hitGroupSector, std::pair& trackInfo); From d70a4c956b66f3a94079dd5d3e445353d76b0e56 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:16:55 +0100 Subject: [PATCH 190/726] Fix macros to work with C++20 --- macro/run_clus_tpc.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macro/run_clus_tpc.C b/macro/run_clus_tpc.C index 
ce703e65ae999..950eb45f68851 100644 --- a/macro/run_clus_tpc.C +++ b/macro/run_clus_tpc.C @@ -14,8 +14,8 @@ #include "FairParRootFileIo.h" #include -#include "TPCReconstruction/ClustererTask.h" #endif +#include "TPCReconstruction/ClustererTask.h" void run_clus_tpc(std::string outputfile = "o2clus_tpc.root", std::string inputfile = "o2dig.root", std::string paramfile = "o2sim_par.root", bool isContinuous = true, unsigned threads = 0); From e83336c1a32c547d627c1b50956e6ffb3331d068 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:24:09 +0100 Subject: [PATCH 191/726] Fix macros to compile with C++20 --- Framework/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 Framework/CHANGELOG.md diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md new file mode 100644 index 0000000000000..2973174f81d28 --- /dev/null +++ b/Framework/CHANGELOG.md @@ -0,0 +1,4 @@ +* Improved C++20 support. Most of the macros which were failing when C++20 + support is enabled now seem to work fine. The issue seems to be related to + some forward declaration logic which seems to be not working correctly in + ROOT 6.30.01. The issue is discussed in and it seems to be not trivial to fix with the current ROOT version. From 65ff7892821b932a2a4864b19be3c0096e13734f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:39:18 +0100 Subject: [PATCH 192/726] Fix CheckDuplicates.C for C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C b/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C index 2736a79beb528..abe707225ab30 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckDuplicates.C @@ -23,11 +23,9 @@ #include #include -#include "DataFormatsITSMFT/ROFRecord.h" #include "DataFormatsITS/TrackITS.h" -#include "Framework/Logger.h" - #endif +#include "DataFormatsITSMFT/ROFRecord.h" void CheckDuplicates(TString tracfile = "./o2trac_its.root", TString output = ".", TString rootSave = "", int rofStart = 0, int rofEnd = -1, int windowSize = 2, bool includeSame = 1) { From 48a887cd562980018f5329a86bdfdc1c76b443ee Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:39:19 +0100 Subject: [PATCH 193/726] Fix misAlign.C to work with C++20 --- Detectors/MUON/MCH/Geometry/Test/misAlign.C | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/Detectors/MUON/MCH/Geometry/Test/misAlign.C b/Detectors/MUON/MCH/Geometry/Test/misAlign.C index 38a4a7c32eaeb..c49fe5717a36b 100644 --- a/Detectors/MUON/MCH/Geometry/Test/misAlign.C +++ b/Detectors/MUON/MCH/Geometry/Test/misAlign.C @@ -1,21 +1,14 @@ +#include "DetectorsBase/MaterialManager.h" +#include "DetectorsCommonDataFormats/AlignParam.h" #if !defined(__CLING__) || defined(__ROOTCLING__) //#include "MCHGeometryTest/Helpers.h" -#include "DetectorsBase/GeometryManager.h" -#include "DetectorsBase/MaterialManager.h" -#include "DetectorsCommonDataFormats/AlignParam.h" #include "DetectorsCommonDataFormats/DetID.h" #include "DetectorsCommonDataFormats/DetectorNameConf.h" #include "MCHGeometryMisAligner/MisAligner.h" #include "MCHGeometryTransformer/Transformations.h" -#include "MCHGeometryTest/Helpers.h" -#include "MCHGeometryCreator/Geometry.h" -#include "CCDB/CcdbApi.h" - -#include "MathUtils/Cartesian.h" 
-#include "Math/GenVector/Cartesian3D.h" #include "TGLRnrCtx.h" #include "TGLViewer.h" #include "TGeoManager.h" @@ -26,7 +19,13 @@ #include #include +#include "MCHGeometryTest/Helpers.h" +#include "MCHGeometryCreator/Geometry.h" +#include "CCDB/CcdbApi.h" +#include "MathUtils/Cartesian.h" +#include "Math/GenVector/Cartesian3D.h" +#include "DetectorsBase/GeometryManager.h" #endif // void misAlign() From 902ceb512df89a817a439edda65e6c5a3aeb9440 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:39:20 +0100 Subject: [PATCH 194/726] Fix CheckTracksCA.C to work with C++20 --- Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C b/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C index f1a3d2af4e469..7312a6845f1a3 100644 --- a/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C +++ b/Detectors/ITSMFT/ITS/macros/test/CheckTracksCA.C @@ -21,10 +21,10 @@ #include "SimulationDataFormat/MCTrack.h" #include "SimulationDataFormat/MCCompLabel.h" #include "SimulationDataFormat/MCTruthContainer.h" -#include "DataFormatsITSMFT/CompCluster.h" #include "DataFormatsITS/TrackITS.h" #endif +#include "DataFormatsITSMFT/CompCluster.h" using namespace std; From 0394453dbf8d15484f5d92d3f6676484134b0c5a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:39:20 +0100 Subject: [PATCH 195/726] Fix toyCluster.C to work with C++20 --- Detectors/TPC/simulation/macro/toyCluster.C | 1 + 1 file changed, 1 insertion(+) diff --git a/Detectors/TPC/simulation/macro/toyCluster.C b/Detectors/TPC/simulation/macro/toyCluster.C index 2087a7773fe22..b7c5f74633cfc 100644 --- a/Detectors/TPC/simulation/macro/toyCluster.C +++ b/Detectors/TPC/simulation/macro/toyCluster.C @@ -59,6 +59,7 @@ #include "TPCSimulation/GEMAmplification.h" #endif #include +#include "DataFormatsTPC/Helpers.h" using namespace o2::tpc; void fillTPCHits(const float theta, const float phi, const float dedx, std::vector& hitGroupSector, std::pair& trackInfo); From 41e1fcd045cf0313af4925e49d10a159a3b40c98 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:28:05 +0100 Subject: [PATCH 196/726] Fix run_clus_tpc to work with C++20 --- macro/run_clus_tpc.C | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/macro/run_clus_tpc.C b/macro/run_clus_tpc.C index ce703e65ae999..950eb45f68851 100644 --- a/macro/run_clus_tpc.C +++ b/macro/run_clus_tpc.C @@ -14,8 +14,8 @@ #include "FairParRootFileIo.h" #include -#include "TPCReconstruction/ClustererTask.h" #endif +#include "TPCReconstruction/ClustererTask.h" void run_clus_tpc(std::string outputfile = "o2clus_tpc.root", std::string inputfile = "o2dig.root", std::string paramfile = "o2sim_par.root", bool isContinuous = true, unsigned threads = 0); From e41c4dad9bdd5e37f1bdcc91403ac759f6313eaa Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:28:13 +0100 Subject: [PATCH 197/726] Fix macros to compile with C++20 --- Framework/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 Framework/CHANGELOG.md diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md new file mode 100644 index 0000000000000..2973174f81d28 --- /dev/null +++ b/Framework/CHANGELOG.md @@ -0,0 +1,4 @@ +* Improved C++20 support. 
Most of the macros which were failing when C++20 + support is enabled now seem to work fine. The issue seems to be related to + some forward declaration logic which seems to be not working correctly in + ROOT 6.30.01. The issue is discussed in and it seems to be not trivial to fix with the current ROOT version. From b10d39b0634af85e70e6e77b87b747ce720ca445 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 9 Jan 2024 13:46:44 +0100 Subject: [PATCH 198/726] Drop --old option for real --- Detectors/TOF/compression/src/tof-compressor.cxx | 5 ----- 1 file changed, 5 deletions(-) diff --git a/Detectors/TOF/compression/src/tof-compressor.cxx b/Detectors/TOF/compression/src/tof-compressor.cxx index 3bb0899f5e91c..dde9cc3cc6c23 100644 --- a/Detectors/TOF/compression/src/tof-compressor.cxx +++ b/Detectors/TOF/compression/src/tof-compressor.cxx @@ -44,7 +44,6 @@ void customize(std::vector& workflowOptions) workflowOptions.push_back(paranoid); workflowOptions.push_back(ignoreStf); workflowOptions.push_back(payloadlim); - workflowOptions.emplace_back(ConfigParamSpec{"old", VariantType::Bool, false, {"use the non-DPL version of the compressor"}}); workflowOptions.push_back(ConfigParamSpec{"configKeyValues", VariantType::String, "", {"Semicolon separated key=value strings"}}); } @@ -60,10 +59,6 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) auto verbose = cfgc.options().get("tof-compressor-verbose"); auto paranoid = cfgc.options().get("tof-compressor-paranoid"); auto ignoreStf = cfgc.options().get("ignore-dist-stf"); - auto old = cfgc.options().get("old"); - if (old) { - LOGP(error, "--old option is not actually supported, using the new version"); - } auto payloadLim = cfgc.options().get("payload-limit"); std::vector outputs; From 91f782e229996d55cb7343b2cd0e695cd612df40 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 10 Jan 2024 11:41:03 +0100 Subject: [PATCH 199/726] DPL: fix warning and simplify assert. --- Framework/Core/include/Framework/MessageContext.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Framework/Core/include/Framework/MessageContext.h b/Framework/Core/include/Framework/MessageContext.h index 4f6674576e8ef..c3b0e1f822b81 100644 --- a/Framework/Core/include/Framework/MessageContext.h +++ b/Framework/Core/include/Framework/MessageContext.h @@ -498,9 +498,7 @@ class MessageContext void clear() { // Verify that everything has been sent on clear. - for (auto& m : mMessages) { - assert(m->empty()); - } + assert(std::all_of(mMessages.begin(), mMessages.end(), [](auto& m) { return m->empty(); })); mMessages.clear(); } From 42a900a173bab9553508242361dea19030544f84 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 10 Jan 2024 11:43:57 +0100 Subject: [PATCH 200/726] DPL: move MessageContext::schedule out of line --- .../Core/include/Framework/MessageContext.h | 33 +------------------ Framework/Core/src/MessageContext.cxx | 33 +++++++++++++++++++ 2 files changed, 34 insertions(+), 32 deletions(-) diff --git a/Framework/Core/include/Framework/MessageContext.h b/Framework/Core/include/Framework/MessageContext.h index c3b0e1f822b81..c1f2c695ff58a 100644 --- a/Framework/Core/include/Framework/MessageContext.h +++ b/Framework/Core/include/Framework/MessageContext.h @@ -444,38 +444,7 @@ class MessageContext /// Schedule a context object for sending. 
/// The object is considered complete at this point and is sent directly through the dispatcher callback /// of the context if initialized. - void schedule(Messages::value_type&& message) - { - auto const* header = message->header(); - if (header == nullptr) { - throw std::logic_error("No valid header message found"); - } - mScheduledMessages.emplace_back(std::move(message)); - if (mDispatchControl.dispatch != nullptr) { - // send all scheduled messages if there is no trigger callback or its result is true - if (mDispatchControl.trigger == nullptr || mDispatchControl.trigger(*header)) { - std::vector outputsPerChannel; - outputsPerChannel.resize(mProxy.getNumOutputChannels()); - for (auto& message : mScheduledMessages) { - fair::mq::Parts parts = message->finalize(); - assert(message->empty()); - assert(parts.Size() == 2); - for (auto& part : parts) { - outputsPerChannel[mProxy.getOutputChannelIndex(message->route()).value].AddPart(std::move(part)); - } - } - for (int ci = 0; ci < mProxy.getNumOutputChannels(); ++ci) { - auto& parts = outputsPerChannel[ci]; - if (parts.Size() == 0) { - continue; - } - mDispatchControl.dispatch(std::move(parts), ChannelIndex{ci}, DefaultChannelIndex); - } - mDidDispatch = mScheduledMessages.empty() == false; - mScheduledMessages.clear(); - } - } - } + void schedule(Messages::value_type&& message); Messages getMessagesForSending() { diff --git a/Framework/Core/src/MessageContext.cxx b/Framework/Core/src/MessageContext.cxx index 74a7a838aa78e..a7365594785c7 100644 --- a/Framework/Core/src/MessageContext.cxx +++ b/Framework/Core/src/MessageContext.cxx @@ -123,4 +123,37 @@ void MessageContext::pruneFromCache(int64_t id) mMessageCache.erase(id); } +void MessageContext::schedule(Messages::value_type&& message) +{ + auto const* header = message->header(); + if (header == nullptr) { + throw std::logic_error("No valid header message found"); + } + mScheduledMessages.emplace_back(std::move(message)); + if (mDispatchControl.dispatch != nullptr) { + // send all scheduled messages if there is no trigger callback or its result is true + if (mDispatchControl.trigger == nullptr || mDispatchControl.trigger(*header)) { + std::vector outputsPerChannel; + outputsPerChannel.resize(mProxy.getNumOutputChannels()); + for (auto& message : mScheduledMessages) { + fair::mq::Parts parts = message->finalize(); + assert(message->empty()); + assert(parts.Size() == 2); + for (auto& part : parts) { + outputsPerChannel[mProxy.getOutputChannelIndex(message->route()).value].AddPart(std::move(part)); + } + } + for (int ci = 0; ci < mProxy.getNumOutputChannels(); ++ci) { + auto& parts = outputsPerChannel[ci]; + if (parts.Size() == 0) { + continue; + } + mDispatchControl.dispatch(std::move(parts), ChannelIndex{ci}, DefaultChannelIndex); + } + mDidDispatch = mScheduledMessages.empty() == false; + mScheduledMessages.clear(); + } + } +} + } // namespace o2::framework From 51820539a485c939a0a1c6c490549c8b06167d37 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 10 Jan 2024 11:41:03 +0100 Subject: [PATCH 201/726] DPL: drop duplicate include statement --- Framework/Core/test/test_TMessageSerializer.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index b54d3cf1a7da5..bc5f817400a44 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -13,7 +13,6 @@ #include "Framework/RuntimeError.h" #include 
"TestClasses.h" #include -#include #include using namespace o2::framework; From 3882264a0b149759338ba9e39a5bf5b5bfee9c03 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 10 Jan 2024 11:43:58 +0100 Subject: [PATCH 202/726] DPL: detect when a Lifetime::Timeframe output is missing --- .../Core/include/Framework/StreamContext.h | 7 +++ Framework/Core/src/CommonServices.cxx | 27 ++++++++ Framework/Core/src/DataAllocator.cxx | 3 + Framework/TestWorkflows/CMakeLists.txt | 3 + .../test/test_DetectMissingTimeframe.cxx | 62 +++++++++++++++++++ 5 files changed, 102 insertions(+) create mode 100644 Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h index 0ab353e147276..198802179a419 100644 --- a/Framework/Core/include/Framework/StreamContext.h +++ b/Framework/Core/include/Framework/StreamContext.h @@ -63,6 +63,13 @@ struct StreamContext { // Notice that in such a case all the services will be created upfront, so // the callback will be called for all of them. std::vector preStartStreamHandles; + + // Information on wether or not all the required routes have been created. + // This is used to check if the LifetimeTimeframe routes were all created + // for a given iteration. + // This is in the stream context to allow tracking data creation on a per thread + // basis. + std::vector routeCreated; }; } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 05b8fba47e532..b5351c8822cb1 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -162,6 +162,33 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() .uniqueId = simpleServiceId(), .init = simpleServiceInit(), .configure = noConfiguration(), + .preProcessing = [](ProcessingContext& context, void* service) { + auto* stream = (StreamContext*)service; + auto& routes = context.services().get().outputs; + // Notice I need to do this here, because different invocation for + // the same stream might be referring to different data processors. + // We should probably have a context which is per stream of a specific + // data processor. 
+ stream->routeCreated.resize(routes.size()); + // Reset the routeCreated at every processing step + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); }, + .postProcessing = [](ProcessingContext& processingContext, void* service) { + auto* stream = (StreamContext*)service; + auto& routes = processingContext.services().get().outputs; + auto& timeslice = processingContext.services().get().timeslice; + for (size_t ri = 0; ri < routes.size(); ++ri) { + if (stream->routeCreated[ri] == true) { + continue; + } + auto &route = routes[ri]; + auto &matcher = route.matcher; + if ((timeslice % route.maxTimeslices) != route.timeslice) { + continue; + } + if (matcher.lifetime == Lifetime::Timeframe) { + LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); + } + } }, .kind = ServiceKind::Stream}; } diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 1230d2e942f6e..9ce72aed8c1cf 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -19,6 +19,7 @@ #include "Framework/FairMQResizableBuffer.h" #include "Framework/DataProcessingContext.h" #include "Framework/DeviceSpec.h" +#include "Framework/StreamContext.h" #include "Headers/Stack.h" #include @@ -47,10 +48,12 @@ DataAllocator::DataAllocator(ServiceRegistryRef contextRegistry) RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice) { auto& allowedOutputRoutes = mRegistry.get().outputs; + auto& stream = mRegistry.get(); // FIXME: we should take timeframeId into account as well. for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) { auto& route = allowedOutputRoutes[ri]; if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) { + stream.routeCreated[ri] = true; return RouteIndex{ri}; } } diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 7a396c0f2c3a9..64e9dddbb020e 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,6 +12,9 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(detect-missing-timeframe + SOURCES test/test_DetectMissingTimeframe.cxx + COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx new file mode 100644 index 0000000000000..3374cd9ad6cf2 --- /dev/null +++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx @@ -0,0 +1,62 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"a1"}, "TST", "A1"}, + OutputSpec{{"a2"}, "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + outputs.make(OutputRef{"a1"}, 1); + static int i = 0; + outputs.make(OutputRef{"a1"}, 1); + if (i++ % 2 == 0) { + outputs.make(OutputRef{"a2"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1"}, + InputSpec{"a2", "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto ref = inputs.get("a1"); + auto header = o2::header::get(ref.header); + LOG(info) << "Start time: " << header->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{d}); +} From 8d12ff88993c9fb217f6dd00b4c1658b8779f6be Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 10 Jan 2024 11:49:51 +0100 Subject: [PATCH 203/726] DPL: detect when a Lifetime::Timeframe output is missing --- .../Core/include/Framework/StreamContext.h | 7 +++ Framework/Core/src/CommonServices.cxx | 27 ++++++++ Framework/Core/src/DataAllocator.cxx | 3 + Framework/TestWorkflows/CMakeLists.txt | 3 + .../test/test_DetectMissingTimeframe.cxx | 62 +++++++++++++++++++ 5 files changed, 102 insertions(+) create mode 100644 Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h index 0ab353e147276..198802179a419 100644 --- a/Framework/Core/include/Framework/StreamContext.h +++ b/Framework/Core/include/Framework/StreamContext.h @@ -63,6 +63,13 @@ struct StreamContext { // Notice that in such a case all the services will be created upfront, so // the callback will be called for all of them. std::vector preStartStreamHandles; + + // Information on wether or not all the required routes have been created. + // This is used to check if the LifetimeTimeframe routes were all created + // for a given iteration. + // This is in the stream context to allow tracking data creation on a per thread + // basis. 
+ std::vector routeCreated; }; } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 05b8fba47e532..1a79cc8afacba 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -162,6 +162,33 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() .uniqueId = simpleServiceId(), .init = simpleServiceInit(), .configure = noConfiguration(), + .preProcessing = [](ProcessingContext& context, void* service) { + auto* stream = (StreamContext*)service; + auto& routes = context.services().get().outputs; + // Notice I need to do this here, because different invocation for + // the same stream might be referring to different data processors. + // We should probably have a context which is per stream of a specific + // data processor. + stream->routeCreated.resize(routes.size()); + // Reset the routeCreated at every processing step + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); }, + .postProcessing = [](ProcessingContext& processingContext, void* service) { + auto* stream = (StreamContext*)service; + auto& routes = processingContext.services().get().outputs; + auto& timeslice = processingContext.services().get().timeslice; + for (size_t ri = 0; ri < routes.size(); ++ri) { + if (stream->routeCreated[ri] == true) { + continue; + } + auto &route = routes[ri]; + auto &matcher = route.matcher; + if ((timeslice % route.maxTimeslices) != route.timeslice) { + continue; + } + if (matcher.lifetime == Lifetime::Timeframe) { + LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); + } + } }, .kind = ServiceKind::Stream}; } diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 1230d2e942f6e..9ce72aed8c1cf 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -19,6 +19,7 @@ #include "Framework/FairMQResizableBuffer.h" #include "Framework/DataProcessingContext.h" #include "Framework/DeviceSpec.h" +#include "Framework/StreamContext.h" #include "Headers/Stack.h" #include @@ -47,10 +48,12 @@ DataAllocator::DataAllocator(ServiceRegistryRef contextRegistry) RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice) { auto& allowedOutputRoutes = mRegistry.get().outputs; + auto& stream = mRegistry.get(); // FIXME: we should take timeframeId into account as well. 
for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) { auto& route = allowedOutputRoutes[ri]; if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) { + stream.routeCreated[ri] = true; return RouteIndex{ri}; } } diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 7a396c0f2c3a9..64e9dddbb020e 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,6 +12,9 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(detect-missing-timeframe + SOURCES test/test_DetectMissingTimeframe.cxx + COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx new file mode 100644 index 0000000000000..3374cd9ad6cf2 --- /dev/null +++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx @@ -0,0 +1,62 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"a1"}, "TST", "A1"}, + OutputSpec{{"a2"}, "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + outputs.make(OutputRef{"a1"}, 1); + static int i = 0; + outputs.make(OutputRef{"a1"}, 1); + if (i++ % 2 == 0) { + outputs.make(OutputRef{"a2"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1"}, + InputSpec{"a2", "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto ref = inputs.get("a1"); + auto header = o2::header::get(ref.header); + LOG(info) << "Start time: " << header->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{d}); +} From b41d10b74e22566e2ae77e0a4f3e08a439531b87 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 11 Jan 2024 13:48:25 +0100 Subject: [PATCH 204/726] DPL: remove need for special engineering type We can simply use Instruments one and convert them to something sensible when using the FairLogger implementation. 
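A purely illustrative note, not part of the diff below: on the non-Apple
(FairLogger) side the macros now pass the format string through the
constexpr remove_engineering_type() helper, so an annotated format such as
the one used in runDataProcessing.cxx

    "bytes processed by %{xcode:pid}d"

reaches the logger as a plain printf string

    "bytes processed by %d"

while on macOS the annotated string is handed to os_signpost unchanged, so
Instruments can still interpret the engineering type.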
--- Framework/Core/src/runDataProcessing.cxx | 4 +- .../Foundation/include/Framework/Signpost.h | 48 ++++++++++++++----- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index e9d1f3d4ee314..325417235cd83 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -800,7 +800,7 @@ void processChildrenOutput(DriverInfo& driverInfo, } O2_SIGNPOST_ID_FROM_POINTER(sid, driver, &info); - O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by " O2_ENG_TYPE(pid, "d"), info.pid); + O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by %{xcode:pid}d", info.pid); std::string_view s = info.unprinted; size_t pos = 0; @@ -848,7 +848,7 @@ void processChildrenOutput(DriverInfo& driverInfo, size_t oldSize = info.unprinted.size(); info.unprinted = std::string(s); int64_t bytesProcessed = oldSize - info.unprinted.size(); - O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by " O2_ENG_TYPE(network - size - in - bytes, PRIi64), bytesProcessed); + O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by %{xcode:network-size-in-bytes}" PRIi64, bytesProcessed); } } diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 44421687a75c9..f286e43d16afc 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -12,6 +12,7 @@ #define O2_FRAMEWORK_SIGNPOST_H_ #include +#include struct o2_log_handle_t { char const* name = nullptr; @@ -19,6 +20,31 @@ struct o2_log_handle_t { o2_log_handle_t* next = nullptr; }; +// Helper function which replaces engineering types with a printf +// compatible format string. +template +constexpr auto remove_engineering_type(char const (&src)[N]) { + std::array res = {}; + // do whatever string manipulation you want in res. + char *t = res.data(); + for (int i = 0; i < N; ++i) { + if (src[i] == '%' && src[i+1] == '{') + { + *t++ = src[i]; + while (src[i] != '}' && src[i] != 0) { + ++i; + } + if (src[i] == 0) { + *t = 0; + return res; + } + } else { + *t++ = src[i]; + } + } + return res; +} + // Loggers registry is actually a feature available to all platforms // We use this to register the loggers and to walk over them. // So that also on mac we can have a list of all the registered loggers. @@ -71,10 +97,10 @@ void* _o2_log_create(char const* name, char const* category); #define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) os_signpost_event_emit(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, ...) os_signpost_interval_end(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_ENG_TYPE(x, what) "%{xcode:" #x "}" what +// FIXME: use __VA_OPT__ when available in C++20 +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) 
os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) #ifdef O2_SIGNPOST_IMPLEMENTATION /// We use a wrapper so that we can keep track of the logs. @@ -465,10 +491,9 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_ENG_TYPE(x, what) "%" what +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) #else // This is the release implementation, it does nothing. #define O2_DECLARE_DYNAMIC_LOG(x) #define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) @@ -478,10 +503,9 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DEBUG(log, ...) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) -#define O2_SIGNPOST_START(log, id, name, ...) -#define O2_SIGNPOST_END(log, id, name, ...) -#define O2_ENG_TYPE(x) +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) +#define O2_SIGNPOST_START(log, id, name, format, ...) +#define O2_SIGNPOST_END(log, id, name, format, ...) 
#endif #endif // O2_FRAMEWORK_SIGNPOST_H_ diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index faf5a259fc6a6..24b6afaec5c3d 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -38,7 +38,7 @@ int main(int argc, char** argv) // This has an engineering type, which we will not use on Linux / FairLogger O2_SIGNPOST_ID_FROM_POINTER(id4, test_Signpost, &id3); - O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter " O2_ENG_TYPE(size - in - bytes, "d"), 1); + O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter %{size-in-bytes}d", 1); O2_SIGNPOST_END(test_Signpost, id4, "Test category", "A signpost interval from a pointer"); O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 4ce862dfb4c74..74da35abf7c70 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -46,7 +46,7 @@ int main(int argc, char** argv) // This has an engineering type, which we will not use on Linux / FairLogger O2_SIGNPOST_ID_FROM_POINTER(id4, test_Signpost, &id3); - O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter " O2_ENG_TYPE(size - in - bytes, "d"), 1); + O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter %{size-in-bytes}d", 1); O2_SIGNPOST_END(test_Signpost, id4, "Test category", "A signpost interval from a pointer"); O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); From 3e520dfee34d48a356e06e64bb0a3a6079ee8efd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 11 Jan 2024 19:54:46 +0100 Subject: [PATCH 205/726] DPL: remove need for special engineering type We can simply use Instruments one and convert them to something sensible when using the FairLogger implementation. 
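Illustrative sketch only, not something this patch changes: the FIXME about
__VA_OPT__ in the Apple branch below refers to the standard C++20
replacement for the GNU ##__VA_ARGS__ extension. Once it can be relied on,
the forwarding could look roughly like

    #define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log, id, name, format __VA_OPT__(,) __VA_ARGS__)

with the same pattern applied to the START and END variants.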
--- Framework/Core/src/runDataProcessing.cxx | 4 +- .../Foundation/include/Framework/Signpost.h | 48 ++++++++++++++----- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index e9d1f3d4ee314..325417235cd83 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -800,7 +800,7 @@ void processChildrenOutput(DriverInfo& driverInfo, } O2_SIGNPOST_ID_FROM_POINTER(sid, driver, &info); - O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by " O2_ENG_TYPE(pid, "d"), info.pid); + O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by %{xcode:pid}d", info.pid); std::string_view s = info.unprinted; size_t pos = 0; @@ -848,7 +848,7 @@ void processChildrenOutput(DriverInfo& driverInfo, size_t oldSize = info.unprinted.size(); info.unprinted = std::string(s); int64_t bytesProcessed = oldSize - info.unprinted.size(); - O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by " O2_ENG_TYPE(network - size - in - bytes, PRIi64), bytesProcessed); + O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by %{xcode:network-size-in-bytes}" PRIi64, bytesProcessed); } } diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 44421687a75c9..f286e43d16afc 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -12,6 +12,7 @@ #define O2_FRAMEWORK_SIGNPOST_H_ #include +#include struct o2_log_handle_t { char const* name = nullptr; @@ -19,6 +20,31 @@ struct o2_log_handle_t { o2_log_handle_t* next = nullptr; }; +// Helper function which replaces engineering types with a printf +// compatible format string. +template +constexpr auto remove_engineering_type(char const (&src)[N]) { + std::array res = {}; + // do whatever string manipulation you want in res. + char *t = res.data(); + for (int i = 0; i < N; ++i) { + if (src[i] == '%' && src[i+1] == '{') + { + *t++ = src[i]; + while (src[i] != '}' && src[i] != 0) { + ++i; + } + if (src[i] == 0) { + *t = 0; + return res; + } + } else { + *t++ = src[i]; + } + } + return res; +} + // Loggers registry is actually a feature available to all platforms // We use this to register the loggers and to walk over them. // So that also on mac we can have a list of all the registered loggers. @@ -71,10 +97,10 @@ void* _o2_log_create(char const* name, char const* category); #define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) os_signpost_event_emit(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, ...) os_signpost_interval_end(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_ENG_TYPE(x, what) "%{xcode:" #x "}" what +// FIXME: use __VA_OPT__ when available in C++20 +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) 
os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) #ifdef O2_SIGNPOST_IMPLEMENTATION /// We use a wrapper so that we can keep track of the logs. @@ -465,10 +491,9 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_ENG_TYPE(x, what) "%" what +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) #else // This is the release implementation, it does nothing. #define O2_DECLARE_DYNAMIC_LOG(x) #define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) @@ -478,10 +503,9 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DEBUG(log, ...) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) -#define O2_SIGNPOST_START(log, id, name, ...) -#define O2_SIGNPOST_END(log, id, name, ...) -#define O2_ENG_TYPE(x) +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) +#define O2_SIGNPOST_START(log, id, name, format, ...) +#define O2_SIGNPOST_END(log, id, name, format, ...) 
#endif #endif // O2_FRAMEWORK_SIGNPOST_H_ diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index faf5a259fc6a6..24b6afaec5c3d 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -38,7 +38,7 @@ int main(int argc, char** argv) // This has an engineering type, which we will not use on Linux / FairLogger O2_SIGNPOST_ID_FROM_POINTER(id4, test_Signpost, &id3); - O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter " O2_ENG_TYPE(size - in - bytes, "d"), 1); + O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter %{size-in-bytes}d", 1); O2_SIGNPOST_END(test_Signpost, id4, "Test category", "A signpost interval from a pointer"); O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 4ce862dfb4c74..74da35abf7c70 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -46,7 +46,7 @@ int main(int argc, char** argv) // This has an engineering type, which we will not use on Linux / FairLogger O2_SIGNPOST_ID_FROM_POINTER(id4, test_Signpost, &id3); - O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter " O2_ENG_TYPE(size - in - bytes, "d"), 1); + O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter %{size-in-bytes}d", 1); O2_SIGNPOST_END(test_Signpost, id4, "Test category", "A signpost interval from a pointer"); O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); From 8e52e59a168bc106a3a8e15bd28ac34d7df07da1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 11 Jan 2024 20:14:16 +0100 Subject: [PATCH 206/726] DPL: detect when a Lifetime::Timeframe output is missing --- .../Core/include/Framework/MessageContext.h | 1 + .../Core/include/Framework/StreamContext.h | 7 +++ Framework/Core/src/CommonServices.cxx | 42 +++++++++++++ Framework/Core/src/DataAllocator.cxx | 3 + Framework/TestWorkflows/CMakeLists.txt | 3 + .../test/test_DetectMissingTimeframe.cxx | 62 +++++++++++++++++++ 6 files changed, 118 insertions(+) create mode 100644 Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx diff --git a/Framework/Core/include/Framework/MessageContext.h b/Framework/Core/include/Framework/MessageContext.h index c1f2c695ff58a..9e7268f614fda 100644 --- a/Framework/Core/include/Framework/MessageContext.h +++ b/Framework/Core/include/Framework/MessageContext.h @@ -496,6 +496,7 @@ class MessageContext o2::header::Stack* findMessageHeaderStack(const Output& spec); int countDeviceOutputs(bool excludeDPLOrigin = false); void fakeDispatch() { mDidDispatch = true; } + bool didDispatch() { return mDidDispatch; } o2::framework::DataProcessingHeader* findMessageDataProcessingHeader(const Output& spec); std::pair findMessageHeaders(const Output& spec); diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h index 0ab353e147276..198802179a419 100644 --- a/Framework/Core/include/Framework/StreamContext.h +++ b/Framework/Core/include/Framework/StreamContext.h @@ 
-63,6 +63,13 @@ struct StreamContext { // Notice that in such a case all the services will be created upfront, so // the callback will be called for all of them. std::vector preStartStreamHandles; + + // Information on wether or not all the required routes have been created. + // This is used to check if the LifetimeTimeframe routes were all created + // for a given iteration. + // This is in the stream context to allow tracking data creation on a per thread + // basis. + std::vector routeCreated; }; } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 05b8fba47e532..4656ddeb12e63 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -162,6 +162,48 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() .uniqueId = simpleServiceId(), .init = simpleServiceInit(), .configure = noConfiguration(), + .preProcessing = [](ProcessingContext& context, void* service) { + auto* stream = (StreamContext*)service; + auto& routes = context.services().get().outputs; + // Notice I need to do this here, because different invocation for + // the same stream might be referring to different data processors. + // We should probably have a context which is per stream of a specific + // data processor. + stream->routeCreated.resize(routes.size()); + // Reset the routeCreated at every processing step + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); }, + .postProcessing = [](ProcessingContext& processingContext, void* service) { + auto* stream = (StreamContext*)service; + auto& routes = processingContext.services().get().outputs; + auto& timeslice = processingContext.services().get().timeslice; + auto& messageContext = processingContext.services().get(); + // Check if we never created any data for this timeslice + // if we did not, but we still have didDispatched set to true + // it means it was created out of band. + bool didCreate = false; + for (size_t ri = 0; ri < routes.size(); ++ri) { + if (stream->routeCreated[ri] == true) { + didCreate = true; + break; + } + } + if (didCreate == false && messageContext.didDispatch() == true) { + LOGP(debug, "Data created out of band"); + return; + } + for (size_t ri = 0; ri < routes.size(); ++ri) { + if (stream->routeCreated[ri] == true) { + continue; + } + auto &route = routes[ri]; + auto &matcher = route.matcher; + if ((timeslice % route.maxTimeslices) != route.timeslice) { + continue; + } + if (matcher.lifetime == Lifetime::Timeframe) { + LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); + } + } }, .kind = ServiceKind::Stream}; } diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 1230d2e942f6e..9ce72aed8c1cf 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -19,6 +19,7 @@ #include "Framework/FairMQResizableBuffer.h" #include "Framework/DataProcessingContext.h" #include "Framework/DeviceSpec.h" +#include "Framework/StreamContext.h" #include "Headers/Stack.h" #include @@ -47,10 +48,12 @@ DataAllocator::DataAllocator(ServiceRegistryRef contextRegistry) RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice) { auto& allowedOutputRoutes = mRegistry.get().outputs; + auto& stream = mRegistry.get(); // FIXME: we should take timeframeId into account as well. 
for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) { auto& route = allowedOutputRoutes[ri]; if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) { + stream.routeCreated[ri] = true; return RouteIndex{ri}; } } diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 7a396c0f2c3a9..64e9dddbb020e 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,6 +12,9 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(detect-missing-timeframe + SOURCES test/test_DetectMissingTimeframe.cxx + COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx new file mode 100644 index 0000000000000..3374cd9ad6cf2 --- /dev/null +++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx @@ -0,0 +1,62 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"a1"}, "TST", "A1"}, + OutputSpec{{"a2"}, "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + outputs.make(OutputRef{"a1"}, 1); + static int i = 0; + outputs.make(OutputRef{"a1"}, 1); + if (i++ % 2 == 0) { + outputs.make(OutputRef{"a2"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1"}, + InputSpec{"a2", "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto ref = inputs.get("a1"); + auto header = o2::header::get(ref.header); + LOG(info) << "Start time: " << header->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{d}); +} From 232bc84bb69b23651c7bf56003a694bb3f4c6377 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 12 Jan 2024 06:52:11 +0100 Subject: [PATCH 207/726] DPL: remove need for special engineering type We can simply use Instruments one and convert them to something sensible when using the FairLogger implementation. 
--- Framework/Core/src/runDataProcessing.cxx | 4 +- .../Foundation/include/Framework/Signpost.h | 48 ++++++++++++++----- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index e9d1f3d4ee314..325417235cd83 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -800,7 +800,7 @@ void processChildrenOutput(DriverInfo& driverInfo, } O2_SIGNPOST_ID_FROM_POINTER(sid, driver, &info); - O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by " O2_ENG_TYPE(pid, "d"), info.pid); + O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by %{xcode:pid}d", info.pid); std::string_view s = info.unprinted; size_t pos = 0; @@ -848,7 +848,7 @@ void processChildrenOutput(DriverInfo& driverInfo, size_t oldSize = info.unprinted.size(); info.unprinted = std::string(s); int64_t bytesProcessed = oldSize - info.unprinted.size(); - O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by " O2_ENG_TYPE(network - size - in - bytes, PRIi64), bytesProcessed); + O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by %{xcode:network-size-in-bytes}" PRIi64, bytesProcessed); } } diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 44421687a75c9..eca9e26f77469 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -12,6 +12,7 @@ #define O2_FRAMEWORK_SIGNPOST_H_ #include +#include struct o2_log_handle_t { char const* name = nullptr; @@ -19,6 +20,31 @@ struct o2_log_handle_t { o2_log_handle_t* next = nullptr; }; +// Helper function which replaces engineering types with a printf +// compatible format string. +template +constexpr auto remove_engineering_type(char const (&src)[N]) +{ + std::array res = {}; + // do whatever string manipulation you want in res. + char* t = res.data(); + for (int i = 0; i < N; ++i) { + if (src[i] == '%' && src[i + 1] == '{') { + *t++ = src[i]; + while (src[i] != '}' && src[i] != 0) { + ++i; + } + if (src[i] == 0) { + *t = 0; + return res; + } + } else { + *t++ = src[i]; + } + } + return res; +} + // Loggers registry is actually a feature available to all platforms // We use this to register the loggers and to walk over them. // So that also on mac we can have a list of all the registered loggers. @@ -71,10 +97,10 @@ void* _o2_log_create(char const* name, char const* category); #define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) os_signpost_event_emit(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, ...) os_signpost_interval_end(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_ENG_TYPE(x, what) "%{xcode:" #x "}" what +// FIXME: use __VA_OPT__ when available in C++20 +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) 
os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) #ifdef O2_SIGNPOST_IMPLEMENTATION /// We use a wrapper so that we can keep track of the logs. @@ -465,10 +491,9 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, __VA_ARGS__) -#define O2_ENG_TYPE(x, what) "%" what +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) #else // This is the release implementation, it does nothing. #define O2_DECLARE_DYNAMIC_LOG(x) #define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) @@ -478,10 +503,9 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DEBUG(log, ...) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, ...) -#define O2_SIGNPOST_START(log, id, name, ...) -#define O2_SIGNPOST_END(log, id, name, ...) -#define O2_ENG_TYPE(x) +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) +#define O2_SIGNPOST_START(log, id, name, format, ...) +#define O2_SIGNPOST_END(log, id, name, format, ...) 
#endif #endif // O2_FRAMEWORK_SIGNPOST_H_ diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index faf5a259fc6a6..24b6afaec5c3d 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -38,7 +38,7 @@ int main(int argc, char** argv) // This has an engineering type, which we will not use on Linux / FairLogger O2_SIGNPOST_ID_FROM_POINTER(id4, test_Signpost, &id3); - O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter " O2_ENG_TYPE(size - in - bytes, "d"), 1); + O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter %{size-in-bytes}d", 1); O2_SIGNPOST_END(test_Signpost, id4, "Test category", "A signpost interval from a pointer"); O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 4ce862dfb4c74..74da35abf7c70 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -46,7 +46,7 @@ int main(int argc, char** argv) // This has an engineering type, which we will not use on Linux / FairLogger O2_SIGNPOST_ID_FROM_POINTER(id4, test_Signpost, &id3); - O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter " O2_ENG_TYPE(size - in - bytes, "d"), 1); + O2_SIGNPOST_START(test_Signpost, id4, "Test category", "A signpost with an engineering type formatter %{size-in-bytes}d", 1); O2_SIGNPOST_END(test_Signpost, id4, "Test category", "A signpost interval from a pointer"); O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); From 44d49baba95acd91b6226d1fcd43d494f0eff9c8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 12 Jan 2024 12:37:11 +0100 Subject: [PATCH 208/726] DPL: wait as long as possible for Sporadic inputs Right now if we have the standard consumeWhenAll policy and we have a sporadic input, it will wait indefinitely until all the inputs arrive or it will drop timeframes without that sporadic input. This changes the behavior and waits only until the oldest possible timeframe does not allow the Sporadic input to be there. At which point, it schedules the processing in any case, under the assumption that a task declaring a sporadic input knows what to do in case it's not there. 
--- .../Core/src/CompletionPolicyHelpers.cxx | 27 +++++- Framework/TestWorkflows/CMakeLists.txt | 5 ++ .../test/test_WaitUntilPossible.cxx | 82 +++++++++++++++++++ 3 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 Framework/TestWorkflows/test/test_WaitUntilPossible.cxx diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index b3e0621bf984e..a24c7342bd6fb 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -15,6 +15,7 @@ #include "Framework/DeviceSpec.h" #include "Framework/CompilerBuiltins.h" #include "Framework/Logger.h" +#include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" @@ -107,10 +108,34 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + + size_t si = 0; + bool missingSporadic = false; + bool missingTimeframe = false; + size_t currentTimeslice = -1; for (auto& input : inputs) { - if (input.header == nullptr) { + assert(si < specs.size()); + auto& spec = specs[si++]; + if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { return CompletionPolicy::CompletionOp::Wait; } + if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + missingSporadic |= true; + } + if (input.header != nullptr && currentTimeslice == -1) { + auto const* dph = framework::DataRefUtils::getHeader(input); + if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { + currentTimeslice = dph->startTime; + } + } + } + // If some sporadic inputs are missing, we wait for them util we are sure they will not come, + // i.e. until the oldest possible timeslice is beyond the timeslice of the input. + auto& timesliceIndex = ref.get(); + auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; + + if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + return CompletionPolicy::CompletionOp::Retry; } return CompletionPolicy::CompletionOp::Consume; }; diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 64e9dddbb020e..1aa20b4efdd3b 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,9 +12,14 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(detect-missing-timeframe SOURCES test/test_DetectMissingTimeframe.cxx COMPONENT_NAME TestWorkflows) + +o2_add_dpl_workflow(wait-until-possible + SOURCES test/test_WaitUntilPossible.cxx + COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" diff --git a/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx new file mode 100644 index 0000000000000..d9ba315ef02b8 --- /dev/null +++ b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx @@ -0,0 +1,82 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". 
+// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"data"}, "TST", "A1", 0}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + LOG(info) << "Data TST/A1/0 created"; + outputs.make(OutputRef{"data"}, 1); + })}, + }; + DataProcessorSpec b{ + .name = "B", + .outputs = {OutputSpec{{"sporadic"}, "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + // This will always be late, however since the oldest possible timeframe + // will be used to decide the scheduling, it will not be dropped. + sleep(1); + // We also create it only every second time, so that we can check that + // the sporadic output is not mandatory. + static int i = 0; + if (i++ % 2 == 0) { + LOG(info) << "Data TST/B1/0 created"; + outputs.make(OutputRef{"sporadic"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1", 0, Lifetime::Timeframe}, + InputSpec{"b1", "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto refA = inputs.get("a1"); + auto headerA = o2::header::get(refA.header); + LOG(info) << "Start time: " << headerA->startTime; + auto refB = inputs.get("b1"); + if (!refB.header) { + LOG(info) << "No sporadic input for start time " << headerA->startTime; + return; + } + auto headerB = o2::header::get(refB.header); + LOG(info) << "Start time: " << headerB->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{b}, + WorkflowSpec{d}); +} From a59ea0ead75c70e355ac5b4e7eea23a86b4dcdd8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 12 Jan 2024 12:43:10 +0100 Subject: [PATCH 209/726] DPL: wait as long as possible for Sporadic inputs Right now if we have the standard consumeWhenAll policy and we have a sporadic input, it will wait indefinitely until all the inputs arrive or it will drop timeframes without that sporadic input. This changes the behavior and waits only until the oldest possible timeframe does not allow the Sporadic input to be there. At which point, it schedules the processing in any case, under the assumption that a task declaring a sporadic input knows what to do in case it's not there. 
--- .../Core/src/CompletionPolicyHelpers.cxx | 26 +++++- Framework/TestWorkflows/CMakeLists.txt | 5 ++ .../test/test_WaitUntilPossible.cxx | 82 +++++++++++++++++++ 3 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 Framework/TestWorkflows/test/test_WaitUntilPossible.cxx diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index b3e0621bf984e..61aba49be14d1 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -15,6 +15,7 @@ #include "Framework/DeviceSpec.h" #include "Framework/CompilerBuiltins.h" #include "Framework/Logger.h" +#include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" @@ -107,10 +108,33 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + + size_t si = 0; + bool missingSporadic = false; + size_t currentTimeslice = -1; for (auto& input : inputs) { - if (input.header == nullptr) { + assert(si < specs.size()); + auto& spec = specs[si++]; + if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { return CompletionPolicy::CompletionOp::Wait; } + if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + missingSporadic |= true; + } + if (input.header != nullptr && currentTimeslice == -1) { + auto const* dph = framework::DataRefUtils::getHeader(input); + if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { + currentTimeslice = dph->startTime; + } + } + } + // If some sporadic inputs are missing, we wait for them util we are sure they will not come, + // i.e. until the oldest possible timeslice is beyond the timeslice of the input. + auto& timesliceIndex = ref.get(); + auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; + + if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + return CompletionPolicy::CompletionOp::Retry; } return CompletionPolicy::CompletionOp::Consume; }; diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 64e9dddbb020e..1aa20b4efdd3b 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,9 +12,14 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(detect-missing-timeframe SOURCES test/test_DetectMissingTimeframe.cxx COMPONENT_NAME TestWorkflows) + +o2_add_dpl_workflow(wait-until-possible + SOURCES test/test_WaitUntilPossible.cxx + COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" diff --git a/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx new file mode 100644 index 0000000000000..d9ba315ef02b8 --- /dev/null +++ b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx @@ -0,0 +1,82 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". 
+// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"data"}, "TST", "A1", 0}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + LOG(info) << "Data TST/A1/0 created"; + outputs.make(OutputRef{"data"}, 1); + })}, + }; + DataProcessorSpec b{ + .name = "B", + .outputs = {OutputSpec{{"sporadic"}, "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + // This will always be late, however since the oldest possible timeframe + // will be used to decide the scheduling, it will not be dropped. + sleep(1); + // We also create it only every second time, so that we can check that + // the sporadic output is not mandatory. + static int i = 0; + if (i++ % 2 == 0) { + LOG(info) << "Data TST/B1/0 created"; + outputs.make(OutputRef{"sporadic"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1", 0, Lifetime::Timeframe}, + InputSpec{"b1", "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto refA = inputs.get("a1"); + auto headerA = o2::header::get(refA.header); + LOG(info) << "Start time: " << headerA->startTime; + auto refB = inputs.get("b1"); + if (!refB.header) { + LOG(info) << "No sporadic input for start time " << headerA->startTime; + return; + } + auto headerB = o2::header::get(refB.header); + LOG(info) << "Start time: " << headerB->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{b}, + WorkflowSpec{d}); +} From d4f5f6e452c57121534614d56038d25e4377e3ff Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 12 Jan 2024 14:21:30 +0100 Subject: [PATCH 210/726] DPL: wait as long as possible for Sporadic inputs Right now if we have the standard consumeWhenAll policy and we have a sporadic input, it will wait indefinitely until all the inputs arrive or it will drop timeframes without that sporadic input. This changes the behavior and waits only until the oldest possible timeframe does not allow the Sporadic input to be there. At which point, it schedules the processing in any case, under the assumption that a task declaring a sporadic input knows what to do in case it's not there. 
--- .../Core/src/CompletionPolicyHelpers.cxx | 26 +++++- Framework/Core/test/test_DataRelayer.cxx | 11 +++ Framework/TestWorkflows/CMakeLists.txt | 5 ++ .../test/test_WaitUntilPossible.cxx | 82 +++++++++++++++++++ 4 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 Framework/TestWorkflows/test/test_WaitUntilPossible.cxx diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index b3e0621bf984e..61aba49be14d1 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -15,6 +15,7 @@ #include "Framework/DeviceSpec.h" #include "Framework/CompilerBuiltins.h" #include "Framework/Logger.h" +#include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" @@ -107,10 +108,33 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + + size_t si = 0; + bool missingSporadic = false; + size_t currentTimeslice = -1; for (auto& input : inputs) { - if (input.header == nullptr) { + assert(si < specs.size()); + auto& spec = specs[si++]; + if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { return CompletionPolicy::CompletionOp::Wait; } + if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + missingSporadic |= true; + } + if (input.header != nullptr && currentTimeslice == -1) { + auto const* dph = framework::DataRefUtils::getHeader(input); + if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { + currentTimeslice = dph->startTime; + } + } + } + // If some sporadic inputs are missing, we wait for them util we are sure they will not come, + // i.e. until the oldest possible timeslice is beyond the timeslice of the input. 
+ auto& timesliceIndex = ref.get(); + auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; + + if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + return CompletionPolicy::CompletionOp::Retry; } return CompletionPolicy::CompletionOp::Consume; }; diff --git a/Framework/Core/test/test_DataRelayer.cxx b/Framework/Core/test/test_DataRelayer.cxx index 788417778642b..bd1ead40c9773 100644 --- a/Framework/Core/test/test_DataRelayer.cxx +++ b/Framework/Core/test/test_DataRelayer.cxx @@ -76,6 +76,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -124,6 +125,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -184,6 +186,7 @@ TEST_CASE("DataRelayer") std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAll(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -263,6 +266,7 @@ TEST_CASE("DataRelayer") std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAll(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -346,6 +350,7 @@ TEST_CASE("DataRelayer") auto policy = CompletionPolicyHelpers::consumeWhenAll(); std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); DataRelayer relayer(policy, inputs, index, {registry}); // Only two messages to fill the cache. 
relayer.setPipelineLength(2); @@ -420,6 +425,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -490,6 +496,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -547,6 +554,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -605,6 +613,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -670,6 +679,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -722,6 +732,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 64e9dddbb020e..1aa20b4efdd3b 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,9 +12,14 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(detect-missing-timeframe SOURCES test/test_DetectMissingTimeframe.cxx COMPONENT_NAME TestWorkflows) + +o2_add_dpl_workflow(wait-until-possible + SOURCES test/test_WaitUntilPossible.cxx + COMPONENT_NAME TestWorkflows) o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" diff --git a/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx new file mode 100644 index 0000000000000..d9ba315ef02b8 --- /dev/null +++ b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx @@ -0,0 +1,82 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"data"}, "TST", "A1", 0}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + LOG(info) << "Data TST/A1/0 created"; + outputs.make(OutputRef{"data"}, 1); + })}, + }; + DataProcessorSpec b{ + .name = "B", + .outputs = {OutputSpec{{"sporadic"}, "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + // This will always be late, however since the oldest possible timeframe + // will be used to decide the scheduling, it will not be dropped. + sleep(1); + // We also create it only every second time, so that we can check that + // the sporadic output is not mandatory. + static int i = 0; + if (i++ % 2 == 0) { + LOG(info) << "Data TST/B1/0 created"; + outputs.make(OutputRef{"sporadic"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1", 0, Lifetime::Timeframe}, + InputSpec{"b1", "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto refA = inputs.get("a1"); + auto headerA = o2::header::get(refA.header); + LOG(info) << "Start time: " << headerA->startTime; + auto refB = inputs.get("b1"); + if (!refB.header) { + LOG(info) << "No sporadic input for start time " << headerA->startTime; + return; + } + auto headerB = o2::header::get(refB.header); + LOG(info) << "Start time: " << headerB->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{b}, + WorkflowSpec{d}); +} From 799976301a2984351675b5da7973fa9f2dcc4423 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 12 Jan 2024 14:54:00 +0100 Subject: [PATCH 211/726] DPL: wait as long as possible for Sporadic inputs Right now if we have the standard consumeWhenAll policy and we have a sporadic input, it will wait indefinitely until all the inputs arrive or it will drop timeframes without that sporadic input. This changes the behavior and waits only until the oldest possible timeframe does not allow the Sporadic input to be there. At which point, it schedules the processing in any case, under the assumption that a task declaring a sporadic input knows what to do in case it's not there. 
--- .../Core/src/CompletionPolicyHelpers.cxx | 26 +++++- Framework/Core/test/test_DataRelayer.cxx | 11 +++ Framework/TestWorkflows/CMakeLists.txt | 5 ++ .../test/test_WaitUntilPossible.cxx | 82 +++++++++++++++++++ 4 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 Framework/TestWorkflows/test/test_WaitUntilPossible.cxx diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index b3e0621bf984e..8c4b38ba3355c 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -15,6 +15,7 @@ #include "Framework/DeviceSpec.h" #include "Framework/CompilerBuiltins.h" #include "Framework/Logger.h" +#include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" @@ -107,10 +108,33 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + + size_t si = 0; + bool missingSporadic = false; + size_t currentTimeslice = -1; for (auto& input : inputs) { - if (input.header == nullptr) { + assert(si < specs.size()); + auto& spec = specs[si++]; + if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { return CompletionPolicy::CompletionOp::Wait; } + if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + missingSporadic = true; + } + if (input.header != nullptr && currentTimeslice == -1) { + auto const* dph = framework::DataRefUtils::getHeader(input); + if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { + currentTimeslice = dph->startTime; + } + } + } + // If some sporadic inputs are missing, we wait for them util we are sure they will not come, + // i.e. until the oldest possible timeslice is beyond the timeslice of the input. 
+ auto& timesliceIndex = ref.get(); + auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; + + if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + return CompletionPolicy::CompletionOp::Retry; } return CompletionPolicy::CompletionOp::Consume; }; diff --git a/Framework/Core/test/test_DataRelayer.cxx b/Framework/Core/test/test_DataRelayer.cxx index 788417778642b..bd1ead40c9773 100644 --- a/Framework/Core/test/test_DataRelayer.cxx +++ b/Framework/Core/test/test_DataRelayer.cxx @@ -76,6 +76,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -124,6 +125,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -184,6 +186,7 @@ TEST_CASE("DataRelayer") std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAll(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -263,6 +266,7 @@ TEST_CASE("DataRelayer") std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAll(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -346,6 +350,7 @@ TEST_CASE("DataRelayer") auto policy = CompletionPolicyHelpers::consumeWhenAll(); std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); DataRelayer relayer(policy, inputs, index, {registry}); // Only two messages to fill the cache. 
relayer.setPipelineLength(2); @@ -420,6 +425,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -490,6 +496,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -547,6 +554,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -605,6 +613,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::processWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -670,6 +679,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); @@ -722,6 +732,7 @@ TEST_CASE("DataRelayer") std::vector forwards; std::vector infos{1}; TimesliceIndex index{1, infos}; + ref.registerService(ServiceRegistryHelpers::handleForService(&index)); auto policy = CompletionPolicyHelpers::consumeWhenAny(); DataRelayer relayer(policy, inputs, index, {registry}); diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 64e9dddbb020e..784e07c803d76 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -12,10 +12,15 @@ o2_add_dpl_workflow(dummy-workflow SOURCES src/o2DummyWorkflow.cxx COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(detect-missing-timeframe SOURCES test/test_DetectMissingTimeframe.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(wait-until-possible + SOURCES test/test_WaitUntilPossible.cxx + COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(o2rootmessage-workflow SOURCES "src/test_o2RootMessageWorkflow.cxx" COMPONENT_NAME TestWorkflows) diff --git a/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx new file mode 100644 index 0000000000000..d9ba315ef02b8 --- /dev/null +++ b/Framework/TestWorkflows/test/test_WaitUntilPossible.cxx @@ -0,0 +1,82 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/RateLimiter.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "A", + .outputs = {OutputSpec{{"data"}, "TST", "A1", 0}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + LOG(info) << "Data TST/A1/0 created"; + outputs.make(OutputRef{"data"}, 1); + })}, + }; + DataProcessorSpec b{ + .name = "B", + .outputs = {OutputSpec{{"sporadic"}, "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + // This will always be late, however since the oldest possible timeframe + // will be used to decide the scheduling, it will not be dropped. + sleep(1); + // We also create it only every second time, so that we can check that + // the sporadic output is not mandatory. + static int i = 0; + if (i++ % 2 == 0) { + LOG(info) << "Data TST/B1/0 created"; + outputs.make(OutputRef{"sporadic"}, 1); + } + })}, + }; + DataProcessorSpec d{ + .name = "D", + .inputs = {InputSpec{"a1", "TST", "A1", 0, Lifetime::Timeframe}, + InputSpec{"b1", "TST", "B1", 0, Lifetime::Sporadic}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](InputRecord& inputs) { + auto refA = inputs.get("a1"); + auto headerA = o2::header::get(refA.header); + LOG(info) << "Start time: " << headerA->startTime; + auto refB = inputs.get("b1"); + if (!refB.header) { + LOG(info) << "No sporadic input for start time " << headerA->startTime; + return; + } + auto headerB = o2::header::get(refB.header); + LOG(info) << "Start time: " << headerB->startTime; + })}, + }; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{b}, + WorkflowSpec{d}); +} From 50b71eb7083d016ab32addf592cfbb7724566f96 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 12 Jan 2024 16:25:30 +0100 Subject: [PATCH 212/726] Drop unused output The newly introduced check now complains about this output which is actually simply never used. 
--- Detectors/PHOS/calib/src/PHOSRunbyrunCalibDevice.cxx | 1 - 1 file changed, 1 deletion(-) diff --git a/Detectors/PHOS/calib/src/PHOSRunbyrunCalibDevice.cxx b/Detectors/PHOS/calib/src/PHOSRunbyrunCalibDevice.cxx index ed88d40daf049..cf767cb76c7ad 100644 --- a/Detectors/PHOS/calib/src/PHOSRunbyrunCalibDevice.cxx +++ b/Detectors/PHOS/calib/src/PHOSRunbyrunCalibDevice.cxx @@ -133,7 +133,6 @@ o2::framework::DataProcessorSpec o2::phos::getPHOSRunbyrunCalibDeviceSpec(bool u { std::vector outputs; - outputs.emplace_back("PHS", "RUNBYRUNHISTOS", 0, o2::framework::Lifetime::Timeframe); outputs.emplace_back(o2::calibration::Utils::gDataOriginCDBPayload, "PHOS_Runbyrun", 0, Lifetime::Sporadic); outputs.emplace_back(o2::calibration::Utils::gDataOriginCDBWrapper, "PHOS_Runbyrun", 0, Lifetime::Sporadic); From d99c82ba49c2a2caaf0a1e33cb3a4bb6b80148e1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 15 Jan 2024 13:02:30 +0100 Subject: [PATCH 213/726] DPL: drop unused o2_signpost_id_make_with_pointer method Probably old remaining from a previous implementation. --- Framework/Foundation/include/Framework/Signpost.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index eca9e26f77469..64d92d67acbf7 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -218,7 +218,6 @@ bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin = false); //_o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log); //_o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); -_o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); void* _o2_log_create(char const* name, int stacktrace); void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); @@ -246,14 +245,6 @@ inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* return uniqueId; } -inline _o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer) -{ - _o2_signpost_index_t signpost_index; - _o2_lock_free_stack_pop(log->slots, signpost_index, true); - log->ids[signpost_index].id = (int64_t)pointer; - return signpost_index; -} - // Implementation start here. Include this file with O2_SIGNPOST_IMPLEMENTATION defined in one file of your // project. #ifdef O2_SIGNPOST_IMPLEMENTATION From 2a74f97657e41bf1b4295670b758c244631a2a29 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:22:29 +0100 Subject: [PATCH 214/726] DPL: rework signposts support The logger based Signposts are now behaving the same on macOS and linux when running normally. The difference between the two implementations will now be that if Instruments enables some OSLog namespace, we stop printing the messages and use the os_signpost API instead. 
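For orientation, a typical call site after this rework looks roughly like the sketch below. It mirrors the macros defined in the diff that follows and the existing test_Signpost.cxx; the subsystem name, format strings and traceTask function are invented for the illustration, and on a Linux release build without O2_FORCE_SIGNPOSTS the macros compile to no-ops:

  // Illustrative only; O2_SIGNPOST_IMPLEMENTATION must be defined in exactly one
  // translation unit of the project before including the header.
  #define O2_SIGNPOST_IMPLEMENTATION
  #include "Framework/Signpost.h"

  O2_DECLARE_DYNAMIC_LOG(my_subsystem);   // hypothetical log; dynamic logs start disabled

  void traceTask(void* task)
  {
    // Typically done once at startup; replaces O2_LOG_ENABLE_DYNAMIC / O2_LOG_ENABLE_STACKTRACE.
    O2_LOG_ENABLE(my_subsystem);
    // Id tied to the task pointer, so begin/end of the same interval match up.
    O2_SIGNPOST_ID_FROM_POINTER(sid, my_subsystem, task);
    O2_SIGNPOST_START(my_subsystem, sid, "processing", "task %p started", task);
    // Engineering types such as %{size-in-bytes}d are kept for Instruments and
    // stripped by remove_engineering_type for the logger-based backend.
    O2_SIGNPOST_EVENT_EMIT(my_subsystem, sid, "processing", "buffer of %{size-in-bytes}d bytes", 1024);
    O2_SIGNPOST_END(my_subsystem, sid, "processing", "task %p done", task);
  }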
--- .../Foundation/include/Framework/Signpost.h | 191 +++++++++--------- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 3 files changed, 94 insertions(+), 101 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 64d92d67acbf7..d2bdec0ec8246 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -13,6 +13,9 @@ #include #include +#ifdef __APPLE__ +#include +#endif struct o2_log_handle_t { char const* name = nullptr; @@ -22,6 +25,7 @@ struct o2_log_handle_t { // Helper function which replaces engineering types with a printf // compatible format string. +// FIXME: make this consteval when available in C++20 template constexpr auto remove_engineering_type(char const (&src)[N]) { @@ -82,78 +86,37 @@ o2_log_handle_t* o2_walk_logs(bool (*callback)(char const* name, void* log, void } #endif -#if !defined(O2_FORCE_LOGGER_SIGNPOST) && defined(__APPLE__) && (!defined(NDEBUG) || defined(O2_FORCE_SIGNPOSTS)) +#if defined(__APPLE__) #include #include #include -void* _o2_log_create(char const* name, char const* category); -#define O2_DECLARE_DYNAMIC_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, OS_LOG_CATEGORY_DYNAMIC_TRACING) -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_DYNAMIC(log) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_STACKTRACE(log) -#define O2_DECLARE_LOG(x, category) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, #category) -#define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) -#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) -#define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) +#define O2_LOG_DEBUG_MAC(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) // FIXME: use __VA_OPT__ when available in C++20 -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) - -#ifdef O2_SIGNPOST_IMPLEMENTATION -/// We use a wrapper so that we can keep track of the logs. -void* _o2_log_create(char const* name, char const* category) -{ - // iterate over the list of logs and check if we already have - // one with the same name. - auto findLogByName = [](char const* name, void* log, void* context) -> bool { - char const* currentName = (char const*)context; - if (strcmp(name, currentName) == 0) { - return false; - } - return true; - }; - - o2_log_handle_t* handle = o2_walk_logs(findLogByName, (void*)name); - - // If we found one, return it. 
- if (handle) { - return handle->log; - } - // Otherwise, create a new one and add it to the end of the list. - os_log_t log = os_log_create(name, category); - o2_log_handle_t* newHandle = new o2_log_handle_t(); - newHandle->log = log; - newHandle->name = strdup(name); - newHandle->next = o2_get_logs_tail().load(); - // Until I manage to replace the log I have in next, keep trying. - // Notice this does not protect against two threads trying to insert - // a log with the same name. I should probably do a sorted insert for that. - while (!o2_get_logs_tail().compare_exchange_weak(newHandle->next, newHandle, - std::memory_order_release, - std::memory_order_relaxed)) { - newHandle->next = o2_get_logs_tail(); - } - - return log; -} +#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_ENABLED_MAC(log) os_signpost_enabled(private_o2_log_##log->os_log) +#else +// These are no-ops on linux. +#define O2_DECLARE_LOG_MAC(x, category) +#define O2_LOG_DEBUG_MAC(log, ...) +#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_ENABLED_MAC(log) false +#endif // __APPLE__ + +// Unless we are on apple we enable checking for signposts only if in debug mode or if we force them. +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +#define O2_LOG_ENABLED(log) private_o2_log_##log->stacktrace +#else +#define O2_LOG_ENABLED(log) false #endif -#elif !defined(NDEBUG) || defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) - -#ifndef O2_LOG_MACRO -#if __has_include("Framework/Logger.h") +#if !defined(O2_LOG_MACRO) && __has_include("Framework/Logger.h") #include "Framework/Logger.h" -// If NDEBUG is not defined, we use the logger to print out the signposts at the debug level. -#if !defined(NDEBUG) -#define O2_LOG_MACRO(...) LOGF(debug, __VA_ARGS__) -#elif defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) -// If we force the presence of the logger, we use it to print out the signposts at the detail level, which is not optimized out. #define O2_LOG_MACRO(...) LOGF(info, __VA_ARGS__) -#endif -#else +#elif !defined(O2_LOG_MACRO) // If we do not have the fairlogger, we simply print out the signposts to the console. // This is useful for things like the tests, which this way do not need to depend on the FairLogger. #define O2_LOG_MACRO(...) \ @@ -161,8 +124,9 @@ void* _o2_log_create(char const* name, char const* category) printf(__VA_ARGS__); \ printf("\n"); \ } while (0) -#endif -#endif +#else +#define O2_LOG_MACRO(...) +#endif // O2_LOG_MACRO // This is the linux implementation, it is not as nice as the apple one and simply prints out // the signpost information to the log. @@ -181,7 +145,7 @@ struct _o2_lock_free_stack { // A log is simply an inbox which keeps track of the available id, so that we can print out different signposts // with different indentation levels. // supports up to 1024 paralle signposts before it spinlocks. 
-typedef int _o2_signpost_index_t; +using _o2_signpost_index_t = int; struct _o2_activity_t { // How much the activity is indented in the output log. @@ -191,10 +155,13 @@ struct _o2_activity_t { struct _o2_signpost_id_t { // The id of the activity. - int64_t id = -1; + int64_t value = -1; }; struct _o2_log_t { +#ifdef __APPLE__ + os_log_t os_log = nullptr; +#endif // A circular buffer of available slots. Each unique interval pulls an id from this buffer. _o2_lock_free_stack slots; // Up to 256 activities can be active at the same time. @@ -211,7 +178,10 @@ struct _o2_log_t { // 0 means the log is disabled. // 1 means only the current signpost is printed. // >1 means the current signpost and n levels of the stacktrace are printed. - std::atomic stacktrace = 1; + std::atomic stacktrace = 0; + + // Default stacktrace level for the log, when enabled. + int defaultStacktrace = 1; }; bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool spin = false); @@ -310,7 +280,7 @@ bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin) } } -void* _o2_log_create(char const* name, int stacktrace) +void* _o2_log_create(char const* name, int defaultStacktrace) { // iterate over the list of logs and check if we already have // one with the same name. @@ -328,7 +298,7 @@ void* _o2_log_create(char const* name, int stacktrace) return handle->log; } // Otherwise, create a new one and add it to the end of the list. - _o2_log_t* log = new _o2_log_t(); + auto* log = new _o2_log_t(); // Write the initial 256 ids to the inbox, in reverse, so that the // linear search below is just for an handful of elements. int n = _o2_lock_free_stack::N; @@ -336,9 +306,18 @@ void* _o2_log_create(char const* name, int stacktrace) _o2_signpost_index_t signpost_index{n - 1 - i}; _o2_lock_free_stack_push(log->slots, signpost_index, true); } - log->stacktrace = stacktrace; - o2_log_handle_t* newHandle = new o2_log_handle_t(); + log->defaultStacktrace = defaultStacktrace; + auto* newHandle = new o2_log_handle_t(); newHandle->log = log; +#ifdef __APPLE__ + // On macOS, we use the os_signpost API so that when we are + // using instruments we can see the messages there. + if (defaultStacktrace > 1) { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING); + } else { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_TRACING); + } +#endif newHandle->name = strdup(name); newHandle->next = o2_get_logs_tail().load(); // Until I manage to replace the log I have in next, keep trying. @@ -357,10 +336,6 @@ void* _o2_log_create(char const* name, int stacktrace) // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) 
{ - // Nothing to be done - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); @@ -368,10 +343,10 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n int leading = 0; // This is the equivalent of exclusive - if (id.id != 0) { + if (id.value != 0) { int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -385,7 +360,7 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n } char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -395,22 +370,19 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) { - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); // This is a unique slot for this interval. _o2_signpost_index_t signpost_index; _o2_lock_free_stack_pop(log->slots, signpost_index, true); // Put the id in the slot, to close things or to attach signposts to a given interval - log->ids[signpost_index].id = id.id; + log->ids[signpost_index].value = id.value; auto* activity = &log->activities[signpost_index]; activity->indentation = log->current_indentation++; activity->name = name; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -424,7 +396,7 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons // Find the index of the activity int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -439,13 +411,13 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons _o2_activity_t* activity = &log->activities[i]; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); O2_LOG_MACRO("%s", prebuffer); // Clear the slot activity->indentation = -1; activity->name = nullptr; - log->ids[i].id = -1; + log->ids[i].value = -1; // Put back the slot log->current_indentation--; _o2_signpost_index_t signpost_index{i}; @@ -468,29 +440,50 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) } #endif // O2_SIGNPOST_IMPLEMENTATION -/// Dynamic logs need to be enabled via the O2_LOG_ENABLE_DYNAMIC macro. Notice this will only work +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +/// Dynamic logs need to be enabled via the O2_LOG_ENABLE macro. Notice this will only work /// for the logger based logging, since the Apple version needs instruments to enable them. 
-#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) /// For the moment we do not support logs with a stacktrace. -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 64) #define O2_DECLARE_LOG(name, category) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) -#define O2_LOG_ENABLE_DYNAMIC(log) _o2_log_set_stacktrace(private_o2_log_##log, 1) -// We print out only the first 64 frames. -#define O2_LOG_ENABLE_STACKTRACE(log) _o2_log_set_stacktrace(private_o2_log_##log, 64) +// When we enable the log, we set the stacktrace to the default value. +#define O2_LOG_ENABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, private_o2_log_##log->defaultStacktrace) +#define O2_LOG_DISABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, 0) // For the moment we simply use LOG DEBUG. We should have proper activities so that we can // turn on and off the printing. #define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +// In case Instruments is attached, we switch to the Apple signpost API otherwise, both one +// mac and on linux we use our own implementation, using the logger. We can use the same ids because +// they are compatible between the two implementations, we also use remove_engineering_type to remove +// the engineering types from the format string, so that we can use the same format string for both. +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) __extension__({ \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } \ +}) +#define O2_SIGNPOST_START(log, id, name, format, ...) \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_START_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } +#define O2_SIGNPOST_END(log, id, name, format, ...) 
\ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_END_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } #else // This is the release implementation, it does nothing. #define O2_DECLARE_DYNAMIC_LOG(x) #define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) #define O2_DECLARE_LOG(x, category) -#define O2_LOG_ENABLE_DYNAMIC(log) -#define O2_LOG_ENABLE_STACKTRACE(log) +#define O2_LOG_ENABLE(log) +#define O2_LOG_DISABLE(log) #define O2_LOG_DEBUG(log, ...) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index 24b6afaec5c3d..f9b8d4ec0e13a 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -43,7 +43,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 74da35abf7c70..ed917ea7cadf5 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -51,7 +51,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); From 9f4370f658edb8c2b2a6418ea76891559dc4add1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:24:03 +0100 Subject: [PATCH 215/726] DPL: add proper tracing for DataProcessingDevice socket callbacks --- Framework/Core/src/DataProcessingDevice.cxx | 40 +++++++++++++-------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index b03904c5b3776..366bfafd7d801 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -35,6 +35,9 @@ #include "Framework/TMessageSerializer.h" #include "Framework/InputRecord.h" #include "Framework/InputSpan.h" +#if defined(__APPLE__) || defined(NDEBUG) +#define O2_SIGNPOST_IMPLEMENTATION +#endif #include "Framework/Signpost.h" #include "Framework/TimingHelpers.h" #include "Framework/SourceInfoHeader.h" @@ -80,6 +83,8 @@ 
#include #include +O2_DECLARE_DYNAMIC_LOG(device); + using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; using DataHeader = o2::header::DataHeader; @@ -274,21 +279,22 @@ struct PollerContext { void on_socket_polled(uv_poll_t* poller, int status, int events) { auto* context = (PollerContext*)poller->data; + assert(context); + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); context->state->loopReason |= DeviceState::DATA_SOCKET_POLLED; switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); - LOG(debug) << "socket polled UV_READABLE: " << context->name; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_INCOMING; } break; case UV_WRITABLE: { - ZoneScopedN("socket writeable"); + O2_SIGNPOST_END(device, sid, "socket_state", "Socket connected for channel %{public}s", context->name); if (context->read) { - LOG(debug) << "socket polled UV_CONNECT" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for read in context %{public}s", context->name); uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled); context->state->loopReason |= DeviceState::DATA_CONNECTED; } else { - LOG(debug) << "socket polled UV_WRITABLE" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_OUTGOING; // If the socket is writable, fairmq will handle the rest, so we can stop polling and // just wait for the disconnect. @@ -297,12 +303,10 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) context->pollerState = PollerContext::PollerState::Connected; } break; case UV_DISCONNECT: { - ZoneScopedN("socket disconnect"); - LOG(debug) << "socket polled UV_DISCONNECT"; + O2_SIGNPOST_END(device, sid, "socket_state", "Socket disconnected in context %{public}s", context->name); } break; case UV_PRIORITIZED: { - ZoneScopedN("socket prioritized"); - LOG(debug) << "socket polled UV_PRIORITIZED"; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() @@ -873,7 +877,9 @@ void DataProcessingDevice::startPollers() auto& deviceContext = ref.get(); auto& state = ref.get(); - for (auto& poller : state.activeInputPollers) { + for (auto* poller : state.activeInputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection."); uv_poll_start(poller, UV_WRITABLE, &on_socket_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } @@ -881,7 +887,9 @@ void DataProcessingDevice::startPollers() uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection."); uv_poll_start(poller, UV_WRITABLE, &on_socket_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } @@ -897,17 +905,21 @@ void 
DataProcessingDevice::stopPollers() auto& deviceContext = ref.get(); auto& state = ref.get(); LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size()); - for (auto& poller : state.activeInputPollers) { + for (auto* poller : state.activeInputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutOfBandPollers) { + for (auto* poller : state.activeOutOfBandPollers) { uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} output pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } From 0766b8af23b5a64627f8b7dd56ee767130071f4f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:24:03 +0100 Subject: [PATCH 216/726] DPL use Signposts to debug consumeWhenAll --- Framework/Core/src/CompletionPolicyHelpers.cxx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 8c4b38ba3355c..a6240b1165b81 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -18,10 +18,13 @@ #include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" +#include "Framework/Signpost.h" #include #include +O2_DECLARE_DYNAMIC_LOG(completion); + namespace o2::framework { @@ -108,6 +111,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + O2_SIGNPOST_ID_GENERATE(sid, completion); + O2_SIGNPOST_START(completion, sid, "consumeWhenAll", "Completion policy invoked"); size_t si = 0; bool missingSporadic = false; @@ -116,15 +121,18 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl assert(si < specs.size()); auto& spec = specs[si++]; if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s due to missing input %lu", "Wait", si); return CompletionPolicy::CompletionOp::Wait; } if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "Missing sporadic found for route index %lu", si); missingSporadic = true; } if (input.header != nullptr && currentTimeslice == -1) { auto const* dph = framework::DataRefUtils::getHeader(input); if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { currentTimeslice = dph->startTime; + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "currentTimeslice %lu from route index %lu", currentTimeslice, si); } } } @@ -134,8 +142,10 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, 
Compl auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu > oldestPossibleTimeslice %lu", "Retry", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Retry; } + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu <= oldestPossibleTimeslice %lu", "Consume", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Consume; }; return CompletionPolicy{name, matcher, callback}; From a9f53d8aee1c66fa8af02efbfd633efab5081e5d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:24:03 +0100 Subject: [PATCH 217/726] Ability to turn on / off the logger from the GUI. --- .../Core/include/Framework/DeviceControl.h | 2 + .../Core/include/Framework/DeviceState.h | 9 ++++ Framework/Core/src/WSDriverClient.cxx | 41 +++++++++++++++++++ .../src/FrameworkGUIDeviceInspector.cxx | 11 +++++ .../test/test_DetectMissingTimeframe.cxx | 1 + 5 files changed, 64 insertions(+) diff --git a/Framework/Core/include/Framework/DeviceControl.h b/Framework/Core/include/Framework/DeviceControl.h index 589de3937ee55..ce946e8e77fbf 100644 --- a/Framework/Core/include/Framework/DeviceControl.h +++ b/Framework/Core/include/Framework/DeviceControl.h @@ -48,6 +48,8 @@ struct DeviceControl { DeviceController* controller = nullptr; /// What kind of events should run with the TRACE level int tracingFlags = 0; + /// What kind of log streams should be enabled + int logStreams = 0; /// An incremental number to identify the device state int requestedState = 0; }; diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 3be0302fd4113..4fa72a84cad71 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -57,6 +57,13 @@ struct DeviceState { DATA_CONNECTED = 1 << 19, // Data channel connected }; + enum LogStreams : int { + NO_LOG = 0, + DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. + COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. + MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + }; + std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; @@ -93,6 +100,8 @@ struct DeviceState { int loopReason = 0; /// Bitmask of LoopReason to trace int tracingFlags = 0; + /// Bitmask of log streams which are available + int logStreams = 0; /// Stack of the severity, so that we can display only /// the bits we are interested in. 
std::vector severityStack; diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index d4ed77b9a004e..234b98b10259d 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -16,10 +16,15 @@ #include "Framework/DeviceSpec.h" #include "DriverClientContext.h" #include "DPLWebSocket.h" +#include "Framework/Signpost.h" #include #include #include +O2_DECLARE_DYNAMIC_LOG(device); +O2_DECLARE_DYNAMIC_LOG(completion); +O2_DECLARE_DYNAMIC_LOG(monitoring_service); + namespace o2::framework { @@ -152,6 +157,42 @@ void on_connect(uv_connect_t* connection, int status) state.tracingFlags = tracingFlags; }); + client->observe("/log-streams", [ref = context->ref](std::string_view cmd) { + auto& state = ref.get(); + static constexpr int prefixSize = std::string_view{"/log-streams "}.size(); + if (prefixSize > cmd.size()) { + LOG(error) << "Malformed log-streams request"; + return; + } + cmd.remove_prefix(prefixSize); + int logStreams = 0; + + auto error = std::from_chars(cmd.data(), cmd.data() + cmd.size(), logStreams); + if (error.ec != std::errc()) { + LOG(error) << "Malformed log-streams mask"; + return; + } + LOGP(info, "Logstreams flags set to {}", logStreams); + state.logStreams = logStreams; + if ((state.logStreams & DeviceState::LogStreams::DEVICE_LOG) != 0) { + O2_LOG_ENABLE(device); + } else { + O2_LOG_DISABLE(device); + } + if ((state.logStreams & DeviceState::LogStreams::COMPLETION_LOG) != 0) { + O2_LOG_ENABLE(completion); + } else { + O2_LOG_DISABLE(completion); + } + + if ((state.logStreams & DeviceState::LogStreams::MONITORING_SERVICE_LOG) != 0) { + O2_LOG_ENABLE(monitoring_service); + } else { + O2_LOG_DISABLE(monitoring_service); + } + + }); + // Client will be filled in the line after. I can probably have a single // client per device. 
auto dplClient = std::make_unique(); diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index a82753eb5af1f..fe93ca6d0f07f 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -400,6 +400,17 @@ void displayDeviceInspector(DeviceSpec const& spec, } } + bool logsChanged = false; + if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) { + logsChanged = ImGui::CheckboxFlags("Device", &control.logStreams, DeviceState::LogStreams::DEVICE_LOG); + logsChanged = ImGui::CheckboxFlags("Completion", &control.logStreams, DeviceState::LogStreams::COMPLETION_LOG); + logsChanged = ImGui::CheckboxFlags("Monitoring", &control.logStreams, DeviceState::LogStreams::MONITORING_SERVICE_LOG); + if (logsChanged && control.controller) { + std::string cmd = fmt::format("/log-streams {}", control.logStreams); + control.controller->write(cmd.c_str(), cmd.size()); + } + } + bool flagsChanged = false; if (ImGui::CollapsingHeader("Event loop tracing", ImGuiTreeNodeFlags_DefaultOpen)) { flagsChanged |= ImGui::CheckboxFlags("METRICS_MUST_FLUSH", &control.tracingFlags, DeviceState::LoopReason::METRICS_MUST_FLUSH); diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx index 3374cd9ad6cf2..7ef1370f040b4 100644 --- a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx +++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx @@ -43,6 +43,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& specs) if (i++ % 2 == 0) { outputs.make(OutputRef{"a2"}, 1); } + sleep(1); })}, }; DataProcessorSpec d{ From f977cf7469d7d1923ffcd37a3193698cdff8be2b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 11:20:50 +0100 Subject: [PATCH 218/726] DPL: do not validate lifetime when consumer uses consumeWhenAny --- Framework/Core/src/DeviceSpecHelpers.cxx | 16 +++++++++++++--- Framework/Core/src/WorkflowHelpers.cxx | 22 ++++++++++++++++++---- Framework/Core/src/WorkflowHelpers.h | 6 ++++++ 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..359b1a343cb7e 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1074,8 +1074,6 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf WorkflowHelpers::constructGraph(workflow, logicalEdges, outputs, availableForwardsInfo); - WorkflowHelpers::validateEdges(workflow, logicalEdges, outputs); - // We need to instanciate one device per (me, timeIndex) in the // DeviceConnectionEdge. For each device we need one new binding // server per (me, other) -> port Moreover for each (me, other, @@ -1122,10 +1120,13 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf inActions, workflow, availableForwardsInfo, channelPolicies, channelPrefix, defaultOffer, overrideServices); // We apply the completion policies here since this is where we have all the // devices resolved. 
- for (auto& device : devices) { + std::map policies; + for (DeviceSpec& device : devices) { bool hasPolicy = false; + policies[device.name].completionPolicyName = "unknown"; for (auto& policy : completionPolicies) { if (policy.matcher(device) == true) { + policies[policy.name].completionPolicyName = policy.name; device.completionPolicy = policy; hasPolicy = true; break; @@ -1158,6 +1159,15 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf throw runtime_error_f("Unable to find a resource policy for %s", device.id.c_str()); } } + // Iterate of the workflow and create a consistent vector of DataProcessorPoliciesInfo + std::vector policiesVector; + for (size_t wi = 0; wi < workflow.size(); ++wi) { + auto& processor = workflow[wi]; + auto& info = policies[processor.name]; + policiesVector.push_back(info); + } + + WorkflowHelpers::validateEdges(workflow, policiesVector, logicalEdges, outputs); for (auto& device : devices) { device.resourceMonitoringInterval = resourcesMonitoringInterval; diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..6b4ad35f826be 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1214,8 +1214,14 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } -bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +bool validateLifetime(std::ostream& errors, + DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorPoliciesInfo const& producerPolicies, + DataProcessorSpec const& consumer, InputSpec const& input, DataProcessorPoliciesInfo const& consumerPolicies) { + // In case the completion policy is consume-any, we do not need to check anything. 
+ if (consumerPolicies.completionPolicyName == "consume-any") { + return true; + } if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", DataSpecUtils::describe(input).c_str(), consumer.name, @@ -1225,7 +1231,9 @@ bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, O return true; } -bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +bool validateExpendable(std::ostream& errors, + DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorPoliciesInfo const& producerPolicies, + DataProcessorSpec const& consumer, InputSpec const& input, DataProcessorPoliciesInfo const& consumerPolicies) { auto isExpendable = [](DataProcessorLabel const& label) { return label.value == "expendable"; @@ -1244,8 +1252,12 @@ bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, return true; } -using Validator = std::function; +using Validator = std::function; + void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& policies, std::vector const& edges, std::vector const& outputs) { @@ -1262,10 +1274,12 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, for (auto& edge : edges) { DataProcessorSpec const& producer = workflow[edge.producer]; DataProcessorSpec const& consumer = workflow[edge.consumer]; + DataProcessorPoliciesInfo const& producerPolicies = policies[edge.producer]; + DataProcessorPoliciesInfo const& consumerPolicies = policies[edge.consumer]; OutputSpec const& output = outputs[edge.outputGlobalIndex]; InputSpec const& input = consumer.inputs[edge.consumerInputIndex]; for (auto& validator : defaultValidators) { - hasErrors |= !validator(errors, producer, output, consumer, input); + hasErrors |= !validator(errors, producer, output, producerPolicies, consumer, input, consumerPolicies); } } if (hasErrors) { diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index 0a3cd408b3424..055cae1d6011b 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -141,6 +141,11 @@ struct TopoIndexInfo { friend std::ostream& operator<<(std::ostream& out, TopoIndexInfo const& info); }; +// Information about the policies which were derived for a given data processor. +struct DataProcessorPoliciesInfo { + std::string completionPolicyName; +}; + struct OutputObj { InputSpec spec; bool isdangling; @@ -234,6 +239,7 @@ struct WorkflowHelpers { /// For example we should make sure that Lifetime::Timeframe inputs of /// one node is not connected to an Output of Lifetime::Sporadic of another node. static void validateEdges(WorkflowSpec const& workflow, + std::vector const& policiesInfos, std::vector const& edges, std::vector const& outputs); }; From 6ca6b75d10920b3b6299974fa6604643ceba2222 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 15 Jan 2024 13:02:30 +0100 Subject: [PATCH 219/726] DPL: drop unused o2_signpost_id_make_with_pointer method Probably old remaining from a previous implementation. 
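Client code keeps going through the O2_SIGNPOST_ID_FROM_POINTER and O2_SIGNPOST_ID_GENERATE macros, which call the underscore-prefixed helpers; only the duplicate that had no callers goes away. A minimal sketch of the two ways an id is obtained (the log name example_log and the function names are illustrative, not part of this patch):

  #include "Framework/Signpost.h"

  O2_DECLARE_DYNAMIC_LOG(example_log);

  void track(void* object)
  {
    // Id derived from the object address: the same pointer yields the same id,
    // so the begin and end of an interval can be emitted from different places.
    O2_SIGNPOST_ID_FROM_POINTER(sid, example_log, object);
    O2_SIGNPOST_START(example_log, sid, "tracking", "begin");
    O2_SIGNPOST_END(example_log, sid, "tracking", "end");
  }

  void once()
  {
    // Freshly generated id for a standalone event.
    O2_SIGNPOST_ID_GENERATE(sid, example_log);
    O2_SIGNPOST_EVENT_EMIT(example_log, sid, "tracking", "one-off event");
  }
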
--- Framework/Foundation/include/Framework/Signpost.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index eca9e26f77469..64d92d67acbf7 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -218,7 +218,6 @@ bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin = false); //_o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log); //_o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); -_o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); void* _o2_log_create(char const* name, int stacktrace); void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); @@ -246,14 +245,6 @@ inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* return uniqueId; } -inline _o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer) -{ - _o2_signpost_index_t signpost_index; - _o2_lock_free_stack_pop(log->slots, signpost_index, true); - log->ids[signpost_index].id = (int64_t)pointer; - return signpost_index; -} - // Implementation start here. Include this file with O2_SIGNPOST_IMPLEMENTATION defined in one file of your // project. #ifdef O2_SIGNPOST_IMPLEMENTATION From 87974b029e76c38f30b82238f03d48ae7732b9ad Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 11:21:05 +0100 Subject: [PATCH 220/726] DPL: rework signposts support The logger based Signposts are now behaving the same on macOS and linux when running normally. The difference between the two implementations will now be that if Instruments enables some OSLog namespace, we stop printing the messages and use the os_signpost API instead. --- .../Foundation/include/Framework/Signpost.h | 191 +++++++++--------- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 3 files changed, 94 insertions(+), 101 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 64d92d67acbf7..d2bdec0ec8246 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -13,6 +13,9 @@ #include #include +#ifdef __APPLE__ +#include +#endif struct o2_log_handle_t { char const* name = nullptr; @@ -22,6 +25,7 @@ struct o2_log_handle_t { // Helper function which replaces engineering types with a printf // compatible format string. +// FIXME: make this consteval when available in C++20 template constexpr auto remove_engineering_type(char const (&src)[N]) { @@ -82,78 +86,37 @@ o2_log_handle_t* o2_walk_logs(bool (*callback)(char const* name, void* log, void } #endif -#if !defined(O2_FORCE_LOGGER_SIGNPOST) && defined(__APPLE__) && (!defined(NDEBUG) || defined(O2_FORCE_SIGNPOSTS)) +#if defined(__APPLE__) #include #include #include -void* _o2_log_create(char const* name, char const* category); -#define O2_DECLARE_DYNAMIC_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." 
#x, OS_LOG_CATEGORY_DYNAMIC_TRACING) -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_DYNAMIC(log) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_STACKTRACE(log) -#define O2_DECLARE_LOG(x, category) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, #category) -#define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) -#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) -#define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) +#define O2_LOG_DEBUG_MAC(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) // FIXME: use __VA_OPT__ when available in C++20 -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) - -#ifdef O2_SIGNPOST_IMPLEMENTATION -/// We use a wrapper so that we can keep track of the logs. -void* _o2_log_create(char const* name, char const* category) -{ - // iterate over the list of logs and check if we already have - // one with the same name. - auto findLogByName = [](char const* name, void* log, void* context) -> bool { - char const* currentName = (char const*)context; - if (strcmp(name, currentName) == 0) { - return false; - } - return true; - }; - - o2_log_handle_t* handle = o2_walk_logs(findLogByName, (void*)name); - - // If we found one, return it. - if (handle) { - return handle->log; - } - // Otherwise, create a new one and add it to the end of the list. - os_log_t log = os_log_create(name, category); - o2_log_handle_t* newHandle = new o2_log_handle_t(); - newHandle->log = log; - newHandle->name = strdup(name); - newHandle->next = o2_get_logs_tail().load(); - // Until I manage to replace the log I have in next, keep trying. - // Notice this does not protect against two threads trying to insert - // a log with the same name. I should probably do a sorted insert for that. - while (!o2_get_logs_tail().compare_exchange_weak(newHandle->next, newHandle, - std::memory_order_release, - std::memory_order_relaxed)) { - newHandle->next = o2_get_logs_tail(); - } - - return log; -} +#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_ENABLED_MAC(log) os_signpost_enabled(private_o2_log_##log->os_log) +#else +// These are no-ops on linux. +#define O2_DECLARE_LOG_MAC(x, category) +#define O2_LOG_DEBUG_MAC(log, ...) 
+#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_ENABLED_MAC(log) false +#endif // __APPLE__ + +// Unless we are on apple we enable checking for signposts only if in debug mode or if we force them. +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +#define O2_LOG_ENABLED(log) private_o2_log_##log->stacktrace +#else +#define O2_LOG_ENABLED(log) false #endif -#elif !defined(NDEBUG) || defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) - -#ifndef O2_LOG_MACRO -#if __has_include("Framework/Logger.h") +#if !defined(O2_LOG_MACRO) && __has_include("Framework/Logger.h") #include "Framework/Logger.h" -// If NDEBUG is not defined, we use the logger to print out the signposts at the debug level. -#if !defined(NDEBUG) -#define O2_LOG_MACRO(...) LOGF(debug, __VA_ARGS__) -#elif defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) -// If we force the presence of the logger, we use it to print out the signposts at the detail level, which is not optimized out. #define O2_LOG_MACRO(...) LOGF(info, __VA_ARGS__) -#endif -#else +#elif !defined(O2_LOG_MACRO) // If we do not have the fairlogger, we simply print out the signposts to the console. // This is useful for things like the tests, which this way do not need to depend on the FairLogger. #define O2_LOG_MACRO(...) \ @@ -161,8 +124,9 @@ void* _o2_log_create(char const* name, char const* category) printf(__VA_ARGS__); \ printf("\n"); \ } while (0) -#endif -#endif +#else +#define O2_LOG_MACRO(...) +#endif // O2_LOG_MACRO // This is the linux implementation, it is not as nice as the apple one and simply prints out // the signpost information to the log. @@ -181,7 +145,7 @@ struct _o2_lock_free_stack { // A log is simply an inbox which keeps track of the available id, so that we can print out different signposts // with different indentation levels. // supports up to 1024 paralle signposts before it spinlocks. -typedef int _o2_signpost_index_t; +using _o2_signpost_index_t = int; struct _o2_activity_t { // How much the activity is indented in the output log. @@ -191,10 +155,13 @@ struct _o2_activity_t { struct _o2_signpost_id_t { // The id of the activity. - int64_t id = -1; + int64_t value = -1; }; struct _o2_log_t { +#ifdef __APPLE__ + os_log_t os_log = nullptr; +#endif // A circular buffer of available slots. Each unique interval pulls an id from this buffer. _o2_lock_free_stack slots; // Up to 256 activities can be active at the same time. @@ -211,7 +178,10 @@ struct _o2_log_t { // 0 means the log is disabled. // 1 means only the current signpost is printed. // >1 means the current signpost and n levels of the stacktrace are printed. - std::atomic stacktrace = 1; + std::atomic stacktrace = 0; + + // Default stacktrace level for the log, when enabled. + int defaultStacktrace = 1; }; bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool spin = false); @@ -310,7 +280,7 @@ bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin) } } -void* _o2_log_create(char const* name, int stacktrace) +void* _o2_log_create(char const* name, int defaultStacktrace) { // iterate over the list of logs and check if we already have // one with the same name. @@ -328,7 +298,7 @@ void* _o2_log_create(char const* name, int stacktrace) return handle->log; } // Otherwise, create a new one and add it to the end of the list. 
- _o2_log_t* log = new _o2_log_t(); + auto* log = new _o2_log_t(); // Write the initial 256 ids to the inbox, in reverse, so that the // linear search below is just for an handful of elements. int n = _o2_lock_free_stack::N; @@ -336,9 +306,18 @@ void* _o2_log_create(char const* name, int stacktrace) _o2_signpost_index_t signpost_index{n - 1 - i}; _o2_lock_free_stack_push(log->slots, signpost_index, true); } - log->stacktrace = stacktrace; - o2_log_handle_t* newHandle = new o2_log_handle_t(); + log->defaultStacktrace = defaultStacktrace; + auto* newHandle = new o2_log_handle_t(); newHandle->log = log; +#ifdef __APPLE__ + // On macOS, we use the os_signpost API so that when we are + // using instruments we can see the messages there. + if (defaultStacktrace > 1) { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING); + } else { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_TRACING); + } +#endif newHandle->name = strdup(name); newHandle->next = o2_get_logs_tail().load(); // Until I manage to replace the log I have in next, keep trying. @@ -357,10 +336,6 @@ void* _o2_log_create(char const* name, int stacktrace) // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) { - // Nothing to be done - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); @@ -368,10 +343,10 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n int leading = 0; // This is the equivalent of exclusive - if (id.id != 0) { + if (id.value != 0) { int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -385,7 +360,7 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n } char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -395,22 +370,19 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) { - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); // This is a unique slot for this interval. 
_o2_signpost_index_t signpost_index; _o2_lock_free_stack_pop(log->slots, signpost_index, true); // Put the id in the slot, to close things or to attach signposts to a given interval - log->ids[signpost_index].id = id.id; + log->ids[signpost_index].value = id.value; auto* activity = &log->activities[signpost_index]; activity->indentation = log->current_indentation++; activity->name = name; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -424,7 +396,7 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons // Find the index of the activity int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -439,13 +411,13 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons _o2_activity_t* activity = &log->activities[i]; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); O2_LOG_MACRO("%s", prebuffer); // Clear the slot activity->indentation = -1; activity->name = nullptr; - log->ids[i].id = -1; + log->ids[i].value = -1; // Put back the slot log->current_indentation--; _o2_signpost_index_t signpost_index{i}; @@ -468,29 +440,50 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) } #endif // O2_SIGNPOST_IMPLEMENTATION -/// Dynamic logs need to be enabled via the O2_LOG_ENABLE_DYNAMIC macro. Notice this will only work +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +/// Dynamic logs need to be enabled via the O2_LOG_ENABLE macro. Notice this will only work /// for the logger based logging, since the Apple version needs instruments to enable them. -#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) /// For the moment we do not support logs with a stacktrace. -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 64) #define O2_DECLARE_LOG(name, category) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) -#define O2_LOG_ENABLE_DYNAMIC(log) _o2_log_set_stacktrace(private_o2_log_##log, 1) -// We print out only the first 64 frames. -#define O2_LOG_ENABLE_STACKTRACE(log) _o2_log_set_stacktrace(private_o2_log_##log, 64) +// When we enable the log, we set the stacktrace to the default value. +#define O2_LOG_ENABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, private_o2_log_##log->defaultStacktrace) +#define O2_LOG_DISABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, 0) // For the moment we simply use LOG DEBUG. 
We should have proper activities so that we can // turn on and off the printing. #define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +// In case Instruments is attached, we switch to the Apple signpost API otherwise, both one +// mac and on linux we use our own implementation, using the logger. We can use the same ids because +// they are compatible between the two implementations, we also use remove_engineering_type to remove +// the engineering types from the format string, so that we can use the same format string for both. +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) __extension__({ \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } \ +}) +#define O2_SIGNPOST_START(log, id, name, format, ...) \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_START_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } +#define O2_SIGNPOST_END(log, id, name, format, ...) \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_END_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } #else // This is the release implementation, it does nothing. #define O2_DECLARE_DYNAMIC_LOG(x) #define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) #define O2_DECLARE_LOG(x, category) -#define O2_LOG_ENABLE_DYNAMIC(log) -#define O2_LOG_ENABLE_STACKTRACE(log) +#define O2_LOG_ENABLE(log) +#define O2_LOG_DISABLE(log) #define O2_LOG_DEBUG(log, ...) 
#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index 24b6afaec5c3d..f9b8d4ec0e13a 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -43,7 +43,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 74da35abf7c70..ed917ea7cadf5 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -51,7 +51,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); From a7d3034558e369aa0ce539d779260b01de23cfc7 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:24:03 +0100 Subject: [PATCH 221/726] DPL: add proper tracing for DataProcessingDevice socket callbacks --- Framework/Core/src/DataProcessingDevice.cxx | 40 +++++++++++++-------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index b03904c5b3776..366bfafd7d801 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -35,6 +35,9 @@ #include "Framework/TMessageSerializer.h" #include "Framework/InputRecord.h" #include "Framework/InputSpan.h" +#if defined(__APPLE__) || defined(NDEBUG) +#define O2_SIGNPOST_IMPLEMENTATION +#endif #include "Framework/Signpost.h" #include "Framework/TimingHelpers.h" #include "Framework/SourceInfoHeader.h" @@ -80,6 +83,8 @@ #include #include +O2_DECLARE_DYNAMIC_LOG(device); + using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; using DataHeader = o2::header::DataHeader; @@ -274,21 +279,22 @@ struct PollerContext { void on_socket_polled(uv_poll_t* poller, int status, int events) { auto* context = (PollerContext*)poller->data; + assert(context); + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); context->state->loopReason |= DeviceState::DATA_SOCKET_POLLED; switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); - LOG(debug) << "socket polled UV_READABLE: " 
<< context->name; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_INCOMING; } break; case UV_WRITABLE: { - ZoneScopedN("socket writeable"); + O2_SIGNPOST_END(device, sid, "socket_state", "Socket connected for channel %{public}s", context->name); if (context->read) { - LOG(debug) << "socket polled UV_CONNECT" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for read in context %{public}s", context->name); uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled); context->state->loopReason |= DeviceState::DATA_CONNECTED; } else { - LOG(debug) << "socket polled UV_WRITABLE" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_OUTGOING; // If the socket is writable, fairmq will handle the rest, so we can stop polling and // just wait for the disconnect. @@ -297,12 +303,10 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) context->pollerState = PollerContext::PollerState::Connected; } break; case UV_DISCONNECT: { - ZoneScopedN("socket disconnect"); - LOG(debug) << "socket polled UV_DISCONNECT"; + O2_SIGNPOST_END(device, sid, "socket_state", "Socket disconnected in context %{public}s", context->name); } break; case UV_PRIORITIZED: { - ZoneScopedN("socket prioritized"); - LOG(debug) << "socket polled UV_PRIORITIZED"; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() @@ -873,7 +877,9 @@ void DataProcessingDevice::startPollers() auto& deviceContext = ref.get(); auto& state = ref.get(); - for (auto& poller : state.activeInputPollers) { + for (auto* poller : state.activeInputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection."); uv_poll_start(poller, UV_WRITABLE, &on_socket_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } @@ -881,7 +887,9 @@ void DataProcessingDevice::startPollers() uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection."); uv_poll_start(poller, UV_WRITABLE, &on_socket_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } @@ -897,17 +905,21 @@ void DataProcessingDevice::stopPollers() auto& deviceContext = ref.get(); auto& state = ref.get(); LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size()); - for (auto& poller : state.activeInputPollers) { + for (auto* poller : state.activeInputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutOfBandPollers) { + 
for (auto* poller : state.activeOutOfBandPollers) { uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} output pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } From 4df6e4a6d17cbd9241dc7ff2ad3ed5346456bff3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:24:03 +0100 Subject: [PATCH 222/726] DPL use Signposts to debug consumeWhenAll --- Framework/Core/src/CompletionPolicyHelpers.cxx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 8c4b38ba3355c..a6240b1165b81 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -18,10 +18,13 @@ #include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" +#include "Framework/Signpost.h" #include #include +O2_DECLARE_DYNAMIC_LOG(completion); + namespace o2::framework { @@ -108,6 +111,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + O2_SIGNPOST_ID_GENERATE(sid, completion); + O2_SIGNPOST_START(completion, sid, "consumeWhenAll", "Completion policy invoked"); size_t si = 0; bool missingSporadic = false; @@ -116,15 +121,18 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl assert(si < specs.size()); auto& spec = specs[si++]; if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s due to missing input %lu", "Wait", si); return CompletionPolicy::CompletionOp::Wait; } if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "Missing sporadic found for route index %lu", si); missingSporadic = true; } if (input.header != nullptr && currentTimeslice == -1) { auto const* dph = framework::DataRefUtils::getHeader(input); if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { currentTimeslice = dph->startTime; + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "currentTimeslice %lu from route index %lu", currentTimeslice, si); } } } @@ -134,8 +142,10 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu > oldestPossibleTimeslice %lu", "Retry", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Retry; } + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu <= oldestPossibleTimeslice %lu", "Consume", currentTimeslice, oldestPossibleTimeslice); return 
CompletionPolicy::CompletionOp::Consume; }; return CompletionPolicy{name, matcher, callback}; From d566fd1bc0000efb68deb110b3c61a4f54f25fb5 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:41:37 +0100 Subject: [PATCH 223/726] Ability to turn on / off the logger from the GUI. --- .../Core/include/Framework/DeviceControl.h | 2 + .../Core/include/Framework/DeviceState.h | 9 +++++ Framework/Core/src/WSDriverClient.cxx | 40 +++++++++++++++++++ .../src/FrameworkGUIDeviceInspector.cxx | 11 +++++ .../test/test_DetectMissingTimeframe.cxx | 1 + 5 files changed, 63 insertions(+) diff --git a/Framework/Core/include/Framework/DeviceControl.h b/Framework/Core/include/Framework/DeviceControl.h index 589de3937ee55..ce946e8e77fbf 100644 --- a/Framework/Core/include/Framework/DeviceControl.h +++ b/Framework/Core/include/Framework/DeviceControl.h @@ -48,6 +48,8 @@ struct DeviceControl { DeviceController* controller = nullptr; /// What kind of events should run with the TRACE level int tracingFlags = 0; + /// What kind of log streams should be enabled + int logStreams = 0; /// An incremental number to identify the device state int requestedState = 0; }; diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 3be0302fd4113..4fa72a84cad71 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -57,6 +57,13 @@ struct DeviceState { DATA_CONNECTED = 1 << 19, // Data channel connected }; + enum LogStreams : int { + NO_LOG = 0, + DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. + COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. + MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + }; + std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; @@ -93,6 +100,8 @@ struct DeviceState { int loopReason = 0; /// Bitmask of LoopReason to trace int tracingFlags = 0; + /// Bitmask of log streams which are available + int logStreams = 0; /// Stack of the severity, so that we can display only /// the bits we are interested in. 
std::vector severityStack; diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index d4ed77b9a004e..ac2e3db41fcef 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -16,10 +16,15 @@ #include "Framework/DeviceSpec.h" #include "DriverClientContext.h" #include "DPLWebSocket.h" +#include "Framework/Signpost.h" #include #include #include +O2_DECLARE_DYNAMIC_LOG(device); +O2_DECLARE_DYNAMIC_LOG(completion); +O2_DECLARE_DYNAMIC_LOG(monitoring_service); + namespace o2::framework { @@ -152,6 +157,41 @@ void on_connect(uv_connect_t* connection, int status) state.tracingFlags = tracingFlags; }); + client->observe("/log-streams", [ref = context->ref](std::string_view cmd) { + auto& state = ref.get(); + static constexpr int prefixSize = std::string_view{"/log-streams "}.size(); + if (prefixSize > cmd.size()) { + LOG(error) << "Malformed log-streams request"; + return; + } + cmd.remove_prefix(prefixSize); + int logStreams = 0; + + auto error = std::from_chars(cmd.data(), cmd.data() + cmd.size(), logStreams); + if (error.ec != std::errc()) { + LOG(error) << "Malformed log-streams mask"; + return; + } + LOGP(info, "Logstreams flags set to {}", logStreams); + state.logStreams = logStreams; + if ((state.logStreams & DeviceState::LogStreams::DEVICE_LOG) != 0) { + O2_LOG_ENABLE(device); + } else { + O2_LOG_DISABLE(device); + } + if ((state.logStreams & DeviceState::LogStreams::COMPLETION_LOG) != 0) { + O2_LOG_ENABLE(completion); + } else { + O2_LOG_DISABLE(completion); + } + + if ((state.logStreams & DeviceState::LogStreams::MONITORING_SERVICE_LOG) != 0) { + O2_LOG_ENABLE(monitoring_service); + } else { + O2_LOG_DISABLE(monitoring_service); + } + }); + // Client will be filled in the line after. I can probably have a single // client per device. 
auto dplClient = std::make_unique(); diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index a82753eb5af1f..fe93ca6d0f07f 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -400,6 +400,17 @@ void displayDeviceInspector(DeviceSpec const& spec, } } + bool logsChanged = false; + if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) { + logsChanged = ImGui::CheckboxFlags("Device", &control.logStreams, DeviceState::LogStreams::DEVICE_LOG); + logsChanged = ImGui::CheckboxFlags("Completion", &control.logStreams, DeviceState::LogStreams::COMPLETION_LOG); + logsChanged = ImGui::CheckboxFlags("Monitoring", &control.logStreams, DeviceState::LogStreams::MONITORING_SERVICE_LOG); + if (logsChanged && control.controller) { + std::string cmd = fmt::format("/log-streams {}", control.logStreams); + control.controller->write(cmd.c_str(), cmd.size()); + } + } + bool flagsChanged = false; if (ImGui::CollapsingHeader("Event loop tracing", ImGuiTreeNodeFlags_DefaultOpen)) { flagsChanged |= ImGui::CheckboxFlags("METRICS_MUST_FLUSH", &control.tracingFlags, DeviceState::LoopReason::METRICS_MUST_FLUSH); diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx index 3374cd9ad6cf2..7ef1370f040b4 100644 --- a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx +++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx @@ -43,6 +43,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& specs) if (i++ % 2 == 0) { outputs.make(OutputRef{"a2"}, 1); } + sleep(1); })}, }; DataProcessorSpec d{ From 34d79bf5747e7f47eef5453c147f162870720041 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 10:17:25 +0100 Subject: [PATCH 224/726] DPL GUI: add GUI to enable / disable driver signposts --- Framework/GUISupport/src/FrameworkGUIDebugger.cxx | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx index 35e0bc563a22e..005f7082d77b4 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx @@ -23,6 +23,7 @@ #include "FrameworkGUIDataRelayerUsage.h" #include "PaletteHelpers.h" #include "FrameworkGUIState.h" +#include "Framework/Signpost.h" #include #include @@ -34,6 +35,7 @@ #include #include +O2_DECLARE_DYNAMIC_LOG(driver); // Make sure we can use aggregated initialisers. 
#pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" @@ -506,7 +508,7 @@ void displayDeviceMetrics(const char* label, case MetricsDisplayStyle::Lines: { auto xAxisFlags = ImPlotAxisFlags_None; auto yAxisFlags = ImPlotAxisFlags_LockMin; - //ImPlot::FitNextPlotAxes(true, true, true, true); + // ImPlot::FitNextPlotAxes(true, true, true, true); if (ImPlot::BeginPlot("##Some plot", {-1, -1}, axisFlags)) { ImPlot::SetupAxes("time", "value", xAxisFlags, yAxisFlags); ImPlot::SetupAxisFormat(ImAxis_Y1, formatSI, nullptr); @@ -1093,6 +1095,17 @@ void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverContro } #endif + static bool selectedSignpost = false; + if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) { + if (ImGui::Checkbox("Driver", &selectedSignpost)) { + if (selectedSignpost) { + O2_LOG_ENABLE(driver); + } else { + O2_LOG_DISABLE(driver); + } + } + } + for (size_t i = 0; i < driverInfo.states.size(); ++i) { ImGui::Text("#%lu: %s", i, DriverInfoHelper::stateToString(driverInfo.states[i])); } From 942a18c7312363a4bed12a85ebce1c804c3cee17 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 15 Jan 2024 13:02:30 +0100 Subject: [PATCH 225/726] DPL: drop unused o2_signpost_id_make_with_pointer method Probably old remaining from a previous implementation. --- Framework/Foundation/include/Framework/Signpost.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index eca9e26f77469..64d92d67acbf7 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -218,7 +218,6 @@ bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin = false); //_o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log); //_o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); -_o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); void* _o2_log_create(char const* name, int stacktrace); void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); @@ -246,14 +245,6 @@ inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* return uniqueId; } -inline _o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer) -{ - _o2_signpost_index_t signpost_index; - _o2_lock_free_stack_pop(log->slots, signpost_index, true); - log->ids[signpost_index].id = (int64_t)pointer; - return signpost_index; -} - // Implementation start here. Include this file with O2_SIGNPOST_IMPLEMENTATION defined in one file of your // project. #ifdef O2_SIGNPOST_IMPLEMENTATION From 7eb936ce0b25f3ee218b3c159d27e87d743b7002 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 11:21:05 +0100 Subject: [PATCH 226/726] DPL: rework signposts support The logger based Signposts are now behaving the same on macOS and linux when running normally. The difference between the two implementations will now be that if Instruments enables some OSLog namespace, we stop printing the messages and use the os_signpost API instead. 
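For context, a minimal sketch of the call pattern this rework targets; the stream name my_stream, the category string and the logged values are purely illustrative, and it assumes the macro signatures introduced in the diff below:

  #include "Framework/Signpost.h" // one translation unit defines O2_SIGNPOST_IMPLEMENTATION before including it

  O2_DECLARE_DYNAMIC_LOG(my_stream); // dynamic stream: starts disabled on macOS and linux alike

  void example()
  {
    O2_SIGNPOST_ID_GENERATE(id, my_stream);
    // While the stream is off, the logger prints nothing (on macOS, Instruments
    // can still subscribe to the OSLog namespace independently).
    O2_SIGNPOST_START(my_stream, id, "example", "not visible, my_stream is still off");
    O2_SIGNPOST_END(my_stream, id, "example", "not visible either");

    O2_LOG_ENABLE(my_stream); // e.g. driven by the GUI or the /log-streams command
    O2_SIGNPOST_START(my_stream, id, "example", "printed via the logger, or via os_signpost if Instruments enabled the namespace");
    O2_SIGNPOST_EVENT_EMIT(my_stream, id, "example", "intermediate event, value %d", 42);
    O2_SIGNPOST_END(my_stream, id, "example", "interval closed");
  }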
--- .../Foundation/include/Framework/Signpost.h | 191 +++++++++--------- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 3 files changed, 94 insertions(+), 101 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 64d92d67acbf7..d2bdec0ec8246 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -13,6 +13,9 @@ #include #include +#ifdef __APPLE__ +#include +#endif struct o2_log_handle_t { char const* name = nullptr; @@ -22,6 +25,7 @@ struct o2_log_handle_t { // Helper function which replaces engineering types with a printf // compatible format string. +// FIXME: make this consteval when available in C++20 template constexpr auto remove_engineering_type(char const (&src)[N]) { @@ -82,78 +86,37 @@ o2_log_handle_t* o2_walk_logs(bool (*callback)(char const* name, void* log, void } #endif -#if !defined(O2_FORCE_LOGGER_SIGNPOST) && defined(__APPLE__) && (!defined(NDEBUG) || defined(O2_FORCE_SIGNPOSTS)) +#if defined(__APPLE__) #include #include #include -void* _o2_log_create(char const* name, char const* category); -#define O2_DECLARE_DYNAMIC_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, OS_LOG_CATEGORY_DYNAMIC_TRACING) -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_DYNAMIC(log) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_STACKTRACE(log) -#define O2_DECLARE_LOG(x, category) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, #category) -#define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) -#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) -#define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) +#define O2_LOG_DEBUG_MAC(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) // FIXME: use __VA_OPT__ when available in C++20 -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) - -#ifdef O2_SIGNPOST_IMPLEMENTATION -/// We use a wrapper so that we can keep track of the logs. -void* _o2_log_create(char const* name, char const* category) -{ - // iterate over the list of logs and check if we already have - // one with the same name. - auto findLogByName = [](char const* name, void* log, void* context) -> bool { - char const* currentName = (char const*)context; - if (strcmp(name, currentName) == 0) { - return false; - } - return true; - }; - - o2_log_handle_t* handle = o2_walk_logs(findLogByName, (void*)name); - - // If we found one, return it. 
- if (handle) { - return handle->log; - } - // Otherwise, create a new one and add it to the end of the list. - os_log_t log = os_log_create(name, category); - o2_log_handle_t* newHandle = new o2_log_handle_t(); - newHandle->log = log; - newHandle->name = strdup(name); - newHandle->next = o2_get_logs_tail().load(); - // Until I manage to replace the log I have in next, keep trying. - // Notice this does not protect against two threads trying to insert - // a log with the same name. I should probably do a sorted insert for that. - while (!o2_get_logs_tail().compare_exchange_weak(newHandle->next, newHandle, - std::memory_order_release, - std::memory_order_relaxed)) { - newHandle->next = o2_get_logs_tail(); - } - - return log; -} +#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_ENABLED_MAC(log) os_signpost_enabled(private_o2_log_##log->os_log) +#else +// These are no-ops on linux. +#define O2_DECLARE_LOG_MAC(x, category) +#define O2_LOG_DEBUG_MAC(log, ...) +#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_ENABLED_MAC(log) false +#endif // __APPLE__ + +// Unless we are on apple we enable checking for signposts only if in debug mode or if we force them. +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +#define O2_LOG_ENABLED(log) private_o2_log_##log->stacktrace +#else +#define O2_LOG_ENABLED(log) false #endif -#elif !defined(NDEBUG) || defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) - -#ifndef O2_LOG_MACRO -#if __has_include("Framework/Logger.h") +#if !defined(O2_LOG_MACRO) && __has_include("Framework/Logger.h") #include "Framework/Logger.h" -// If NDEBUG is not defined, we use the logger to print out the signposts at the debug level. -#if !defined(NDEBUG) -#define O2_LOG_MACRO(...) LOGF(debug, __VA_ARGS__) -#elif defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) -// If we force the presence of the logger, we use it to print out the signposts at the detail level, which is not optimized out. #define O2_LOG_MACRO(...) LOGF(info, __VA_ARGS__) -#endif -#else +#elif !defined(O2_LOG_MACRO) // If we do not have the fairlogger, we simply print out the signposts to the console. // This is useful for things like the tests, which this way do not need to depend on the FairLogger. #define O2_LOG_MACRO(...) \ @@ -161,8 +124,9 @@ void* _o2_log_create(char const* name, char const* category) printf(__VA_ARGS__); \ printf("\n"); \ } while (0) -#endif -#endif +#else +#define O2_LOG_MACRO(...) +#endif // O2_LOG_MACRO // This is the linux implementation, it is not as nice as the apple one and simply prints out // the signpost information to the log. @@ -181,7 +145,7 @@ struct _o2_lock_free_stack { // A log is simply an inbox which keeps track of the available id, so that we can print out different signposts // with different indentation levels. // supports up to 1024 paralle signposts before it spinlocks. 
-typedef int _o2_signpost_index_t; +using _o2_signpost_index_t = int; struct _o2_activity_t { // How much the activity is indented in the output log. @@ -191,10 +155,13 @@ struct _o2_activity_t { struct _o2_signpost_id_t { // The id of the activity. - int64_t id = -1; + int64_t value = -1; }; struct _o2_log_t { +#ifdef __APPLE__ + os_log_t os_log = nullptr; +#endif // A circular buffer of available slots. Each unique interval pulls an id from this buffer. _o2_lock_free_stack slots; // Up to 256 activities can be active at the same time. @@ -211,7 +178,10 @@ struct _o2_log_t { // 0 means the log is disabled. // 1 means only the current signpost is printed. // >1 means the current signpost and n levels of the stacktrace are printed. - std::atomic stacktrace = 1; + std::atomic stacktrace = 0; + + // Default stacktrace level for the log, when enabled. + int defaultStacktrace = 1; }; bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool spin = false); @@ -310,7 +280,7 @@ bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin) } } -void* _o2_log_create(char const* name, int stacktrace) +void* _o2_log_create(char const* name, int defaultStacktrace) { // iterate over the list of logs and check if we already have // one with the same name. @@ -328,7 +298,7 @@ void* _o2_log_create(char const* name, int stacktrace) return handle->log; } // Otherwise, create a new one and add it to the end of the list. - _o2_log_t* log = new _o2_log_t(); + auto* log = new _o2_log_t(); // Write the initial 256 ids to the inbox, in reverse, so that the // linear search below is just for an handful of elements. int n = _o2_lock_free_stack::N; @@ -336,9 +306,18 @@ void* _o2_log_create(char const* name, int stacktrace) _o2_signpost_index_t signpost_index{n - 1 - i}; _o2_lock_free_stack_push(log->slots, signpost_index, true); } - log->stacktrace = stacktrace; - o2_log_handle_t* newHandle = new o2_log_handle_t(); + log->defaultStacktrace = defaultStacktrace; + auto* newHandle = new o2_log_handle_t(); newHandle->log = log; +#ifdef __APPLE__ + // On macOS, we use the os_signpost API so that when we are + // using instruments we can see the messages there. + if (defaultStacktrace > 1) { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING); + } else { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_TRACING); + } +#endif newHandle->name = strdup(name); newHandle->next = o2_get_logs_tail().load(); // Until I manage to replace the log I have in next, keep trying. @@ -357,10 +336,6 @@ void* _o2_log_create(char const* name, int stacktrace) // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) 
{ - // Nothing to be done - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); @@ -368,10 +343,10 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n int leading = 0; // This is the equivalent of exclusive - if (id.id != 0) { + if (id.value != 0) { int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -385,7 +360,7 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n } char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -395,22 +370,19 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) { - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); // This is a unique slot for this interval. _o2_signpost_index_t signpost_index; _o2_lock_free_stack_pop(log->slots, signpost_index, true); // Put the id in the slot, to close things or to attach signposts to a given interval - log->ids[signpost_index].id = id.id; + log->ids[signpost_index].value = id.value; auto* activity = &log->activities[signpost_index]; activity->indentation = log->current_indentation++; activity->name = name; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -424,7 +396,7 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons // Find the index of the activity int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -439,13 +411,13 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons _o2_activity_t* activity = &log->activities[i]; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); O2_LOG_MACRO("%s", prebuffer); // Clear the slot activity->indentation = -1; activity->name = nullptr; - log->ids[i].id = -1; + log->ids[i].value = -1; // Put back the slot log->current_indentation--; _o2_signpost_index_t signpost_index{i}; @@ -468,29 +440,50 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) } #endif // O2_SIGNPOST_IMPLEMENTATION -/// Dynamic logs need to be enabled via the O2_LOG_ENABLE_DYNAMIC macro. Notice this will only work +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +/// Dynamic logs need to be enabled via the O2_LOG_ENABLE macro. Notice this will only work /// for the logger based logging, since the Apple version needs instruments to enable them. 
-#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) /// For the moment we do not support logs with a stacktrace. -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 64) #define O2_DECLARE_LOG(name, category) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) -#define O2_LOG_ENABLE_DYNAMIC(log) _o2_log_set_stacktrace(private_o2_log_##log, 1) -// We print out only the first 64 frames. -#define O2_LOG_ENABLE_STACKTRACE(log) _o2_log_set_stacktrace(private_o2_log_##log, 64) +// When we enable the log, we set the stacktrace to the default value. +#define O2_LOG_ENABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, private_o2_log_##log->defaultStacktrace) +#define O2_LOG_DISABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, 0) // For the moment we simply use LOG DEBUG. We should have proper activities so that we can // turn on and off the printing. #define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__) +// In case Instruments is attached, we switch to the Apple signpost API otherwise, both one +// mac and on linux we use our own implementation, using the logger. We can use the same ids because +// they are compatible between the two implementations, we also use remove_engineering_type to remove +// the engineering types from the format string, so that we can use the same format string for both. +#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) __extension__({ \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } \ +}) +#define O2_SIGNPOST_START(log, id, name, format, ...) \ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_START_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } +#define O2_SIGNPOST_END(log, id, name, format, ...) 
\ + if (O2_SIGNPOST_ENABLED_MAC(log)) { \ + O2_SIGNPOST_END_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (private_o2_log_##log->stacktrace) { \ + _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } #else // This is the release implementation, it does nothing. #define O2_DECLARE_DYNAMIC_LOG(x) #define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) #define O2_DECLARE_LOG(x, category) -#define O2_LOG_ENABLE_DYNAMIC(log) -#define O2_LOG_ENABLE_STACKTRACE(log) +#define O2_LOG_ENABLE(log) +#define O2_LOG_DISABLE(log) #define O2_LOG_DEBUG(log, ...) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index 24b6afaec5c3d..f9b8d4ec0e13a 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -43,7 +43,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 74da35abf7c70..ed917ea7cadf5 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -51,7 +51,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); From d102e70351f89e1e2556340fb1c50374f5e1cfdf Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 11:22:27 +0100 Subject: [PATCH 227/726] DPL: add proper tracing for DataProcessingDevice socket callbacks --- Framework/Core/src/DataProcessingDevice.cxx | 40 +++++++++++++-------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index b03904c5b3776..366bfafd7d801 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -35,6 +35,9 @@ #include "Framework/TMessageSerializer.h" #include "Framework/InputRecord.h" #include "Framework/InputSpan.h" +#if defined(__APPLE__) || defined(NDEBUG) +#define O2_SIGNPOST_IMPLEMENTATION +#endif #include "Framework/Signpost.h" #include "Framework/TimingHelpers.h" #include "Framework/SourceInfoHeader.h" @@ -80,6 +83,8 @@ 
#include #include +O2_DECLARE_DYNAMIC_LOG(device); + using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; using DataHeader = o2::header::DataHeader; @@ -274,21 +279,22 @@ struct PollerContext { void on_socket_polled(uv_poll_t* poller, int status, int events) { auto* context = (PollerContext*)poller->data; + assert(context); + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); context->state->loopReason |= DeviceState::DATA_SOCKET_POLLED; switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); - LOG(debug) << "socket polled UV_READABLE: " << context->name; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_INCOMING; } break; case UV_WRITABLE: { - ZoneScopedN("socket writeable"); + O2_SIGNPOST_END(device, sid, "socket_state", "Socket connected for channel %{public}s", context->name); if (context->read) { - LOG(debug) << "socket polled UV_CONNECT" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for read in context %{public}s", context->name); uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled); context->state->loopReason |= DeviceState::DATA_CONNECTED; } else { - LOG(debug) << "socket polled UV_WRITABLE" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_OUTGOING; // If the socket is writable, fairmq will handle the rest, so we can stop polling and // just wait for the disconnect. @@ -297,12 +303,10 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) context->pollerState = PollerContext::PollerState::Connected; } break; case UV_DISCONNECT: { - ZoneScopedN("socket disconnect"); - LOG(debug) << "socket polled UV_DISCONNECT"; + O2_SIGNPOST_END(device, sid, "socket_state", "Socket disconnected in context %{public}s", context->name); } break; case UV_PRIORITIZED: { - ZoneScopedN("socket prioritized"); - LOG(debug) << "socket polled UV_PRIORITIZED"; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() @@ -873,7 +877,9 @@ void DataProcessingDevice::startPollers() auto& deviceContext = ref.get(); auto& state = ref.get(); - for (auto& poller : state.activeInputPollers) { + for (auto* poller : state.activeInputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection."); uv_poll_start(poller, UV_WRITABLE, &on_socket_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } @@ -881,7 +887,9 @@ void DataProcessingDevice::startPollers() uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection."); uv_poll_start(poller, UV_WRITABLE, &on_socket_polled); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected; } @@ -897,17 +905,21 @@ void 
DataProcessingDevice::stopPollers() auto& deviceContext = ref.get(); auto& state = ref.get(); LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size()); - for (auto& poller : state.activeInputPollers) { + for (auto* poller : state.activeInputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutOfBandPollers) { + for (auto* poller : state.activeOutOfBandPollers) { uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} output pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } From 7607e2111442968e483732fcf2405f9467efb343 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:24:03 +0100 Subject: [PATCH 228/726] DPL use Signposts to debug consumeWhenAll --- Framework/Core/src/CompletionPolicyHelpers.cxx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 8c4b38ba3355c..a6240b1165b81 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -18,10 +18,13 @@ #include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" +#include "Framework/Signpost.h" #include #include +O2_DECLARE_DYNAMIC_LOG(completion); + namespace o2::framework { @@ -108,6 +111,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + O2_SIGNPOST_ID_GENERATE(sid, completion); + O2_SIGNPOST_START(completion, sid, "consumeWhenAll", "Completion policy invoked"); size_t si = 0; bool missingSporadic = false; @@ -116,15 +121,18 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl assert(si < specs.size()); auto& spec = specs[si++]; if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s due to missing input %lu", "Wait", si); return CompletionPolicy::CompletionOp::Wait; } if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "Missing sporadic found for route index %lu", si); missingSporadic = true; } if (input.header != nullptr && currentTimeslice == -1) { auto const* dph = framework::DataRefUtils::getHeader(input); if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { currentTimeslice = dph->startTime; + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "currentTimeslice %lu from route index %lu", currentTimeslice, si); } } } @@ -134,8 +142,10 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, 
Compl auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu > oldestPossibleTimeslice %lu", "Retry", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Retry; } + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu <= oldestPossibleTimeslice %lu", "Consume", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Consume; }; return CompletionPolicy{name, matcher, callback}; From 12f7e25fe2f434c30605ae94b52ce7e89e7300de Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 09:41:37 +0100 Subject: [PATCH 229/726] Ability to turn on / off the logger from the GUI. --- .../Core/include/Framework/DeviceControl.h | 2 + .../Core/include/Framework/DeviceState.h | 9 +++++ Framework/Core/src/WSDriverClient.cxx | 40 +++++++++++++++++++ .../src/FrameworkGUIDeviceInspector.cxx | 11 +++++ .../test/test_DetectMissingTimeframe.cxx | 1 + 5 files changed, 63 insertions(+) diff --git a/Framework/Core/include/Framework/DeviceControl.h b/Framework/Core/include/Framework/DeviceControl.h index 589de3937ee55..ce946e8e77fbf 100644 --- a/Framework/Core/include/Framework/DeviceControl.h +++ b/Framework/Core/include/Framework/DeviceControl.h @@ -48,6 +48,8 @@ struct DeviceControl { DeviceController* controller = nullptr; /// What kind of events should run with the TRACE level int tracingFlags = 0; + /// What kind of log streams should be enabled + int logStreams = 0; /// An incremental number to identify the device state int requestedState = 0; }; diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 3be0302fd4113..4fa72a84cad71 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -57,6 +57,13 @@ struct DeviceState { DATA_CONNECTED = 1 << 19, // Data channel connected }; + enum LogStreams : int { + NO_LOG = 0, + DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. + COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. + MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + }; + std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; @@ -93,6 +100,8 @@ struct DeviceState { int loopReason = 0; /// Bitmask of LoopReason to trace int tracingFlags = 0; + /// Bitmask of log streams which are available + int logStreams = 0; /// Stack of the severity, so that we can display only /// the bits we are interested in. 
std::vector severityStack; diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index d4ed77b9a004e..ac2e3db41fcef 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -16,10 +16,15 @@ #include "Framework/DeviceSpec.h" #include "DriverClientContext.h" #include "DPLWebSocket.h" +#include "Framework/Signpost.h" #include #include #include +O2_DECLARE_DYNAMIC_LOG(device); +O2_DECLARE_DYNAMIC_LOG(completion); +O2_DECLARE_DYNAMIC_LOG(monitoring_service); + namespace o2::framework { @@ -152,6 +157,41 @@ void on_connect(uv_connect_t* connection, int status) state.tracingFlags = tracingFlags; }); + client->observe("/log-streams", [ref = context->ref](std::string_view cmd) { + auto& state = ref.get(); + static constexpr int prefixSize = std::string_view{"/log-streams "}.size(); + if (prefixSize > cmd.size()) { + LOG(error) << "Malformed log-streams request"; + return; + } + cmd.remove_prefix(prefixSize); + int logStreams = 0; + + auto error = std::from_chars(cmd.data(), cmd.data() + cmd.size(), logStreams); + if (error.ec != std::errc()) { + LOG(error) << "Malformed log-streams mask"; + return; + } + LOGP(info, "Logstreams flags set to {}", logStreams); + state.logStreams = logStreams; + if ((state.logStreams & DeviceState::LogStreams::DEVICE_LOG) != 0) { + O2_LOG_ENABLE(device); + } else { + O2_LOG_DISABLE(device); + } + if ((state.logStreams & DeviceState::LogStreams::COMPLETION_LOG) != 0) { + O2_LOG_ENABLE(completion); + } else { + O2_LOG_DISABLE(completion); + } + + if ((state.logStreams & DeviceState::LogStreams::MONITORING_SERVICE_LOG) != 0) { + O2_LOG_ENABLE(monitoring_service); + } else { + O2_LOG_DISABLE(monitoring_service); + } + }); + // Client will be filled in the line after. I can probably have a single // client per device. 
auto dplClient = std::make_unique(); diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index a82753eb5af1f..fe93ca6d0f07f 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -400,6 +400,17 @@ void displayDeviceInspector(DeviceSpec const& spec, } } + bool logsChanged = false; + if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) { + logsChanged = ImGui::CheckboxFlags("Device", &control.logStreams, DeviceState::LogStreams::DEVICE_LOG); + logsChanged = ImGui::CheckboxFlags("Completion", &control.logStreams, DeviceState::LogStreams::COMPLETION_LOG); + logsChanged = ImGui::CheckboxFlags("Monitoring", &control.logStreams, DeviceState::LogStreams::MONITORING_SERVICE_LOG); + if (logsChanged && control.controller) { + std::string cmd = fmt::format("/log-streams {}", control.logStreams); + control.controller->write(cmd.c_str(), cmd.size()); + } + } + bool flagsChanged = false; if (ImGui::CollapsingHeader("Event loop tracing", ImGuiTreeNodeFlags_DefaultOpen)) { flagsChanged |= ImGui::CheckboxFlags("METRICS_MUST_FLUSH", &control.tracingFlags, DeviceState::LoopReason::METRICS_MUST_FLUSH); diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx index 3374cd9ad6cf2..7ef1370f040b4 100644 --- a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx +++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx @@ -43,6 +43,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& specs) if (i++ % 2 == 0) { outputs.make(OutputRef{"a2"}, 1); } + sleep(1); })}, }; DataProcessorSpec d{ From b88a52e5631f03c19fb49503de5e91d8256bd587 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 10:17:25 +0100 Subject: [PATCH 230/726] DPL GUI: add GUI to enable / disable driver signposts --- Framework/GUISupport/src/FrameworkGUIDebugger.cxx | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx index 35e0bc563a22e..005f7082d77b4 100644 --- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx @@ -23,6 +23,7 @@ #include "FrameworkGUIDataRelayerUsage.h" #include "PaletteHelpers.h" #include "FrameworkGUIState.h" +#include "Framework/Signpost.h" #include #include @@ -34,6 +35,7 @@ #include #include +O2_DECLARE_DYNAMIC_LOG(driver); // Make sure we can use aggregated initialisers. 
#pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" @@ -506,7 +508,7 @@ void displayDeviceMetrics(const char* label, case MetricsDisplayStyle::Lines: { auto xAxisFlags = ImPlotAxisFlags_None; auto yAxisFlags = ImPlotAxisFlags_LockMin; - //ImPlot::FitNextPlotAxes(true, true, true, true); + // ImPlot::FitNextPlotAxes(true, true, true, true); if (ImPlot::BeginPlot("##Some plot", {-1, -1}, axisFlags)) { ImPlot::SetupAxes("time", "value", xAxisFlags, yAxisFlags); ImPlot::SetupAxisFormat(ImAxis_Y1, formatSI, nullptr); @@ -1093,6 +1095,17 @@ void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverContro } #endif + static bool selectedSignpost = false; + if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) { + if (ImGui::Checkbox("Driver", &selectedSignpost)) { + if (selectedSignpost) { + O2_LOG_ENABLE(driver); + } else { + O2_LOG_DISABLE(driver); + } + } + } + for (size_t i = 0; i < driverInfo.states.size(); ++i) { ImGui::Text("#%lu: %s", i, DriverInfoHelper::stateToString(driverInfo.states[i])); } From 7978c5c54716ee48cb324f091363f435ce12ac3b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 11:20:50 +0100 Subject: [PATCH 231/726] DPL: do not validate lifetime when consumer uses consumeWhenAny --- Framework/Core/src/DeviceSpecHelpers.cxx | 16 +++++++++++++--- Framework/Core/src/WorkflowHelpers.cxx | 22 ++++++++++++++++++---- Framework/Core/src/WorkflowHelpers.h | 6 ++++++ 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..359b1a343cb7e 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1074,8 +1074,6 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf WorkflowHelpers::constructGraph(workflow, logicalEdges, outputs, availableForwardsInfo); - WorkflowHelpers::validateEdges(workflow, logicalEdges, outputs); - // We need to instanciate one device per (me, timeIndex) in the // DeviceConnectionEdge. For each device we need one new binding // server per (me, other) -> port Moreover for each (me, other, @@ -1122,10 +1120,13 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf inActions, workflow, availableForwardsInfo, channelPolicies, channelPrefix, defaultOffer, overrideServices); // We apply the completion policies here since this is where we have all the // devices resolved. 
- for (auto& device : devices) { + std::map policies; + for (DeviceSpec& device : devices) { bool hasPolicy = false; + policies[device.name].completionPolicyName = "unknown"; for (auto& policy : completionPolicies) { if (policy.matcher(device) == true) { + policies[policy.name].completionPolicyName = policy.name; device.completionPolicy = policy; hasPolicy = true; break; @@ -1158,6 +1159,15 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf throw runtime_error_f("Unable to find a resource policy for %s", device.id.c_str()); } } + // Iterate of the workflow and create a consistent vector of DataProcessorPoliciesInfo + std::vector policiesVector; + for (size_t wi = 0; wi < workflow.size(); ++wi) { + auto& processor = workflow[wi]; + auto& info = policies[processor.name]; + policiesVector.push_back(info); + } + + WorkflowHelpers::validateEdges(workflow, policiesVector, logicalEdges, outputs); for (auto& device : devices) { device.resourceMonitoringInterval = resourcesMonitoringInterval; diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..6b4ad35f826be 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1214,8 +1214,14 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } -bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +bool validateLifetime(std::ostream& errors, + DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorPoliciesInfo const& producerPolicies, + DataProcessorSpec const& consumer, InputSpec const& input, DataProcessorPoliciesInfo const& consumerPolicies) { + // In case the completion policy is consume-any, we do not need to check anything. 
+ if (consumerPolicies.completionPolicyName == "consume-any") { + return true; + } if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", DataSpecUtils::describe(input).c_str(), consumer.name, @@ -1225,7 +1231,9 @@ bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, O return true; } -bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +bool validateExpendable(std::ostream& errors, + DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorPoliciesInfo const& producerPolicies, + DataProcessorSpec const& consumer, InputSpec const& input, DataProcessorPoliciesInfo const& consumerPolicies) { auto isExpendable = [](DataProcessorLabel const& label) { return label.value == "expendable"; @@ -1244,8 +1252,12 @@ bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, return true; } -using Validator = std::function; +using Validator = std::function; + void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& policies, std::vector const& edges, std::vector const& outputs) { @@ -1262,10 +1274,12 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, for (auto& edge : edges) { DataProcessorSpec const& producer = workflow[edge.producer]; DataProcessorSpec const& consumer = workflow[edge.consumer]; + DataProcessorPoliciesInfo const& producerPolicies = policies[edge.producer]; + DataProcessorPoliciesInfo const& consumerPolicies = policies[edge.consumer]; OutputSpec const& output = outputs[edge.outputGlobalIndex]; InputSpec const& input = consumer.inputs[edge.consumerInputIndex]; for (auto& validator : defaultValidators) { - hasErrors |= !validator(errors, producer, output, consumer, input); + hasErrors |= !validator(errors, producer, output, producerPolicies, consumer, input, consumerPolicies); } } if (hasErrors) { diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index 0a3cd408b3424..055cae1d6011b 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -141,6 +141,11 @@ struct TopoIndexInfo { friend std::ostream& operator<<(std::ostream& out, TopoIndexInfo const& info); }; +// Information about the policies which were derived for a given data processor. +struct DataProcessorPoliciesInfo { + std::string completionPolicyName; +}; + struct OutputObj { InputSpec spec; bool isdangling; @@ -234,6 +239,7 @@ struct WorkflowHelpers { /// For example we should make sure that Lifetime::Timeframe inputs of /// one node is not connected to an Output of Lifetime::Sporadic of another node. 
static void validateEdges(WorkflowSpec const& workflow, + std::vector const& policiesInfos, std::vector const& edges, std::vector const& outputs); }; From 7e0544cb8f55a3436e32a20191ccec163e614cba Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:19:18 +0100 Subject: [PATCH 232/726] DPL: discard timeframe in case there is nothing which needs processing --- Framework/Core/src/CompletionPolicyHelpers.cxx | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 8c4b38ba3355c..8877870be1a34 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -111,6 +111,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl size_t si = 0; bool missingSporadic = false; + bool needsProcessing = false; size_t currentTimeslice = -1; for (auto& input : inputs) { assert(si < specs.size()); @@ -126,6 +127,9 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { currentTimeslice = dph->startTime; } + // If we have at least some data, we can consume, otherwise we can Discard once + // we are sure that no more data will come. + needsProcessing |= (spec.lifetime != Lifetime::Condition); } } // If some sporadic inputs are missing, we wait for them util we are sure they will not come, @@ -136,7 +140,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { return CompletionPolicy::CompletionOp::Retry; } - return CompletionPolicy::CompletionOp::Consume; + // We only consume if we have something which needs processing. + return needsProcessing ? CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; }; return CompletionPolicy{name, matcher, callback}; } From 84a7e8eea3bd7dcb204b7e07e641e6ef50d97fa6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:20:51 +0100 Subject: [PATCH 233/726] DPL: discard timeframe in case there is nothing which needs processing --- Framework/Core/src/CompletionPolicyHelpers.cxx | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 8c4b38ba3355c..b2b2fe7323742 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -111,6 +111,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl size_t si = 0; bool missingSporadic = false; + bool needsProcessing = false; size_t currentTimeslice = -1; for (auto& input : inputs) { assert(si < specs.size()); @@ -126,6 +127,9 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { currentTimeslice = dph->startTime; } + // Prepare for processing if we have at least some data, we can consume, otherwise we can Discard once + // we are sure that no more data will come. 
+ needsProcessing |= (spec.lifetime != Lifetime::Condition); } } // If some sporadic inputs are missing, we wait for them util we are sure they will not come, @@ -136,7 +140,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { return CompletionPolicy::CompletionOp::Retry; } - return CompletionPolicy::CompletionOp::Consume; + // We only consume if we have something which needs processing. + return needsProcessing ? CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; }; return CompletionPolicy{name, matcher, callback}; } From d0d260285468b27ade61c37b4f688e2e2e74f0bd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 14:53:42 +0100 Subject: [PATCH 234/726] DPL: discard timeframe in case there is nothing which needs processing --- Framework/Core/src/CompletionPolicyHelpers.cxx | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 8c4b38ba3355c..64b183d43f2e4 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -111,6 +111,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl size_t si = 0; bool missingSporadic = false; + bool needsProcessing = false; size_t currentTimeslice = -1; for (auto& input : inputs) { assert(si < specs.size()); @@ -127,6 +128,9 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl currentTimeslice = dph->startTime; } } + if (input.header != nullptr && spec.lifetime != Lifetime::Condition) { + needsProcessing = true; + } } // If some sporadic inputs are missing, we wait for them util we are sure they will not come, // i.e. until the oldest possible timeslice is beyond the timeslice of the input. @@ -136,7 +140,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { return CompletionPolicy::CompletionOp::Retry; } - return CompletionPolicy::CompletionOp::Consume; + // We only consume if we have something which needs processing. + return needsProcessing ? CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; }; return CompletionPolicy{name, matcher, callback}; } From bf4962ab343ee141be67a08a50572110b8d91e53 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:20:14 +0100 Subject: [PATCH 235/726] DPL: drop unused o2_signpost_id_make_with_pointer method Probably old remaining from a previous implementation. 
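For reference, pointer-based ids go through the O2_SIGNPOST_ID_FROM_POINTER macro, which forwards to the inline _o2_signpost_id_make_with_pointer that this patch keeps, so nothing depended on the removed helper. A purely illustrative sketch (log and function names are made up):

  O2_DECLARE_DYNAMIC_LOG(sockets);

  void track(void* channel)
  {
    // The id is derived from the pointer, so START/END calls issued from
    // different places for the same object land on the same interval.
    O2_SIGNPOST_ID_FROM_POINTER(sid, sockets, channel);
    O2_SIGNPOST_START(sockets, sid, "socket_state", "waiting for connection");
    O2_SIGNPOST_END(sockets, sid, "socket_state", "closed");
  }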
--- Framework/Foundation/include/Framework/Signpost.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index eca9e26f77469..64d92d67acbf7 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -218,7 +218,6 @@ bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin = false); //_o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log); //_o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); -_o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); void* _o2_log_create(char const* name, int stacktrace); void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); @@ -246,14 +245,6 @@ inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* return uniqueId; } -inline _o2_signpost_index_t o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer) -{ - _o2_signpost_index_t signpost_index; - _o2_lock_free_stack_pop(log->slots, signpost_index, true); - log->ids[signpost_index].id = (int64_t)pointer; - return signpost_index; -} - // Implementation start here. Include this file with O2_SIGNPOST_IMPLEMENTATION defined in one file of your // project. #ifdef O2_SIGNPOST_IMPLEMENTATION From f17429804bd33b45919f44f7213bb5c0b802b697 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:20:14 +0100 Subject: [PATCH 236/726] DPL: rework signposts support The logger based Signposts are now behaving the same on macOS and linux when running normally. The difference between the two implementations will now be that if Instruments enables some OSLog namespace, we stop printing the messages and use the os_signpost API instead. --- .../Foundation/include/Framework/Signpost.h | 191 +++++++++--------- Framework/Foundation/test/test_Signpost.cxx | 2 +- .../Foundation/test/test_SignpostLogger.cxx | 2 +- 3 files changed, 94 insertions(+), 101 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 64d92d67acbf7..d2bdec0ec8246 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -13,6 +13,9 @@ #include #include +#ifdef __APPLE__ +#include +#endif struct o2_log_handle_t { char const* name = nullptr; @@ -22,6 +25,7 @@ struct o2_log_handle_t { // Helper function which replaces engineering types with a printf // compatible format string. +// FIXME: make this consteval when available in C++20 template constexpr auto remove_engineering_type(char const (&src)[N]) { @@ -82,78 +86,37 @@ o2_log_handle_t* o2_walk_logs(bool (*callback)(char const* name, void* log, void } #endif -#if !defined(O2_FORCE_LOGGER_SIGNPOST) && defined(__APPLE__) && (!defined(NDEBUG) || defined(O2_FORCE_SIGNPOSTS)) +#if defined(__APPLE__) #include #include #include -void* _o2_log_create(char const* name, char const* category); -#define O2_DECLARE_DYNAMIC_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." 
#x, OS_LOG_CATEGORY_DYNAMIC_TRACING) -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_DYNAMIC(log) -// This is a no-op on macOS using the os_signpost API because only external instruments can enable/disable dynamic signposts -#define O2_LOG_ENABLE_STACKTRACE(log) -#define O2_DECLARE_LOG(x, category) static os_log_t private_o2_log_##x = (os_log_t)_o2_log_create("ch.cern.aliceo2." #x, #category) -#define O2_LOG_DEBUG(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) -#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) os_signpost_id_t name = os_signpost_id_make_with_pointer(private_o2_log_##log, pointer) -#define O2_SIGNPOST_ID_GENERATE(name, log) os_signpost_id_t name = os_signpost_id_generate(private_o2_log_##log) +#define O2_LOG_DEBUG_MAC(log, ...) os_log_debug(private_o2_log_##log, __VA_ARGS__) // FIXME: use __VA_OPT__ when available in C++20 -#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_START(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log, id, name, format, ##__VA_ARGS__) -#define O2_SIGNPOST_END(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log, id, name, format, ##__VA_ARGS__) - -#ifdef O2_SIGNPOST_IMPLEMENTATION -/// We use a wrapper so that we can keep track of the logs. -void* _o2_log_create(char const* name, char const* category) -{ - // iterate over the list of logs and check if we already have - // one with the same name. - auto findLogByName = [](char const* name, void* log, void* context) -> bool { - char const* currentName = (char const*)context; - if (strcmp(name, currentName) == 0) { - return false; - } - return true; - }; - - o2_log_handle_t* handle = o2_walk_logs(findLogByName, (void*)name); - - // If we found one, return it. - if (handle) { - return handle->log; - } - // Otherwise, create a new one and add it to the end of the list. - os_log_t log = os_log_create(name, category); - o2_log_handle_t* newHandle = new o2_log_handle_t(); - newHandle->log = log; - newHandle->name = strdup(name); - newHandle->next = o2_get_logs_tail().load(); - // Until I manage to replace the log I have in next, keep trying. - // Notice this does not protect against two threads trying to insert - // a log with the same name. I should probably do a sorted insert for that. - while (!o2_get_logs_tail().compare_exchange_weak(newHandle->next, newHandle, - std::memory_order_release, - std::memory_order_relaxed)) { - newHandle->next = o2_get_logs_tail(); - } - - return log; -} +#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) os_signpost_event_emit(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) os_signpost_interval_begin(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) os_signpost_interval_end(private_o2_log_##log->os_log, (uint64_t)id.value, name, format, ##__VA_ARGS__) +#define O2_SIGNPOST_ENABLED_MAC(log) os_signpost_enabled(private_o2_log_##log->os_log) +#else +// These are no-ops on linux. +#define O2_DECLARE_LOG_MAC(x, category) +#define O2_LOG_DEBUG_MAC(log, ...) 
+#define O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_START_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_END_MAC(log, id, name, format, ...) +#define O2_SIGNPOST_ENABLED_MAC(log) false +#endif // __APPLE__ + +// Unless we are on apple we enable checking for signposts only if in debug mode or if we force them. +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +#define O2_LOG_ENABLED(log) private_o2_log_##log->stacktrace +#else +#define O2_LOG_ENABLED(log) false #endif -#elif !defined(NDEBUG) || defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) - -#ifndef O2_LOG_MACRO -#if __has_include("Framework/Logger.h") +#if !defined(O2_LOG_MACRO) && __has_include("Framework/Logger.h") #include "Framework/Logger.h" -// If NDEBUG is not defined, we use the logger to print out the signposts at the debug level. -#if !defined(NDEBUG) -#define O2_LOG_MACRO(...) LOGF(debug, __VA_ARGS__) -#elif defined(O2_FORCE_LOGGER_SIGNPOST) || defined(O2_FORCE_SIGNPOSTS) -// If we force the presence of the logger, we use it to print out the signposts at the detail level, which is not optimized out. #define O2_LOG_MACRO(...) LOGF(info, __VA_ARGS__) -#endif -#else +#elif !defined(O2_LOG_MACRO) // If we do not have the fairlogger, we simply print out the signposts to the console. // This is useful for things like the tests, which this way do not need to depend on the FairLogger. #define O2_LOG_MACRO(...) \ @@ -161,8 +124,9 @@ void* _o2_log_create(char const* name, char const* category) printf(__VA_ARGS__); \ printf("\n"); \ } while (0) -#endif -#endif +#else +#define O2_LOG_MACRO(...) +#endif // O2_LOG_MACRO // This is the linux implementation, it is not as nice as the apple one and simply prints out // the signpost information to the log. @@ -181,7 +145,7 @@ struct _o2_lock_free_stack { // A log is simply an inbox which keeps track of the available id, so that we can print out different signposts // with different indentation levels. // supports up to 1024 paralle signposts before it spinlocks. -typedef int _o2_signpost_index_t; +using _o2_signpost_index_t = int; struct _o2_activity_t { // How much the activity is indented in the output log. @@ -191,10 +155,13 @@ struct _o2_activity_t { struct _o2_signpost_id_t { // The id of the activity. - int64_t id = -1; + int64_t value = -1; }; struct _o2_log_t { +#ifdef __APPLE__ + os_log_t os_log = nullptr; +#endif // A circular buffer of available slots. Each unique interval pulls an id from this buffer. _o2_lock_free_stack slots; // Up to 256 activities can be active at the same time. @@ -211,7 +178,10 @@ struct _o2_log_t { // 0 means the log is disabled. // 1 means only the current signpost is printed. // >1 means the current signpost and n levels of the stacktrace are printed. - std::atomic stacktrace = 1; + std::atomic stacktrace = 0; + + // Default stacktrace level for the log, when enabled. + int defaultStacktrace = 1; }; bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool spin = false); @@ -310,7 +280,7 @@ bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin) } } -void* _o2_log_create(char const* name, int stacktrace) +void* _o2_log_create(char const* name, int defaultStacktrace) { // iterate over the list of logs and check if we already have // one with the same name. @@ -328,7 +298,7 @@ void* _o2_log_create(char const* name, int stacktrace) return handle->log; } // Otherwise, create a new one and add it to the end of the list. 
- _o2_log_t* log = new _o2_log_t(); + auto* log = new _o2_log_t(); // Write the initial 256 ids to the inbox, in reverse, so that the // linear search below is just for an handful of elements. int n = _o2_lock_free_stack::N; @@ -336,9 +306,18 @@ void* _o2_log_create(char const* name, int stacktrace) _o2_signpost_index_t signpost_index{n - 1 - i}; _o2_lock_free_stack_push(log->slots, signpost_index, true); } - log->stacktrace = stacktrace; - o2_log_handle_t* newHandle = new o2_log_handle_t(); + log->defaultStacktrace = defaultStacktrace; + auto* newHandle = new o2_log_handle_t(); newHandle->log = log; +#ifdef __APPLE__ + // On macOS, we use the os_signpost API so that when we are + // using instruments we can see the messages there. + if (defaultStacktrace > 1) { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_STACK_TRACING); + } else { + log->os_log = os_log_create(name, OS_LOG_CATEGORY_DYNAMIC_TRACING); + } +#endif newHandle->name = strdup(name); newHandle->next = o2_get_logs_tail().load(); // Until I manage to replace the log I have in next, keep trying. @@ -357,10 +336,6 @@ void* _o2_log_create(char const* name, int stacktrace) // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) { - // Nothing to be done - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); @@ -368,10 +343,10 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n int leading = 0; // This is the equivalent of exclusive - if (id.id != 0) { + if (id.value != 0) { int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -385,7 +360,7 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n } char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16s*>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -395,22 +370,19 @@ void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* n // If the slot is empty, it will return the id and increment the indentation level. void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...) { - if (log->stacktrace == 0) { - return; - } va_list args; va_start(args, format); // This is a unique slot for this interval. 
_o2_signpost_index_t signpost_index; _o2_lock_free_stack_pop(log->slots, signpost_index, true); // Put the id in the slot, to close things or to attach signposts to a given interval - log->ids[signpost_index].id = id.id; + log->ids[signpost_index].value = id.value; auto* activity = &log->activities[signpost_index]; activity->indentation = log->current_indentation++; activity->name = name; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sS>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); va_end(args); O2_LOG_MACRO("%s", prebuffer); @@ -424,7 +396,7 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons // Find the index of the activity int i = 0; for (i = 0; i < log->ids.size(); ++i) { - if (log->ids[i].id == id.id) { + if (log->ids[i].value == id.value) { break; } } @@ -439,13 +411,13 @@ void _o2_signpost_interval_end_v(_o2_log_t* log, _o2_signpost_id_t id, char cons _o2_activity_t* activity = &log->activities[i]; int leading = activity->indentation * 2; char prebuffer[4096]; - int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.id, name, leading, ' '); + int s = snprintf(prebuffer, 4096, "id%.16" PRIx64 ":%-16sE>%*c", id.value, name, leading, ' '); vsnprintf(prebuffer + s, 4096 - s, format, args); O2_LOG_MACRO("%s", prebuffer); // Clear the slot activity->indentation = -1; activity->name = nullptr; - log->ids[i].id = -1; + log->ids[i].value = -1; // Put back the slot log->current_indentation--; _o2_signpost_index_t signpost_index{i}; @@ -468,29 +440,50 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) } #endif // O2_SIGNPOST_IMPLEMENTATION -/// Dynamic logs need to be enabled via the O2_LOG_ENABLE_DYNAMIC macro. Notice this will only work +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +/// Dynamic logs need to be enabled via the O2_LOG_ENABLE macro. Notice this will only work /// for the logger based logging, since the Apple version needs instruments to enable them. -#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) /// For the moment we do not support logs with a stacktrace. -#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 0) +#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 64) #define O2_DECLARE_LOG(name, category) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." #name, 1) -#define O2_LOG_ENABLE_DYNAMIC(log) _o2_log_set_stacktrace(private_o2_log_##log, 1) -// We print out only the first 64 frames. -#define O2_LOG_ENABLE_STACKTRACE(log) _o2_log_set_stacktrace(private_o2_log_##log, 64) +// When we enable the log, we set the stacktrace to the default value. +#define O2_LOG_ENABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, private_o2_log_##log->defaultStacktrace) +#define O2_LOG_DISABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, 0) // For the moment we simply use LOG DEBUG. 
We should have proper activities so that we can
// turn on and off the printing.
#define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__)
#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer)
#define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log)
-#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__)
-#define O2_SIGNPOST_START(log, id, name, format, ...) _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__)
-#define O2_SIGNPOST_END(log, id, name, format, ...) _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__)
+// In case Instruments is attached, we switch to the Apple signpost API; otherwise, both on
+// mac and on linux we use our own implementation, using the logger. We can use the same ids because
+// they are compatible between the two implementations. We also use remove_engineering_type to remove
+// the engineering types from the format string, so that we can use the same format string for both.
+#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) __extension__({ \
+  if (O2_SIGNPOST_ENABLED_MAC(log)) { \
+    O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \
+  } else if (private_o2_log_##log->stacktrace) { \
+    _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \
+  } \
+})
+#define O2_SIGNPOST_START(log, id, name, format, ...) \
+  if (O2_SIGNPOST_ENABLED_MAC(log)) { \
+    O2_SIGNPOST_START_MAC(log, id, name, format, ##__VA_ARGS__); \
+  } else if (private_o2_log_##log->stacktrace) { \
+    _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \
+  }
+#define O2_SIGNPOST_END(log, id, name, format, ...) \
+  if (O2_SIGNPOST_ENABLED_MAC(log)) { \
+    O2_SIGNPOST_END_MAC(log, id, name, format, ##__VA_ARGS__); \
+  } else if (private_o2_log_##log->stacktrace) { \
+    _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \
+  }
#else // This is the release implementation, it does nothing.
#define O2_DECLARE_DYNAMIC_LOG(x)
#define O2_DECLARE_DYNAMIC_STACKTRACE_LOG(x)
#define O2_DECLARE_LOG(x, category)
-#define O2_LOG_ENABLE_DYNAMIC(log)
-#define O2_LOG_ENABLE_STACKTRACE(log)
+#define O2_LOG_ENABLE(log)
+#define O2_LOG_DISABLE(log)
#define O2_LOG_DEBUG(log, ...)
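// A minimal usage sketch of the signpost API above (illustrative only: the stream name
// "my_stream", the activity name and the format arguments below are hypothetical and
// not part of this header):
//
//   O2_DECLARE_DYNAMIC_LOG(my_stream);        // dynamic streams are off by default
//   ...
//   O2_LOG_ENABLE(my_stream);                 // turn the stream on at runtime
//   O2_SIGNPOST_ID_GENERATE(sid, my_stream);
//   O2_SIGNPOST_START(my_stream, sid, "my_activity", "Processing %{public}s", name);
//   O2_SIGNPOST_EVENT_EMIT(my_stream, sid, "my_activity", "Processed %d items", count);
//   O2_SIGNPOST_END(my_stream, sid, "my_activity", "Done");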
#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) diff --git a/Framework/Foundation/test/test_Signpost.cxx b/Framework/Foundation/test/test_Signpost.cxx index 24b6afaec5c3d..f9b8d4ec0e13a 100644 --- a/Framework/Foundation/test/test_Signpost.cxx +++ b/Framework/Foundation/test/test_Signpost.cxx @@ -43,7 +43,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); diff --git a/Framework/Foundation/test/test_SignpostLogger.cxx b/Framework/Foundation/test/test_SignpostLogger.cxx index 74da35abf7c70..ed917ea7cadf5 100644 --- a/Framework/Foundation/test/test_SignpostLogger.cxx +++ b/Framework/Foundation/test/test_SignpostLogger.cxx @@ -51,7 +51,7 @@ int main(int argc, char** argv) O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); O2_SIGNPOST_END(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will not see, because they are off by default"); - O2_LOG_ENABLE_DYNAMIC(test_SignpostDynamic); + O2_LOG_ENABLE(test_SignpostDynamic); #ifdef __APPLE__ // On Apple there is no way to turn on signposts in the logger, so we do not display this message O2_SIGNPOST_START(test_SignpostDynamic, id, "Test category", "This is dynamic signpost which you will see, because we turned them on"); From 4e66267bd9a19c41cd65437285bab67fd2c2fba7 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:21:04 +0100 Subject: [PATCH 237/726] DPL: add proper tracing for DataProcessingDevice socket callbacks --- Framework/Core/src/DataProcessingDevice.cxx | 40 +++++++++++++-------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index b03904c5b3776..366bfafd7d801 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -35,6 +35,9 @@ #include "Framework/TMessageSerializer.h" #include "Framework/InputRecord.h" #include "Framework/InputSpan.h" +#if defined(__APPLE__) || defined(NDEBUG) +#define O2_SIGNPOST_IMPLEMENTATION +#endif #include "Framework/Signpost.h" #include "Framework/TimingHelpers.h" #include "Framework/SourceInfoHeader.h" @@ -80,6 +83,8 @@ #include #include +O2_DECLARE_DYNAMIC_LOG(device); + using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; using DataHeader = o2::header::DataHeader; @@ -274,21 +279,22 @@ struct PollerContext { void on_socket_polled(uv_poll_t* poller, int status, int events) { auto* context = (PollerContext*)poller->data; + assert(context); + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); context->state->loopReason |= DeviceState::DATA_SOCKET_POLLED; switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); - LOG(debug) << "socket polled UV_READABLE: " 
<< context->name;
+      O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
      context->state->loopReason |= DeviceState::DATA_INCOMING;
    } break;
    case UV_WRITABLE: {
-      ZoneScopedN("socket writeable");
+      O2_SIGNPOST_END(device, sid, "socket_state", "Socket connected for channel %{public}s", context->name);
      if (context->read) {
-        LOG(debug) << "socket polled UV_CONNECT" << context->name;
+        O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for read in context %{public}s", context->name);
        uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
        context->state->loopReason |= DeviceState::DATA_CONNECTED;
      } else {
-        LOG(debug) << "socket polled UV_WRITABLE" << context->name;
+        O2_SIGNPOST_START(device, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name);
        context->state->loopReason |= DeviceState::DATA_OUTGOING;
        // If the socket is writable, fairmq will handle the rest, so we can stop polling and
        // just wait for the disconnect.
@@ -297,12 +303,10 @@ void on_socket_polled(uv_poll_t* poller, int status, int events)
      context->pollerState = PollerContext::PollerState::Connected;
    } break;
    case UV_DISCONNECT: {
-      ZoneScopedN("socket disconnect");
-      LOG(debug) << "socket polled UV_DISCONNECT";
+      O2_SIGNPOST_END(device, sid, "socket_state", "Socket disconnected in context %{public}s", context->name);
    } break;
    case UV_PRIORITIZED: {
-      ZoneScopedN("socket prioritized");
-      LOG(debug) << "socket polled UV_PRIORITIZED";
+      O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name);
    } break;
  }
  // We do nothing, all the logic for now stays in DataProcessingDevice::doRun()
@@ -873,7 +877,9 @@ void DataProcessingDevice::startPollers()
  auto& deviceContext = ref.get();
  auto& state = ref.get();

-  for (auto& poller : state.activeInputPollers) {
+  for (auto* poller : state.activeInputPollers) {
+    O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
+    O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection.");
    uv_poll_start(poller, UV_WRITABLE, &on_socket_polled);
    ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
  }
@@ -881,7 +887,9 @@
    uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
    ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
  }
-  for (auto& poller : state.activeOutputPollers) {
+  for (auto* poller : state.activeOutputPollers) {
+    O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
+    O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection.");
    uv_poll_start(poller, UV_WRITABLE, &on_socket_polled);
    ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
  }
@@ -897,17 +905,21 @@ void DataProcessingDevice::stopPollers()
  auto& deviceContext = ref.get();
  auto& state = ref.get();
  LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size());
-  for (auto& poller : state.activeInputPollers) {
+  for (auto* poller : state.activeInputPollers) {
+    O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
+    O2_SIGNPOST_END(device, sid, "socket_state", "Input socket closed.");
    uv_poll_stop(poller);
    ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
  }
  LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size());
-  for (auto& poller : state.activeOutOfBandPollers) {
+ 
for (auto* poller : state.activeOutOfBandPollers) { uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } LOGP(detail, "Stopping {} output pollers", state.activeOutOfBandPollers.size()); - for (auto& poller : state.activeOutputPollers) { + for (auto* poller : state.activeOutputPollers) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); + O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed."); uv_poll_stop(poller); ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped; } From b69354a7f54e476b5c87bee8a7dcbe3f8e68c11e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:20:14 +0100 Subject: [PATCH 238/726] DPL use Signposts to debug consumeWhenAll --- Framework/Core/src/CompletionPolicyHelpers.cxx | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 64b183d43f2e4..585046bc89763 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -18,10 +18,13 @@ #include "Framework/TimesliceIndex.h" #include "Framework/TimingInfo.h" #include "DecongestionService.h" +#include "Framework/Signpost.h" #include #include +O2_DECLARE_DYNAMIC_LOG(completion); + namespace o2::framework { @@ -108,6 +111,8 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl { auto callback = [](InputSpan const& inputs, std::vector const& specs, ServiceRegistryRef& ref) -> CompletionPolicy::CompletionOp { assert(inputs.size() == specs.size()); + O2_SIGNPOST_ID_GENERATE(sid, completion); + O2_SIGNPOST_START(completion, sid, "consumeWhenAll", "Completion policy invoked"); size_t si = 0; bool missingSporadic = false; @@ -117,15 +122,18 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl assert(si < specs.size()); auto& spec = specs[si++]; if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s due to missing input %lu", "Wait", si); return CompletionPolicy::CompletionOp::Wait; } if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "Missing sporadic found for route index %lu", si); missingSporadic = true; } if (input.header != nullptr && currentTimeslice == -1) { auto const* dph = framework::DataRefUtils::getHeader(input); if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { currentTimeslice = dph->startTime; + O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "currentTimeslice %lu from route index %lu", currentTimeslice, si); } } if (input.header != nullptr && spec.lifetime != Lifetime::Condition) { @@ -138,9 +146,10 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu > oldestPossibleTimeslice %lu", "Retry", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Retry; } - // We only consume if we have something which needs processing. 
+ O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu <= oldestPossibleTimeslice %lu", needsProcessing ? "Consume" : "Discard", currentTimeslice, oldestPossibleTimeslice); return needsProcessing ? CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; }; return CompletionPolicy{name, matcher, callback}; From ab530c0085516f3640ae4b2ca7ec349b8f86d2b9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:21:05 +0100 Subject: [PATCH 239/726] Ability to turn on / off the logger from the GUI. --- .../Core/include/Framework/DeviceControl.h | 2 + .../Core/include/Framework/DeviceState.h | 9 +++++ Framework/Core/src/WSDriverClient.cxx | 40 +++++++++++++++++++ .../src/FrameworkGUIDeviceInspector.cxx | 11 +++++ .../test/test_DetectMissingTimeframe.cxx | 1 + 5 files changed, 63 insertions(+) diff --git a/Framework/Core/include/Framework/DeviceControl.h b/Framework/Core/include/Framework/DeviceControl.h index 589de3937ee55..ce946e8e77fbf 100644 --- a/Framework/Core/include/Framework/DeviceControl.h +++ b/Framework/Core/include/Framework/DeviceControl.h @@ -48,6 +48,8 @@ struct DeviceControl { DeviceController* controller = nullptr; /// What kind of events should run with the TRACE level int tracingFlags = 0; + /// What kind of log streams should be enabled + int logStreams = 0; /// An incremental number to identify the device state int requestedState = 0; }; diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 3be0302fd4113..4fa72a84cad71 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -57,6 +57,13 @@ struct DeviceState { DATA_CONNECTED = 1 << 19, // Data channel connected }; + enum LogStreams : int { + NO_LOG = 0, + DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. + COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. + MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + }; + std::vector inputChannelInfos; StreamingState streaming = StreamingState::Streaming; bool quitRequested = false; @@ -93,6 +100,8 @@ struct DeviceState { int loopReason = 0; /// Bitmask of LoopReason to trace int tracingFlags = 0; + /// Bitmask of log streams which are available + int logStreams = 0; /// Stack of the severity, so that we can display only /// the bits we are interested in. 
std::vector severityStack; diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index d4ed77b9a004e..ac2e3db41fcef 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -16,10 +16,15 @@ #include "Framework/DeviceSpec.h" #include "DriverClientContext.h" #include "DPLWebSocket.h" +#include "Framework/Signpost.h" #include #include #include +O2_DECLARE_DYNAMIC_LOG(device); +O2_DECLARE_DYNAMIC_LOG(completion); +O2_DECLARE_DYNAMIC_LOG(monitoring_service); + namespace o2::framework { @@ -152,6 +157,41 @@ void on_connect(uv_connect_t* connection, int status) state.tracingFlags = tracingFlags; }); + client->observe("/log-streams", [ref = context->ref](std::string_view cmd) { + auto& state = ref.get(); + static constexpr int prefixSize = std::string_view{"/log-streams "}.size(); + if (prefixSize > cmd.size()) { + LOG(error) << "Malformed log-streams request"; + return; + } + cmd.remove_prefix(prefixSize); + int logStreams = 0; + + auto error = std::from_chars(cmd.data(), cmd.data() + cmd.size(), logStreams); + if (error.ec != std::errc()) { + LOG(error) << "Malformed log-streams mask"; + return; + } + LOGP(info, "Logstreams flags set to {}", logStreams); + state.logStreams = logStreams; + if ((state.logStreams & DeviceState::LogStreams::DEVICE_LOG) != 0) { + O2_LOG_ENABLE(device); + } else { + O2_LOG_DISABLE(device); + } + if ((state.logStreams & DeviceState::LogStreams::COMPLETION_LOG) != 0) { + O2_LOG_ENABLE(completion); + } else { + O2_LOG_DISABLE(completion); + } + + if ((state.logStreams & DeviceState::LogStreams::MONITORING_SERVICE_LOG) != 0) { + O2_LOG_ENABLE(monitoring_service); + } else { + O2_LOG_DISABLE(monitoring_service); + } + }); + // Client will be filled in the line after. I can probably have a single // client per device. 
auto dplClient = std::make_unique();
diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx
index a82753eb5af1f..fe93ca6d0f07f 100644
--- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx
+++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx
@@ -400,6 +400,17 @@ void displayDeviceInspector(DeviceSpec const& spec,
    }
  }

+  bool logsChanged = false;
+  if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) {
+    logsChanged |= ImGui::CheckboxFlags("Device", &control.logStreams, DeviceState::LogStreams::DEVICE_LOG);
+    logsChanged |= ImGui::CheckboxFlags("Completion", &control.logStreams, DeviceState::LogStreams::COMPLETION_LOG);
+    logsChanged |= ImGui::CheckboxFlags("Monitoring", &control.logStreams, DeviceState::LogStreams::MONITORING_SERVICE_LOG);
+    if (logsChanged && control.controller) {
+      std::string cmd = fmt::format("/log-streams {}", control.logStreams);
+      control.controller->write(cmd.c_str(), cmd.size());
+    }
+  }
+
  bool flagsChanged = false;
  if (ImGui::CollapsingHeader("Event loop tracing", ImGuiTreeNodeFlags_DefaultOpen)) {
    flagsChanged |= ImGui::CheckboxFlags("METRICS_MUST_FLUSH", &control.tracingFlags, DeviceState::LoopReason::METRICS_MUST_FLUSH);
diff --git a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx
index 3374cd9ad6cf2..7ef1370f040b4 100644
--- a/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx
+++ b/Framework/TestWorkflows/test/test_DetectMissingTimeframe.cxx
@@ -43,6 +43,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& specs)
        if (i++ % 2 == 0) {
          outputs.make(OutputRef{"a2"}, 1);
        }
+        sleep(1);
      })},
  };
  DataProcessorSpec d{
From f38b25347476b5b3ba4581c0e98c6e8a4b8cf4a8 Mon Sep 17 00:00:00 2001
From: Giulio Eulisse <10544+ktf@users.noreply.github.com>
Date: Tue, 16 Jan 2024 16:20:14 +0100
Subject: [PATCH 240/726] DPL GUI: add GUI to enable / disable driver signposts

---
 Framework/GUISupport/src/FrameworkGUIDebugger.cxx | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx
index 35e0bc563a22e..005f7082d77b4 100644
--- a/Framework/GUISupport/src/FrameworkGUIDebugger.cxx
+++ b/Framework/GUISupport/src/FrameworkGUIDebugger.cxx
@@ -23,6 +23,7 @@
#include "FrameworkGUIDataRelayerUsage.h"
#include "PaletteHelpers.h"
#include "FrameworkGUIState.h"
+#include "Framework/Signpost.h"
#include
#include
@@ -34,6 +35,7 @@
#include
#include
+O2_DECLARE_DYNAMIC_LOG(driver);
// Make sure we can use aggregated initialisers.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -506,7 +508,7 @@ void displayDeviceMetrics(const char* label,
    case MetricsDisplayStyle::Lines: {
      auto xAxisFlags = ImPlotAxisFlags_None;
      auto yAxisFlags = ImPlotAxisFlags_LockMin;
-      //ImPlot::FitNextPlotAxes(true, true, true, true);
+      // ImPlot::FitNextPlotAxes(true, true, true, true);
      if (ImPlot::BeginPlot("##Some plot", {-1, -1}, axisFlags)) {
        ImPlot::SetupAxes("time", "value", xAxisFlags, yAxisFlags);
        ImPlot::SetupAxisFormat(ImAxis_Y1, formatSI, nullptr);
@@ -1093,6 +1095,17 @@ void displayDriverInfo(DriverInfo const& driverInfo, DriverControl& driverContro
  }
#endif

+  static bool selectedSignpost = false;
+  if (ImGui::CollapsingHeader("Signposts", ImGuiTreeNodeFlags_DefaultOpen)) {
+    if (ImGui::Checkbox("Driver", &selectedSignpost)) {
+      if (selectedSignpost) {
+        O2_LOG_ENABLE(driver);
+      } else {
+        O2_LOG_DISABLE(driver);
+      }
+    }
+  }
+
  for (size_t i = 0; i < driverInfo.states.size(); ++i) {
    ImGui::Text("#%lu: %s", i, DriverInfoHelper::stateToString(driverInfo.states[i]));
  }
From e1c1265ad943ddeab401fefbc1a0b85c9e06ae22 Mon Sep 17 00:00:00 2001
From: Giulio Eulisse <10544+ktf@users.noreply.github.com>
Date: Tue, 16 Jan 2024 16:18:30 +0100
Subject: [PATCH 241/726] DPL: add ability to disable inputs programmatically

Disabled inputs will not result in an actual route for the data; however,
they will still be stored in the configuration, so that analysis workflows
will not need to have the configuration available at every step.

---
 Framework/Core/include/Framework/InputSpec.h | 3 +++
 Framework/Core/include/Framework/OutputSpec.h | 4 ++++
 Framework/Core/src/DeviceSpecHelpers.cxx | 15 ++++++++++--
 Framework/Core/src/WorkflowHelpers.cxx | 12 +++++++---
 .../Core/src/WorkflowSerializationHelpers.cxx | 24 ++++++++++++++++++-
 5 files changed, 52 insertions(+), 6 deletions(-)

diff --git a/Framework/Core/include/Framework/InputSpec.h b/Framework/Core/include/Framework/InputSpec.h
index 9be95e6853da2..7858aacb80664 100644
--- a/Framework/Core/include/Framework/InputSpec.h
+++ b/Framework/Core/include/Framework/InputSpec.h
@@ -74,6 +74,9 @@ struct InputSpec {
  /// A set of configurables which can be used to customise the InputSpec.
  std::vector metadata;
+  /// Whether or not the input is to be considered enabled.
+  /// Useful to programmatically disable inputs e.g. for the ProcessorOptions.
+  bool enabled = true;

  friend std::ostream& operator<<(std::ostream& stream, InputSpec const& arg);

  bool operator==(InputSpec const& that) const;
diff --git a/Framework/Core/include/Framework/OutputSpec.h b/Framework/Core/include/Framework/OutputSpec.h
index e92792faa6b14..51527bce4e339 100644
--- a/Framework/Core/include/Framework/OutputSpec.h
+++ b/Framework/Core/include/Framework/OutputSpec.h
@@ -86,6 +86,10 @@ struct OutputSpec {
  /// A set of configurables which can be used to customise the InputSpec.
  std::vector metadata;
+  /// Whether or not this output is enabled. This is useful to decide programmatically
+  /// whether or not to produce a given output.
+ bool enabled = true; + friend std::ostream& operator<<(std::ostream& stream, OutputSpec const& arg); }; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..2b846c91b46aa 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -679,19 +679,27 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(policyPtr != nullptr); if (edge.isForward == false) { + auto &matcher = outputsMatchers[edge.outputGlobalIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } OutputRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], + matcher, channel.name, policyPtr, }; device.outputs.emplace_back(route); } else { + auto &matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } ForwardRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], + matcher, channel.name}; device.forwards.emplace_back(route); } @@ -915,6 +923,9 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; auto const& sourceChannel = consumerDevice.inputChannels[ci].name; + if (inputSpec.enabled == false) { + throw runtime_error_f("Input %s is disabled but it was still used in topology", DataSpecUtils::describe(inputSpec).data()); + } InputRoute route{ inputSpec, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..a4e1bd82f2ad8 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -841,11 +841,13 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, for (size_t wi = 0; wi < workflow.size(); ++wi) { auto& producer = workflow[wi]; - for (size_t oi = 0; oi < producer.outputs.size(); ++oi) { - auto& out = producer.outputs[oi]; + for (auto& output : producer.outputs) { + if (output.enabled == false) { + continue; + } auto uniqueOutputId = outputs.size(); availableOutputsInfo.emplace_back(LogicalOutputInfo{wi, uniqueOutputId, false}); - outputs.push_back(out); + outputs.push_back(output); } } }; @@ -879,6 +881,10 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, std::vector matches(constOutputs.size()); for (size_t consumer = 0; consumer < workflow.size(); ++consumer) { for (size_t input = 0; input < workflow[consumer].inputs.size(); ++input) { + // Skip disabled inputs. 
+ if (workflow[consumer].inputs[input].enabled == false) { + continue; + } forwards.clear(); for (size_t i = 0; i < constOutputs.size(); i++) { matches[i] = DataSpecUtils::match(workflow[consumer].inputs[input], constOutputs[i]); diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index ac182a27a70c5..499d22348e1ba 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -58,6 +58,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_INPUT_ORIGIN, IN_INPUT_DESCRIPTION, IN_INPUT_SUBSPEC, + IN_INPUT_ENABLED, IN_INPUT_ORIGIN_REF, IN_INPUT_DESCRIPTION_REF, IN_INPUT_SUBSPEC_REF, @@ -72,6 +73,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_OUTPUT_BINDING, IN_OUTPUT_ORIGIN, IN_OUTPUT_DESCRIPTION, + IN_OUTPUT_ENABLED, IN_OUTPUT_SUBSPEC, IN_OUTPUT_LIFETIME, IN_OUTPUT_OPTIONS, @@ -164,6 +166,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_INPUT_SUBSPEC: s << "IN_INPUT_SUBSPEC"; break; + case State::IN_INPUT_ENABLED: + s << "IN_INPUT_ENABLED"; + break; case State::IN_INPUT_ORIGIN_REF: s << "IN_INPUT_ORIGIN_REF"; break; @@ -215,6 +220,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_OUTPUT_OPTIONS: s << "IN_OUTPUT_OPTIONS"; break; + case WorkflowImporter::State::IN_OUTPUT_ENABLED: + s << "IN_OUTPUT_ENABLED"; + break; case State::IN_OPTION: s << "IN_OPTION"; break; @@ -367,9 +375,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().inputs.push_back(InputSpec({binding}, std::move(*matcher), lifetime, inputOptions)); } + dataProcessors.back().inputs.back().enabled = enabled; inputMatcherNodes.clear(); inputOptions.clear(); - } else if (in(State::IN_INPUT_MATCHER) && inputMatcherNodes.size() > 1) { data_matcher::Node child = std::move(inputMatcherNodes.back()); inputMatcherNodes.pop_back(); @@ -430,6 +438,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().outputs.push_back(OutputSpec({binding}, {origin, description}, lifetime)); } + dataProcessors.back().outputs.back().enabled = enabled; outputHasSubSpec = false; } else if (in(State::IN_OPTION)) { std::unique_ptr opt{nullptr}; @@ -599,6 +608,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_INPUT_DESCRIPTION_REF); } else if (in(State::IN_INPUT) && strncmp(str, "subspecRef", length) == 0) { push(State::IN_INPUT_SUBSPEC_REF); + } else if (in(State::IN_INPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_INPUT_ENABLED); } else if (in(State::IN_INPUT) && strncmp(str, "matcher", length) == 0) { // the outermost matcher is starting here // we create a placeholder which is being updated later @@ -731,6 +742,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, metadata.back().executable = s; } else if (in(State::IN_INPUT_BINDING)) { binding = s; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (s == "true"); } else if (in(State::IN_INPUT_ORIGIN)) { origin.runtimeInit(s.c_str(), std::min(s.size(), 4UL)); std::string v(s.c_str(), std::min(s.size(), 4UL)); @@ -838,6 +851,10 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.back().inputTimeSliceId = i; } else if (in(State::IN_DATAPROCESSOR_MAX_TIMESLICES)) { dataProcessors.back().maxInputTimeslices = i; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (i == 1); + } else if 
(in(State::IN_OUTPUT_ENABLED)) { + enabled = (i == 1); } pop(); return true; @@ -908,6 +925,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, header::DataDescription description; size_t subspec; size_t ref; + bool enabled; Lifetime lifetime; std::string metadatumKey; std::string metadatumValue; @@ -1096,6 +1114,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)input.lifetime); + w.Key("enabled"); + w.Uint((int)input.enabled); if (input.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); @@ -1145,6 +1165,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)output.lifetime); + w.Key("enabled"); + w.Uint((int)output.enabled); if (output.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); From ab2eb7097b6d88d7471cd989d396c25c53b44f95 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:24:10 +0100 Subject: [PATCH 242/726] DPL: add ability to disable inputs programmatically Disabled inputs will not result in an actual route for the data, however it will be stored in the configuration, so that analysis workflows will not need to have the configuration available at every step. --- Framework/Core/include/Framework/InputSpec.h | 3 +++ Framework/Core/include/Framework/OutputSpec.h | 4 ++++ Framework/Core/src/DeviceSpecHelpers.cxx | 15 ++++++++++-- Framework/Core/src/WorkflowHelpers.cxx | 12 +++++++--- .../Core/src/WorkflowSerializationHelpers.cxx | 24 ++++++++++++++++++- 5 files changed, 52 insertions(+), 6 deletions(-) diff --git a/Framework/Core/include/Framework/InputSpec.h b/Framework/Core/include/Framework/InputSpec.h index 9be95e6853da2..7858aacb80664 100644 --- a/Framework/Core/include/Framework/InputSpec.h +++ b/Framework/Core/include/Framework/InputSpec.h @@ -74,6 +74,9 @@ struct InputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not the input is to be considered enabled. + /// Useful to programmatically disable inputs e.g. for the ProcessorOptions. + bool enabled = true; friend std::ostream& operator<<(std::ostream& stream, InputSpec const& arg); bool operator==(InputSpec const& that) const; diff --git a/Framework/Core/include/Framework/OutputSpec.h b/Framework/Core/include/Framework/OutputSpec.h index e92792faa6b14..51527bce4e339 100644 --- a/Framework/Core/include/Framework/OutputSpec.h +++ b/Framework/Core/include/Framework/OutputSpec.h @@ -86,6 +86,10 @@ struct OutputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not this output is enabled. This is useful to decide programmatically + /// wether or not to produce a given output. 
+ bool enabled = true; + friend std::ostream& operator<<(std::ostream& stream, OutputSpec const& arg); }; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..bedcda35873f9 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -679,19 +679,27 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(policyPtr != nullptr); if (edge.isForward == false) { + auto& matcher = outputsMatchers[edge.outputGlobalIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } OutputRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], + matcher, channel.name, policyPtr, }; device.outputs.emplace_back(route); } else { + auto& matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } ForwardRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], + matcher, channel.name}; device.forwards.emplace_back(route); } @@ -915,6 +923,9 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; auto const& sourceChannel = consumerDevice.inputChannels[ci].name; + if (inputSpec.enabled == false) { + throw runtime_error_f("Input %s is disabled but it was still used in topology", DataSpecUtils::describe(inputSpec).data()); + } InputRoute route{ inputSpec, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..a4e1bd82f2ad8 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -841,11 +841,13 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, for (size_t wi = 0; wi < workflow.size(); ++wi) { auto& producer = workflow[wi]; - for (size_t oi = 0; oi < producer.outputs.size(); ++oi) { - auto& out = producer.outputs[oi]; + for (auto& output : producer.outputs) { + if (output.enabled == false) { + continue; + } auto uniqueOutputId = outputs.size(); availableOutputsInfo.emplace_back(LogicalOutputInfo{wi, uniqueOutputId, false}); - outputs.push_back(out); + outputs.push_back(output); } } }; @@ -879,6 +881,10 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, std::vector matches(constOutputs.size()); for (size_t consumer = 0; consumer < workflow.size(); ++consumer) { for (size_t input = 0; input < workflow[consumer].inputs.size(); ++input) { + // Skip disabled inputs. 
+ if (workflow[consumer].inputs[input].enabled == false) { + continue; + } forwards.clear(); for (size_t i = 0; i < constOutputs.size(); i++) { matches[i] = DataSpecUtils::match(workflow[consumer].inputs[input], constOutputs[i]); diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index ac182a27a70c5..499d22348e1ba 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -58,6 +58,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_INPUT_ORIGIN, IN_INPUT_DESCRIPTION, IN_INPUT_SUBSPEC, + IN_INPUT_ENABLED, IN_INPUT_ORIGIN_REF, IN_INPUT_DESCRIPTION_REF, IN_INPUT_SUBSPEC_REF, @@ -72,6 +73,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_OUTPUT_BINDING, IN_OUTPUT_ORIGIN, IN_OUTPUT_DESCRIPTION, + IN_OUTPUT_ENABLED, IN_OUTPUT_SUBSPEC, IN_OUTPUT_LIFETIME, IN_OUTPUT_OPTIONS, @@ -164,6 +166,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_INPUT_SUBSPEC: s << "IN_INPUT_SUBSPEC"; break; + case State::IN_INPUT_ENABLED: + s << "IN_INPUT_ENABLED"; + break; case State::IN_INPUT_ORIGIN_REF: s << "IN_INPUT_ORIGIN_REF"; break; @@ -215,6 +220,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_OUTPUT_OPTIONS: s << "IN_OUTPUT_OPTIONS"; break; + case WorkflowImporter::State::IN_OUTPUT_ENABLED: + s << "IN_OUTPUT_ENABLED"; + break; case State::IN_OPTION: s << "IN_OPTION"; break; @@ -367,9 +375,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().inputs.push_back(InputSpec({binding}, std::move(*matcher), lifetime, inputOptions)); } + dataProcessors.back().inputs.back().enabled = enabled; inputMatcherNodes.clear(); inputOptions.clear(); - } else if (in(State::IN_INPUT_MATCHER) && inputMatcherNodes.size() > 1) { data_matcher::Node child = std::move(inputMatcherNodes.back()); inputMatcherNodes.pop_back(); @@ -430,6 +438,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().outputs.push_back(OutputSpec({binding}, {origin, description}, lifetime)); } + dataProcessors.back().outputs.back().enabled = enabled; outputHasSubSpec = false; } else if (in(State::IN_OPTION)) { std::unique_ptr opt{nullptr}; @@ -599,6 +608,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_INPUT_DESCRIPTION_REF); } else if (in(State::IN_INPUT) && strncmp(str, "subspecRef", length) == 0) { push(State::IN_INPUT_SUBSPEC_REF); + } else if (in(State::IN_INPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_INPUT_ENABLED); } else if (in(State::IN_INPUT) && strncmp(str, "matcher", length) == 0) { // the outermost matcher is starting here // we create a placeholder which is being updated later @@ -731,6 +742,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, metadata.back().executable = s; } else if (in(State::IN_INPUT_BINDING)) { binding = s; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (s == "true"); } else if (in(State::IN_INPUT_ORIGIN)) { origin.runtimeInit(s.c_str(), std::min(s.size(), 4UL)); std::string v(s.c_str(), std::min(s.size(), 4UL)); @@ -838,6 +851,10 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.back().inputTimeSliceId = i; } else if (in(State::IN_DATAPROCESSOR_MAX_TIMESLICES)) { dataProcessors.back().maxInputTimeslices = i; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (i == 1); + } else if 
(in(State::IN_OUTPUT_ENABLED)) { + enabled = (i == 1); } pop(); return true; @@ -908,6 +925,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, header::DataDescription description; size_t subspec; size_t ref; + bool enabled; Lifetime lifetime; std::string metadatumKey; std::string metadatumValue; @@ -1096,6 +1114,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)input.lifetime); + w.Key("enabled"); + w.Uint((int)input.enabled); if (input.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); @@ -1145,6 +1165,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)output.lifetime); + w.Key("enabled"); + w.Uint((int)output.enabled); if (output.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); From 536881a3c9b82b4c3c1d7d46c8faa25bfb8a7fc7 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 16 Jan 2024 16:32:22 +0100 Subject: [PATCH 243/726] DPL: add ability to disable inputs programmatically Disabled inputs will not result in an actual route for the data, however it will be stored in the configuration, so that analysis workflows will not need to have the configuration available at every step. --- Framework/Core/include/Framework/InputSpec.h | 3 ++ Framework/Core/include/Framework/OutputSpec.h | 4 +++ Framework/Core/src/DeviceSpecHelpers.cxx | 15 ++++++++-- Framework/Core/src/WorkflowHelpers.cxx | 12 ++++++-- .../Core/src/WorkflowSerializationHelpers.cxx | 30 ++++++++++++++++++- 5 files changed, 58 insertions(+), 6 deletions(-) diff --git a/Framework/Core/include/Framework/InputSpec.h b/Framework/Core/include/Framework/InputSpec.h index 9be95e6853da2..7858aacb80664 100644 --- a/Framework/Core/include/Framework/InputSpec.h +++ b/Framework/Core/include/Framework/InputSpec.h @@ -74,6 +74,9 @@ struct InputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not the input is to be considered enabled. + /// Useful to programmatically disable inputs e.g. for the ProcessorOptions. + bool enabled = true; friend std::ostream& operator<<(std::ostream& stream, InputSpec const& arg); bool operator==(InputSpec const& that) const; diff --git a/Framework/Core/include/Framework/OutputSpec.h b/Framework/Core/include/Framework/OutputSpec.h index e92792faa6b14..51527bce4e339 100644 --- a/Framework/Core/include/Framework/OutputSpec.h +++ b/Framework/Core/include/Framework/OutputSpec.h @@ -86,6 +86,10 @@ struct OutputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not this output is enabled. This is useful to decide programmatically + /// wether or not to produce a given output. 
+ bool enabled = true; + friend std::ostream& operator<<(std::ostream& stream, OutputSpec const& arg); }; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..bedcda35873f9 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -679,19 +679,27 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(policyPtr != nullptr); if (edge.isForward == false) { + auto& matcher = outputsMatchers[edge.outputGlobalIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } OutputRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], + matcher, channel.name, policyPtr, }; device.outputs.emplace_back(route); } else { + auto& matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } ForwardRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], + matcher, channel.name}; device.forwards.emplace_back(route); } @@ -915,6 +923,9 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; auto const& sourceChannel = consumerDevice.inputChannels[ci].name; + if (inputSpec.enabled == false) { + throw runtime_error_f("Input %s is disabled but it was still used in topology", DataSpecUtils::describe(inputSpec).data()); + } InputRoute route{ inputSpec, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..a4e1bd82f2ad8 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -841,11 +841,13 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, for (size_t wi = 0; wi < workflow.size(); ++wi) { auto& producer = workflow[wi]; - for (size_t oi = 0; oi < producer.outputs.size(); ++oi) { - auto& out = producer.outputs[oi]; + for (auto& output : producer.outputs) { + if (output.enabled == false) { + continue; + } auto uniqueOutputId = outputs.size(); availableOutputsInfo.emplace_back(LogicalOutputInfo{wi, uniqueOutputId, false}); - outputs.push_back(out); + outputs.push_back(output); } } }; @@ -879,6 +881,10 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, std::vector matches(constOutputs.size()); for (size_t consumer = 0; consumer < workflow.size(); ++consumer) { for (size_t input = 0; input < workflow[consumer].inputs.size(); ++input) { + // Skip disabled inputs. 
+ if (workflow[consumer].inputs[input].enabled == false) { + continue; + } forwards.clear(); for (size_t i = 0; i < constOutputs.size(); i++) { matches[i] = DataSpecUtils::match(workflow[consumer].inputs[input], constOutputs[i]); diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index ac182a27a70c5..dc0eb3243fd03 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -58,6 +58,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_INPUT_ORIGIN, IN_INPUT_DESCRIPTION, IN_INPUT_SUBSPEC, + IN_INPUT_ENABLED, IN_INPUT_ORIGIN_REF, IN_INPUT_DESCRIPTION_REF, IN_INPUT_SUBSPEC_REF, @@ -72,6 +73,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_OUTPUT_BINDING, IN_OUTPUT_ORIGIN, IN_OUTPUT_DESCRIPTION, + IN_OUTPUT_ENABLED, IN_OUTPUT_SUBSPEC, IN_OUTPUT_LIFETIME, IN_OUTPUT_OPTIONS, @@ -164,6 +166,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_INPUT_SUBSPEC: s << "IN_INPUT_SUBSPEC"; break; + case State::IN_INPUT_ENABLED: + s << "IN_INPUT_ENABLED"; + break; case State::IN_INPUT_ORIGIN_REF: s << "IN_INPUT_ORIGIN_REF"; break; @@ -215,6 +220,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_OUTPUT_OPTIONS: s << "IN_OUTPUT_OPTIONS"; break; + case WorkflowImporter::State::IN_OUTPUT_ENABLED: + s << "IN_OUTPUT_ENABLED"; + break; case State::IN_OPTION: s << "IN_OPTION"; break; @@ -299,6 +307,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.push_back(DataProcessorSpec{}); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; inputMatcherNodes.clear(); } else if (in(State::IN_INPUT_MATCHER)) { // start a new embedded matcher @@ -310,6 +319,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, // will be merged into the parent matcher } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -367,9 +377,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().inputs.push_back(InputSpec({binding}, std::move(*matcher), lifetime, inputOptions)); } + dataProcessors.back().inputs.back().enabled = enabled; inputMatcherNodes.clear(); inputOptions.clear(); - } else if (in(State::IN_INPUT_MATCHER) && inputMatcherNodes.size() > 1) { data_matcher::Node child = std::move(inputMatcherNodes.back()); inputMatcherNodes.pop_back(); @@ -430,6 +440,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().outputs.push_back(OutputSpec({binding}, {origin, description}, lifetime)); } + dataProcessors.back().outputs.back().enabled = enabled; outputHasSubSpec = false; } else if (in(State::IN_OPTION)) { std::unique_ptr opt{nullptr}; @@ -542,12 +553,14 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_DATAPROCESSORS); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; } else if (in(State::IN_INPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -599,6 +612,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_INPUT_DESCRIPTION_REF); } else if 
(in(State::IN_INPUT) && strncmp(str, "subspecRef", length) == 0) { push(State::IN_INPUT_SUBSPEC_REF); + } else if (in(State::IN_INPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_INPUT_ENABLED); } else if (in(State::IN_INPUT) && strncmp(str, "matcher", length) == 0) { // the outermost matcher is starting here // we create a placeholder which is being updated later @@ -731,6 +746,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, metadata.back().executable = s; } else if (in(State::IN_INPUT_BINDING)) { binding = s; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (s == "true"); } else if (in(State::IN_INPUT_ORIGIN)) { origin.runtimeInit(s.c_str(), std::min(s.size(), 4UL)); std::string v(s.c_str(), std::min(s.size(), 4UL)); @@ -838,6 +855,10 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.back().inputTimeSliceId = i; } else if (in(State::IN_DATAPROCESSOR_MAX_TIMESLICES)) { dataProcessors.back().maxInputTimeslices = i; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (i == 1); + } else if (in(State::IN_OUTPUT_ENABLED)) { + enabled = (i == 1); } pop(); return true; @@ -908,6 +929,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, header::DataDescription description; size_t subspec; size_t ref; + // Keep track of the enabled state of the input/output + // Unless specified, inputs are enabled by default. + bool enabled = true; Lifetime lifetime; std::string metadatumKey; std::string metadatumValue; @@ -1096,6 +1120,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)input.lifetime); + w.Key("enabled"); + w.Uint((int)input.enabled); if (input.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); @@ -1145,6 +1171,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)output.lifetime); + w.Key("enabled"); + w.Uint((int)output.enabled); if (output.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); From b86551b19d0becdc225586d98904b8c85cb42e75 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 17 Jan 2024 09:16:58 +0100 Subject: [PATCH 244/726] DPL: do not reset mDidDispatch too early --- Framework/Core/include/Framework/MessageContext.h | 9 ++------- Framework/Core/src/MessageContext.cxx | 11 +++++++++-- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Framework/Core/include/Framework/MessageContext.h b/Framework/Core/include/Framework/MessageContext.h index 9e7268f614fda..407bac0ceb00a 100644 --- a/Framework/Core/include/Framework/MessageContext.h +++ b/Framework/Core/include/Framework/MessageContext.h @@ -464,12 +464,7 @@ class MessageContext /// Prepares the context to create messages for the given timeslice. This /// expects that the previous context was already sent and can be completely /// discarded. - void clear() - { - // Verify that everything has been sent on clear. 
- assert(std::all_of(mMessages.begin(), mMessages.end(), [](auto& m) { return m->empty(); })); - mMessages.clear(); - } + void clear(); FairMQDeviceProxy& proxy() { @@ -494,7 +489,7 @@ class MessageContext /// return the headers of the 1st (from the end) matching message checking first in mMessages then in mScheduledMessages o2::header::DataHeader* findMessageHeader(const Output& spec); o2::header::Stack* findMessageHeaderStack(const Output& spec); - int countDeviceOutputs(bool excludeDPLOrigin = false); + [[nodiscard]] int countDeviceOutputs(bool excludeDPLOrigin = false) const; void fakeDispatch() { mDidDispatch = true; } bool didDispatch() { return mDidDispatch; } o2::framework::DataProcessingHeader* findMessageDataProcessingHeader(const Output& spec); diff --git a/Framework/Core/src/MessageContext.cxx b/Framework/Core/src/MessageContext.cxx index a7365594785c7..59dfc15837210 100644 --- a/Framework/Core/src/MessageContext.cxx +++ b/Framework/Core/src/MessageContext.cxx @@ -80,12 +80,11 @@ o2::header::Stack* MessageContext::findMessageHeaderStack(const Output& spec) return nullptr; } -int MessageContext::countDeviceOutputs(bool excludeDPLOrigin) +int MessageContext::countDeviceOutputs(bool excludeDPLOrigin) const { // If we dispatched some messages before the end of the callback // we need to account for them as well. int noutputs = mDidDispatch ? 1 : 0; - mDidDispatch = false; constexpr o2::header::DataOrigin DataOriginDPL{"DPL"}; for (auto it = mMessages.rbegin(); it != mMessages.rend(); ++it) { if (!excludeDPLOrigin || (*it)->header()->dataOrigin != DataOriginDPL) { @@ -100,6 +99,14 @@ int MessageContext::countDeviceOutputs(bool excludeDPLOrigin) return noutputs; } +void MessageContext::clear() +{ + // Verify that everything has been sent on clear. + assert(std::all_of(mMessages.begin(), mMessages.end(), [](auto& m) { return m->empty(); })); + mDidDispatch = false; + mMessages.clear(); +} + int64_t MessageContext::addToCache(std::unique_ptr& toCache) { auto&& cached = toCache->GetTransport()->CreateMessage(); From fd2f8bd3d4cb7edb42121f1f78761db26eb0e9c6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 17 Jan 2024 11:35:28 +0100 Subject: [PATCH 245/726] DPL: optimize for the case debugging is disabled --- Framework/Foundation/include/Framework/Signpost.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index d2bdec0ec8246..76da9f395f6a9 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -11,6 +11,7 @@ #ifndef O2_FRAMEWORK_SIGNPOST_H_ #define O2_FRAMEWORK_SIGNPOST_H_ +#include "Framework/CompilerBuiltins.h" #include #include #ifdef __APPLE__ @@ -460,22 +461,22 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) // they are compatible between the two implementations, we also use remove_engineering_type to remove // the engineering types from the format string, so that we can use the same format string for both. #define O2_SIGNPOST_EVENT_EMIT(log, id, name, format, ...) 
__extension__({                                                                                                     \
-  if (O2_SIGNPOST_ENABLED_MAC(log)) {                                                                              \
+  if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) {                                                         \
     O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__);                                              \
-  } else if (private_o2_log_##log->stacktrace) {                                                                   \
+  } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) {                                              \
     _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \
   }                                                                                                                \
 })
 #define O2_SIGNPOST_START(log, id, name, format, ...)                                                              \
-  if (O2_SIGNPOST_ENABLED_MAC(log)) {                                                                              \
+  if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) {                                                         \
     O2_SIGNPOST_START_MAC(log, id, name, format, ##__VA_ARGS__);                                                   \
-  } else if (private_o2_log_##log->stacktrace) {                                                                   \
+  } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) {                                              \
     _o2_signpost_interval_begin(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \
   }
 #define O2_SIGNPOST_END(log, id, name, format, ...)                                                                \
-  if (O2_SIGNPOST_ENABLED_MAC(log)) {                                                                              \
+  if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) {                                                         \
     O2_SIGNPOST_END_MAC(log, id, name, format, ##__VA_ARGS__);                                                     \
-  } else if (private_o2_log_##log->stacktrace) {                                                                   \
+  } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) {                                              \
     _o2_signpost_interval_end(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \
   }
 #else  // This is the release implementation, it does nothing.

From 1c8fb9dfe6568cb3515fab1a11a2af9f5dc9682d Mon Sep 17 00:00:00 2001
From: Giulio Eulisse <10544+ktf@users.noreply.github.com>
Date: Wed, 17 Jan 2024 13:14:59 +0100
Subject: [PATCH 246/726] DPL: helper function to enable loggers from gdb / lldb

---
 Framework/Core/COOKBOOK.md                        | 14 ++++++++++++++
 Framework/Foundation/include/Framework/Signpost.h |  7 +++++++
 2 files changed, 21 insertions(+)

diff --git a/Framework/Core/COOKBOOK.md b/Framework/Core/COOKBOOK.md
index 7de2a6b7ca9c4..1a27079c3dc13 100644
--- a/Framework/Core/COOKBOOK.md
+++ b/Framework/Core/COOKBOOK.md
@@ -538,3 +538,17 @@ perf script -i perf.data > profile.linux-perf.txt
 ```
 and then you can either upload it to https://www.speedscope.app or use chrome://tracing.
+
+## Internal debug log streams
+
+Debug log entries for several DPL components are now provided via the Signpost API.
+
+Such streams can be enabled using the "Signpost" part of the Device Inspector GUI.
+
+Alternatively, for the case where the GUI is not available, you can attach to a process via GDB / LLDB and invoke the `o2_debug_log_set_stacktrace` function, passing it the log you want to enable and the amount of stacktrace you want to see. E.g. to enable a log created with `O2_DECLARE_DYNAMIC_LOG(completion)` you can use:
+
+```
+p o2_debug_log_set_stacktrace(private_o2_log_completion, 1)
+```
+
+If you are on a Mac, you can also use Instruments to visualise your Signposts, just like any other macOS application. In order to do so you need to enable the "Signpost" instrument, making sure you add `ch.cern.aliceo2.completion` to the list of loggers to watch.
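For orientation, the log streams described above are built on the same Signpost macro family used throughout DPL. Below is a minimal sketch of how a component might declare such a log and emit to it; the `my_component` log name and the `processChunk` function are illustrative placeholders, not existing DPL code.

```cpp
#include "Framework/Signpost.h"

// Declare a dynamic log. This also creates the private_o2_log_my_component
// symbol which o2_debug_log_set_stacktrace() can be pointed at from gdb / lldb.
// "my_component" is a made-up name used only for this sketch.
O2_DECLARE_DYNAMIC_LOG(my_component);

void processChunk(void* chunk, int size)
{
  // Derive a signpost id from a pointer so all events about this chunk correlate.
  O2_SIGNPOST_ID_FROM_POINTER(sid, my_component, chunk);
  O2_SIGNPOST_START(my_component, sid, "processing", "Starting chunk of size %d", size);
  // ... actual work on the chunk would go here ...
  O2_SIGNPOST_EVENT_EMIT(my_component, sid, "processing", "Half of the chunk processed");
  O2_SIGNPOST_END(my_component, sid, "processing", "Done with chunk of size %d", size);
}
```

In builds where signposts are compiled in, a disabled log reduces the calls above to a cheap runtime check; enabling it through the GUI, `o2_debug_log_set_stacktrace`, or Instruments turns the same calls into log entries or signpost intervals.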
diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index d2bdec0ec8246..72d956eaa727f 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -438,6 +438,13 @@ void _o2_log_set_stacktrace(_o2_log_t* log, int stacktrace) { log->stacktrace = stacktrace; } +// A C function which can be used to enable the signposts +extern "C" { +void o2_debug_log_set_stacktrace(_o2_log_t* log, int stacktrace) +{ + log->stacktrace = stacktrace; +} +} #endif // O2_SIGNPOST_IMPLEMENTATION #if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) From 306e2de8ab445df3c5cef412d70c4964e71245ae Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 17 Jan 2024 14:49:14 +0100 Subject: [PATCH 247/726] DPL: account for the case in which outputs are not user created --- .../Core/include/Framework/StreamContext.h | 2 +- Framework/Core/src/CommonServices.cxx | 17 +++++++++++------ Framework/Core/src/DataAllocator.cxx | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h index 198802179a419..8300ce4ce7ac8 100644 --- a/Framework/Core/include/Framework/StreamContext.h +++ b/Framework/Core/include/Framework/StreamContext.h @@ -69,7 +69,7 @@ struct StreamContext { // for a given iteration. // This is in the stream context to allow tracking data creation on a per thread // basis. - std::vector routeCreated; + std::vector routeUserCreated; }; } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 4656ddeb12e63..176a60a44bd36 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -169,9 +169,9 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // the same stream might be referring to different data processors. // We should probably have a context which is per stream of a specific // data processor. - stream->routeCreated.resize(routes.size()); - // Reset the routeCreated at every processing step - std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); }, + stream->routeUserCreated.resize(routes.size()); + // Reset the routeUserCreated at every processing step + std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); }, .postProcessing = [](ProcessingContext& processingContext, void* service) { auto* stream = (StreamContext*)service; auto& routes = processingContext.services().get().outputs; @@ -182,7 +182,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // it means it was created out of band. bool didCreate = false; for (size_t ri = 0; ri < routes.size(); ++ri) { - if (stream->routeCreated[ri] == true) { + if (stream->routeUserCreated[ri] == true) { didCreate = true; break; } @@ -192,7 +192,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() return; } for (size_t ri = 0; ri < routes.size(); ++ri) { - if (stream->routeCreated[ri] == true) { + if (stream->routeUserCreated[ri] == true) { continue; } auto &route = routes[ri]; @@ -458,7 +458,9 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() // For any output that is a FLP/DISTSUBTIMEFRAME with subspec != 0, // we create a new message. 
      InputSpec matcher{"matcher", ConcreteDataTypeMatcher{"FLP", "DISTSUBTIMEFRAME"}};
-      for (auto& output : pc.services().get().outputs) {
+      auto &streamContext = pc.services().get();
+      for (size_t oi = 0; oi < pc.services().get().outputs.size(); ++oi) {
+        OutputRoute const& output = pc.services().get().outputs[oi];
         if ((output.timeslice % output.maxTimeslices) != 0) {
           continue;
         }
@@ -471,6 +473,9 @@
         stfDist.id = timingInfo.timeslice;
         stfDist.firstOrbit = timingInfo.firstTForbit;
         stfDist.runNumber = timingInfo.runNumber;
+        // We mark it as not created, because we should not account for it when
+        // checking if we created all the data for a timeslice.
+        streamContext.routeUserCreated[oi] = false;
       }
     } },
     .kind = ServiceKind::Global};
diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx
index 9ce72aed8c1cf..a6b13ceeda101 100644
--- a/Framework/Core/src/DataAllocator.cxx
+++ b/Framework/Core/src/DataAllocator.cxx
@@ -53,7 +53,7 @@ RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice)
   for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) {
     auto& route = allowedOutputRoutes[ri];
     if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) {
-      stream.routeCreated[ri] = true;
+      stream.routeUserCreated[ri] = true;
       return RouteIndex{ri};
     }
   }

From 4ef0e664fbd2984eeb24b6b3f94e9ec42f99e493 Mon Sep 17 00:00:00 2001
From: Giulio Eulisse <10544+ktf@users.noreply.github.com>
Date: Wed, 17 Jan 2024 14:58:33 +0100
Subject: [PATCH 248/726] DPL: account for the case in which outputs are not user created

---
 .../Core/include/Framework/StreamContext.h |  2 +-
 Framework/Core/src/CommonServices.cxx      | 17 +++++++++++------
 Framework/Core/src/DataAllocator.cxx       |  2 +-
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h
index 198802179a419..8300ce4ce7ac8 100644
--- a/Framework/Core/include/Framework/StreamContext.h
+++ b/Framework/Core/include/Framework/StreamContext.h
@@ -69,7 +69,7 @@ struct StreamContext {
   // for a given iteration.
   // This is in the stream context to allow tracking data creation on a per thread
   // basis.
-  std::vector routeCreated;
+  std::vector routeUserCreated;
 };

 } // namespace o2::framework

diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx
index 4656ddeb12e63..fd02a4c9ccb88 100644
--- a/Framework/Core/src/CommonServices.cxx
+++ b/Framework/Core/src/CommonServices.cxx
@@ -169,9 +169,9 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec()
       // the same stream might be referring to different data processors.
       // We should probably have a context which is per stream of a specific
       // data processor.
-      stream->routeCreated.resize(routes.size());
-      // Reset the routeCreated at every processing step
-      std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); },
+      stream->routeUserCreated.resize(routes.size());
+      // Reset the routeUserCreated at every processing step
+      std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); },
     .postProcessing = [](ProcessingContext& processingContext, void* service) {
       auto* stream = (StreamContext*)service;
       auto& routes = processingContext.services().get().outputs;
@@ -182,7 +182,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec()
       // it means it was created out of band.
       bool didCreate = false;
       for (size_t ri = 0; ri < routes.size(); ++ri) {
-        if (stream->routeCreated[ri] == true) {
+        if (stream->routeUserCreated[ri] == true) {
           didCreate = true;
           break;
         }
@@ -192,7 +192,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec()
         return;
       }
       for (size_t ri = 0; ri < routes.size(); ++ri) {
-        if (stream->routeCreated[ri] == true) {
+        if (stream->routeUserCreated[ri] == true) {
           continue;
         }
         auto &route = routes[ri];
@@ -458,7 +458,9 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec()
       // For any output that is a FLP/DISTSUBTIMEFRAME with subspec != 0,
      // we create a new message.
       InputSpec matcher{"matcher", ConcreteDataTypeMatcher{"FLP", "DISTSUBTIMEFRAME"}};
-      for (auto& output : pc.services().get().outputs) {
+      auto& streamContext = pc.services().get();
+      for (size_t oi = 0; oi < pc.services().get().outputs.size(); ++oi) {
+        OutputRoute const& output = pc.services().get().outputs[oi];
         if ((output.timeslice % output.maxTimeslices) != 0) {
           continue;
         }
@@ -471,6 +473,9 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec()
         stfDist.id = timingInfo.timeslice;
         stfDist.firstOrbit = timingInfo.firstTForbit;
         stfDist.runNumber = timingInfo.runNumber;
+        // We mark it as not created, because we should not account for it when
+        // checking if we created all the data for a timeslice.
+ streamContext.routeUserCreated[oi] = false; } } }, .kind = ServiceKind::Global}; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 9ce72aed8c1cf..a6b13ceeda101 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -53,7 +53,7 @@ RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice) for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) { auto& route = allowedOutputRoutes[ri]; if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) { - stream.routeCreated[ri] = true; + stream.routeUserCreated[ri] = true; return RouteIndex{ri}; } } From 0c273197dad569c2fa831ff1f4366708cd4ddaf4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 17 Jan 2024 16:24:00 +0100 Subject: [PATCH 249/726] DPL: invoke fakeDispatch when creating data out of band in test --- Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx index c105a9d0f662c..249e7fc304949 100644 --- a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx +++ b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx @@ -174,7 +174,7 @@ std::vector defineDataProcessing(ConfigContext const& config) callbacks.set(producerChannelInit); } // the compute callback of the producer - auto producerCallback = [nRolls, channelName, proxyMode, counter = std::make_shared()](DataAllocator& outputs, ControlService& control, RawDeviceService& rds) { + auto producerCallback = [nRolls, channelName, proxyMode, counter = std::make_shared()](DataAllocator& outputs, ControlService& control, RawDeviceService& rds, MessageContext& messageContext) { int data = *counter; // outputs.make(OutputRef{"data", 0}) = data; @@ -233,6 +233,7 @@ std::vector defineDataProcessing(ConfigContext const& config) } // using utility from ExternalFairMQDeviceProxy o2::framework::sendOnChannel(device, messages, *channelName, (size_t)-1); + messageContext.fakeDispatch(); if (++(*counter) >= nRolls) { // send the end of stream signal, this is transferred by the proxies @@ -256,6 +257,7 @@ std::vector defineDataProcessing(ConfigContext const& config) // add empty payload message out.AddPart(std::move(device.NewMessageFor(*channelName, 0, 0))); o2::framework::sendOnChannel(device, out, *channelName, (size_t)-1); + messageContext.fakeDispatch(); } } }; @@ -401,7 +403,9 @@ std::vector defineDataProcessing(ConfigContext const& config) output.AddPart(std::move(inputs.At(msgidx))); } } + auto &messageContext = services.get(); o2::framework::sendOnChannel(*device, output, channelName, (size_t)-1); + messageContext.fakeDispatch(); return output.Size() != 0; }; From 77dac53b0a43ec0a07292fa4abf1b3d0540975c1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 17 Jan 2024 20:22:18 +0100 Subject: [PATCH 250/726] DPL: add Signposts for some of the DataProcessing / Stream callbacks --- .../Core/include/Framework/DeviceState.h | 8 +- Framework/Core/src/CommonServices.cxx | 6 + Framework/Core/src/DataProcessingContext.cxx | 122 +++++++++--------- Framework/Core/src/StreamContext.cxx | 11 +- Framework/Core/src/WSDriverClient.cxx | 13 +- .../src/FrameworkGUIDeviceInspector.cxx | 2 + 6 files changed, 95 insertions(+), 67 deletions(-) diff 
--git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 4fa72a84cad71..89961b3e92dc7 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -59,9 +59,11 @@ struct DeviceState { enum LogStreams : int { NO_LOG = 0, - DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. - COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. - MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. + COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. + MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + DATA_PROCESSOR_CONTEXT_LOG = 1 << 3, // Log for the DataProcessorContext callbacks + STREAM_CONTEXT_LOG = 1 << 4, // Log for the StreamContext callbacks }; std::vector inputChannelInfos; diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index fd02a4c9ccb88..d264124fe25af 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -57,6 +57,7 @@ #include #include #include +#include "Framework/Signpost.h" #include #include @@ -79,6 +80,9 @@ using Value = o2::monitoring::tags::Value; #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" +O2_DECLARE_DYNAMIC_LOG(data_processor_context); +O2_DECLARE_DYNAMIC_LOG(stream_context); + namespace o2::framework { @@ -188,6 +192,8 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() } } if (didCreate == false && messageContext.didDispatch() == true) { + O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band"); LOGP(debug, "Data created out of band"); return; } diff --git a/Framework/Core/src/DataProcessingContext.cxx b/Framework/Core/src/DataProcessingContext.cxx index 9c023e5021a9b..73223b3d6ff89 100644 --- a/Framework/Core/src/DataProcessingContext.cxx +++ b/Framework/Core/src/DataProcessingContext.cxx @@ -10,145 +10,143 @@ // or submit itself to any jurisdiction. #include "Framework/DataProcessingContext.h" +#include "Framework/DataProcessorSpec.h" +#include "Framework/Signpost.h" +O2_DECLARE_DYNAMIC_LOG(data_processor_context); namespace o2::framework { + +namespace { +template +void invokeAll(T& handles, char const* callbackName, o2::framework::DataProcessorSpec *spec, ARGS&... args) +{ + assert(callbackName); + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, spec); + // FIXME: for now spec is nullptr because we don't have a list of possible DataProcessorSpecs + // per device. + char const* dataProcessorName = spec ? 
spec->name.c_str() : "DataProcessorContext"; + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting %{public}s::%{public}s", dataProcessorName, callbackName); + for (auto& handle : handles) { + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle.service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting %{public}s::%{public}s::%{public}s", dataProcessorName, handle.spec.name.c_str(), callbackName); + handle.callback(args..., handle.service); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending %{public}s::%{public}s::%{public}s", dataProcessorName, handle.spec.name.c_str(), callbackName); + } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending %{public}s::%{public}s", dataProcessorName, callbackName); +} +} + /// Invoke callbacks to be executed before every dangling check void DataProcessorContext::preProcessingCallbacks(ProcessingContext& ctx) { - for (auto& handle : preProcessingHandlers) { - LOGP(debug, "Invoking preDanglingCallback for service {}", handle.spec.name); - handle.callback(ctx, handle.service); - } + invokeAll(preProcessingHandlers, "preProcessingCallbacks", spec, ctx); } void DataProcessorContext::finaliseOutputsCallbacks(ProcessingContext& ctx) { - for (auto& handle : finaliseOutputsHandles) { - LOGP(debug, "Invoking postProcessingCallback for service {}", handle.spec.name); - handle.callback(ctx, handle.service); - } + invokeAll(finaliseOutputsHandles, "finaliseOutputsCallbacks", spec, ctx); } /// Invoke callbacks to be executed before every dangling check void DataProcessorContext::postProcessingCallbacks(ProcessingContext& ctx) { - for (auto& handle : postProcessingHandlers) { - LOGP(debug, "Invoking postProcessingCallback for service {}", handle.spec.name); - handle.callback(ctx, handle.service); - } + invokeAll(postProcessingHandlers, "postProcessingCallbacks", spec, ctx); } /// Invoke callbacks to be executed before every dangling check -void DataProcessorContext::preDanglingCallbacks(DanglingContext& danglingContext) +void DataProcessorContext::preDanglingCallbacks(DanglingContext& ctx) { - for (auto& handle : preDanglingHandles) { - LOGP(debug, "Invoking preDanglingCallback for service {}", handle.spec.name); - handle.callback(danglingContext, handle.service); - } + invokeAll(preDanglingHandles, "preDanglingCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every dangling check -void DataProcessorContext::postDanglingCallbacks(DanglingContext& danglingContext) +void DataProcessorContext::postDanglingCallbacks(DanglingContext& ctx) { - for (auto& handle : postDanglingHandles) { - LOGP(debug, "Invoking postDanglingCallback for service {}", handle.spec.name); - handle.callback(danglingContext, handle.service); - } + invokeAll(postDanglingHandles, "postDanglingCallbacks", spec, ctx); } /// Invoke callbacks to be executed before every EOS user callback invokation -void DataProcessorContext::preEOSCallbacks(EndOfStreamContext& eosContext) +void DataProcessorContext::preEOSCallbacks(EndOfStreamContext& ctx) { - for (auto& handle : preEOSHandles) { - LOGP(detail, "Invoking preEosCallback for service {}", handle.spec.name); - handle.callback(eosContext, handle.service); - } + invokeAll(preEOSHandles, "preEOSCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every EOS user callback invokation -void DataProcessorContext::postEOSCallbacks(EndOfStreamContext& eosContext) +void DataProcessorContext::postEOSCallbacks(EndOfStreamContext& ctx) { - for (auto& 
handle : postEOSHandles) { - LOGP(detail, "Invoking postEoSCallback for service {}", handle.spec.name); - handle.callback(eosContext, handle.service); - } + invokeAll(postEOSHandles, "postEOSCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every data Dispatching -void DataProcessorContext::postDispatchingCallbacks(ProcessingContext& processContext) +void DataProcessorContext::postDispatchingCallbacks(ProcessingContext& ctx) { - for (auto& handle : postDispatchingHandles) { - LOGP(debug, "Invoking postDispatchingCallback for service {}", handle.spec.name); - handle.callback(processContext, handle.service); - } + invokeAll(postDispatchingHandles, "postDispatchingCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every data Dispatching -void DataProcessorContext::postForwardingCallbacks(ProcessingContext& processContext) +void DataProcessorContext::postForwardingCallbacks(ProcessingContext& ctx) { - for (auto& handle : postForwardingHandles) { - LOGP(debug, "Invoking postForwardingCallback for service {}", handle.spec.name); - handle.callback(processContext, handle.service); - } + invokeAll(postForwardingHandles, "postForwardingCallbacks", spec, ctx); } /// Callbacks to be called in fair::mq::Device::PreRun() void DataProcessorContext::preStartCallbacks(ServiceRegistryRef ref) { - for (auto& handle : preStartHandles) { - LOGP(detail, "Invoking preStartCallback for service {}", handle.spec.name); - handle.callback(ref, handle.service); - } + invokeAll(preStartHandles, "preStartCallbacks", spec, ref); } void DataProcessorContext::postStopCallbacks(ServiceRegistryRef ref) { - // FIXME: we need to call the callback only once for the global services - /// I guess... - for (auto& handle : postStopHandles) { - LOGP(detail, "Invoking postStopCallback for service {}", handle.spec.name); - handle.callback(ref, handle.service); - } + invokeAll(postStopHandles, "postStopCallbacks", spec, ref); } /// Invoke callback to be executed on exit, in reverse order. void DataProcessorContext::preExitCallbacks(std::vector handles, ServiceRegistryRef ref) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, &ref); + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting DataProcessorContext preExitCallbacks"); // FIXME: we need to call the callback only once for the global services /// I guess... for (auto handle = handles.rbegin(); handle != handles.rend(); ++handle) { - LOGP(detail, "Invoking preExitCallback for service {}", handle->spec.name); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle->service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting DataProcessorContext::preExitCallbacks for service %{public}s", handle->spec.name.c_str()); handle->callback(ref, handle->service); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending DataProcessorContext::preExitCallbacks for service %{public}s", handle->spec.name.c_str()); } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending DataProcessorContext preExitCallbacks"); } /// Invoke callback to be executed on exit, in reverse order. void DataProcessorContext::preLoopCallbacks(ServiceRegistryRef ref) { - // FIXME: we need to call the callback only once for the global services - /// I guess... 
- LOGP(debug, "Invoking preLoopCallbacks"); - for (auto& handle : preLoopHandles) { - LOGP(debug, "Invoking preLoopCallback for service {}", handle.spec.name); - handle.callback(ref, handle.service); - } + invokeAll(preLoopHandles, "preLoopCallbacks", spec, ref); } void DataProcessorContext::domainInfoUpdatedCallback(ServiceRegistryRef ref, size_t oldestPossibleTimeslice, ChannelIndex channelIndex) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, this); + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting DataProcessorContext domainInfoUpdatedCallback"); for (auto& handle : domainInfoHandles) { - LOGP(debug, "Invoking domainInfoHandles for service {}", handle.spec.name); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle.service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting DataProcessorContext::domainInfoUpdatedCallback for service %{public}s", handle.spec.name.c_str()); handle.callback(ref, oldestPossibleTimeslice, channelIndex); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending DataProcessorContext::domainInfoUpdatedCallback for service %{public}s", handle.spec.name.c_str()); } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending DataProcessorContext domainInfoUpdatedCallback"); } void DataProcessorContext::preSendingMessagesCallbacks(ServiceRegistryRef ref, fair::mq::Parts& parts, ChannelIndex channelIndex) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, this); + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting DataProcessorContext preSendingMessagesCallbacks"); for (auto& handle : preSendingMessagesHandles) { - LOGP(debug, "Invoking preSending for service {}", handle.spec.name); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle.service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting DataProcessorContext::preSendingMessagesCallbacks for service %{public}s", handle.spec.name.c_str()); handle.callback(ref, parts, channelIndex); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending DataProcessorContext::preSendingMessagesCallbacks for service %{public}s", handle.spec.name.c_str()); } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending DataProcessorContext preSendingMessagesCallbacks"); } } // namespace o2::framework diff --git a/Framework/Core/src/StreamContext.cxx b/Framework/Core/src/StreamContext.cxx index c7f28a3dbde1a..2ce0c1c427cbd 100644 --- a/Framework/Core/src/StreamContext.cxx +++ b/Framework/Core/src/StreamContext.cxx @@ -11,6 +11,10 @@ #include "Framework/StreamContext.h" +#include "Framework/Signpost.h" + +O2_DECLARE_DYNAMIC_LOG(stream_context); + namespace o2::framework { @@ -49,12 +53,17 @@ void StreamContext::finaliseOutputsCallbacks(ProcessingContext& pcx) /// Invoke callbacks to be executed after every process method invokation void StreamContext::postProcessingCallbacks(ProcessingContext& pcx) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, stream_context, &pcx); + O2_SIGNPOST_START(stream_context, dpid, "callbacks", "Starting StreamContext postProcessingCallbacks"); for (auto& handle : postProcessingHandles) { - LOG(debug) << "Invoking postProcessingCallbacks for " << handle.service; + O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, handle.service); + O2_SIGNPOST_START(stream_context, cid, "callbacks", "Starting StreamContext::postProcessingCallbacks for service %{public}s", handle.spec.name.c_str()); assert(handle.service); assert(handle.callback); 
handle.callback(pcx, handle.service); + O2_SIGNPOST_END(stream_context, cid, "callbacks", "Ending StreamContext::postProcessingCallbacks for service %{public}s", handle.spec.name.c_str()); } + O2_SIGNPOST_END(stream_context, dpid, "callbacks", "Ending StreamContext postProcessingCallbacks"); } /// Invoke callbacks to be executed before every EOS user callback invokation diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index ac2e3db41fcef..179b13bf91d76 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -24,6 +24,8 @@ O2_DECLARE_DYNAMIC_LOG(device); O2_DECLARE_DYNAMIC_LOG(completion); O2_DECLARE_DYNAMIC_LOG(monitoring_service); +O2_DECLARE_DYNAMIC_LOG(data_processor_context); +O2_DECLARE_DYNAMIC_LOG(stream_context); namespace o2::framework { @@ -184,12 +186,21 @@ void on_connect(uv_connect_t* connection, int status) } else { O2_LOG_DISABLE(completion); } - if ((state.logStreams & DeviceState::LogStreams::MONITORING_SERVICE_LOG) != 0) { O2_LOG_ENABLE(monitoring_service); } else { O2_LOG_DISABLE(monitoring_service); } + if ((state.logStreams & DeviceState::LogStreams::DATA_PROCESSOR_CONTEXT_LOG) != 0) { + O2_LOG_ENABLE(data_processor_context); + } else { + O2_LOG_DISABLE(data_processor_context); + } + if ((state.logStreams & DeviceState::LogStreams::STREAM_CONTEXT_LOG) != 0) { + O2_LOG_ENABLE(stream_context); + } else { + O2_LOG_DISABLE(stream_context); + } }); // Client will be filled in the line after. I can probably have a single diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index fe93ca6d0f07f..f54e74d6ac019 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -405,6 +405,8 @@ void displayDeviceInspector(DeviceSpec const& spec, logsChanged = ImGui::CheckboxFlags("Device", &control.logStreams, DeviceState::LogStreams::DEVICE_LOG); logsChanged = ImGui::CheckboxFlags("Completion", &control.logStreams, DeviceState::LogStreams::COMPLETION_LOG); logsChanged = ImGui::CheckboxFlags("Monitoring", &control.logStreams, DeviceState::LogStreams::MONITORING_SERVICE_LOG); + logsChanged = ImGui::CheckboxFlags("DataProcessorContext", &control.logStreams, DeviceState::LogStreams::DATA_PROCESSOR_CONTEXT_LOG); + logsChanged = ImGui::CheckboxFlags("StreamContext", &control.logStreams, DeviceState::LogStreams::STREAM_CONTEXT_LOG); if (logsChanged && control.controller) { std::string cmd = fmt::format("/log-streams {}", control.logStreams); control.controller->write(cmd.c_str(), cmd.size()); From 8a93fe99b4709d262c0f1395362459bf29a520d0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 10:14:58 +0100 Subject: [PATCH 251/726] DPL: add Signposts for some of the DataProcessing / Stream callbacks --- .../Core/include/Framework/DeviceState.h | 8 +- Framework/Core/src/CommonServices.cxx | 6 + Framework/Core/src/DataProcessingContext.cxx | 123 +++++++++--------- Framework/Core/src/StreamContext.cxx | 11 +- Framework/Core/src/WSDriverClient.cxx | 13 +- .../src/FrameworkGUIDeviceInspector.cxx | 2 + 6 files changed, 96 insertions(+), 67 deletions(-) diff --git a/Framework/Core/include/Framework/DeviceState.h b/Framework/Core/include/Framework/DeviceState.h index 4fa72a84cad71..89961b3e92dc7 100644 --- a/Framework/Core/include/Framework/DeviceState.h +++ b/Framework/Core/include/Framework/DeviceState.h @@ -59,9 
+59,11 @@ struct DeviceState { enum LogStreams : int { NO_LOG = 0, - DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. - COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. - MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + DEVICE_LOG = 1 << 0, // Log for Data Processing Device activities. + COMPLETION_LOG = 1 << 1, // Log for the completion policy of the device. + MONITORING_SERVICE_LOG = 1 << 2, // Log for the monitoring service flushing. + DATA_PROCESSOR_CONTEXT_LOG = 1 << 3, // Log for the DataProcessorContext callbacks + STREAM_CONTEXT_LOG = 1 << 4, // Log for the StreamContext callbacks }; std::vector inputChannelInfos; diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index fd02a4c9ccb88..d264124fe25af 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -57,6 +57,7 @@ #include #include #include +#include "Framework/Signpost.h" #include #include @@ -79,6 +80,9 @@ using Value = o2::monitoring::tags::Value; #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" +O2_DECLARE_DYNAMIC_LOG(data_processor_context); +O2_DECLARE_DYNAMIC_LOG(stream_context); + namespace o2::framework { @@ -188,6 +192,8 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() } } if (didCreate == false && messageContext.didDispatch() == true) { + O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band"); LOGP(debug, "Data created out of band"); return; } diff --git a/Framework/Core/src/DataProcessingContext.cxx b/Framework/Core/src/DataProcessingContext.cxx index 9c023e5021a9b..86409bf5434bb 100644 --- a/Framework/Core/src/DataProcessingContext.cxx +++ b/Framework/Core/src/DataProcessingContext.cxx @@ -10,145 +10,144 @@ // or submit itself to any jurisdiction. #include "Framework/DataProcessingContext.h" +#include "Framework/DataProcessorSpec.h" +#include "Framework/Signpost.h" +O2_DECLARE_DYNAMIC_LOG(data_processor_context); namespace o2::framework { + +namespace +{ +template +void invokeAll(T& handles, char const* callbackName, o2::framework::DataProcessorSpec* spec, ARGS&... args) +{ + assert(callbackName); + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, spec); + // FIXME: for now spec is nullptr because we don't have a list of possible DataProcessorSpecs + // per device. + char const* dataProcessorName = spec ? 
spec->name.c_str() : "DataProcessorContext"; + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting %{public}s::%{public}s", dataProcessorName, callbackName); + for (auto& handle : handles) { + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle.service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting %{public}s::%{public}s::%{public}s", dataProcessorName, handle.spec.name.c_str(), callbackName); + handle.callback(args..., handle.service); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending %{public}s::%{public}s::%{public}s", dataProcessorName, handle.spec.name.c_str(), callbackName); + } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending %{public}s::%{public}s", dataProcessorName, callbackName); +} +} // namespace + /// Invoke callbacks to be executed before every dangling check void DataProcessorContext::preProcessingCallbacks(ProcessingContext& ctx) { - for (auto& handle : preProcessingHandlers) { - LOGP(debug, "Invoking preDanglingCallback for service {}", handle.spec.name); - handle.callback(ctx, handle.service); - } + invokeAll(preProcessingHandlers, "preProcessingCallbacks", spec, ctx); } void DataProcessorContext::finaliseOutputsCallbacks(ProcessingContext& ctx) { - for (auto& handle : finaliseOutputsHandles) { - LOGP(debug, "Invoking postProcessingCallback for service {}", handle.spec.name); - handle.callback(ctx, handle.service); - } + invokeAll(finaliseOutputsHandles, "finaliseOutputsCallbacks", spec, ctx); } /// Invoke callbacks to be executed before every dangling check void DataProcessorContext::postProcessingCallbacks(ProcessingContext& ctx) { - for (auto& handle : postProcessingHandlers) { - LOGP(debug, "Invoking postProcessingCallback for service {}", handle.spec.name); - handle.callback(ctx, handle.service); - } + invokeAll(postProcessingHandlers, "postProcessingCallbacks", spec, ctx); } /// Invoke callbacks to be executed before every dangling check -void DataProcessorContext::preDanglingCallbacks(DanglingContext& danglingContext) +void DataProcessorContext::preDanglingCallbacks(DanglingContext& ctx) { - for (auto& handle : preDanglingHandles) { - LOGP(debug, "Invoking preDanglingCallback for service {}", handle.spec.name); - handle.callback(danglingContext, handle.service); - } + invokeAll(preDanglingHandles, "preDanglingCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every dangling check -void DataProcessorContext::postDanglingCallbacks(DanglingContext& danglingContext) +void DataProcessorContext::postDanglingCallbacks(DanglingContext& ctx) { - for (auto& handle : postDanglingHandles) { - LOGP(debug, "Invoking postDanglingCallback for service {}", handle.spec.name); - handle.callback(danglingContext, handle.service); - } + invokeAll(postDanglingHandles, "postDanglingCallbacks", spec, ctx); } /// Invoke callbacks to be executed before every EOS user callback invokation -void DataProcessorContext::preEOSCallbacks(EndOfStreamContext& eosContext) +void DataProcessorContext::preEOSCallbacks(EndOfStreamContext& ctx) { - for (auto& handle : preEOSHandles) { - LOGP(detail, "Invoking preEosCallback for service {}", handle.spec.name); - handle.callback(eosContext, handle.service); - } + invokeAll(preEOSHandles, "preEOSCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every EOS user callback invokation -void DataProcessorContext::postEOSCallbacks(EndOfStreamContext& eosContext) +void DataProcessorContext::postEOSCallbacks(EndOfStreamContext& ctx) { - for 
(auto& handle : postEOSHandles) { - LOGP(detail, "Invoking postEoSCallback for service {}", handle.spec.name); - handle.callback(eosContext, handle.service); - } + invokeAll(postEOSHandles, "postEOSCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every data Dispatching -void DataProcessorContext::postDispatchingCallbacks(ProcessingContext& processContext) +void DataProcessorContext::postDispatchingCallbacks(ProcessingContext& ctx) { - for (auto& handle : postDispatchingHandles) { - LOGP(debug, "Invoking postDispatchingCallback for service {}", handle.spec.name); - handle.callback(processContext, handle.service); - } + invokeAll(postDispatchingHandles, "postDispatchingCallbacks", spec, ctx); } /// Invoke callbacks to be executed after every data Dispatching -void DataProcessorContext::postForwardingCallbacks(ProcessingContext& processContext) +void DataProcessorContext::postForwardingCallbacks(ProcessingContext& ctx) { - for (auto& handle : postForwardingHandles) { - LOGP(debug, "Invoking postForwardingCallback for service {}", handle.spec.name); - handle.callback(processContext, handle.service); - } + invokeAll(postForwardingHandles, "postForwardingCallbacks", spec, ctx); } /// Callbacks to be called in fair::mq::Device::PreRun() void DataProcessorContext::preStartCallbacks(ServiceRegistryRef ref) { - for (auto& handle : preStartHandles) { - LOGP(detail, "Invoking preStartCallback for service {}", handle.spec.name); - handle.callback(ref, handle.service); - } + invokeAll(preStartHandles, "preStartCallbacks", spec, ref); } void DataProcessorContext::postStopCallbacks(ServiceRegistryRef ref) { - // FIXME: we need to call the callback only once for the global services - /// I guess... - for (auto& handle : postStopHandles) { - LOGP(detail, "Invoking postStopCallback for service {}", handle.spec.name); - handle.callback(ref, handle.service); - } + invokeAll(postStopHandles, "postStopCallbacks", spec, ref); } /// Invoke callback to be executed on exit, in reverse order. void DataProcessorContext::preExitCallbacks(std::vector handles, ServiceRegistryRef ref) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, &ref); + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting DataProcessorContext preExitCallbacks"); // FIXME: we need to call the callback only once for the global services /// I guess... for (auto handle = handles.rbegin(); handle != handles.rend(); ++handle) { - LOGP(detail, "Invoking preExitCallback for service {}", handle->spec.name); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle->service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting DataProcessorContext::preExitCallbacks for service %{public}s", handle->spec.name.c_str()); handle->callback(ref, handle->service); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending DataProcessorContext::preExitCallbacks for service %{public}s", handle->spec.name.c_str()); } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending DataProcessorContext preExitCallbacks"); } /// Invoke callback to be executed on exit, in reverse order. void DataProcessorContext::preLoopCallbacks(ServiceRegistryRef ref) { - // FIXME: we need to call the callback only once for the global services - /// I guess... 
- LOGP(debug, "Invoking preLoopCallbacks"); - for (auto& handle : preLoopHandles) { - LOGP(debug, "Invoking preLoopCallback for service {}", handle.spec.name); - handle.callback(ref, handle.service); - } + invokeAll(preLoopHandles, "preLoopCallbacks", spec, ref); } void DataProcessorContext::domainInfoUpdatedCallback(ServiceRegistryRef ref, size_t oldestPossibleTimeslice, ChannelIndex channelIndex) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, this); + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting DataProcessorContext domainInfoUpdatedCallback"); for (auto& handle : domainInfoHandles) { - LOGP(debug, "Invoking domainInfoHandles for service {}", handle.spec.name); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle.service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting DataProcessorContext::domainInfoUpdatedCallback for service %{public}s", handle.spec.name.c_str()); handle.callback(ref, oldestPossibleTimeslice, channelIndex); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending DataProcessorContext::domainInfoUpdatedCallback for service %{public}s", handle.spec.name.c_str()); } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending DataProcessorContext domainInfoUpdatedCallback"); } void DataProcessorContext::preSendingMessagesCallbacks(ServiceRegistryRef ref, fair::mq::Parts& parts, ChannelIndex channelIndex) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, data_processor_context, this); + O2_SIGNPOST_START(data_processor_context, dpid, "callbacks", "Starting DataProcessorContext preSendingMessagesCallbacks"); for (auto& handle : preSendingMessagesHandles) { - LOGP(debug, "Invoking preSending for service {}", handle.spec.name); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, handle.service); + O2_SIGNPOST_START(data_processor_context, cid, "callbacks", "Starting DataProcessorContext::preSendingMessagesCallbacks for service %{public}s", handle.spec.name.c_str()); handle.callback(ref, parts, channelIndex); + O2_SIGNPOST_END(data_processor_context, cid, "callbacks", "Ending DataProcessorContext::preSendingMessagesCallbacks for service %{public}s", handle.spec.name.c_str()); } + O2_SIGNPOST_END(data_processor_context, dpid, "callbacks", "Ending DataProcessorContext preSendingMessagesCallbacks"); } } // namespace o2::framework diff --git a/Framework/Core/src/StreamContext.cxx b/Framework/Core/src/StreamContext.cxx index c7f28a3dbde1a..2ce0c1c427cbd 100644 --- a/Framework/Core/src/StreamContext.cxx +++ b/Framework/Core/src/StreamContext.cxx @@ -11,6 +11,10 @@ #include "Framework/StreamContext.h" +#include "Framework/Signpost.h" + +O2_DECLARE_DYNAMIC_LOG(stream_context); + namespace o2::framework { @@ -49,12 +53,17 @@ void StreamContext::finaliseOutputsCallbacks(ProcessingContext& pcx) /// Invoke callbacks to be executed after every process method invokation void StreamContext::postProcessingCallbacks(ProcessingContext& pcx) { + O2_SIGNPOST_ID_FROM_POINTER(dpid, stream_context, &pcx); + O2_SIGNPOST_START(stream_context, dpid, "callbacks", "Starting StreamContext postProcessingCallbacks"); for (auto& handle : postProcessingHandles) { - LOG(debug) << "Invoking postProcessingCallbacks for " << handle.service; + O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, handle.service); + O2_SIGNPOST_START(stream_context, cid, "callbacks", "Starting StreamContext::postProcessingCallbacks for service %{public}s", handle.spec.name.c_str()); assert(handle.service); assert(handle.callback); 
handle.callback(pcx, handle.service); + O2_SIGNPOST_END(stream_context, cid, "callbacks", "Ending StreamContext::postProcessingCallbacks for service %{public}s", handle.spec.name.c_str()); } + O2_SIGNPOST_END(stream_context, dpid, "callbacks", "Ending StreamContext postProcessingCallbacks"); } /// Invoke callbacks to be executed before every EOS user callback invokation diff --git a/Framework/Core/src/WSDriverClient.cxx b/Framework/Core/src/WSDriverClient.cxx index ac2e3db41fcef..179b13bf91d76 100644 --- a/Framework/Core/src/WSDriverClient.cxx +++ b/Framework/Core/src/WSDriverClient.cxx @@ -24,6 +24,8 @@ O2_DECLARE_DYNAMIC_LOG(device); O2_DECLARE_DYNAMIC_LOG(completion); O2_DECLARE_DYNAMIC_LOG(monitoring_service); +O2_DECLARE_DYNAMIC_LOG(data_processor_context); +O2_DECLARE_DYNAMIC_LOG(stream_context); namespace o2::framework { @@ -184,12 +186,21 @@ void on_connect(uv_connect_t* connection, int status) } else { O2_LOG_DISABLE(completion); } - if ((state.logStreams & DeviceState::LogStreams::MONITORING_SERVICE_LOG) != 0) { O2_LOG_ENABLE(monitoring_service); } else { O2_LOG_DISABLE(monitoring_service); } + if ((state.logStreams & DeviceState::LogStreams::DATA_PROCESSOR_CONTEXT_LOG) != 0) { + O2_LOG_ENABLE(data_processor_context); + } else { + O2_LOG_DISABLE(data_processor_context); + } + if ((state.logStreams & DeviceState::LogStreams::STREAM_CONTEXT_LOG) != 0) { + O2_LOG_ENABLE(stream_context); + } else { + O2_LOG_DISABLE(stream_context); + } }); // Client will be filled in the line after. I can probably have a single diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index fe93ca6d0f07f..f54e74d6ac019 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -405,6 +405,8 @@ void displayDeviceInspector(DeviceSpec const& spec, logsChanged = ImGui::CheckboxFlags("Device", &control.logStreams, DeviceState::LogStreams::DEVICE_LOG); logsChanged = ImGui::CheckboxFlags("Completion", &control.logStreams, DeviceState::LogStreams::COMPLETION_LOG); logsChanged = ImGui::CheckboxFlags("Monitoring", &control.logStreams, DeviceState::LogStreams::MONITORING_SERVICE_LOG); + logsChanged = ImGui::CheckboxFlags("DataProcessorContext", &control.logStreams, DeviceState::LogStreams::DATA_PROCESSOR_CONTEXT_LOG); + logsChanged = ImGui::CheckboxFlags("StreamContext", &control.logStreams, DeviceState::LogStreams::STREAM_CONTEXT_LOG); if (logsChanged && control.controller) { std::string cmd = fmt::format("/log-streams {}", control.logStreams); control.controller->write(cmd.c_str(), cmd.size()); From d712036dc6e37184fecfa0d8694e01f55acc4e78 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 12:36:12 +0100 Subject: [PATCH 252/726] DPL: make log messages optional as well --- Framework/Foundation/include/Framework/Signpost.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 8bd54975d1622..8d0107e68ecd6 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -460,7 +460,13 @@ void o2_debug_log_set_stacktrace(_o2_log_t* log, int stacktrace) #define O2_LOG_DISABLE(log) _o2_log_set_stacktrace(private_o2_log_##log, 0) // For the moment we simply use LOG DEBUG. 
We should have proper activities so that we can // turn on and off the printing. -#define O2_LOG_DEBUG(log, ...) O2_LOG_MACRO(__VA_ARGS__) +#define O2_LOG_DEBUG(log, ...) __extension__({ \ + if (O2_BUILTIN_UNLIKELY(O2_LOG_ENABLED(log))) { \ + O2_LOG_MACRO(__VA_ARGS__); \ + } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) { \ + O2_LOG_MACRO(__VA_ARGS__); \ + } \ +}) #define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer) _o2_signpost_id_t name = _o2_signpost_id_make_with_pointer(private_o2_log_##log, pointer) #define O2_SIGNPOST_ID_GENERATE(name, log) _o2_signpost_id_t name = _o2_signpost_id_generate_local(private_o2_log_##log) // In case Instruments is attached, we switch to the Apple signpost API otherwise, both one From 7610e035bc1c413930df1585fe1456147236fa4b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 12:36:12 +0100 Subject: [PATCH 253/726] DPL: improve debugging of WorkflowImporter Using Signposts allow us to have a nested view of the state processing. --- .../Core/src/WorkflowSerializationHelpers.cxx | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index ac182a27a70c5..e20e23f98c90b 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -18,6 +18,7 @@ #include "Framework/DataDescriptorMatcher.h" #include "Framework/DataMatcherWalker.h" #include "Framework/Logger.h" +#include "Framework/Signpost.h" #include #include @@ -27,6 +28,8 @@ #include #include +O2_DECLARE_DYNAMIC_LOG(workflow_importer); + namespace o2::framework { @@ -811,7 +814,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, bool Uint(unsigned i) { - debug << "Uint(" << i << ")" << std::endl; + O2_SIGNPOST_EVENT_EMIT(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()}, "import", "Uint(%d)", i); if (in(State::IN_INPUT_SUBSPEC)) { subspec = i; inputMatcherNodes.push_back(SubSpecificationTypeValueMatcher{i}); @@ -845,28 +848,31 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, bool Int(int i) { - debug << "Int(" << i << ")" << std::endl; + O2_SIGNPOST_EVENT_EMIT(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()}, "import", "Int(%d)", i); return true; } bool Uint64(uint64_t u) { - debug << "Uint64(" << u << ")" << std::endl; + O2_SIGNPOST_EVENT_EMIT(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()}, "import", "Uint64(%" PRIu64 ")", u); return true; } bool Double(double d) { - debug << "Double(" << d << ")" << std::endl; + O2_SIGNPOST_EVENT_EMIT(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()}, "import", "Double(%f)", d); return true; } void enter(char const* what) { - debug << "ENTER: " << what << std::endl; + O2_SIGNPOST_EVENT_EMIT(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()}, "import", "ENTER: %s", what); } + void push(State state) { - debug << "PUSH: " << state << std::endl; + debug.str(""); + debug << state; states.push_back(state); + O2_SIGNPOST_START(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()}, "import", "PUSH: %s", debug.str().c_str()); } State pop() @@ -877,11 +883,12 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } auto result = states.back(); states.pop_back(); - debug << "POP: " << result; + debug.str(""); + debug << result; if (!states.empty()) { debug << " now in " << states.back(); } - debug << 
std::endl; + O2_SIGNPOST_END(workflow_importer, _o2_signpost_id_t{(int64_t)states.size()+1}, "import", "POP: %s", debug.str().c_str()); return result; } bool in(State o) From cf1002b505386b1366c5b1d3bf3e2d1ac521ed58 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 13:31:59 +0100 Subject: [PATCH 254/726] DPL: add ability to disable inputs programmatically Disabled inputs will not result in an actual route for the data, however it will be stored in the configuration, so that analysis workflows will not need to have the configuration available at every step. --- Framework/Core/include/Framework/InputSpec.h | 3 ++ Framework/Core/include/Framework/OutputSpec.h | 4 ++ Framework/Core/src/DeviceSpecHelpers.cxx | 15 +++++++- Framework/Core/src/WorkflowHelpers.cxx | 12 ++++-- .../Core/src/WorkflowSerializationHelpers.cxx | 32 +++++++++++++++- .../Core/test/test_WorkflowSerialization.cxx | 38 +++++++++++++++++++ 6 files changed, 98 insertions(+), 6 deletions(-) diff --git a/Framework/Core/include/Framework/InputSpec.h b/Framework/Core/include/Framework/InputSpec.h index 9be95e6853da2..7858aacb80664 100644 --- a/Framework/Core/include/Framework/InputSpec.h +++ b/Framework/Core/include/Framework/InputSpec.h @@ -74,6 +74,9 @@ struct InputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not the input is to be considered enabled. + /// Useful to programmatically disable inputs e.g. for the ProcessorOptions. + bool enabled = true; friend std::ostream& operator<<(std::ostream& stream, InputSpec const& arg); bool operator==(InputSpec const& that) const; diff --git a/Framework/Core/include/Framework/OutputSpec.h b/Framework/Core/include/Framework/OutputSpec.h index e92792faa6b14..51527bce4e339 100644 --- a/Framework/Core/include/Framework/OutputSpec.h +++ b/Framework/Core/include/Framework/OutputSpec.h @@ -86,6 +86,10 @@ struct OutputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not this output is enabled. This is useful to decide programmatically + /// wether or not to produce a given output. 
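As a minimal sketch of how the new flag is meant to be used (the workflow layout and indices below are illustrative, not taken from this patch), the flag can simply be flipped on the specs inside, or right after, defineDataProcessing before the devices are materialized:

  // Minimal sketch: disable an optional product and the input that would consume it.
  WorkflowSpec workflow = defineDataProcessing(configContext);
  workflow[0].outputs[1].enabled = false; // producer side: no OutputRoute is created
  workflow[1].inputs[0].enabled = false;  // consumer side: the matching input is skipped as well

The specs themselves remain part of the serialized configuration, which is what lets later analysis steps see the full set of declared inputs and outputs even though no route exists; DeviceSpecHelpers additionally throws if a disabled spec still ends up wired into the topology.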
+ bool enabled = true; + friend std::ostream& operator<<(std::ostream& stream, OutputSpec const& arg); }; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..bedcda35873f9 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -679,19 +679,27 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(policyPtr != nullptr); if (edge.isForward == false) { + auto& matcher = outputsMatchers[edge.outputGlobalIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } OutputRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], + matcher, channel.name, policyPtr, }; device.outputs.emplace_back(route); } else { + auto& matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } ForwardRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], + matcher, channel.name}; device.forwards.emplace_back(route); } @@ -915,6 +923,9 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; auto const& sourceChannel = consumerDevice.inputChannels[ci].name; + if (inputSpec.enabled == false) { + throw runtime_error_f("Input %s is disabled but it was still used in topology", DataSpecUtils::describe(inputSpec).data()); + } InputRoute route{ inputSpec, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..a4e1bd82f2ad8 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -841,11 +841,13 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, for (size_t wi = 0; wi < workflow.size(); ++wi) { auto& producer = workflow[wi]; - for (size_t oi = 0; oi < producer.outputs.size(); ++oi) { - auto& out = producer.outputs[oi]; + for (auto& output : producer.outputs) { + if (output.enabled == false) { + continue; + } auto uniqueOutputId = outputs.size(); availableOutputsInfo.emplace_back(LogicalOutputInfo{wi, uniqueOutputId, false}); - outputs.push_back(out); + outputs.push_back(output); } } }; @@ -879,6 +881,10 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, std::vector matches(constOutputs.size()); for (size_t consumer = 0; consumer < workflow.size(); ++consumer) { for (size_t input = 0; input < workflow[consumer].inputs.size(); ++input) { + // Skip disabled inputs. 
+ if (workflow[consumer].inputs[input].enabled == false) { + continue; + } forwards.clear(); for (size_t i = 0; i < constOutputs.size(); i++) { matches[i] = DataSpecUtils::match(workflow[consumer].inputs[input], constOutputs[i]); diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index e20e23f98c90b..56fd43df4ca54 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -61,6 +61,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_INPUT_ORIGIN, IN_INPUT_DESCRIPTION, IN_INPUT_SUBSPEC, + IN_INPUT_ENABLED, IN_INPUT_ORIGIN_REF, IN_INPUT_DESCRIPTION_REF, IN_INPUT_SUBSPEC_REF, @@ -75,6 +76,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_OUTPUT_BINDING, IN_OUTPUT_ORIGIN, IN_OUTPUT_DESCRIPTION, + IN_OUTPUT_ENABLED, IN_OUTPUT_SUBSPEC, IN_OUTPUT_LIFETIME, IN_OUTPUT_OPTIONS, @@ -167,6 +169,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_INPUT_SUBSPEC: s << "IN_INPUT_SUBSPEC"; break; + case State::IN_INPUT_ENABLED: + s << "IN_INPUT_ENABLED"; + break; case State::IN_INPUT_ORIGIN_REF: s << "IN_INPUT_ORIGIN_REF"; break; @@ -218,6 +223,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_OUTPUT_OPTIONS: s << "IN_OUTPUT_OPTIONS"; break; + case WorkflowImporter::State::IN_OUTPUT_ENABLED: + s << "IN_OUTPUT_ENABLED"; + break; case State::IN_OPTION: s << "IN_OPTION"; break; @@ -302,6 +310,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.push_back(DataProcessorSpec{}); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; inputMatcherNodes.clear(); } else if (in(State::IN_INPUT_MATCHER)) { // start a new embedded matcher @@ -313,6 +322,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, // will be merged into the parent matcher } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -370,9 +380,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().inputs.push_back(InputSpec({binding}, std::move(*matcher), lifetime, inputOptions)); } + dataProcessors.back().inputs.back().enabled = enabled; inputMatcherNodes.clear(); inputOptions.clear(); - } else if (in(State::IN_INPUT_MATCHER) && inputMatcherNodes.size() > 1) { data_matcher::Node child = std::move(inputMatcherNodes.back()); inputMatcherNodes.pop_back(); @@ -433,6 +443,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().outputs.push_back(OutputSpec({binding}, {origin, description}, lifetime)); } + dataProcessors.back().outputs.back().enabled = enabled; outputHasSubSpec = false; } else if (in(State::IN_OPTION)) { std::unique_ptr opt{nullptr}; @@ -545,12 +556,14 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_DATAPROCESSORS); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; } else if (in(State::IN_INPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -602,6 +615,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_INPUT_DESCRIPTION_REF); } else if 
(in(State::IN_INPUT) && strncmp(str, "subspecRef", length) == 0) { push(State::IN_INPUT_SUBSPEC_REF); + } else if (in(State::IN_INPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_INPUT_ENABLED); } else if (in(State::IN_INPUT) && strncmp(str, "matcher", length) == 0) { // the outermost matcher is starting here // we create a placeholder which is being updated later @@ -664,6 +679,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_OUTPUT_LIFETIME); } else if (in(State::IN_OUTPUT) && strncmp(str, "metadata", length) == 0) { push(State::IN_OUTPUT_OPTIONS); + } else if (in(State::IN_OUTPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_OUTPUT_ENABLED); } else if (in(State::IN_DATAPROCESSOR) && strncmp(str, "name", length) == 0) { push(State::IN_DATAPROCESSOR_NAME); } else if (in(State::IN_DATAPROCESSOR) && strncmp(str, "ranks", length) == 0) { @@ -734,6 +751,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, metadata.back().executable = s; } else if (in(State::IN_INPUT_BINDING)) { binding = s; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (s == "true"); } else if (in(State::IN_INPUT_ORIGIN)) { origin.runtimeInit(s.c_str(), std::min(s.size(), 4UL)); std::string v(s.c_str(), std::min(s.size(), 4UL)); @@ -841,6 +860,10 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.back().inputTimeSliceId = i; } else if (in(State::IN_DATAPROCESSOR_MAX_TIMESLICES)) { dataProcessors.back().maxInputTimeslices = i; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (i == 1); + } else if (in(State::IN_OUTPUT_ENABLED)) { + enabled = (i == 1); } pop(); return true; @@ -915,6 +938,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, header::DataDescription description; size_t subspec; size_t ref; + // Keep track of the enabled state of the input/output + // Unless specified, inputs are enabled by default. 
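For reference, the round trip introduced above works as follows (a sketch distilled from these changes, with variable names mirroring the TestDisabled case added further down): dump() writes the flag as an integer under the "enabled" key, while the importer accepts the integer form for both inputs and outputs and, for inputs, the string form "true" as well.

  // Sketch only; mirrors the TestDisabled case below.
  std::ostringstream out;
  WorkflowSerializationHelpers::dump(out, w0, dataProcessorInfoOut, commandInfoOut);
  // each input/output in the JSON now carries "enabled": 1 or "enabled": 0
  std::istringstream in;
  in.str(out.str());
  WorkflowSpec w1;
  WorkflowSerializationHelpers::import(in, w1, dataProcessorInfoIn, commandInfoIn);
  // w1[0].inputs[0].enabled and w1[0].outputs[1].enabled come back as false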
+ bool enabled = true; Lifetime lifetime; std::string metadatumKey; std::string metadatumValue; @@ -1103,6 +1129,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)input.lifetime); + w.Key("enabled"); + w.Uint((int)input.enabled); if (input.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); @@ -1152,6 +1180,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)output.lifetime); + w.Key("enabled"); + w.Uint((int)output.enabled); if (output.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); diff --git a/Framework/Core/test/test_WorkflowSerialization.cxx b/Framework/Core/test/test_WorkflowSerialization.cxx index 6e541f7d22f07..e7f39e7e38cbd 100644 --- a/Framework/Core/test/test_WorkflowSerialization.cxx +++ b/Framework/Core/test/test_WorkflowSerialization.cxx @@ -120,3 +120,41 @@ TEST_CASE("TestVerifyWildcard") // also check if the conversion to ConcreteDataMatcher is working at import // REQUIRE(std::get_if(&w1[0].inputs[0].matcher) != nullptr);; } + +/// Verify that disabled inputs / outputs are serialised correctly +TEST_CASE("TestDisabled") +{ + using namespace o2::framework; + WorkflowSpec w0{ + DataProcessorSpec{ + .name = "A", + .inputs = {{"clbPayload", "CLP"}, {"clbWrapper", "CLW"}}, + .outputs = {{"CP2", "1"}, {"CL2", "2"}}, + }}; + + w0[0].inputs[0].enabled = false; + w0[0].outputs[1].enabled = false; + + std::vector dataProcessorInfoOut{ + {"A", "test_Framework_test_SerializationWorkflow", {}}, + }; + + CommandInfo commandInfoOut{"o2-dpl-workflow -b --option 1 --option 2"}; + + std::vector dataProcessorInfoIn{}; + CommandInfo commandInfoIn; + + std::ostringstream firstDump; + WorkflowSerializationHelpers::dump(firstDump, w0, dataProcessorInfoOut, commandInfoOut); + std::istringstream is; + is.str(firstDump.str()); + WorkflowSpec w1; + WorkflowSerializationHelpers::import(is, w1, dataProcessorInfoIn, commandInfoIn); + REQUIRE(w1.size() == 1); + REQUIRE(w1[0].inputs.size() == 2); + REQUIRE(w1[0].inputs[0].enabled == false); + REQUIRE(w1[0].inputs[1].enabled == true); + REQUIRE(w1[0].outputs.size() == 2); + REQUIRE(w1[0].outputs[0].enabled == true); + REQUIRE(w1[0].outputs[1].enabled == false); +} From 3cb857b0c7adcb96cd83aadc8d12df5488db1021 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 13:31:59 +0100 Subject: [PATCH 255/726] DPL: add ability to disable inputs programmatically Disabled inputs will not result in an actual route for the data, however it will be stored in the configuration, so that analysis workflows will not need to have the configuration available at every step. --- Framework/Core/include/Framework/InputSpec.h | 3 ++ Framework/Core/include/Framework/OutputSpec.h | 4 ++ Framework/Core/src/DeviceSpecHelpers.cxx | 15 +++++- Framework/Core/src/WorkflowHelpers.cxx | 12 +++-- .../Core/src/WorkflowSerializationHelpers.cxx | 32 ++++++++++++- .../Core/test/test_FrameworkDataFlowToDDS.cxx | 48 ++++++++++++------- .../Core/test/test_WorkflowSerialization.cxx | 38 +++++++++++++++ 7 files changed, 130 insertions(+), 22 deletions(-) diff --git a/Framework/Core/include/Framework/InputSpec.h b/Framework/Core/include/Framework/InputSpec.h index 9be95e6853da2..7858aacb80664 100644 --- a/Framework/Core/include/Framework/InputSpec.h +++ b/Framework/Core/include/Framework/InputSpec.h @@ -74,6 +74,9 @@ struct InputSpec { /// A set of configurables which can be used to customise the InputSpec. 
std::vector metadata; + /// Wether or not the input is to be considered enabled. + /// Useful to programmatically disable inputs e.g. for the ProcessorOptions. + bool enabled = true; friend std::ostream& operator<<(std::ostream& stream, InputSpec const& arg); bool operator==(InputSpec const& that) const; diff --git a/Framework/Core/include/Framework/OutputSpec.h b/Framework/Core/include/Framework/OutputSpec.h index e92792faa6b14..51527bce4e339 100644 --- a/Framework/Core/include/Framework/OutputSpec.h +++ b/Framework/Core/include/Framework/OutputSpec.h @@ -86,6 +86,10 @@ struct OutputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not this output is enabled. This is useful to decide programmatically + /// wether or not to produce a given output. + bool enabled = true; + friend std::ostream& operator<<(std::ostream& stream, OutputSpec const& arg); }; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..bedcda35873f9 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -679,19 +679,27 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(policyPtr != nullptr); if (edge.isForward == false) { + auto& matcher = outputsMatchers[edge.outputGlobalIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } OutputRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], + matcher, channel.name, policyPtr, }; device.outputs.emplace_back(route); } else { + auto& matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } ForwardRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], + matcher, channel.name}; device.forwards.emplace_back(route); } @@ -915,6 +923,9 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; auto const& sourceChannel = consumerDevice.inputChannels[ci].name; + if (inputSpec.enabled == false) { + throw runtime_error_f("Input %s is disabled but it was still used in topology", DataSpecUtils::describe(inputSpec).data()); + } InputRoute route{ inputSpec, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..a4e1bd82f2ad8 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -841,11 +841,13 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, for (size_t wi = 0; wi < workflow.size(); ++wi) { auto& producer = workflow[wi]; - for (size_t oi = 0; oi < producer.outputs.size(); ++oi) { - auto& out = producer.outputs[oi]; + for (auto& output : producer.outputs) { + if (output.enabled == false) { + continue; + } auto uniqueOutputId = outputs.size(); availableOutputsInfo.emplace_back(LogicalOutputInfo{wi, uniqueOutputId, false}); - outputs.push_back(out); + outputs.push_back(output); } } }; @@ -879,6 +881,10 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, std::vector matches(constOutputs.size()); for (size_t consumer = 0; consumer < workflow.size(); ++consumer) { for 
(size_t input = 0; input < workflow[consumer].inputs.size(); ++input) { + // Skip disabled inputs. + if (workflow[consumer].inputs[input].enabled == false) { + continue; + } forwards.clear(); for (size_t i = 0; i < constOutputs.size(); i++) { matches[i] = DataSpecUtils::match(workflow[consumer].inputs[input], constOutputs[i]); diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index e20e23f98c90b..56fd43df4ca54 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -61,6 +61,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_INPUT_ORIGIN, IN_INPUT_DESCRIPTION, IN_INPUT_SUBSPEC, + IN_INPUT_ENABLED, IN_INPUT_ORIGIN_REF, IN_INPUT_DESCRIPTION_REF, IN_INPUT_SUBSPEC_REF, @@ -75,6 +76,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_OUTPUT_BINDING, IN_OUTPUT_ORIGIN, IN_OUTPUT_DESCRIPTION, + IN_OUTPUT_ENABLED, IN_OUTPUT_SUBSPEC, IN_OUTPUT_LIFETIME, IN_OUTPUT_OPTIONS, @@ -167,6 +169,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_INPUT_SUBSPEC: s << "IN_INPUT_SUBSPEC"; break; + case State::IN_INPUT_ENABLED: + s << "IN_INPUT_ENABLED"; + break; case State::IN_INPUT_ORIGIN_REF: s << "IN_INPUT_ORIGIN_REF"; break; @@ -218,6 +223,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_OUTPUT_OPTIONS: s << "IN_OUTPUT_OPTIONS"; break; + case WorkflowImporter::State::IN_OUTPUT_ENABLED: + s << "IN_OUTPUT_ENABLED"; + break; case State::IN_OPTION: s << "IN_OPTION"; break; @@ -302,6 +310,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.push_back(DataProcessorSpec{}); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; inputMatcherNodes.clear(); } else if (in(State::IN_INPUT_MATCHER)) { // start a new embedded matcher @@ -313,6 +322,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, // will be merged into the parent matcher } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -370,9 +380,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().inputs.push_back(InputSpec({binding}, std::move(*matcher), lifetime, inputOptions)); } + dataProcessors.back().inputs.back().enabled = enabled; inputMatcherNodes.clear(); inputOptions.clear(); - } else if (in(State::IN_INPUT_MATCHER) && inputMatcherNodes.size() > 1) { data_matcher::Node child = std::move(inputMatcherNodes.back()); inputMatcherNodes.pop_back(); @@ -433,6 +443,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().outputs.push_back(OutputSpec({binding}, {origin, description}, lifetime)); } + dataProcessors.back().outputs.back().enabled = enabled; outputHasSubSpec = false; } else if (in(State::IN_OPTION)) { std::unique_ptr opt{nullptr}; @@ -545,12 +556,14 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_DATAPROCESSORS); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; } else if (in(State::IN_INPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -602,6 +615,8 @@ struct 
WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_INPUT_DESCRIPTION_REF); } else if (in(State::IN_INPUT) && strncmp(str, "subspecRef", length) == 0) { push(State::IN_INPUT_SUBSPEC_REF); + } else if (in(State::IN_INPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_INPUT_ENABLED); } else if (in(State::IN_INPUT) && strncmp(str, "matcher", length) == 0) { // the outermost matcher is starting here // we create a placeholder which is being updated later @@ -664,6 +679,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_OUTPUT_LIFETIME); } else if (in(State::IN_OUTPUT) && strncmp(str, "metadata", length) == 0) { push(State::IN_OUTPUT_OPTIONS); + } else if (in(State::IN_OUTPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_OUTPUT_ENABLED); } else if (in(State::IN_DATAPROCESSOR) && strncmp(str, "name", length) == 0) { push(State::IN_DATAPROCESSOR_NAME); } else if (in(State::IN_DATAPROCESSOR) && strncmp(str, "ranks", length) == 0) { @@ -734,6 +751,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, metadata.back().executable = s; } else if (in(State::IN_INPUT_BINDING)) { binding = s; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (s == "true"); } else if (in(State::IN_INPUT_ORIGIN)) { origin.runtimeInit(s.c_str(), std::min(s.size(), 4UL)); std::string v(s.c_str(), std::min(s.size(), 4UL)); @@ -841,6 +860,10 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.back().inputTimeSliceId = i; } else if (in(State::IN_DATAPROCESSOR_MAX_TIMESLICES)) { dataProcessors.back().maxInputTimeslices = i; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (i == 1); + } else if (in(State::IN_OUTPUT_ENABLED)) { + enabled = (i == 1); } pop(); return true; @@ -915,6 +938,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, header::DataDescription description; size_t subspec; size_t ref; + // Keep track of the enabled state of the input/output + // Unless specified, inputs are enabled by default. 
+ bool enabled = true; Lifetime lifetime; std::string metadatumKey; std::string metadatumValue; @@ -1103,6 +1129,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)input.lifetime); + w.Key("enabled"); + w.Uint((int)input.enabled); if (input.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); @@ -1152,6 +1180,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)output.lifetime); + w.Key("enabled"); + w.Uint((int)output.enabled); if (output.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); diff --git a/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx b/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx index 49420e4bd99ad..1a2fed675536a 100644 --- a/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx +++ b/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx @@ -169,14 +169,16 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "TST/A2/0", "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -195,7 +197,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -204,7 +207,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -223,7 +227,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -232,7 +237,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -251,14 +257,16 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "y", "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [], @@ -433,14 +441,16 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "TST/A2/0", "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -459,7 +469,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -468,7 +479,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -487,7 +499,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -496,7 +509,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -515,14 +529,16 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "y", "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [], diff --git a/Framework/Core/test/test_WorkflowSerialization.cxx b/Framework/Core/test/test_WorkflowSerialization.cxx index 6e541f7d22f07..e7f39e7e38cbd 100644 --- 
a/Framework/Core/test/test_WorkflowSerialization.cxx +++ b/Framework/Core/test/test_WorkflowSerialization.cxx @@ -120,3 +120,41 @@ TEST_CASE("TestVerifyWildcard") // also check if the conversion to ConcreteDataMatcher is working at import // REQUIRE(std::get_if(&w1[0].inputs[0].matcher) != nullptr);; } + +/// Verify that disabled inputs / outputs are serialised correctly +TEST_CASE("TestDisabled") +{ + using namespace o2::framework; + WorkflowSpec w0{ + DataProcessorSpec{ + .name = "A", + .inputs = {{"clbPayload", "CLP"}, {"clbWrapper", "CLW"}}, + .outputs = {{"CP2", "1"}, {"CL2", "2"}}, + }}; + + w0[0].inputs[0].enabled = false; + w0[0].outputs[1].enabled = false; + + std::vector dataProcessorInfoOut{ + {"A", "test_Framework_test_SerializationWorkflow", {}}, + }; + + CommandInfo commandInfoOut{"o2-dpl-workflow -b --option 1 --option 2"}; + + std::vector dataProcessorInfoIn{}; + CommandInfo commandInfoIn; + + std::ostringstream firstDump; + WorkflowSerializationHelpers::dump(firstDump, w0, dataProcessorInfoOut, commandInfoOut); + std::istringstream is; + is.str(firstDump.str()); + WorkflowSpec w1; + WorkflowSerializationHelpers::import(is, w1, dataProcessorInfoIn, commandInfoIn); + REQUIRE(w1.size() == 1); + REQUIRE(w1[0].inputs.size() == 2); + REQUIRE(w1[0].inputs[0].enabled == false); + REQUIRE(w1[0].inputs[1].enabled == true); + REQUIRE(w1[0].outputs.size() == 2); + REQUIRE(w1[0].outputs[0].enabled == true); + REQUIRE(w1[0].outputs[1].enabled == false); +} From 4a65d7c6ce0580e508f094f3820b50768196bb69 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 14:20:06 +0100 Subject: [PATCH 256/726] DPL: get rid of warning --- Framework/Core/src/FairMQDeviceProxy.cxx | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index a83057e9578be..c25a58e47ef2d 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -320,10 +320,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto mInputRoutes.emplace_back(RouteState{channelIndex, false}); ri++; } - for (auto& route : mInputRoutes) { - assert(route.channel.value != -1); - assert(route.channel.value < mInputChannels.size()); - } + assert(std::all_of(mInputRoutes.begin(), mInputRoutes.end(), [s = mInputChannels.size()](RouteState const& route) { return route.channel.value != -1 && route.channel.value < s; })); LOGP(detail, "Total input channels found {}, total routes {}", mInputChannels.size(), mInputRoutes.size()); assert(mInputRoutes.size() == inputs.size()); } From c829bb9b39c20d10ca96a4b114af3b8903e3f322 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 14:55:02 +0100 Subject: [PATCH 257/726] DPL: use signposts to debug data creation --- Framework/Core/src/CommonServices.cxx | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index d264124fe25af..d1cf0d2f714ab 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -185,6 +185,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // if we did not, but we still have didDispatched set to true // it means it was created out of band. 
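A condensed sketch of the signpost pattern this commit applies (only the macros are taken from Signpost.h; the function name and its arguments are illustrative):

  O2_DECLARE_DYNAMIC_LOG(stream_context); // dynamic log stream, disabled by default
  void checkCreatedOutputs(void* service, uint64_t timeslice)
  {
    O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); // stable id per service instance
    O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks",
                           "checking outputs of timeslice %" PRIu64, timeslice);
  }

The stream can be switched on at runtime, either with O2_LOG_ENABLE(stream_context), as a later commit in this series does around the check, or from the device inspector GUI, whose new StreamContext checkbox sends the "/log-streams" command shown earlier.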
bool didCreate = false; + O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); for (size_t ri = 0; ri < routes.size(); ++ri) { if (stream->routeUserCreated[ri] == true) { didCreate = true; @@ -192,24 +193,29 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() } } if (didCreate == false && messageContext.didDispatch() == true) { - O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band"); - LOGP(debug, "Data created out of band"); + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band didCreate == %b && messageContext.didDispatch == %b", + didCreate, + messageContext.didDispatch()); return; } for (size_t ri = 0; ri < routes.size(); ++ri) { + auto &route = routes[ri]; + auto &matcher = route.matcher; if (stream->routeUserCreated[ri] == true) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created by user. ri = %" PRIu64 ", %{public}s", + (uint64_t)ri, DataSpecUtils::describe(matcher).c_str()); continue; } - auto &route = routes[ri]; - auto &matcher = route.matcher; if ((timeslice % route.maxTimeslices) != route.timeslice) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Route ri = %" PRIu64 ", skipped", + (uint64_t)ri); continue; } if (matcher.lifetime == Lifetime::Timeframe) { LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); } - } }, + } + }, .kind = ServiceKind::Stream}; } @@ -510,10 +516,11 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() }, .postForwarding = [](ProcessingContext& ctx, void* service) { auto* decongestion = reinterpret_cast(service); - if (decongestion->isFirstInTopology == false) { - LOGP(debug, "We are not the first in the topology, do not update the oldest possible timeslice"); + if (O2_BUILTIN_LIKELY(decongestion->isFirstInTopology == false)) { return; } + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, service); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "postForwardingCallbacks", "We are the first one in the topology, we need to update the oldest possible timeslice"); auto& timesliceIndex = ctx.services().get(); auto& relayer = ctx.services().get(); timesliceIndex.updateOldestPossibleOutput(); From ec29274420b075f3330dcdc32902911b456d0775 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 14:57:00 +0100 Subject: [PATCH 258/726] Debug data creation --- Framework/Core/src/CommonServices.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index d1cf0d2f714ab..59a6488f6235a 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -192,6 +192,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() break; } } + O2_LOG_ENABLE(stream_context); if (didCreate == false && messageContext.didDispatch() == true) { O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band didCreate == %b && messageContext.didDispatch == %b", didCreate, @@ -215,6 +216,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), 
timeslice); } } + O2_LOG_DISABLE(stream_context); }, .kind = ServiceKind::Stream}; } From 96a619b5e175233dd3b612e9db63e282d1132eb1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 16:13:58 +0100 Subject: [PATCH 259/726] DPL: do not run the check for the case data was not created nor dispatched --- Framework/Core/src/CommonServices.cxx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index d264124fe25af..f8783a6c3a21b 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -197,6 +197,11 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() LOGP(debug, "Data created out of band"); return; } + if (didCreate == false && messageContext.didDispatch() == false) { + O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "No data created"); + return; + } for (size_t ri = 0; ri < routes.size(); ++ri) { if (stream->routeUserCreated[ri] == true) { continue; From 94486b9efa0d56f5b05b710117e302ba5c666dca Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 16:27:55 +0100 Subject: [PATCH 260/726] DPL: drop commented code --- .../Core/test/benchmark_ExternalFairMQDeviceProxies.cxx | 6 ------ Framework/Core/test/test_DataAllocator.cxx | 1 - Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx | 1 - Framework/Core/test/test_DataOutputDirector.cxx | 3 --- Framework/Core/test/test_Parallel.cxx | 1 - Framework/Core/test/test_ParallelPipeline.cxx | 3 --- Framework/Core/test/test_Root2ArrowTable.cxx | 5 ----- Framework/Core/test/test_StaticFor.cxx | 2 -- .../Core/test/test_VariablePayloadSequenceWorkflow.cxx | 1 - 9 files changed, 23 deletions(-) diff --git a/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx b/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx index bdb4deb443021..c116ed81537ce 100644 --- a/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx +++ b/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx @@ -450,14 +450,8 @@ std::vector defineDataProcessing(ConfigContext const& config) Inputs checkerInputs; if (bypassProxies != ProxyBypass::None) { checkerInputs.emplace_back(InputSpec{"datain", ConcreteDataTypeMatcher{"TST", "DATA"}, Lifetime::Timeframe}); - //for (unsigned int i = 0; i < pState->nChannels; i++) { - // checkerInputs.emplace_back(InputSpec{{"datain"}, "TST", "DATA", i, Lifetime::Timeframe}); - //} } else { checkerInputs.emplace_back(InputSpec{"datain", ConcreteDataTypeMatcher{"PRX", "DATA"}, Lifetime::Timeframe}); - //for (unsigned int i = 0; i < pState->nChannels; i++) { - // checkerInputs.emplace_back(InputSpec{{"datain"}, "PRX", "DATA", i, Lifetime::Timeframe}); - //} } workflow.emplace_back(DataProcessorSpec{"checker", std::move(checkerInputs), diff --git a/Framework/Core/test/test_DataAllocator.cxx b/Framework/Core/test/test_DataAllocator.cxx index 894fb5f38368a..5c29040a37450 100644 --- a/Framework/Core/test/test_DataAllocator.cxx +++ b/Framework/Core/test/test_DataAllocator.cxx @@ -338,7 +338,6 @@ DataProcessorSpec getSinkSpec() LOG(info) << "extracting POD vector"; // TODO: use the ReturnType helper once implemented - //InputRecord::ReturnType> podvector; decltype(std::declval().get>(DataRef{nullptr, nullptr, nullptr})) podvector; podvector = pc.inputs().get>("inputPODvector"); ASSERT_ERROR(podvector.size() == 
3); diff --git a/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx b/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx index ef792afa4cc79..e8bac109985c4 100644 --- a/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx +++ b/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx @@ -21,7 +21,6 @@ #include "Framework/DataAllocator.h" // we need to add workflow options before including Framework/runDataProcessing -//void customize(std::vector& workflowOptions) //{ //} diff --git a/Framework/Core/test/test_DataOutputDirector.cxx b/Framework/Core/test/test_DataOutputDirector.cxx index c158c52932db4..1834dd0781897 100644 --- a/Framework/Core/test/test_DataOutputDirector.cxx +++ b/Framework/Core/test/test_DataOutputDirector.cxx @@ -29,7 +29,6 @@ TEST_CASE("TestDataOutputDirector") std::string keepString("AOD/UNO/0:tr1:c1/c2/c3:fn1,AOD/UNO/0::c4"); dod.readString(keepString); dod.setFilenameBase(mydfn); - //dod.printOut(); printf("\n\n"); auto ds = dod.getDataOutputDescriptors(dh); @@ -59,7 +58,6 @@ TEST_CASE("TestDataOutputDirector") dod.reset(); std::tie(rdn, dfn, fmode, mfs, ntf) = dod.readJsonString(jsonString); - //dod.printOut(); printf("\n\n"); ds = dod.getDataOutputDescriptors(dh); REQUIRE(ds.size() == 1); @@ -106,7 +104,6 @@ TEST_CASE("TestDataOutputDirector") dod.reset(); std::tie(rdn, dfn, fmode, mfs, ntf) = dod.readJson(jsonFile); dod.setFilenameBase("AnalysisResults"); - //dod.printOut(); printf("\n\n"); ds = dod.getDataOutputDescriptors(dh); REQUIRE(ds.size() == 2); diff --git a/Framework/Core/test/test_Parallel.cxx b/Framework/Core/test/test_Parallel.cxx index 6e8ebb2a2d1f0..4fe0c920426e2 100644 --- a/Framework/Core/test/test_Parallel.cxx +++ b/Framework/Core/test/test_Parallel.cxx @@ -19,7 +19,6 @@ #include - using namespace o2::framework; struct FakeCluster { diff --git a/Framework/Core/test/test_ParallelPipeline.cxx b/Framework/Core/test/test_ParallelPipeline.cxx index 47106855a8f74..b79ee3a73cd24 100644 --- a/Framework/Core/test/test_ParallelPipeline.cxx +++ b/Framework/Core/test/test_ParallelPipeline.cxx @@ -63,7 +63,6 @@ std::vector defineDataProcessing(ConfigContext const&) LOG(debug) << "instance " << parallelContext.index1D() << " of " << parallelContext.index1DSize() << ": " << *input.spec << ": " << *((int*)input.payload); auto const* dataheader = DataRefUtils::getHeader(input); - //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification}); ASSERT_ERROR(ctx.inputs().get(input.spec->binding.c_str()) == parallelContext.index1D()); data = parallelContext.index1D(); @@ -84,10 +83,8 @@ std::vector defineDataProcessing(ConfigContext const&) auto const* dataheader = DataRefUtils::getHeader(input); // TODO: there is a bug in the API for using OutputRef, returns an rvalue which can not be bound to // lvalue reference - //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification}); data = ctx.inputs().get(input.spec->binding.c_str()); - //auto& meta = ctx.outputs().make(OutputRef{"metadt", dataheader->subSpecification}); auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification}); meta = dataheader->subSpecification; } diff --git a/Framework/Core/test/test_Root2ArrowTable.cxx b/Framework/Core/test/test_Root2ArrowTable.cxx index b22432d8565a5..a3f48957d7044 100644 --- 
a/Framework/Core/test/test_Root2ArrowTable.cxx +++ b/Framework/Core/test/test_Root2ArrowTable.cxx @@ -47,7 +47,6 @@ TEST_CASE("RootTree2Table") t1.Branch("ij", ij, "ij[2]/I"); //fill the tree for (Int_t i = 0; i < 1000; i++) { - //gRandom->Rannor(xyz[0], xyz[1]); xyz[0] = 1; xyz[1] = 2; xyz[2] = 3; @@ -92,16 +91,12 @@ TEST_CASE("RootTree2Table") REQUIRE(array->length() == 3000); const float* c = reinterpret_cast(array->values()->data()); - //auto array = std::static_pointer_cast(table->column(0)->chunk(0)); - //CHECK_EQUAL(array->byte_width(), sizeof(float[3])); - //const float* c = reinterpret_cast(array->Value(0)); CHECK(c[0] == 1); CHECK(c[1] == 2); CHECK(c[2] == 1); } { - //auto values = std::static_pointer_cast(table->column(1)->chunk(0)); auto chunkToUse = table->column(1)->chunk(0); chunkToUse = std::dynamic_pointer_cast(chunkToUse)->values(); auto array = std::static_pointer_cast(chunkToUse); diff --git a/Framework/Core/test/test_StaticFor.cxx b/Framework/Core/test/test_StaticFor.cxx index bacdcaeefe86e..ee66c5b4aaf5b 100644 --- a/Framework/Core/test/test_StaticFor.cxx +++ b/Framework/Core/test/test_StaticFor.cxx @@ -57,8 +57,6 @@ TEST_CASE("TestStaticFor") constexpr int index = i.value; // compiler will complain if constexpr is not enforced for index access: - //CONST_STR(staticNames[index]); // works - //CONST_STR(staticNames[i.value]); // fails constexpr auto sayHello = CONST_STR("Hello ") + CONST_STR(staticNames[index]); diff --git a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx index 1024df1218485..9945eab037780 100644 --- a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx +++ b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx @@ -217,7 +217,6 @@ std::vector defineDataProcessing(ConfigContext const& config) ASSERT_ERROR(*reinterpret_cast(ref.payload) == sd->initialValue + nSequencePayloads); ++nSequencePayloads; } - //LOG(info) << "input " << ref.spec->binding << " has data {" << dh->dataOrigin.as() << "/" << dh->dataDescription.as() << "/" << dh->subSpecification << "}: " << *reinterpret_cast(ref.payload); } for (auto const& [channel, count] : active) { ++counters[channel]; From deaf51be8461e455167b9c17ca64e9a17a24d4ea Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 18 Jan 2024 16:31:08 +0100 Subject: [PATCH 261/726] Fix formatting in tests --- Framework/Core/test/test_Expressions.cxx | 2 +- Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx | 2 +- Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx | 2 +- Framework/Core/test/test_O2DatabasePDG.cxx | 2 +- Framework/Core/test/test_Parallel.cxx | 6 +++--- Framework/Core/test/test_Root2ArrowTable.cxx | 4 ++-- Framework/Core/test/test_Services.cxx | 2 +- Framework/Core/test/test_StaggeringWorkflow.cxx | 2 +- Framework/Core/test/test_TableBuilder.cxx | 4 ++-- Framework/Core/test/test_TreeToTable.cxx | 2 +- .../Core/test/test_VariablePayloadSequenceWorkflow.cxx | 2 +- 11 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Framework/Core/test/test_Expressions.cxx b/Framework/Core/test/test_Expressions.cxx index 0bd4fdc564946..176db6a259a7c 100644 --- a/Framework/Core/test/test_Expressions.cxx +++ b/Framework/Core/test/test_Expressions.cxx @@ -32,7 +32,7 @@ static BindingNode testInt{"testInt", 6, atype::INT32}; namespace o2::aod::track { -DECLARE_SOA_EXPRESSION_COLUMN(Pze, pz, float, o2::aod::track::tgl*(1.f / o2::aod::track::signed1Pt)); 
+DECLARE_SOA_EXPRESSION_COLUMN(Pze, pz, float, o2::aod::track::tgl * (1.f / o2::aod::track::signed1Pt)); } // namespace o2::aod::track TEST_CASE("TestTreeParsing") diff --git a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx index 249e7fc304949..d4d204f206e0a 100644 --- a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx +++ b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx @@ -403,7 +403,7 @@ std::vector defineDataProcessing(ConfigContext const& config) output.AddPart(std::move(inputs.At(msgidx))); } } - auto &messageContext = services.get(); + auto& messageContext = services.get(); o2::framework::sendOnChannel(*device, output, channelName, (size_t)-1); messageContext.fakeDispatch(); return output.Size() != 0; diff --git a/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx b/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx index 7b0ffa462a9db..d15eebd26bdf7 100644 --- a/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx +++ b/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx @@ -31,7 +31,7 @@ namespace { WorkflowSpec defineDataProcessing() { - return {{.name = "A", // + return {{.name = "A", // .outputs = Outputs{OutputSpec{"TST", "A1"}, OutputSpec{"TST", "A2"}}, // A1 will be consumed twice, A2 is dangling .algorithm = AlgorithmSpec{}, // .options = {ConfigParamSpec{"channel-config", VariantType::String, // raw input channel diff --git a/Framework/Core/test/test_O2DatabasePDG.cxx b/Framework/Core/test/test_O2DatabasePDG.cxx index c6f6f3c9c57e7..11b07d1473390 100644 --- a/Framework/Core/test/test_O2DatabasePDG.cxx +++ b/Framework/Core/test/test_O2DatabasePDG.cxx @@ -40,7 +40,7 @@ struct PdgTest { TParticlePDG* p = pdgOld->GetParticle(300553); ASSERT_ERROR(p != nullptr); ASSERT_ERROR(p->Mass() == 10.580); - ASSERT_ERROR(p->Stable() == kFALSE); + ASSERT_ERROR(p->Stable() == kFALSE); ASSERT_ERROR(p->Charge() == 0); ASSERT_ERROR(p->Width() == 0.000); diff --git a/Framework/Core/test/test_Parallel.cxx b/Framework/Core/test/test_Parallel.cxx index 4fe0c920426e2..7967f55a7b93d 100644 --- a/Framework/Core/test/test_Parallel.cxx +++ b/Framework/Core/test/test_Parallel.cxx @@ -57,7 +57,7 @@ std::vector defineDataProcessing(ConfigContext const&) Outputs{ {"TPC", "CLUSTERS_P", 0, Lifetime::Timeframe}}, AlgorithmSpec{ - //CLion says it ambiguous without (AlgorithmSpec::ProcessCallback), but cmake compiles fine anyway. + // CLion says it ambiguous without (AlgorithmSpec::ProcessCallback), but cmake compiles fine anyway. 
(AlgorithmSpec::ProcessCallback)someProcessingStageAlgorithm}}, parallelSize, [](DataProcessorSpec& spec, size_t index) { @@ -95,7 +95,7 @@ std::vector defineDataProcessing(ConfigContext const&) auto matcher = DataSpecUtils::asConcreteDataMatcher(*inputSpec); o2::header::DataDescription outputDescription = matcher.description; - //todo: better sampled data flagging + // todo: better sampled data flagging size_t len = strlen(outputDescription.str); if (len < outputDescription.size - 2) { outputDescription.str[len] = '_'; @@ -113,7 +113,7 @@ std::vector defineDataProcessing(ConfigContext const&) const auto* inputHeader = DataRefUtils::getHeader(input); auto& output = ctx.outputs().make(description, inputHeader->size()); - //todo: use some std function or adopt(), when it is available for POD data + // todo: use some std function or adopt(), when it is available for POD data const char* input_ptr = input.payload; for (char& it : output) { it = *input_ptr++; diff --git a/Framework/Core/test/test_Root2ArrowTable.cxx b/Framework/Core/test/test_Root2ArrowTable.cxx index a3f48957d7044..bcd0730734eb8 100644 --- a/Framework/Core/test/test_Root2ArrowTable.cxx +++ b/Framework/Core/test/test_Root2ArrowTable.cxx @@ -45,7 +45,7 @@ TEST_CASE("RootTree2Table") t1.Branch("ev", &ev, "ev/I"); t1.Branch("xyz", xyz, "xyz[3]/F"); t1.Branch("ij", ij, "ij[2]/I"); - //fill the tree + // fill the tree for (Int_t i = 0; i < 1000; i++) { xyz[0] = 1; xyz[1] = 2; @@ -146,7 +146,7 @@ TEST_CASE("RootTree2TableViaASoA") t2.Branch("ev", &ev, "ev/I"); t2.Branch("xyz", xyz, "xyz[3]/F"); t2.Branch("ij", ij, "ij[2]/I"); - //fill the tree + // fill the tree for (Int_t i = 0; i < 1000; i++) { gRandom->Rannor(xyz[0], xyz[1]); gRandom->Rannor(px, py); diff --git a/Framework/Core/test/test_Services.cxx b/Framework/Core/test/test_Services.cxx index d1a8a3592cd10..23092127fb37b 100644 --- a/Framework/Core/test/test_Services.cxx +++ b/Framework/Core/test/test_Services.cxx @@ -94,7 +94,7 @@ static ServiceRegistry::Salt salt_1 = ServiceRegistry::Salt{1, 0}; static ServiceRegistry::Salt salt_2 = ServiceRegistry::Salt{2, 0}; static ServiceRegistry::Salt salt_3 = ServiceRegistry::Salt{3, 0}; static ServiceRegistry::Salt salt_1_1 = ServiceRegistry::Salt{1, 1}; -} +} // namespace o2::framework TEST_CASE("TestSerialServices") { diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index 590ce83cef467..cef3ec90c2042 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -53,7 +53,7 @@ void customize(std::vector& policies) // search for spec names starting with "processor" return spec.name.find("processor") == 0; }, - [](auto const&, auto const&, auto &) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); + [](auto const&, auto const&, auto&) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); } #include "Framework/runDataProcessing.h" diff --git a/Framework/Core/test/test_TableBuilder.cxx b/Framework/Core/test/test_TableBuilder.cxx index 26a0f5a930b73..1035a0e0008b2 100644 --- a/Framework/Core/test/test_TableBuilder.cxx +++ b/Framework/Core/test/test_TableBuilder.cxx @@ -32,7 +32,7 @@ namespace test2 DECLARE_SOA_COLUMN_FULL(X, x, uint64_t, "x"); DECLARE_SOA_COLUMN_FULL(Y, y, uint64_t, "y"); DECLARE_SOA_COLUMN_FULL(Pos, pos, int[4], "pos"); -} // namespace test +} // namespace test2 using TestTable = o2::soa::Table; using ArrayTable = o2::soa::Table; @@ -295,7 +295,7 @@ 
TEST_CASE("TestColumnCount") TEST_CASE("TestMakeFields") { - auto fields = TableBuilderHelpers::makeFields({ "i", "f" }); + auto fields = TableBuilderHelpers::makeFields({"i", "f"}); REQUIRE(fields.size() == 2); REQUIRE(fields[0]->name() == "i"); REQUIRE(fields[1]->name() == "f"); diff --git a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..4746eead57c1e 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -53,7 +53,7 @@ TEST_CASE("TreeToTableConversion") t1.Branch("xyzw", xyzw, "xyzw[96]/F"); t1.Branch("small", &b, "small/b"); - //fill the tree + // fill the tree int ntruein[2] = {0}; for (int i = 0; i < ndp; i++) { ok = (i % 2) == 0; diff --git a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx index 9945eab037780..0df6d626f1b49 100644 --- a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx +++ b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx @@ -56,7 +56,7 @@ namespace test // a header with the information expected in the payload // will be sent on the header stack struct SequenceDesc : public o2::header::BaseHeader { - //static data for this header type/version + // static data for this header type/version static constexpr uint32_t sVersion{1}; static constexpr o2::header::HeaderType sHeaderType{o2::header::String2("SequDesc")}; static constexpr o2::header::SerializationMethod sSerializationMethod{o2::header::gSerializationMethodNone}; From 1b5ad1dfcd13d72ba7e7543cfeb7e4cb9800cb47 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 19 Jan 2024 08:41:04 +0100 Subject: [PATCH 262/726] DPL: drop commented code --- .../Core/test/benchmark_ExternalFairMQDeviceProxies.cxx | 6 ------ Framework/Core/test/test_DataAllocator.cxx | 1 - Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx | 1 - Framework/Core/test/test_DataOutputDirector.cxx | 3 --- Framework/Core/test/test_Parallel.cxx | 1 - Framework/Core/test/test_ParallelPipeline.cxx | 3 --- Framework/Core/test/test_Root2ArrowTable.cxx | 5 ----- Framework/Core/test/test_StaticFor.cxx | 2 -- .../Core/test/test_VariablePayloadSequenceWorkflow.cxx | 1 - 9 files changed, 23 deletions(-) diff --git a/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx b/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx index bdb4deb443021..c116ed81537ce 100644 --- a/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx +++ b/Framework/Core/test/benchmark_ExternalFairMQDeviceProxies.cxx @@ -450,14 +450,8 @@ std::vector defineDataProcessing(ConfigContext const& config) Inputs checkerInputs; if (bypassProxies != ProxyBypass::None) { checkerInputs.emplace_back(InputSpec{"datain", ConcreteDataTypeMatcher{"TST", "DATA"}, Lifetime::Timeframe}); - //for (unsigned int i = 0; i < pState->nChannels; i++) { - // checkerInputs.emplace_back(InputSpec{{"datain"}, "TST", "DATA", i, Lifetime::Timeframe}); - //} } else { checkerInputs.emplace_back(InputSpec{"datain", ConcreteDataTypeMatcher{"PRX", "DATA"}, Lifetime::Timeframe}); - //for (unsigned int i = 0; i < pState->nChannels; i++) { - // checkerInputs.emplace_back(InputSpec{{"datain"}, "PRX", "DATA", i, Lifetime::Timeframe}); - //} } workflow.emplace_back(DataProcessorSpec{"checker", std::move(checkerInputs), diff --git a/Framework/Core/test/test_DataAllocator.cxx b/Framework/Core/test/test_DataAllocator.cxx index 894fb5f38368a..5c29040a37450 
100644 --- a/Framework/Core/test/test_DataAllocator.cxx +++ b/Framework/Core/test/test_DataAllocator.cxx @@ -338,7 +338,6 @@ DataProcessorSpec getSinkSpec() LOG(info) << "extracting POD vector"; // TODO: use the ReturnType helper once implemented - //InputRecord::ReturnType> podvector; decltype(std::declval().get>(DataRef{nullptr, nullptr, nullptr})) podvector; podvector = pc.inputs().get>("inputPODvector"); ASSERT_ERROR(podvector.size() == 3); diff --git a/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx b/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx index ef792afa4cc79..e8bac109985c4 100644 --- a/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx +++ b/Framework/Core/test/test_DataDescriptorMatcherWorkflow.cxx @@ -21,7 +21,6 @@ #include "Framework/DataAllocator.h" // we need to add workflow options before including Framework/runDataProcessing -//void customize(std::vector& workflowOptions) //{ //} diff --git a/Framework/Core/test/test_DataOutputDirector.cxx b/Framework/Core/test/test_DataOutputDirector.cxx index c158c52932db4..1834dd0781897 100644 --- a/Framework/Core/test/test_DataOutputDirector.cxx +++ b/Framework/Core/test/test_DataOutputDirector.cxx @@ -29,7 +29,6 @@ TEST_CASE("TestDataOutputDirector") std::string keepString("AOD/UNO/0:tr1:c1/c2/c3:fn1,AOD/UNO/0::c4"); dod.readString(keepString); dod.setFilenameBase(mydfn); - //dod.printOut(); printf("\n\n"); auto ds = dod.getDataOutputDescriptors(dh); @@ -59,7 +58,6 @@ TEST_CASE("TestDataOutputDirector") dod.reset(); std::tie(rdn, dfn, fmode, mfs, ntf) = dod.readJsonString(jsonString); - //dod.printOut(); printf("\n\n"); ds = dod.getDataOutputDescriptors(dh); REQUIRE(ds.size() == 1); @@ -106,7 +104,6 @@ TEST_CASE("TestDataOutputDirector") dod.reset(); std::tie(rdn, dfn, fmode, mfs, ntf) = dod.readJson(jsonFile); dod.setFilenameBase("AnalysisResults"); - //dod.printOut(); printf("\n\n"); ds = dod.getDataOutputDescriptors(dh); REQUIRE(ds.size() == 2); diff --git a/Framework/Core/test/test_Parallel.cxx b/Framework/Core/test/test_Parallel.cxx index 6e8ebb2a2d1f0..4fe0c920426e2 100644 --- a/Framework/Core/test/test_Parallel.cxx +++ b/Framework/Core/test/test_Parallel.cxx @@ -19,7 +19,6 @@ #include - using namespace o2::framework; struct FakeCluster { diff --git a/Framework/Core/test/test_ParallelPipeline.cxx b/Framework/Core/test/test_ParallelPipeline.cxx index 47106855a8f74..b79ee3a73cd24 100644 --- a/Framework/Core/test/test_ParallelPipeline.cxx +++ b/Framework/Core/test/test_ParallelPipeline.cxx @@ -63,7 +63,6 @@ std::vector defineDataProcessing(ConfigContext const&) LOG(debug) << "instance " << parallelContext.index1D() << " of " << parallelContext.index1DSize() << ": " << *input.spec << ": " << *((int*)input.payload); auto const* dataheader = DataRefUtils::getHeader(input); - //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); auto& data = ctx.outputs().make(Output{"TST", "PREPROC", dataheader->subSpecification}); ASSERT_ERROR(ctx.inputs().get(input.spec->binding.c_str()) == parallelContext.index1D()); data = parallelContext.index1D(); @@ -84,10 +83,8 @@ std::vector defineDataProcessing(ConfigContext const&) auto const* dataheader = DataRefUtils::getHeader(input); // TODO: there is a bug in the API for using OutputRef, returns an rvalue which can not be bound to // lvalue reference - //auto& data = ctx.outputs().make(OutputRef{"output", dataheader->subSpecification}); auto& data = ctx.outputs().make(Output{"TST", "DATA", dataheader->subSpecification}); data = 
ctx.inputs().get(input.spec->binding.c_str()); - //auto& meta = ctx.outputs().make(OutputRef{"metadt", dataheader->subSpecification}); auto& meta = ctx.outputs().make(Output{"TST", "META", dataheader->subSpecification}); meta = dataheader->subSpecification; } diff --git a/Framework/Core/test/test_Root2ArrowTable.cxx b/Framework/Core/test/test_Root2ArrowTable.cxx index b22432d8565a5..a3f48957d7044 100644 --- a/Framework/Core/test/test_Root2ArrowTable.cxx +++ b/Framework/Core/test/test_Root2ArrowTable.cxx @@ -47,7 +47,6 @@ TEST_CASE("RootTree2Table") t1.Branch("ij", ij, "ij[2]/I"); //fill the tree for (Int_t i = 0; i < 1000; i++) { - //gRandom->Rannor(xyz[0], xyz[1]); xyz[0] = 1; xyz[1] = 2; xyz[2] = 3; @@ -92,16 +91,12 @@ TEST_CASE("RootTree2Table") REQUIRE(array->length() == 3000); const float* c = reinterpret_cast(array->values()->data()); - //auto array = std::static_pointer_cast(table->column(0)->chunk(0)); - //CHECK_EQUAL(array->byte_width(), sizeof(float[3])); - //const float* c = reinterpret_cast(array->Value(0)); CHECK(c[0] == 1); CHECK(c[1] == 2); CHECK(c[2] == 1); } { - //auto values = std::static_pointer_cast(table->column(1)->chunk(0)); auto chunkToUse = table->column(1)->chunk(0); chunkToUse = std::dynamic_pointer_cast(chunkToUse)->values(); auto array = std::static_pointer_cast(chunkToUse); diff --git a/Framework/Core/test/test_StaticFor.cxx b/Framework/Core/test/test_StaticFor.cxx index bacdcaeefe86e..ee66c5b4aaf5b 100644 --- a/Framework/Core/test/test_StaticFor.cxx +++ b/Framework/Core/test/test_StaticFor.cxx @@ -57,8 +57,6 @@ TEST_CASE("TestStaticFor") constexpr int index = i.value; // compiler will complain if constexpr is not enforced for index access: - //CONST_STR(staticNames[index]); // works - //CONST_STR(staticNames[i.value]); // fails constexpr auto sayHello = CONST_STR("Hello ") + CONST_STR(staticNames[index]); diff --git a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx index 1024df1218485..9945eab037780 100644 --- a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx +++ b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx @@ -217,7 +217,6 @@ std::vector defineDataProcessing(ConfigContext const& config) ASSERT_ERROR(*reinterpret_cast(ref.payload) == sd->initialValue + nSequencePayloads); ++nSequencePayloads; } - //LOG(info) << "input " << ref.spec->binding << " has data {" << dh->dataOrigin.as() << "/" << dh->dataDescription.as() << "/" << dh->subSpecification << "}: " << *reinterpret_cast(ref.payload); } for (auto const& [channel, count] : active) { ++counters[channel]; From 9a893773bcefdbfba6f5f21fbab85d2d70d76663 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 19 Jan 2024 08:41:04 +0100 Subject: [PATCH 263/726] Fix formatting in tests --- Framework/Core/test/test_Expressions.cxx | 2 +- Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx | 2 +- Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx | 2 +- Framework/Core/test/test_O2DatabasePDG.cxx | 2 +- Framework/Core/test/test_Parallel.cxx | 6 +++--- Framework/Core/test/test_Root2ArrowTable.cxx | 5 ++--- Framework/Core/test/test_Services.cxx | 2 +- Framework/Core/test/test_StaggeringWorkflow.cxx | 2 +- Framework/Core/test/test_TableBuilder.cxx | 4 ++-- Framework/Core/test/test_TreeToTable.cxx | 2 +- .../Core/test/test_VariablePayloadSequenceWorkflow.cxx | 2 +- 11 files changed, 15 insertions(+), 16 deletions(-) diff --git 
a/Framework/Core/test/test_Expressions.cxx b/Framework/Core/test/test_Expressions.cxx index 0bd4fdc564946..176db6a259a7c 100644 --- a/Framework/Core/test/test_Expressions.cxx +++ b/Framework/Core/test/test_Expressions.cxx @@ -32,7 +32,7 @@ static BindingNode testInt{"testInt", 6, atype::INT32}; namespace o2::aod::track { -DECLARE_SOA_EXPRESSION_COLUMN(Pze, pz, float, o2::aod::track::tgl*(1.f / o2::aod::track::signed1Pt)); +DECLARE_SOA_EXPRESSION_COLUMN(Pze, pz, float, o2::aod::track::tgl * (1.f / o2::aod::track::signed1Pt)); } // namespace o2::aod::track TEST_CASE("TestTreeParsing") diff --git a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx index 249e7fc304949..d4d204f206e0a 100644 --- a/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx +++ b/Framework/Core/test/test_ExternalFairMQDeviceWorkflow.cxx @@ -403,7 +403,7 @@ std::vector defineDataProcessing(ConfigContext const& config) output.AddPart(std::move(inputs.At(msgidx))); } } - auto &messageContext = services.get(); + auto& messageContext = services.get(); o2::framework::sendOnChannel(*device, output, channelName, (size_t)-1); messageContext.fakeDispatch(); return output.Size() != 0; diff --git a/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx b/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx index 7b0ffa462a9db..d15eebd26bdf7 100644 --- a/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx +++ b/Framework/Core/test/test_FrameworkDataFlowToO2Control.cxx @@ -31,7 +31,7 @@ namespace { WorkflowSpec defineDataProcessing() { - return {{.name = "A", // + return {{.name = "A", // .outputs = Outputs{OutputSpec{"TST", "A1"}, OutputSpec{"TST", "A2"}}, // A1 will be consumed twice, A2 is dangling .algorithm = AlgorithmSpec{}, // .options = {ConfigParamSpec{"channel-config", VariantType::String, // raw input channel diff --git a/Framework/Core/test/test_O2DatabasePDG.cxx b/Framework/Core/test/test_O2DatabasePDG.cxx index c6f6f3c9c57e7..11b07d1473390 100644 --- a/Framework/Core/test/test_O2DatabasePDG.cxx +++ b/Framework/Core/test/test_O2DatabasePDG.cxx @@ -40,7 +40,7 @@ struct PdgTest { TParticlePDG* p = pdgOld->GetParticle(300553); ASSERT_ERROR(p != nullptr); ASSERT_ERROR(p->Mass() == 10.580); - ASSERT_ERROR(p->Stable() == kFALSE); + ASSERT_ERROR(p->Stable() == kFALSE); ASSERT_ERROR(p->Charge() == 0); ASSERT_ERROR(p->Width() == 0.000); diff --git a/Framework/Core/test/test_Parallel.cxx b/Framework/Core/test/test_Parallel.cxx index 4fe0c920426e2..7967f55a7b93d 100644 --- a/Framework/Core/test/test_Parallel.cxx +++ b/Framework/Core/test/test_Parallel.cxx @@ -57,7 +57,7 @@ std::vector defineDataProcessing(ConfigContext const&) Outputs{ {"TPC", "CLUSTERS_P", 0, Lifetime::Timeframe}}, AlgorithmSpec{ - //CLion says it ambiguous without (AlgorithmSpec::ProcessCallback), but cmake compiles fine anyway. + // CLion says it ambiguous without (AlgorithmSpec::ProcessCallback), but cmake compiles fine anyway. 
(AlgorithmSpec::ProcessCallback)someProcessingStageAlgorithm}}, parallelSize, [](DataProcessorSpec& spec, size_t index) { @@ -95,7 +95,7 @@ std::vector defineDataProcessing(ConfigContext const&) auto matcher = DataSpecUtils::asConcreteDataMatcher(*inputSpec); o2::header::DataDescription outputDescription = matcher.description; - //todo: better sampled data flagging + // todo: better sampled data flagging size_t len = strlen(outputDescription.str); if (len < outputDescription.size - 2) { outputDescription.str[len] = '_'; @@ -113,7 +113,7 @@ std::vector defineDataProcessing(ConfigContext const&) const auto* inputHeader = DataRefUtils::getHeader(input); auto& output = ctx.outputs().make(description, inputHeader->size()); - //todo: use some std function or adopt(), when it is available for POD data + // todo: use some std function or adopt(), when it is available for POD data const char* input_ptr = input.payload; for (char& it : output) { it = *input_ptr++; diff --git a/Framework/Core/test/test_Root2ArrowTable.cxx b/Framework/Core/test/test_Root2ArrowTable.cxx index a3f48957d7044..109d9718f5f09 100644 --- a/Framework/Core/test/test_Root2ArrowTable.cxx +++ b/Framework/Core/test/test_Root2ArrowTable.cxx @@ -45,7 +45,7 @@ TEST_CASE("RootTree2Table") t1.Branch("ev", &ev, "ev/I"); t1.Branch("xyz", xyz, "xyz[3]/F"); t1.Branch("ij", ij, "ij[2]/I"); - //fill the tree + // fill the tree for (Int_t i = 0; i < 1000; i++) { xyz[0] = 1; xyz[1] = 2; @@ -91,7 +91,6 @@ TEST_CASE("RootTree2Table") REQUIRE(array->length() == 3000); const float* c = reinterpret_cast(array->values()->data()); - CHECK(c[0] == 1); CHECK(c[1] == 2); CHECK(c[2] == 1); @@ -146,7 +145,7 @@ TEST_CASE("RootTree2TableViaASoA") t2.Branch("ev", &ev, "ev/I"); t2.Branch("xyz", xyz, "xyz[3]/F"); t2.Branch("ij", ij, "ij[2]/I"); - //fill the tree + // fill the tree for (Int_t i = 0; i < 1000; i++) { gRandom->Rannor(xyz[0], xyz[1]); gRandom->Rannor(px, py); diff --git a/Framework/Core/test/test_Services.cxx b/Framework/Core/test/test_Services.cxx index d1a8a3592cd10..23092127fb37b 100644 --- a/Framework/Core/test/test_Services.cxx +++ b/Framework/Core/test/test_Services.cxx @@ -94,7 +94,7 @@ static ServiceRegistry::Salt salt_1 = ServiceRegistry::Salt{1, 0}; static ServiceRegistry::Salt salt_2 = ServiceRegistry::Salt{2, 0}; static ServiceRegistry::Salt salt_3 = ServiceRegistry::Salt{3, 0}; static ServiceRegistry::Salt salt_1_1 = ServiceRegistry::Salt{1, 1}; -} +} // namespace o2::framework TEST_CASE("TestSerialServices") { diff --git a/Framework/Core/test/test_StaggeringWorkflow.cxx b/Framework/Core/test/test_StaggeringWorkflow.cxx index 590ce83cef467..cef3ec90c2042 100644 --- a/Framework/Core/test/test_StaggeringWorkflow.cxx +++ b/Framework/Core/test/test_StaggeringWorkflow.cxx @@ -53,7 +53,7 @@ void customize(std::vector& policies) // search for spec names starting with "processor" return spec.name.find("processor") == 0; }, - [](auto const&, auto const&, auto &) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); + [](auto const&, auto const&, auto&) { return o2::framework::CompletionPolicy::CompletionOp::Consume; }}); } #include "Framework/runDataProcessing.h" diff --git a/Framework/Core/test/test_TableBuilder.cxx b/Framework/Core/test/test_TableBuilder.cxx index 26a0f5a930b73..1035a0e0008b2 100644 --- a/Framework/Core/test/test_TableBuilder.cxx +++ b/Framework/Core/test/test_TableBuilder.cxx @@ -32,7 +32,7 @@ namespace test2 DECLARE_SOA_COLUMN_FULL(X, x, uint64_t, "x"); DECLARE_SOA_COLUMN_FULL(Y, y, uint64_t, "y"); 
DECLARE_SOA_COLUMN_FULL(Pos, pos, int[4], "pos"); -} // namespace test +} // namespace test2 using TestTable = o2::soa::Table; using ArrayTable = o2::soa::Table; @@ -295,7 +295,7 @@ TEST_CASE("TestColumnCount") TEST_CASE("TestMakeFields") { - auto fields = TableBuilderHelpers::makeFields({ "i", "f" }); + auto fields = TableBuilderHelpers::makeFields({"i", "f"}); REQUIRE(fields.size() == 2); REQUIRE(fields[0]->name() == "i"); REQUIRE(fields[1]->name() == "f"); diff --git a/Framework/Core/test/test_TreeToTable.cxx b/Framework/Core/test/test_TreeToTable.cxx index 65569b72b9f35..4746eead57c1e 100644 --- a/Framework/Core/test/test_TreeToTable.cxx +++ b/Framework/Core/test/test_TreeToTable.cxx @@ -53,7 +53,7 @@ TEST_CASE("TreeToTableConversion") t1.Branch("xyzw", xyzw, "xyzw[96]/F"); t1.Branch("small", &b, "small/b"); - //fill the tree + // fill the tree int ntruein[2] = {0}; for (int i = 0; i < ndp; i++) { ok = (i % 2) == 0; diff --git a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx index 9945eab037780..0df6d626f1b49 100644 --- a/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx +++ b/Framework/Core/test/test_VariablePayloadSequenceWorkflow.cxx @@ -56,7 +56,7 @@ namespace test // a header with the information expected in the payload // will be sent on the header stack struct SequenceDesc : public o2::header::BaseHeader { - //static data for this header type/version + // static data for this header type/version static constexpr uint32_t sVersion{1}; static constexpr o2::header::HeaderType sHeaderType{o2::header::String2("SequDesc")}; static constexpr o2::header::SerializationMethod sSerializationMethod{o2::header::gSerializationMethodNone}; From 4ec5e8adb24f41c2d499c22346041a9fbc2d6ed0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 19 Jan 2024 23:30:41 +0100 Subject: [PATCH 264/726] DPL: enable Signposts by default also on linux --- Framework/Foundation/include/Framework/Signpost.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 8d0107e68ecd6..d7b8766de83c4 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -108,7 +108,7 @@ o2_log_handle_t* o2_walk_logs(bool (*callback)(char const* name, void* log, void #endif // __APPLE__ // Unless we are on apple we enable checking for signposts only if in debug mode or if we force them. -#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(O2_NSIGNPOSTS) #define O2_LOG_ENABLED(log) private_o2_log_##log->stacktrace #else #define O2_LOG_ENABLED(log) false @@ -448,7 +448,7 @@ void o2_debug_log_set_stacktrace(_o2_log_t* log, int stacktrace) } #endif // O2_SIGNPOST_IMPLEMENTATION -#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(NDEBUG) +#if defined(__APPLE__) || defined(O2_FORCE_SIGNPOSTS) || !defined(O2_NSIGNPOSTS) /// Dynamic logs need to be enabled via the O2_LOG_ENABLE macro. Notice this will only work /// for the logger based logging, since the Apple version needs instruments to enable them. #define O2_DECLARE_DYNAMIC_LOG(name) static _o2_log_t* private_o2_log_##name = (_o2_log_t*)_o2_log_create("ch.cern.aliceo2." 
#name, 1) From aa947aad463843fb3c7f6003cd7bf522baf4dc6d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 22 Jan 2024 12:45:45 +0100 Subject: [PATCH 265/726] DPL: fix typo in help message --- Framework/Core/src/WorkflowCustomizationHelpers.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/WorkflowCustomizationHelpers.cxx b/Framework/Core/src/WorkflowCustomizationHelpers.cxx index da21168a8fe44..ffad8a8720019 100644 --- a/Framework/Core/src/WorkflowCustomizationHelpers.cxx +++ b/Framework/Core/src/WorkflowCustomizationHelpers.cxx @@ -49,7 +49,7 @@ std::vector WorkflowCustomizationHelpers::requiredWorkflowOptio {"workflow-suffix", VariantType::String, "", {"suffix to add to all dataprocessors"}}, // options for TF rate limiting - {"timeframes-rate-limit-ipcid", VariantType::String, "-1", {"Suffix for IPC channel for metrix-feedback, -1 = disable"}}, + {"timeframes-rate-limit-ipcid", VariantType::String, "-1", {"Suffix for IPC channel for metric-feedback, -1 = disable"}}, // options for AOD rate limiting {"aod-memory-rate-limit", VariantType::Int64, 0LL, {"Rate limit AOD processing based on memory"}}, From 538614f13978993f3c13c8ab4b701d630dc6c146 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 22 Jan 2024 13:56:03 +0100 Subject: [PATCH 266/726] DPL: fix formatting and format string --- Framework/Core/src/CommonServices.cxx | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index f9d327d14107d..8ac8315e949da 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -193,7 +193,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() } } if (didCreate == false && messageContext.didDispatch() == true) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band didCreate == %b && messageContext.didDispatch == %b", + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band didCreate == %d && messageContext.didDispatch == %d", didCreate, messageContext.didDispatch()); return; @@ -219,8 +219,7 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() if (matcher.lifetime == Lifetime::Timeframe) { LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); } - } - }, + } }, .kind = ServiceKind::Stream}; } From a50b44593d025044911a6520f7a310c150b7dbd9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 22 Jan 2024 23:56:00 +0100 Subject: [PATCH 267/726] DPL: handle corner case for consumeWhenAll This should avoid computation if only sporadic inputs are expected and they are all missing. 
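For illustration, the decision being introduced here can be condensed into a standalone sketch. Spec and Input below are simplified stand-ins for the real DPL types, and the actual policy in the diff also tracks the oldest possible timeslice to decide between Wait and Retry; only the "all expected sporadic inputs are missing" short-circuit is modelled.

#include <cstddef>
#include <cstdio>
#include <vector>

enum class Lifetime { Timeframe, Sporadic, Condition };
enum class CompletionOp { Consume, Wait, Discard };

struct Spec { Lifetime lifetime; };  // what the data processor declared
struct Input { bool hasHeader; };    // whether data actually arrived for that input

CompletionOp decide(std::vector<Spec> const& specs, std::vector<Input> const& inputs)
{
  int sporadicCount = 0, timeframeCount = 0, missingSporadicCount = 0;
  bool needsProcessing = false;
  for (std::size_t i = 0; i < specs.size(); ++i) {
    sporadicCount += specs[i].lifetime == Lifetime::Sporadic ? 1 : 0;
    timeframeCount += specs[i].lifetime == Lifetime::Timeframe ? 1 : 0;
    if (!inputs[i].hasHeader && specs[i].lifetime != Lifetime::Sporadic) {
      return CompletionOp::Wait; // a non-sporadic input is still missing
    }
    if (!inputs[i].hasHeader && specs[i].lifetime == Lifetime::Sporadic) {
      ++missingSporadicCount; // might still arrive, might never come
    }
    if (inputs[i].hasHeader && specs[i].lifetime != Lifetime::Condition) {
      needsProcessing = true;
    }
  }
  // The corner case this patch targets: only sporadic inputs were expected and
  // none of them arrived, so there is nothing worth invoking the callback on.
  if (sporadicCount > 0 && missingSporadicCount == sporadicCount && timeframeCount == 0) {
    return CompletionOp::Discard;
  }
  return needsProcessing ? CompletionOp::Consume : CompletionOp::Discard;
}

int main()
{
  std::vector<Spec> specs{{Lifetime::Sporadic}, {Lifetime::Sporadic}};
  std::vector<Input> inputs{{false}, {false}}; // neither sporadic input showed up
  std::printf("%d\n", static_cast<int>(decide(specs, inputs))); // 2 == Discard
}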
--- .../Core/src/CompletionPolicyHelpers.cxx | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 585046bc89763..018dbb150fc92 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -115,20 +115,28 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl O2_SIGNPOST_START(completion, sid, "consumeWhenAll", "Completion policy invoked"); size_t si = 0; - bool missingSporadic = false; + int sporadicCount = 0; + int timeframeCount = 0; + int missingSporadicCount = 0; bool needsProcessing = false; size_t currentTimeslice = -1; for (auto& input : inputs) { assert(si < specs.size()); auto& spec = specs[si++]; + sporadicCount += spec.lifetime == Lifetime::Sporadic ? 1 : 0; + timeframeCount += spec.lifetime == Lifetime::Timeframe ? 1 : 0; + // If we are missing something which is not sporadic, we wait. if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s due to missing input %lu", "Wait", si); return CompletionPolicy::CompletionOp::Wait; } + // If we are missing something which is sporadic, we wait until we are sure it will not come. if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "Missing sporadic found for route index %lu", si); - missingSporadic = true; + missingSporadicCount += 1; } + // If we have a header, we use it to determine the current timesliceIsTimer + // (unless this is a timer which does not enter the oldest possible timeslice). if (input.header != nullptr && currentTimeslice == -1) { auto const* dph = framework::DataRefUtils::getHeader(input); if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { @@ -136,6 +144,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "currentTimeslice %lu from route index %lu", currentTimeslice, si); } } + // If we have a header, we need to process it if it is not a condition object. if (input.header != nullptr && spec.lifetime != Lifetime::Condition) { needsProcessing = true; } @@ -145,11 +154,15 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl auto& timesliceIndex = ref.get(); auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; - if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + if (missingSporadicCount && currentTimeslice >= oldestPossibleTimeslice) { O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu > oldestPossibleTimeslice %lu", "Retry", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Retry; } O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu <= oldestPossibleTimeslice %lu", needsProcessing ? "Consume" : "Discard", currentTimeslice, oldestPossibleTimeslice); + // No need to process if we have only sporadic inputs and they are all missing. + if (needsProcessing && (missingSporadicCount == sporadicCount) && (timeframeCount == 0)) { + return CompletionPolicy::CompletionOp::Discard; + } return needsProcessing ? 
CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; }; return CompletionPolicy{name, matcher, callback}; From dc32ec4ab20cd2a47506cc3425df9f67af0c1789 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 22 Jan 2024 14:14:39 +0100 Subject: [PATCH 268/726] DPL: add signposts to debug signals --- Framework/Core/src/DataProcessingDevice.cxx | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 366bfafd7d801..6ae4b62241930 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -495,11 +495,12 @@ void DataProcessingDevice::Init() void on_signal_callback(uv_signal_t* handle, int signum) { - ZoneScopedN("Signal callaback"); - LOG(debug) << "Signal " << signum << " received."; + O2_SIGNPOST_ID_FROM_POINTER(sid, device, handle); + O2_SIGNPOST_START(device, sid, "signal_state", "Signal %d received.", signum); + auto* registry = (ServiceRegistry*)handle->data; if (!registry) { - LOG(debug) << "No registry active. Ignoring signal"; + O2_SIGNPOST_END(device, sid, "signal_state", "No registry active. Ignoring signal."); return; } ServiceRegistryRef ref{*registry}; @@ -516,6 +517,7 @@ void on_signal_callback(uv_signal_t* handle, int signum) // available and being offered, however we // want to get out of the woods for now. if (offer.valid && offer.sharedMemory != 0) { + O2_SIGNPOST_END(device, sid, "signal_state", "Memory already offered."); return; } ri++; @@ -532,6 +534,7 @@ void on_signal_callback(uv_signal_t* handle, int signum) } } stats.updateStats({(int)ProcessingStatsId::TOTAL_SIGUSR1, DataProcessingStats::Op::Add, 1}); + O2_SIGNPOST_END(device, sid, "signal_state", "Done processing signals."); } static auto toBeForwardedHeader = [](void* header) -> bool { From f674031804a442af92e311aca5883b842048b4ba Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 23 Jan 2024 00:57:44 +0100 Subject: [PATCH 269/726] DPL: handle corner case for consumeWhenAll This should avoid computation if only sporadic inputs are expected and they are all missing. --- .../Core/src/CompletionPolicyHelpers.cxx | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/Framework/Core/src/CompletionPolicyHelpers.cxx b/Framework/Core/src/CompletionPolicyHelpers.cxx index 585046bc89763..9dd895a6fed6d 100644 --- a/Framework/Core/src/CompletionPolicyHelpers.cxx +++ b/Framework/Core/src/CompletionPolicyHelpers.cxx @@ -115,20 +115,28 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl O2_SIGNPOST_START(completion, sid, "consumeWhenAll", "Completion policy invoked"); size_t si = 0; - bool missingSporadic = false; + int sporadicCount = 0; + int timeframeCount = 0; + int missingSporadicCount = 0; bool needsProcessing = false; size_t currentTimeslice = -1; for (auto& input : inputs) { assert(si < specs.size()); auto& spec = specs[si++]; + sporadicCount += spec.lifetime == Lifetime::Sporadic ? 1 : 0; + timeframeCount += spec.lifetime == Lifetime::Timeframe ? 1 : 0; + // If we are missing something which is not sporadic, we wait. 
if (input.header == nullptr && spec.lifetime != Lifetime::Sporadic) { O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s due to missing input %lu", "Wait", si); return CompletionPolicy::CompletionOp::Wait; } + // If we are missing something which is sporadic, we wait until we are sure it will not come. if (input.header == nullptr && spec.lifetime == Lifetime::Sporadic) { O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "Missing sporadic found for route index %lu", si); - missingSporadic = true; + missingSporadicCount += 1; } + // If we have a header, we use it to determine the current timesliceIsTimer + // (unless this is a timer which does not enter the oldest possible timeslice). if (input.header != nullptr && currentTimeslice == -1) { auto const* dph = framework::DataRefUtils::getHeader(input); if (dph && !TimingInfo::timesliceIsTimer(dph->startTime)) { @@ -136,6 +144,7 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl O2_SIGNPOST_EVENT_EMIT(completion, sid, "consumeWhenAll", "currentTimeslice %lu from route index %lu", currentTimeslice, si); } } + // If we have a header, we need to process it if it is not a condition object. if (input.header != nullptr && spec.lifetime != Lifetime::Condition) { needsProcessing = true; } @@ -145,12 +154,19 @@ CompletionPolicy CompletionPolicyHelpers::consumeWhenAll(const char* name, Compl auto& timesliceIndex = ref.get(); auto oldestPossibleTimeslice = timesliceIndex.getOldestPossibleInput().timeslice.value; - if (missingSporadic && currentTimeslice >= oldestPossibleTimeslice) { + if (missingSporadicCount && currentTimeslice >= oldestPossibleTimeslice) { O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu > oldestPossibleTimeslice %lu", "Retry", currentTimeslice, oldestPossibleTimeslice); return CompletionPolicy::CompletionOp::Retry; } - O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu <= oldestPossibleTimeslice %lu", needsProcessing ? "Consume" : "Discard", currentTimeslice, oldestPossibleTimeslice); - return needsProcessing ? CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; + + // No need to process if we have only sporadic inputs and they are all missing. + if (needsProcessing && (sporadicCount > 0) && (missingSporadicCount == sporadicCount) && (timeframeCount == 0)) { + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu", "Discard", currentTimeslice); + return CompletionPolicy::CompletionOp::Discard; + } + auto consumes = (needsProcessing || sporadicCount == 0); + O2_SIGNPOST_END(completion, sid, "consumeWhenAll", "Completion policy returned %{public}s for timeslice %lu", consumes ? "Consume" : "Discard", currentTimeslice); + return consumes ? CompletionPolicy::CompletionOp::Consume : CompletionPolicy::CompletionOp::Discard; }; return CompletionPolicy{name, matcher, callback}; } From f6a0983f92a16b67f72e760ee7ed890e17e67355 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 23 Jan 2024 14:21:01 +0100 Subject: [PATCH 270/726] DPL: make sure we do not invoke user callback when Discarding Not sure this is actually the correct thing to do. 
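To make the intent easier to see than in the two-line diff, here is a condensed, runnable sketch of the dispatch loop after the change (type and function names are simplified stand-ins): the continue now sits outside the forwarding branch, so a Discard never falls through to the user callback, whether or not the device has forward routes.

#include <cstdio>
#include <vector>

enum class CompletionOp { Consume, Discard };
struct Action { CompletionOp op; };

int main()
{
  std::vector<Action> readyActions{{CompletionOp::Discard}, {CompletionOp::Consume}};
  bool hasForwards = false; // the problematic case: a Discard on a device with no forward routes

  for (auto& action : readyActions) {
    if (action.op == CompletionOp::Discard) {
      std::puts("post-dispatching callbacks");
      if (hasForwards) {
        std::puts("forward inputs");
      }
      continue; // after this patch the continue sits here, so the callback below is skipped
    }
    std::puts("user process callback"); // still reached for Consume-like actions
  }
}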
--- Framework/Core/src/DataProcessingDevice.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 6ae4b62241930..9f69d796f086f 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2297,8 +2297,8 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (spec.forwards.empty() == false) { auto& timesliceIndex = ref.get(); forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false); - continue; } + continue; } // If there is no optional inputs we canForwardEarly // the messages to that parallel processing can happen. From dc30b66aa6936195616becf87338737f22eedced Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 23 Jan 2024 22:20:58 +0100 Subject: [PATCH 271/726] DPL: improve logging for processing loop --- Framework/Core/src/DataProcessingDevice.cxx | 67 ++++++++++++++------- 1 file changed, 44 insertions(+), 23 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 9f69d796f086f..f5c9b7947afde 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -73,6 +73,7 @@ #include #include +#include #include #include #include @@ -83,6 +84,14 @@ #include #include +// Formatter to avoid having to rewrite the ostream operator for the enum +namespace fmt +{ +template <> +struct formatter : ostream_formatter { +}; +} // namespace fmt + O2_DECLARE_DYNAMIC_LOG(device); using namespace o2::framework; @@ -2253,28 +2262,21 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v }; // This is the main dispatching loop - LOGP(debug, "Processing actions:"); auto& state = ref.get(); auto& spec = ref.get(); auto& dpContext = ref.get(); auto& streamContext = ref.get(); + O2_SIGNPOST_ID_GENERATE(sid, device); + O2_SIGNPOST_START(device, sid, "device", "Start processing ready actions"); for (auto action : getReadyActions()) { - LOGP(debug, " Begin action"); + O2_SIGNPOST_ID_GENERATE(aid, device); + O2_SIGNPOST_START(device, aid, "device", "Processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str()); if (action.op == CompletionPolicy::CompletionOp::Wait) { - LOGP(debug, " - Action is to Wait"); + O2_SIGNPOST_END(device, aid, "device", "Waiting for more data."); continue; } - switch (action.op) { - case CompletionPolicy::CompletionOp::Consume: - LOG(debug) << " - Action is to " << action.op << " " << action.slot.index; - break; - default: - LOG(debug) << " - Action is to " << action.op << " " << action.slot.index; - break; - } - prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume || action.op == CompletionPolicy::CompletionOp::Discard; @@ -2285,20 +2287,19 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v *context.registry}; ProcessingContext processContext{record, ref, ref.get()}; { - ZoneScopedN("service pre processing"); // Notice this should be thread safe and reentrant // as it is called from many threads. 
streamContext.preProcessingCallbacks(processContext); dpContext.preProcessingCallbacks(processContext); } if (action.op == CompletionPolicy::CompletionOp::Discard) { - LOGP(debug, " - Action is to Discard"); context.postDispatchingCallbacks(processContext); if (spec.forwards.empty() == false) { auto& timesliceIndex = ref.get(); forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false); + O2_SIGNPOST_END(device, aid, "device", "Forwarding inputs consume: %d.", false); + continue; } - continue; } // If there is no optional inputs we canForwardEarly // the messages to that parallel processing can happen. @@ -2308,7 +2309,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v bool consumeSomething = action.op == CompletionPolicy::CompletionOp::Consume || action.op == CompletionPolicy::CompletionOp::ConsumeExisting; if (context.canForwardEarly && hasForwards && consumeSomething) { - LOGP(debug, " - Early forwarding"); + O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Early forwainding: %{public}s.", fmt::format("{}", action.op).c_str()); auto& timesliceIndex = ref.get(); forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), true, action.op == CompletionPolicy::CompletionOp::Consume); } @@ -2325,9 +2326,20 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v auto& spec = ref.get(); auto& streamContext = ref.get(); auto& dpContext = ref.get(); + auto shouldProcess = [](DataRelayer::RecordAction& action) -> bool { + switch (action.op) { + case CompletionPolicy::CompletionOp::Consume: + case CompletionPolicy::CompletionOp::ConsumeExisting: + case CompletionPolicy::CompletionOp::ConsumeAndRescan: + case CompletionPolicy::CompletionOp::Process: + return true; + break; + default: + return false; + } + }; if (state.quitRequested == false) { { - ZoneScopedN("service post processing"); // Callbacks from services dpContext.preProcessingCallbacks(processContext); streamContext.preProcessingCallbacks(processContext); @@ -2335,18 +2347,27 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // Callbacks from users ref.get().call(o2::framework::ServiceRegistryRef{ref}, (int)action.op); } - if (context.statefulProcess) { - ZoneScopedN("statefull process"); + O2_SIGNPOST_ID_FROM_POINTER(pcid, device, &processContext); + if (context.statefulProcess && shouldProcess(action)) { + // This way, usercode can use the the same processing context to identify + // its signposts and we can map user code to device iterations. + O2_SIGNPOST_START(device, pcid, "device", "Stateful process"); (context.statefulProcess)(processContext); - } else if (context.statelessProcess) { - ZoneScopedN("stateless process"); + O2_SIGNPOST_END(device, pcid, "device", "Stateful process"); + } else if (context.statelessProcess && shouldProcess(action)) { + O2_SIGNPOST_START(device, pcid, "device", "Stateful process"); (context.statelessProcess)(processContext); + O2_SIGNPOST_END(device, pcid, "device", "Stateful process"); + } else if (context.statelessProcess || context.statefulProcess) { + O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Skipping processing because we are discarding."); } else { + O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "No processing callback provided. 
Switching to %{public}s.", "Idle"); state.streaming = StreamingState::Idle; } // Notify the sink we just consumed some timeframe data if (context.isSink && action.op == CompletionPolicy::CompletionOp::Consume) { + O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Sending dpl-summary"); auto& allocator = ref.get(); allocator.make(OutputRef{"dpl-summary", compile_time_hash(spec.name.c_str())}, 1); } @@ -2361,7 +2382,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v } { - ZoneScopedN("service post processing"); ref.get().call(o2::framework::ServiceRegistryRef{ref}, (int)action.op); dpContext.postProcessingCallbacks(processContext); streamContext.postProcessingCallbacks(processContext); @@ -2408,7 +2428,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v ref.get().call(o2::framework::ServiceRegistryRef{ref}); } if ((context.canForwardEarly == false) && hasForwards && consumeSomething) { - LOGP(debug, "Late forwarding"); + O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Late forwarding"); auto& timesliceIndex = ref.get(); forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false, action.op == CompletionPolicy::CompletionOp::Consume); } @@ -2421,6 +2441,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v cleanTimers(action.slot, record); } } + O2_SIGNPOST_END(device, sid, "device", "Start processing ready actions"); // We now broadcast the end of stream if it was requested if (state.streaming == StreamingState::EndOfStreaming) { From 28c9b891dec6a32184e45ebd66259930670fe82d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 09:43:21 +0100 Subject: [PATCH 272/726] DPL: command line option to enable signposts --- Framework/Core/src/DeviceSpecHelpers.cxx | 2 ++ Framework/Core/src/runDataProcessing.cxx | 25 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 6c2063cb59311..17abf3fa7faad 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1664,6 +1664,8 @@ boost::program_options::options_description DeviceSpecHelpers::getForwardedDevic ("infologger-mode", bpo::value(), "O2_INFOLOGGER_MODE override") // ("infologger-severity", bpo::value(), "minimun FairLogger severity which goes to info logger") // ("dpl-tracing-flags", bpo::value(), "pipe separated list of events to trace") // + ("signposts", bpo::value(), // + "comma separated list of signposts to enable (any of `completion`, `data_processor_context`, `stream_context`, `device`, `monitoring_service`)") // ("child-driver", bpo::value(), "external driver to start childs with (e.g. 
valgrind)"); // return forwardedDeviceOptions; diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 325417235cd83..d98d4a5680e8d 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -977,10 +977,12 @@ int doChild(int argc, char** argv, ServiceRegistry& serviceRegistry, } boost::program_options::options_description optsDesc; ConfigParamsHelper::populateBoostProgramOptions(optsDesc, spec.options, gHiddenDeviceOptions); + char const* defaultSignposts = getenv("DPL_SIGNPOSTS"); optsDesc.add_options()("monitoring-backend", bpo::value()->default_value("default"), "monitoring backend info") // ("driver-client-backend", bpo::value()->default_value(defaultDriverClient), "backend for device -> driver communicataon: stdout://: use stdout, ws://: use websockets") // ("infologger-severity", bpo::value()->default_value(""), "minimum FairLogger severity to send to InfoLogger") // ("dpl-tracing-flags", bpo::value()->default_value(""), "pipe `|` separate list of events to be traced") // + ("signposts", bpo::value()->default_value(defaultSignposts ? defaultSignposts : ""), "comma separated list of signposts to enable") // ("expected-region-callbacks", bpo::value()->default_value("0"), "how many region callbacks we are expecting") // ("exit-transition-timeout", bpo::value()->default_value(defaultExitTransitionTimeout), "how many second to wait before switching from RUN to READY") // ("timeframes-rate-limit", bpo::value()->default_value("0"), "how many timeframe can be in fly at the same moment (0 disables)") // @@ -2949,6 +2951,29 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, } } + if (varmap.count("signposts")) { + auto signpostsToEnable = varmap["signposts"].as(); + auto matchingLogEnabler = [](char const* name, void* l, void* context) { + auto* log = (_o2_log_t*)l; + auto* selectedName = (char const*)context; + std::string prefix = "ch.cern.aliceo2."; + if (strcmp(name, (prefix + selectedName).data()) == 0) { + LOGP(info, "Enabling signposts for {}", *selectedName); + _o2_log_set_stacktrace(log, 1); + return false; + } + return true; + }; + // Split signpostsToEnable by comma using strtok_r + char* saveptr; + char* src = const_cast(signpostsToEnable.data()); + auto* token = strtok_r(src, ",", &saveptr); + while (token) { + o2_walk_logs(matchingLogEnabler, token); + token = strtok_r(nullptr, ",", &saveptr); + } + } + auto evaluateBatchOption = [&varmap]() -> bool { if (varmap.count("no-batch") > 0) { return false; From 1c8722551611250dc7e16c653c3019fb86c36f28 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 10:30:00 +0100 Subject: [PATCH 273/726] DPL: use Signposts to debug oldest possible timeframe. 
--- Framework/Core/src/CommonServices.cxx | 39 ++++++++++++++++++--------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index 8ac8315e949da..b3ee75a6a3886 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -536,7 +536,9 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } if (decongestion->lastTimeslice && oldestPossibleOutput.timeslice.value == decongestion->lastTimeslice) { - LOGP(debug, "Not sending already sent value"); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", + "Not sending already sent value for oldest possible timeslice: %" PRIu64, + (uint64_t)oldestPossibleOutput.timeslice.value); return; } if (oldestPossibleOutput.timeslice.value < decongestion->lastTimeslice) { @@ -545,9 +547,10 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() return; } - LOGP(debug, "Broadcasting oldest possible output {} due to {} ({})", oldestPossibleOutput.timeslice.value, - oldestPossibleOutput.slot.index == -1 ? "channel" : "slot", - oldestPossibleOutput.slot.index == -1 ? oldestPossibleOutput.channel.value : oldestPossibleOutput.slot.index); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Broadcasting oldest posssible output %" PRIu64 " due to %{public}s (%" PRIu64 ")", + (uint64_t)oldestPossibleOutput.timeslice.value, + oldestPossibleOutput.slot.index == -1 ? "channel" : "slot", + (uint64_t)(oldestPossibleOutput.slot.index == -1 ? oldestPossibleOutput.channel.value : oldestPossibleOutput.slot.index)); if (decongestion->orderedCompletionPolicyActive) { auto oldNextTimeslice = decongestion->nextTimeslice; decongestion->nextTimeslice = std::max(decongestion->nextTimeslice, (int64_t)oldestPossibleOutput.timeslice.value); @@ -563,11 +566,13 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() auto& state = proxy.getForwardChannelState(ChannelIndex{fi}); // TODO: this we could cache in the proxy at the bind moment. 
if (info.channelType != ChannelAccountingType::DPL) { - LOG(debug) << "Skipping channel"; + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { - LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestPossibleOutput.timeslice.value); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", + "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", + info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); } } decongestion->lastTimeslice = oldestPossibleOutput.timeslice.value; }, @@ -588,13 +593,15 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() auto& relayer = services.get(); auto& timesliceIndex = services.get(); auto& proxy = services.get(); - LOGP(debug, "Received oldest possible timeframe {} from channel {}", oldestPossibleTimeslice, channel.value); + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Received oldest possible timeframe %" PRIu64 " from channel %d", + (uint64_t)oldestPossibleTimeslice, channel.value); relayer.setOldestPossibleInput({oldestPossibleTimeslice}, channel); timesliceIndex.updateOldestPossibleOutput(); auto oldestPossibleOutput = relayer.getOldestPossibleOutput(); if (oldestPossibleOutput.timeslice.value == decongestion.lastTimeslice) { - LOGP(debug, "Not sending already sent value"); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64, (uint64_t)oldestPossibleOutput.timeslice.value); return; } if (oldestPossibleOutput.timeslice.value < decongestion.lastTimeslice) { @@ -607,14 +614,18 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() auto *device = services.get().device(); /// We use the oldest possible timeslice to debounce, so that only the latest one /// at the end of one iteration is sent. 
- LOGP(debug, "Queueing oldest possible timeslice {} propagation for execution.", oldestPossibleOutput.timeslice.value); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", + (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( queue, decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { - LOGP(debug, "Not sending already sent value {} >= {}", decongestion.lastTimeslice, oldestPossibleOutput.timeslice.value); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, + decongestion.lastTimeslice, (uint64_t)oldestPossibleOutput.timeslice.value); return; } - LOGP(debug, "Running oldest possible timeslice {} propagation.", oldestPossibleOutput.timeslice.value); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", + (uint64_t)oldestPossibleOutput.timeslice.value); DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { @@ -622,11 +633,13 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() auto& state = proxy.getForwardChannelState(ChannelIndex{fi}); // TODO: this we could cache in the proxy at the bind moment. if (info.channelType != ChannelAccountingType::DPL) { - LOG(debug) << "Skipping channel"; + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { - LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestPossibleOutput.timeslice.value); + O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", + "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", + info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); } } decongestion.lastTimeslice = oldestPossibleOutput.timeslice.value; From 781679104bf2d9bbd846d6b5afca1cb239dfcb4b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 14:17:28 +0100 Subject: [PATCH 274/726] DPL: make stacktrace level not atomic Not needed and simplifies overriding it from the outside via ptrace / vm_write. --- Framework/Foundation/include/Framework/Signpost.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index d7b8766de83c4..6dad1ed9f6341 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -179,7 +179,7 @@ struct _o2_log_t { // 0 means the log is disabled. // 1 means only the current signpost is printed. // >1 means the current signpost and n levels of the stacktrace are printed. - std::atomic stacktrace = 0; + int stacktrace = 0; // Default stacktrace level for the log, when enabled. 
int defaultStacktrace = 1; From f2785c7b49ce967508e3e7b48a5d5b5bb6bee5ea Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 14:17:28 +0100 Subject: [PATCH 275/726] DPL: allow signposts to be enabled via DPL_SIGNPOSTS --- Framework/Core/src/DeviceSpecHelpers.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 17abf3fa7faad..756d4d57ef342 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1627,6 +1627,7 @@ boost::program_options::options_description DeviceSpecHelpers::getForwardedDevic // - rate is an option of FairMQ device for ConditionalRun // - child-driver is not a FairMQ device option but used per device to start to process bpo::options_description forwardedDeviceOptions; + char const* defaultSignposts = getenv("DPL_SIGNPOSTS") ? getenv("DPL_SIGNPOSTS") : ""; forwardedDeviceOptions.add_options() // ("severity", bpo::value()->default_value("info"), "severity level of the log") // ("plugin,P", bpo::value(), "FairMQ plugin list") // From f53182b7a4dc49e5984752a658986e2b7bd187ff Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 14:24:37 +0100 Subject: [PATCH 276/726] DPL: add o2-log utility (for Mac, Linux coming) Allow enabling signpost logs while running. --- Framework/Foundation/CMakeLists.txt | 5 ++ Framework/Foundation/src/o2LogMac.cxx | 91 +++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 Framework/Foundation/src/o2LogMac.cxx diff --git a/Framework/Foundation/CMakeLists.txt b/Framework/Foundation/CMakeLists.txt index aa7a073fca581..1405dbffc031a 100644 --- a/Framework/Foundation/CMakeLists.txt +++ b/Framework/Foundation/CMakeLists.txt @@ -41,6 +41,11 @@ add_executable(o2-test-framework-Signpost add_executable(o2-test-framework-ThreadSanitizer test/test_ThreadSanitizer.cxx) +if (APPLE) +add_executable(o2-log + src/o2LogMac.cxx) +endif() + target_link_libraries(o2-test-framework-Signpost PRIVATE O2::FrameworkFoundation) target_link_libraries(o2-test-framework-ThreadSanitizer PRIVATE O2::FrameworkFoundation Threads::Threads) diff --git a/Framework/Foundation/src/o2LogMac.cxx b/Framework/Foundation/src/o2LogMac.cxx new file mode 100644 index 0000000000000..6ac9a17e9cda7 --- /dev/null +++ b/Framework/Foundation/src/o2LogMac.cxx @@ -0,0 +1,91 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
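As a usage sketch (all concrete values are placeholders): a running device can expose the address of a log's stacktrace field, as a later patch in this series does for the CCDB log ("Hook to enable signposts for CCDB messages at ..."), and the signposts of that log can then be toggled from another shell while the device keeps running:

  o2-log -p <pid of the device> -a <printed hook address> -s 1   # enable, one level of stacktrace
  o2-log -p <pid of the device> -a <printed hook address> -s 0   # disable again

The tool writes the 4-byte level straight into the target process, so it needs the corresponding privileges: task_for_pid on macOS effectively requires root, and the Linux variant added right after attaches with ptrace.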
+#import +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +int xnu_write(int pid, void* addr, unsigned char* data, size_t dsize) +{ + assert(dsize != 0); + assert(addr != nullptr); + assert(data != nullptr); + + auto* ptxt = (unsigned char*)malloc(dsize); + + assert(ptxt != nullptr); + memcpy(ptxt, data, dsize); + + mach_port_t task; + mach_msg_type_number_t dataCunt = dsize; + + kern_return_t kret = task_for_pid(mach_task_self(), pid, &task); + if (kret != KERN_SUCCESS) { + printf("task_for_pid failed: %s. Are you root?", mach_error_string(kret)); + return 0; + } + + vm_protect(task, (vm_address_t)addr, (vm_size_t)dsize, 0, + VM_PROT_READ | VM_PROT_WRITE | VM_PROT_ALL); + + kret = vm_write(task, (vm_address_t)addr, (pointer_t)ptxt, dataCunt); + + return kret; +} + +// Writes a 4-byte value to the specified address in the target process +// If the address in question points to private_o2_log_::stacktrace +// This will have the side effect of enabling the signpost. +// +int main(int argc, char** argv) +{ + // Use getopt_long to parse command line arguments + // -p pid + // -a address + // -s stacktrace level (default 1, 0 to disable) + static struct option long_options[] = { + {"pid", required_argument, nullptr, 'p'}, + {"address", required_argument, nullptr, 'a'}, + {"stacktrace", required_argument, nullptr, 's'}, + {nullptr, 0, nullptr, 0}}; + int opt; + pid_t pid; + int stacktrace = 1; + void* addr; + while ((opt = getopt_long(argc, argv, "p:a:", long_options, nullptr)) != -1) { + switch (opt) { + case 'p': + pid = atoi(optarg); + break; + case 'a': + addr = (void*)strtoul(optarg, nullptr, 16); + break; + case 's': + stacktrace = strtol(optarg, nullptr, 10); + break; + default: + printf("Usage: %s -p pid -a address [-s level]\n", argv[0]); + exit(EXIT_FAILURE); + } + } + xnu_write(pid, addr, (unsigned char*)&stacktrace, 4); + return 0; +} From bd97b87a9ed17f8558b004acb5b60fdbfb232891 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 14:17:28 +0100 Subject: [PATCH 277/726] DPL: support for Signposts in CCDB --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 39 +++++++++++++++-------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index f2d329db39b6d..837b20cf4262b 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -19,11 +19,14 @@ #include "Framework/DataSpecUtils.h" #include "CCDB/CcdbApi.h" #include "CommonConstants/LHCConstants.h" +#include "Framework/Signpost.h" #include #include #include #include +O2_DECLARE_DYNAMIC_LOG(ccdb); + namespace o2::framework { @@ -181,8 +184,12 @@ auto populateCacheWith(std::shared_ptr const& helper, { std::string ccdbMetadataPrefix = "ccdb-metadata-"; int objCnt = -1; + // We use the timeslice, so that we hook into the same interval as the rest of the + // callback. 
+ auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; + O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { - LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Fetching object for route %{public}s", DataSpecUtils::describe(route.matcher).data()); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); Output output{concrete.origin, concrete.description, concrete.subSpec}; @@ -201,7 +208,7 @@ auto populateCacheWith(std::shared_ptr const& helper, } else if (isPrefix(ccdbMetadataPrefix, meta.name)) { std::string key = meta.name.substr(ccdbMetadataPrefix.size()); auto value = meta.defaultValue.get(); - LOGP(debug, "Adding metadata {}: {} to the request", key, value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Adding metadata %{public}s: %{public}s to the request", key.data(), value.data()); metadata[key] = value; } else if (meta.name == "ccdb-query-rate") { chRate = meta.defaultValue.get() * helper->queryPeriodFactor; @@ -215,7 +222,7 @@ auto populateCacheWith(std::shared_ptr const& helper, checkValidity = true; // never skip check if the cache is empty } - LOGP(debug, "checkValidity is {} for tfID {} of {}", checkValidity, timingInfo.tfCounter, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "checkValidity is %{public}s for tfID %d of %{public}s", checkValidity ? "true" : "false", timingInfo.tfCounter, path.data()); const auto& api = helper->getAPI(path); if (checkValidity && (!api.isSnapshotMode() || etag.empty())) { // in the snapshot mode the object needs to be fetched only once @@ -238,7 +245,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodCCDB); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); continue; } if (v.size()) { // but should be overridden by fresh object @@ -249,7 +256,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodCCDB); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); continue; @@ -257,11 +264,12 @@ auto populateCacheWith(std::shared_ptr const& helper, } // cached object is fine auto cacheId = helper->mapURL2DPLCache[path]; - LOGP(debug, "Reusing {} for {}", cacheId.value, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Reusing %{public}s for %{public}s (DPL id %" 
PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); helper->mapURL2UUID[path].cacheHit++; allocator.adoptFromCache(output, cacheId, header::gSerializationMethodCCDB); // the outputBuffer was not used, can we destroy it? } + O2_SIGNPOST_END(ccdb, sid, "populateCacheWith", "Finished populating cache with CCDB objects"); }; AlgorithmSpec CCDBHelpers::fetchFromCCDB() @@ -276,6 +284,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() helper->queryPeriodGlo = checkRate > 0 ? checkRate : std::numeric_limits::max(); helper->queryPeriodFactor = checkMult > 0 ? checkMult : 1; LOGP(info, "CCDB Backend at: {}, validity check for every {} TF{}", defHost, helper->queryPeriodGlo, helper->queryPeriodFactor == 1 ? std::string{} : fmt::format(", (query for high-rate objects downscaled by {})", helper->queryPeriodFactor)); + LOGP(info, "Hook to enable signposts for CCDB messages at {}", (void*)&private_o2_log_ccdb->stacktrace); auto remapString = options.get("condition-remap"); ParserResult result = CCDBHelpers::parseRemappings(remapString.c_str()); if (!result.error.empty()) { @@ -322,6 +331,8 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() }); return adaptStateless([helper](DataTakingContext& dtc, DataAllocator& allocator, TimingInfo& timingInfo) { + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; + O2_SIGNPOST_START(ccdb, sid, "fetchFromCCDB", "Fetching CCDB objects for timeslice %" PRIu64, (uint64_t)timingInfo.timeslice); static Long64_t orbitResetTime = -1; static size_t lastTimeUsed = -1; if (timingInfo.creation & DataProcessingHeader::DUMMY_CREATION_TIME_OFFSET) { @@ -342,7 +353,8 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() } else { checkValidity = true; // never skip check if the cache is empty } - LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "checkValidity is %{public}s for tfID %d of %{public}s", + checkValidity ? 
"true" : "false", timingInfo.tfCounter, path.data()); Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); @@ -363,7 +375,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() newOrbitResetTime = getOrbitResetTime(v); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodNone); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); } else if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid @@ -373,19 +385,19 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() newOrbitResetTime = getOrbitResetTime(v); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodNone); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); } // cached object is fine } auto cacheId = helper->mapURL2DPLCache[path]; - LOGP(debug, "Reusing {} for {}", cacheId.value, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Reusing %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); helper->mapURL2UUID[path].cacheHit++; allocator.adoptFromCache(output, cacheId, header::gSerializationMethodNone); if (newOrbitResetTime != orbitResetTime) { - LOGP(debug, "Orbit reset time now at {} (was {})", newOrbitResetTime, orbitResetTime); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Orbit reset time changed from %lld to %lld", orbitResetTime, newOrbitResetTime); orbitResetTime = newOrbitResetTime; dtc.orbitResetTimeMUS = orbitResetTime; } @@ -403,10 +415,11 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() timestamp = timingInfo.creation; } // Fetch the rest of the objects. - LOGP(debug, "Fetching objects. Run: {}. OrbitResetTime: {}, Creation: {}, Timestamp: {}, firstTForbit: {}", - dtc.runNumber, orbitResetTime, timingInfo.creation, timestamp, timingInfo.firstTForbit); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Fetching objects. Run %{public}s. OrbitResetTime %lld. Creation %lld. Timestamp %lld. firstTForbit %" PRIu32, + dtc.runNumber.data(), orbitResetTime, timingInfo.creation, timestamp, timingInfo.firstTForbit); populateCacheWith(helper, timestamp, timingInfo, dtc, allocator); + O2_SIGNPOST_END(ccdb, _o2_signpost_id_t{(int64_t)timingInfo.timeslice}, "fetchFromCCDB", "Fetching CCDB objects"); }); }); } From 856a01d469ce163f39a201179ae42dd8fde7669d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:07:45 +0100 Subject: [PATCH 278/726] DPL: add o2-log utility (for Mac, Linux coming) Allow enabling signpost logs while running. 
--- Framework/Foundation/CMakeLists.txt | 3 + Framework/Foundation/src/o2Log.cxx | 122 ++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+) create mode 100644 Framework/Foundation/src/o2Log.cxx diff --git a/Framework/Foundation/CMakeLists.txt b/Framework/Foundation/CMakeLists.txt index aa7a073fca581..0682d37b61306 100644 --- a/Framework/Foundation/CMakeLists.txt +++ b/Framework/Foundation/CMakeLists.txt @@ -41,6 +41,9 @@ add_executable(o2-test-framework-Signpost add_executable(o2-test-framework-ThreadSanitizer test/test_ThreadSanitizer.cxx) +add_executable(o2-log + src/o2Log.cxx) + target_link_libraries(o2-test-framework-Signpost PRIVATE O2::FrameworkFoundation) target_link_libraries(o2-test-framework-ThreadSanitizer PRIVATE O2::FrameworkFoundation Threads::Threads) diff --git a/Framework/Foundation/src/o2Log.cxx b/Framework/Foundation/src/o2Log.cxx new file mode 100644 index 0000000000000..7806229b9be7c --- /dev/null +++ b/Framework/Foundation/src/o2Log.cxx @@ -0,0 +1,122 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#ifdef __APPLE__ +#import +#include +#include +#include +#include +#include +#include +#else +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include + +#ifdef __APPLE__ +int xnu_write(int pid, void* addr, unsigned char* data, size_t dsize) +{ + assert(dsize != 0); + assert(addr != nullptr); + assert(data != nullptr); + + auto* ptxt = (unsigned char*)malloc(dsize); + + assert(ptxt != nullptr); + memcpy(ptxt, data, dsize); + + mach_port_t task; + mach_msg_type_number_t dataCunt = dsize; + + kern_return_t kret = task_for_pid(mach_task_self(), pid, &task); + if (kret != KERN_SUCCESS) { + printf("task_for_pid failed: %s. Are you root?", mach_error_string(kret)); + return 0; + } + + vm_protect(task, (vm_address_t)addr, (vm_size_t)dsize, 0, + VM_PROT_READ | VM_PROT_WRITE | VM_PROT_ALL); + + kret = vm_write(task, (vm_address_t)addr, (pointer_t)ptxt, dataCunt); + + return kret; +} +#elif __linux__ +int xnu_write(int pid, void *addr, unsigned char *data, size_t dsize) { + if ((ptrace(PTRACE_ATTACH, pid, NULL, NULL)) < 0) { + perror("ptrace(ATTACH)"); + exit(1); + } + int waitStat = 0; + int waitRes = waitpid(pid, &waitStat, WUNTRACED); + if (waitRes != pid || !WIFSTOPPED(waitStat)) { + perror("....:"); + printf("Something went wrong...\n"); + exit(1); + } + + if ((ptrace(PTRACE_POKEDATA, pid, addr, data))) { + perror("pokedata"); + } + + if ((ptrace(PTRACE_DETACH, pid, NULL, NULL) < 0)) { + perror("ptrace(DETACH)"); + exit(1); + } + return 0; +} +#endif + +// Writes a 4-byte value to the specified address in the target process +// If the address in question points to private_o2_log_::stacktrace +// This will have the side effect of enabling the signpost. 
+int main(int argc, char** argv) +{ + // Use getopt_long to parse command line arguments + // -p pid + // -a address + // -s stacktrace level (default 1, 0 to disable) + static struct option long_options[] = { + {"pid", required_argument, nullptr, 'p'}, + {"address", required_argument, nullptr, 'a'}, + {"stacktrace", required_argument, nullptr, 's'}, + {nullptr, 0, nullptr, 0}}; + int opt; + pid_t pid; + int stacktrace = 1; + void* addr; + while ((opt = getopt_long(argc, argv, "p:a:", long_options, nullptr)) != -1) { + switch (opt) { + case 'p': + pid = atoi(optarg); + break; + case 'a': + addr = (void*)strtoul(optarg, nullptr, 16); + break; + case 's': + stacktrace = strtol(optarg, nullptr, 10); + break; + default: + printf("Usage: %s -p pid -a address [-s level]\n", argv[0]); + exit(EXIT_FAILURE); + } + } + xnu_write(pid, addr, (unsigned char*)&stacktrace, 4); + return 0; +} From 48866d24c60a53a961e428511868e85d893753b6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:07:45 +0100 Subject: [PATCH 279/726] DPL: support for Signposts in CCDB --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 39 +++++++++++++++-------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index f2d329db39b6d..837b20cf4262b 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -19,11 +19,14 @@ #include "Framework/DataSpecUtils.h" #include "CCDB/CcdbApi.h" #include "CommonConstants/LHCConstants.h" +#include "Framework/Signpost.h" #include #include #include #include +O2_DECLARE_DYNAMIC_LOG(ccdb); + namespace o2::framework { @@ -181,8 +184,12 @@ auto populateCacheWith(std::shared_ptr const& helper, { std::string ccdbMetadataPrefix = "ccdb-metadata-"; int objCnt = -1; + // We use the timeslice, so that we hook into the same interval as the rest of the + // callback. + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; + O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { - LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Fetching object for route %{public}s", DataSpecUtils::describe(route.matcher).data()); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); Output output{concrete.origin, concrete.description, concrete.subSpec}; @@ -201,7 +208,7 @@ auto populateCacheWith(std::shared_ptr const& helper, } else if (isPrefix(ccdbMetadataPrefix, meta.name)) { std::string key = meta.name.substr(ccdbMetadataPrefix.size()); auto value = meta.defaultValue.get(); - LOGP(debug, "Adding metadata {}: {} to the request", key, value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Adding metadata %{public}s: %{public}s to the request", key.data(), value.data()); metadata[key] = value; } else if (meta.name == "ccdb-query-rate") { chRate = meta.defaultValue.get() * helper->queryPeriodFactor; @@ -215,7 +222,7 @@ auto populateCacheWith(std::shared_ptr const& helper, checkValidity = true; // never skip check if the cache is empty } - LOGP(debug, "checkValidity is {} for tfID {} of {}", checkValidity, timingInfo.tfCounter, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "checkValidity is %{public}s for tfID %d of %{public}s", checkValidity ? 
"true" : "false", timingInfo.tfCounter, path.data()); const auto& api = helper->getAPI(path); if (checkValidity && (!api.isSnapshotMode() || etag.empty())) { // in the snapshot mode the object needs to be fetched only once @@ -238,7 +245,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodCCDB); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); continue; } if (v.size()) { // but should be overridden by fresh object @@ -249,7 +256,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodCCDB); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); continue; @@ -257,11 +264,12 @@ auto populateCacheWith(std::shared_ptr const& helper, } // cached object is fine auto cacheId = helper->mapURL2DPLCache[path]; - LOGP(debug, "Reusing {} for {}", cacheId.value, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Reusing %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); helper->mapURL2UUID[path].cacheHit++; allocator.adoptFromCache(output, cacheId, header::gSerializationMethodCCDB); // the outputBuffer was not used, can we destroy it? } + O2_SIGNPOST_END(ccdb, sid, "populateCacheWith", "Finished populating cache with CCDB objects"); }; AlgorithmSpec CCDBHelpers::fetchFromCCDB() @@ -276,6 +284,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() helper->queryPeriodGlo = checkRate > 0 ? checkRate : std::numeric_limits::max(); helper->queryPeriodFactor = checkMult > 0 ? checkMult : 1; LOGP(info, "CCDB Backend at: {}, validity check for every {} TF{}", defHost, helper->queryPeriodGlo, helper->queryPeriodFactor == 1 ? 
std::string{} : fmt::format(", (query for high-rate objects downscaled by {})", helper->queryPeriodFactor)); + LOGP(info, "Hook to enable signposts for CCDB messages at {}", (void*)&private_o2_log_ccdb->stacktrace); auto remapString = options.get("condition-remap"); ParserResult result = CCDBHelpers::parseRemappings(remapString.c_str()); if (!result.error.empty()) { @@ -322,6 +331,8 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() }); return adaptStateless([helper](DataTakingContext& dtc, DataAllocator& allocator, TimingInfo& timingInfo) { + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; + O2_SIGNPOST_START(ccdb, sid, "fetchFromCCDB", "Fetching CCDB objects for timeslice %" PRIu64, (uint64_t)timingInfo.timeslice); static Long64_t orbitResetTime = -1; static size_t lastTimeUsed = -1; if (timingInfo.creation & DataProcessingHeader::DUMMY_CREATION_TIME_OFFSET) { @@ -342,7 +353,8 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() } else { checkValidity = true; // never skip check if the cache is empty } - LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "checkValidity is %{public}s for tfID %d of %{public}s", + checkValidity ? "true" : "false", timingInfo.tfCounter, path.data()); Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); @@ -363,7 +375,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() newOrbitResetTime = getOrbitResetTime(v); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodNone); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); } else if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid @@ -373,19 +385,19 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() newOrbitResetTime = getOrbitResetTime(v); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodNone); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); } // cached object is fine } auto cacheId = helper->mapURL2DPLCache[path]; - LOGP(debug, "Reusing {} for {}", cacheId.value, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Reusing %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); helper->mapURL2UUID[path].cacheHit++; allocator.adoptFromCache(output, cacheId, header::gSerializationMethodNone); if (newOrbitResetTime != orbitResetTime) { - LOGP(debug, "Orbit reset time now at {} (was {})", newOrbitResetTime, orbitResetTime); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Orbit reset time changed from %lld to %lld", orbitResetTime, 
newOrbitResetTime); orbitResetTime = newOrbitResetTime; dtc.orbitResetTimeMUS = orbitResetTime; } @@ -403,10 +415,11 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() timestamp = timingInfo.creation; } // Fetch the rest of the objects. - LOGP(debug, "Fetching objects. Run: {}. OrbitResetTime: {}, Creation: {}, Timestamp: {}, firstTForbit: {}", - dtc.runNumber, orbitResetTime, timingInfo.creation, timestamp, timingInfo.firstTForbit); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Fetching objects. Run %{public}s. OrbitResetTime %lld. Creation %lld. Timestamp %lld. firstTForbit %" PRIu32, + dtc.runNumber.data(), orbitResetTime, timingInfo.creation, timestamp, timingInfo.firstTForbit); populateCacheWith(helper, timestamp, timingInfo, dtc, allocator); + O2_SIGNPOST_END(ccdb, _o2_signpost_id_t{(int64_t)timingInfo.timeslice}, "fetchFromCCDB", "Fetching CCDB objects"); }); }); } From 9d82a88a007ac5d9c12ded6473b59a01cf501acf Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:11:39 +0100 Subject: [PATCH 280/726] DPL: add o2-log utility (for Mac, Linux coming) Allow enabling signpost logs while running. --- Framework/Foundation/CMakeLists.txt | 3 + Framework/Foundation/src/o2Log.cxx | 123 ++++++++++++++++++++++++++++ 2 files changed, 126 insertions(+) create mode 100644 Framework/Foundation/src/o2Log.cxx diff --git a/Framework/Foundation/CMakeLists.txt b/Framework/Foundation/CMakeLists.txt index aa7a073fca581..0682d37b61306 100644 --- a/Framework/Foundation/CMakeLists.txt +++ b/Framework/Foundation/CMakeLists.txt @@ -41,6 +41,9 @@ add_executable(o2-test-framework-Signpost add_executable(o2-test-framework-ThreadSanitizer test/test_ThreadSanitizer.cxx) +add_executable(o2-log + src/o2Log.cxx) + target_link_libraries(o2-test-framework-Signpost PRIVATE O2::FrameworkFoundation) target_link_libraries(o2-test-framework-ThreadSanitizer PRIVATE O2::FrameworkFoundation Threads::Threads) diff --git a/Framework/Foundation/src/o2Log.cxx b/Framework/Foundation/src/o2Log.cxx new file mode 100644 index 0000000000000..c9cef04c7b7b6 --- /dev/null +++ b/Framework/Foundation/src/o2Log.cxx @@ -0,0 +1,123 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#ifdef __APPLE__ +#import +#include +#include +#include +#include +#include +#include +#else +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include + +#ifdef __APPLE__ +int xnu_write(int pid, void* addr, unsigned char* data, size_t dsize) +{ + assert(dsize != 0); + assert(addr != nullptr); + assert(data != nullptr); + + auto* ptxt = (unsigned char*)malloc(dsize); + + assert(ptxt != nullptr); + memcpy(ptxt, data, dsize); + + mach_port_t task; + mach_msg_type_number_t dataCunt = dsize; + + kern_return_t kret = task_for_pid(mach_task_self(), pid, &task); + if (kret != KERN_SUCCESS) { + printf("task_for_pid failed: %s. 
Are you root?", mach_error_string(kret)); + return 0; + } + + vm_protect(task, (vm_address_t)addr, (vm_size_t)dsize, 0, + VM_PROT_READ | VM_PROT_WRITE | VM_PROT_ALL); + + kret = vm_write(task, (vm_address_t)addr, (pointer_t)ptxt, dataCunt); + + return kret; +} +#elif __linux__ +int xnu_write(int pid, void* addr, unsigned char* data, size_t dsize) +{ + if ((ptrace(PTRACE_ATTACH, pid, NULL, NULL)) < 0) { + perror("ptrace(ATTACH)"); + exit(1); + } + int waitStat = 0; + int waitRes = waitpid(pid, &waitStat, WUNTRACED); + if (waitRes != pid || !WIFSTOPPED(waitStat)) { + perror("....:"); + printf("Something went wrong...\n"); + exit(1); + } + + if ((ptrace(PTRACE_POKEDATA, pid, addr, data))) { + perror("pokedata"); + } + + if ((ptrace(PTRACE_DETACH, pid, NULL, NULL) < 0)) { + perror("ptrace(DETACH)"); + exit(1); + } + return 0; +} +#endif + +// Writes a 4-byte value to the specified address in the target process +// If the address in question points to private_o2_log_::stacktrace +// This will have the side effect of enabling the signpost. +int main(int argc, char** argv) +{ + // Use getopt_long to parse command line arguments + // -p pid + // -a address + // -s stacktrace level (default 1, 0 to disable) + static struct option long_options[] = { + {"pid", required_argument, nullptr, 'p'}, + {"address", required_argument, nullptr, 'a'}, + {"stacktrace", required_argument, nullptr, 's'}, + {nullptr, 0, nullptr, 0}}; + int opt; + pid_t pid; + int stacktrace = 1; + void* addr; + while ((opt = getopt_long(argc, argv, "p:a:", long_options, nullptr)) != -1) { + switch (opt) { + case 'p': + pid = atoi(optarg); + break; + case 'a': + addr = (void*)strtoul(optarg, nullptr, 16); + break; + case 's': + stacktrace = strtol(optarg, nullptr, 10); + break; + default: + printf("Usage: %s -p pid -a address [-s level]\n", argv[0]); + exit(EXIT_FAILURE); + } + } + xnu_write(pid, addr, (unsigned char*)&stacktrace, 4); + return 0; +} From 6a86ab66f1c47581808e3cac551455501eff0ced Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:11:39 +0100 Subject: [PATCH 281/726] DPL: support for Signposts in CCDB --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 39 +++++++++++++++-------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index f2d329db39b6d..837b20cf4262b 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -19,11 +19,14 @@ #include "Framework/DataSpecUtils.h" #include "CCDB/CcdbApi.h" #include "CommonConstants/LHCConstants.h" +#include "Framework/Signpost.h" #include #include #include #include +O2_DECLARE_DYNAMIC_LOG(ccdb); + namespace o2::framework { @@ -181,8 +184,12 @@ auto populateCacheWith(std::shared_ptr const& helper, { std::string ccdbMetadataPrefix = "ccdb-metadata-"; int objCnt = -1; + // We use the timeslice, so that we hook into the same interval as the rest of the + // callback. 
+ auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; + O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { - LOGP(debug, "Fetching object for route {}", DataSpecUtils::describe(route.matcher)); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Fetching object for route %{public}s", DataSpecUtils::describe(route.matcher).data()); objCnt++; auto concrete = DataSpecUtils::asConcreteDataMatcher(route.matcher); Output output{concrete.origin, concrete.description, concrete.subSpec}; @@ -201,7 +208,7 @@ auto populateCacheWith(std::shared_ptr const& helper, } else if (isPrefix(ccdbMetadataPrefix, meta.name)) { std::string key = meta.name.substr(ccdbMetadataPrefix.size()); auto value = meta.defaultValue.get(); - LOGP(debug, "Adding metadata {}: {} to the request", key, value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Adding metadata %{public}s: %{public}s to the request", key.data(), value.data()); metadata[key] = value; } else if (meta.name == "ccdb-query-rate") { chRate = meta.defaultValue.get() * helper->queryPeriodFactor; @@ -215,7 +222,7 @@ auto populateCacheWith(std::shared_ptr const& helper, checkValidity = true; // never skip check if the cache is empty } - LOGP(debug, "checkValidity is {} for tfID {} of {}", checkValidity, timingInfo.tfCounter, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "checkValidity is %{public}s for tfID %d of %{public}s", checkValidity ? "true" : "false", timingInfo.tfCounter, path.data()); const auto& api = helper->getAPI(path); if (checkValidity && (!api.isSnapshotMode() || etag.empty())) { // in the snapshot mode the object needs to be fetched only once @@ -238,7 +245,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodCCDB); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); continue; } if (v.size()) { // but should be overridden by fresh object @@ -249,7 +256,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodCCDB); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); continue; @@ -257,11 +264,12 @@ auto populateCacheWith(std::shared_ptr const& helper, } // cached object is fine auto cacheId = helper->mapURL2DPLCache[path]; - LOGP(debug, "Reusing {} for {}", cacheId.value, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "populateCacheWith", "Reusing %{public}s for %{public}s (DPL id %" 
PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); helper->mapURL2UUID[path].cacheHit++; allocator.adoptFromCache(output, cacheId, header::gSerializationMethodCCDB); // the outputBuffer was not used, can we destroy it? } + O2_SIGNPOST_END(ccdb, sid, "populateCacheWith", "Finished populating cache with CCDB objects"); }; AlgorithmSpec CCDBHelpers::fetchFromCCDB() @@ -276,6 +284,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() helper->queryPeriodGlo = checkRate > 0 ? checkRate : std::numeric_limits::max(); helper->queryPeriodFactor = checkMult > 0 ? checkMult : 1; LOGP(info, "CCDB Backend at: {}, validity check for every {} TF{}", defHost, helper->queryPeriodGlo, helper->queryPeriodFactor == 1 ? std::string{} : fmt::format(", (query for high-rate objects downscaled by {})", helper->queryPeriodFactor)); + LOGP(info, "Hook to enable signposts for CCDB messages at {}", (void*)&private_o2_log_ccdb->stacktrace); auto remapString = options.get("condition-remap"); ParserResult result = CCDBHelpers::parseRemappings(remapString.c_str()); if (!result.error.empty()) { @@ -322,6 +331,8 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() }); return adaptStateless([helper](DataTakingContext& dtc, DataAllocator& allocator, TimingInfo& timingInfo) { + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; + O2_SIGNPOST_START(ccdb, sid, "fetchFromCCDB", "Fetching CCDB objects for timeslice %" PRIu64, (uint64_t)timingInfo.timeslice); static Long64_t orbitResetTime = -1; static size_t lastTimeUsed = -1; if (timingInfo.creation & DataProcessingHeader::DUMMY_CREATION_TIME_OFFSET) { @@ -342,7 +353,8 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() } else { checkValidity = true; // never skip check if the cache is empty } - LOG(debug) << "checkValidity = " << checkValidity << " for TF " << timingInfo.timeslice; + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "checkValidity is %{public}s for tfID %d of %{public}s", + checkValidity ? 
"true" : "false", timingInfo.tfCounter, path.data()); Output output{"CTP", "OrbitReset", 0}; Long64_t newOrbitResetTime = orbitResetTime; auto&& v = allocator.makeVector(output); @@ -363,7 +375,7 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() newOrbitResetTime = getOrbitResetTime(v); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodNone); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); } else if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid @@ -373,19 +385,19 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() newOrbitResetTime = getOrbitResetTime(v); auto cacheId = allocator.adoptContainer(output, std::move(v), DataAllocator::CacheStrategy::Always, header::gSerializationMethodNone); helper->mapURL2DPLCache[path] = cacheId; - LOGP(debug, "Caching {} for {} (DPL id {})", path, headers["ETag"], cacheId.value); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Caching %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); } // cached object is fine } auto cacheId = helper->mapURL2DPLCache[path]; - LOGP(debug, "Reusing {} for {}", cacheId.value, path); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Reusing %{public}s for %{public}s (DPL id %" PRIu64 ")", path.data(), headers["ETag"].data(), cacheId.value); helper->mapURL2UUID[path].cacheHit++; allocator.adoptFromCache(output, cacheId, header::gSerializationMethodNone); if (newOrbitResetTime != orbitResetTime) { - LOGP(debug, "Orbit reset time now at {} (was {})", newOrbitResetTime, orbitResetTime); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Orbit reset time changed from %lld to %lld", orbitResetTime, newOrbitResetTime); orbitResetTime = newOrbitResetTime; dtc.orbitResetTimeMUS = orbitResetTime; } @@ -403,10 +415,11 @@ AlgorithmSpec CCDBHelpers::fetchFromCCDB() timestamp = timingInfo.creation; } // Fetch the rest of the objects. - LOGP(debug, "Fetching objects. Run: {}. OrbitResetTime: {}, Creation: {}, Timestamp: {}, firstTForbit: {}", - dtc.runNumber, orbitResetTime, timingInfo.creation, timestamp, timingInfo.firstTForbit); + O2_SIGNPOST_EVENT_EMIT(ccdb, sid, "fetchFromCCDB", "Fetching objects. Run %{public}s. OrbitResetTime %lld. Creation %lld. Timestamp %lld. 
firstTForbit %" PRIu32, + dtc.runNumber.data(), orbitResetTime, timingInfo.creation, timestamp, timingInfo.firstTForbit); populateCacheWith(helper, timestamp, timingInfo, dtc, allocator); + O2_SIGNPOST_END(ccdb, _o2_signpost_id_t{(int64_t)timingInfo.timeslice}, "fetchFromCCDB", "Fetching CCDB objects"); }); }); } From f43100f68b7c513ae8fd0a16376474cb6487e183 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:36:20 +0100 Subject: [PATCH 282/726] DPL: install o2-log in the proper place --- Framework/Foundation/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Framework/Foundation/CMakeLists.txt b/Framework/Foundation/CMakeLists.txt index 0682d37b61306..8babb4bc9a44e 100644 --- a/Framework/Foundation/CMakeLists.txt +++ b/Framework/Foundation/CMakeLists.txt @@ -59,6 +59,8 @@ set_property(TARGET o2-test-framework-foundation PROPERTY RUNTIME_OUTPUT_DIRECTO set_property(TARGET o2-test-framework-Signpost PROPERTY RUNTIME_OUTPUT_DIRECTORY ${outdir}) set_property(TARGET o2-test-framework-SignpostLogger PROPERTY RUNTIME_OUTPUT_DIRECTORY ${outdir}) set_property(TARGET o2-test-framework-ThreadSanitizer PROPERTY RUNTIME_OUTPUT_DIRECTORY ${outdir}) +get_filename_component(bindir ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/../bin ABSOLUTE) +set_property(TARGET o2-log PROPERTY RUNTIME_OUTPUT_DIRECTORY ${bindir}) add_test(NAME framework:foundation COMMAND o2-test-framework-foundation) From 56e810135e263ce7e8395e9c07d136c634c4b4d3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 11:36:22 +0100 Subject: [PATCH 283/726] DPL: do not update the new run flag unless we do process data Data might be dropped by the oldest possible timeframe mechanism for the first timeslice which arrives, at which point we lose the information about the new run. Moving it down ensures that the check is done for the first timeframe which arrives. --- Framework/Core/src/DataProcessingDevice.cxx | 32 ++++++++++++--------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index f5c9b7947afde..15abefe42add0 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2134,7 +2134,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // create messages) because the messages need to have the timeslice id into // it. auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void { - auto& dataProcessorContext = ref.get(); auto& relayer = ref.get(); auto& timingInfo = ref.get(); ZoneScopedN("DataProcessingDevice::prepareForCurrentTimeslice"); @@ -2145,17 +2144,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v timingInfo.firstTForbit = relayer.getFirstTFOrbitForSlot(i); timingInfo.runNumber = relayer.getRunNumberForSlot(i); timingInfo.creation = relayer.getCreationTimeForSlot(i); - timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber; - // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice - timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0); - // We report wether or not this timing info refers to a new Run. 
- if (timingInfo.globalRunNumberChanged) { - dataProcessorContext.lastRunNumberProcessed = timingInfo.runNumber; - } - // FIXME: for now there is only one stream, however we - // should calculate this correctly once we finally get the - // the StreamContext in. - timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged; }; // When processing them, timers will have to be cleaned up @@ -2277,9 +2265,9 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v continue; } - prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume || action.op == CompletionPolicy::CompletionOp::Discard; + prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); InputSpan span = getInputSpan(action.slot, shouldConsume); auto& spec = ref.get(); InputRecord record{spec.inputs, @@ -2338,6 +2326,23 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v return false; } }; + auto updateRunInformation = [ref](TimesliceSlot i) -> void { + auto& dataProcessorContext = ref.get(); + auto& relayer = ref.get(); + auto& timingInfo = ref.get(); + auto timeslice = relayer.getTimesliceForSlot(i); + timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber; + // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice + timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0); + // We report wether or not this timing info refers to a new Run. + if (timingInfo.globalRunNumberChanged) { + dataProcessorContext.lastRunNumberProcessed = timingInfo.runNumber; + } + // FIXME: for now there is only one stream, however we + // should calculate this correctly once we finally get the + // the StreamContext in. + timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged; + }; if (state.quitRequested == false) { { // Callbacks from services @@ -2349,6 +2354,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v } O2_SIGNPOST_ID_FROM_POINTER(pcid, device, &processContext); if (context.statefulProcess && shouldProcess(action)) { + updateRunInformation(action.slot); // This way, usercode can use the the same processing context to identify // its signposts and we can map user code to device iterations. O2_SIGNPOST_START(device, pcid, "device", "Stateful process"); From 7e70b694465a29ecb05f0ab87c5c81bee0543706 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 14:41:59 +0100 Subject: [PATCH 284/726] DPL: do not update the new run flag unless we do process data Data might be dropped by the oldest possible timeframe mechanism for the first timeslice which arrives, at which point we lose the information about the new run. Moving it down ensures that the check is done for the first timeframe which arrives. 
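As a reduced sketch of the intent (simplified types, timer timeslices and the per-stream flag left out, so this is not the actual DPL code), the run-change bookkeeping now happens only for actions that are really processed:

  #include <cstdint>

  struct TimingInfo {
    uint32_t runNumber = 0;
    bool globalRunNumberChanged = false;
  };

  struct ProcessorContext {
    int64_t lastRunNumberProcessed = -1;
  };

  // Called only for actions which will actually be consumed, so a first
  // timeslice dropped by the oldest-possible-timeframe mechanism can no
  // longer swallow the run transition.
  void updateRunInformation(ProcessorContext& ctx, TimingInfo& info)
  {
    info.globalRunNumberChanged = (ctx.lastRunNumberProcessed != info.runNumber);
    // A switch to runNumber == 0 only counts if it is the first thing we process.
    info.globalRunNumberChanged &= (ctx.lastRunNumberProcessed == -1 || info.runNumber != 0);
    if (info.globalRunNumberChanged) {
      ctx.lastRunNumberProcessed = info.runNumber;
    }
  }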
--- Framework/Core/src/DataProcessingDevice.cxx | 34 +++++++++++++-------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index f5c9b7947afde..6b1843f925127 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2134,7 +2134,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // create messages) because the messages need to have the timeslice id into // it. auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void { - auto& dataProcessorContext = ref.get(); auto& relayer = ref.get(); auto& timingInfo = ref.get(); ZoneScopedN("DataProcessingDevice::prepareForCurrentTimeslice"); @@ -2145,17 +2144,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v timingInfo.firstTForbit = relayer.getFirstTFOrbitForSlot(i); timingInfo.runNumber = relayer.getRunNumberForSlot(i); timingInfo.creation = relayer.getCreationTimeForSlot(i); - timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber; - // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice - timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0); - // We report wether or not this timing info refers to a new Run. - if (timingInfo.globalRunNumberChanged) { - dataProcessorContext.lastRunNumberProcessed = timingInfo.runNumber; - } - // FIXME: for now there is only one stream, however we - // should calculate this correctly once we finally get the - // the StreamContext in. - timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged; }; // When processing them, timers will have to be cleaned up @@ -2277,9 +2265,9 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v continue; } - prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume || action.op == CompletionPolicy::CompletionOp::Discard; + prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); InputSpan span = getInputSpan(action.slot, shouldConsume); auto& spec = ref.get(); InputRecord record{spec.inputs, @@ -2338,6 +2326,23 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v return false; } }; + auto updateRunInformation = [ref](TimesliceSlot i) -> void { + auto& dataProcessorContext = ref.get(); + auto& relayer = ref.get(); + auto& timingInfo = ref.get(); + auto timeslice = relayer.getTimesliceForSlot(i); + timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber; + // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice + timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0); + // We report wether or not this timing info refers to a new Run. + if (timingInfo.globalRunNumberChanged) { + dataProcessorContext.lastRunNumberProcessed = timingInfo.runNumber; + } + // FIXME: for now there is only one stream, however we + // should calculate this correctly once we finally get the + // the StreamContext in. 
+ timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged; + }; if (state.quitRequested == false) { { // Callbacks from services @@ -2349,18 +2354,21 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v } O2_SIGNPOST_ID_FROM_POINTER(pcid, device, &processContext); if (context.statefulProcess && shouldProcess(action)) { + updateRunInformation(action.slot); // This way, usercode can use the the same processing context to identify // its signposts and we can map user code to device iterations. O2_SIGNPOST_START(device, pcid, "device", "Stateful process"); (context.statefulProcess)(processContext); O2_SIGNPOST_END(device, pcid, "device", "Stateful process"); } else if (context.statelessProcess && shouldProcess(action)) { + updateRunInformation(action.slot); O2_SIGNPOST_START(device, pcid, "device", "Stateful process"); (context.statelessProcess)(processContext); O2_SIGNPOST_END(device, pcid, "device", "Stateful process"); } else if (context.statelessProcess || context.statefulProcess) { O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Skipping processing because we are discarding."); } else { + updateRunInformation(action.slot); O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "No processing callback provided. Switching to %{public}s.", "Idle"); state.streaming = StreamingState::Idle; } From 0cd7e702d12dfc453d646f6689192bf50a823356 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:55:07 +0100 Subject: [PATCH 285/726] DPL: do not update the new run flag unless we do process data Data might be dropped by the oldest possible timeframe mechanism for the first timeslice which arrives, at which point we lose the information about the new run. Moving it down ensures that the check is done for the first timeframe which arrives. --- Framework/Core/src/DataProcessingDevice.cxx | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index f5c9b7947afde..cb4598daa661f 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2134,7 +2134,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // create messages) because the messages need to have the timeslice id into // it. 
auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void { - auto& dataProcessorContext = ref.get(); auto& relayer = ref.get(); auto& timingInfo = ref.get(); ZoneScopedN("DataProcessingDevice::prepareForCurrentTimeslice"); @@ -2145,6 +2144,12 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v timingInfo.firstTForbit = relayer.getFirstTFOrbitForSlot(i); timingInfo.runNumber = relayer.getRunNumberForSlot(i); timingInfo.creation = relayer.getCreationTimeForSlot(i); + }; + auto updateRunInformation = [ref](TimesliceSlot i) -> void { + auto& dataProcessorContext = ref.get(); + auto& relayer = ref.get(); + auto& timingInfo = ref.get(); + auto timeslice = relayer.getTimesliceForSlot(i); timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber; // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0); @@ -2277,9 +2282,14 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v continue; } - prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume || action.op == CompletionPolicy::CompletionOp::Discard; + prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); + if (action.op != CompletionPolicy::CompletionOp::Discard && + action.op != CompletionPolicy::CompletionOp::Wait && + action.op != CompletionPolicy::CompletionOp::Retry) { + updateRunInformation(TimesliceSlot{action.slot}); + } InputSpan span = getInputSpan(action.slot, shouldConsume); auto& spec = ref.get(); InputRecord record{spec.inputs, From 92093d70dceb6974f19b430db77a224cb76bfbd1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:55:17 +0100 Subject: [PATCH 286/726] DPL: also consume for the ConsumeExitisting case --- Framework/Core/src/DataProcessingDevice.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index cb4598daa661f..15c625c3615e5 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -2283,6 +2283,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v } bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume || + action.op == CompletionPolicy::CompletionOp::ConsumeExisting || action.op == CompletionPolicy::CompletionOp::Discard; prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot}); if (action.op != CompletionPolicy::CompletionOp::Discard && From c2b3d2d3330c106c55a244c171b760cd806a0730 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:00:30 +0100 Subject: [PATCH 287/726] Move another macro to be compiled only --- Detectors/TOF/prototyping/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Detectors/TOF/prototyping/CMakeLists.txt b/Detectors/TOF/prototyping/CMakeLists.txt index 767e44d70d6d4..1ce2268f1358a 100644 --- a/Detectors/TOF/prototyping/CMakeLists.txt +++ b/Detectors/TOF/prototyping/CMakeLists.txt @@ -39,7 +39,7 @@ o2_add_test_root_macro(findTOFclusterFromLabel.C O2::ReconstructionDataFormats 
O2::CommonDataFormat O2::TOFBase - LABELS tof) + LABELS tof COMPILE_ONLY) o2_add_test_root_macro(checkTS.C PUBLIC_LINK_LIBRARIES O2::DataFormatsTOF From b82e22c6dbbbb6b2ed391459085850baaef0060e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 26 Jan 2024 22:31:56 +0100 Subject: [PATCH 288/726] DPL: do not validate lifetime when consumer uses consumeWhenAny --- Framework/Core/src/DeviceSpecHelpers.cxx | 16 +++++++++++++--- Framework/Core/src/WorkflowHelpers.cxx | 22 ++++++++++++++++++---- Framework/Core/src/WorkflowHelpers.h | 6 ++++++ 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 756d4d57ef342..b1681a4779ceb 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1074,8 +1074,6 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf WorkflowHelpers::constructGraph(workflow, logicalEdges, outputs, availableForwardsInfo); - WorkflowHelpers::validateEdges(workflow, logicalEdges, outputs); - // We need to instanciate one device per (me, timeIndex) in the // DeviceConnectionEdge. For each device we need one new binding // server per (me, other) -> port Moreover for each (me, other, @@ -1122,10 +1120,13 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf inActions, workflow, availableForwardsInfo, channelPolicies, channelPrefix, defaultOffer, overrideServices); // We apply the completion policies here since this is where we have all the // devices resolved. - for (auto& device : devices) { + std::map policies; + for (DeviceSpec& device : devices) { bool hasPolicy = false; + policies[device.name].completionPolicyName = "unknown"; for (auto& policy : completionPolicies) { if (policy.matcher(device) == true) { + policies[policy.name].completionPolicyName = policy.name; device.completionPolicy = policy; hasPolicy = true; break; @@ -1158,6 +1159,15 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf throw runtime_error_f("Unable to find a resource policy for %s", device.id.c_str()); } } + // Iterate of the workflow and create a consistent vector of DataProcessorPoliciesInfo + std::vector policiesVector; + for (size_t wi = 0; wi < workflow.size(); ++wi) { + auto& processor = workflow[wi]; + auto& info = policies[processor.name]; + policiesVector.push_back(info); + } + + WorkflowHelpers::validateEdges(workflow, policiesVector, logicalEdges, outputs); for (auto& device : devices) { device.resourceMonitoringInterval = resourcesMonitoringInterval; diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 360480f80d4ef..6b4ad35f826be 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -1214,8 +1214,14 @@ std::vector WorkflowHelpers::computeDanglingOutputs(WorkflowSpec cons return results; } -bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +bool validateLifetime(std::ostream& errors, + DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorPoliciesInfo const& producerPolicies, + DataProcessorSpec const& consumer, InputSpec const& input, DataProcessorPoliciesInfo const& consumerPolicies) { + // In case the completion policy is consume-any, we do not need to check anything. 
+ if (consumerPolicies.completionPolicyName == "consume-any") { + return true; + } if (input.lifetime == Lifetime::Timeframe && output.lifetime == Lifetime::Sporadic) { errors << fmt::format("Input {} of {} has lifetime Timeframe, but output {} of {} has lifetime Sporadic\n", DataSpecUtils::describe(input).c_str(), consumer.name, @@ -1225,7 +1231,9 @@ bool validateLifetime(std::ostream& errors, DataProcessorSpec const& producer, O return true; } -bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorSpec const& consumer, InputSpec const& input) +bool validateExpendable(std::ostream& errors, + DataProcessorSpec const& producer, OutputSpec const& output, DataProcessorPoliciesInfo const& producerPolicies, + DataProcessorSpec const& consumer, InputSpec const& input, DataProcessorPoliciesInfo const& consumerPolicies) { auto isExpendable = [](DataProcessorLabel const& label) { return label.value == "expendable"; @@ -1244,8 +1252,12 @@ bool validateExpendable(std::ostream& errors, DataProcessorSpec const& producer, return true; } -using Validator = std::function; +using Validator = std::function; + void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, + std::vector const& policies, std::vector const& edges, std::vector const& outputs) { @@ -1262,10 +1274,12 @@ void WorkflowHelpers::validateEdges(WorkflowSpec const& workflow, for (auto& edge : edges) { DataProcessorSpec const& producer = workflow[edge.producer]; DataProcessorSpec const& consumer = workflow[edge.consumer]; + DataProcessorPoliciesInfo const& producerPolicies = policies[edge.producer]; + DataProcessorPoliciesInfo const& consumerPolicies = policies[edge.consumer]; OutputSpec const& output = outputs[edge.outputGlobalIndex]; InputSpec const& input = consumer.inputs[edge.consumerInputIndex]; for (auto& validator : defaultValidators) { - hasErrors |= !validator(errors, producer, output, consumer, input); + hasErrors |= !validator(errors, producer, output, producerPolicies, consumer, input, consumerPolicies); } } if (hasErrors) { diff --git a/Framework/Core/src/WorkflowHelpers.h b/Framework/Core/src/WorkflowHelpers.h index 0a3cd408b3424..055cae1d6011b 100644 --- a/Framework/Core/src/WorkflowHelpers.h +++ b/Framework/Core/src/WorkflowHelpers.h @@ -141,6 +141,11 @@ struct TopoIndexInfo { friend std::ostream& operator<<(std::ostream& out, TopoIndexInfo const& info); }; +// Information about the policies which were derived for a given data processor. +struct DataProcessorPoliciesInfo { + std::string completionPolicyName; +}; + struct OutputObj { InputSpec spec; bool isdangling; @@ -234,6 +239,7 @@ struct WorkflowHelpers { /// For example we should make sure that Lifetime::Timeframe inputs of /// one node is not connected to an Output of Lifetime::Sporadic of another node. 
static void validateEdges(WorkflowSpec const& workflow, + std::vector const& policiesInfos, std::vector const& edges, std::vector const& outputs); }; From b59c981e85c82da2cb4dda7337b7051240572116 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:23:30 +0100 Subject: [PATCH 289/726] Make sure the user routes are traced also on EoS --- Framework/Core/src/CommonServices.cxx | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b3ee75a6a3886..e6d7ae7fcd159 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -220,6 +220,20 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); } } }, + .preEOS = [](EndOfStreamContext& context, void* service) { + // We need to reset the routeUserCreated because the end of stream + // uses a different context which does not know about the routes. + // FIXME: This should be fixed in a different way, but for now it will + // allow TPC IDC to work. + auto* stream = (StreamContext*)service; + auto& routes = context.services().get().outputs; + // Notice I need to do this here, because different invocation for + // the same stream might be referring to different data processors. + // We should probably have a context which is per stream of a specific + // data processor. + stream->routeUserCreated.resize(routes.size()); + // Reset the routeUserCreated at every processing step + std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); }, .kind = ServiceKind::Stream}; } From 0a038a4f943087d9749195065eb9bcd235abd808 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 30 Jan 2024 15:11:18 +0100 Subject: [PATCH 290/726] DPL: avoid invoking matchDataHeader twice when sending data Percolate precomputed value instead, given that payload and header all go to the same route. 
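In reduced form (placeholder types, not the real DataAllocator interface), the change boils down to resolving the route once per output and handing the result to the helper that builds the payload part, since header and payload always travel on the same route:

  #include <cstddef>
  #include <cstdio>
  #include <string>
  #include <vector>

  struct RouteIndex {
    std::size_t value;
  };

  struct Allocator {
    std::vector<std::string> routes;

    // Stand-in for the route lookup that used to be performed twice.
    RouteIndex matchDataHeader(const std::string& spec) const
    {
      for (std::size_t i = 0; i < routes.size(); ++i) {
        if (routes[i] == spec) {
          return {i};
        }
      }
      return {routes.size()};
    }

    // The payload helper now receives the already resolved route.
    void addPartToContext(RouteIndex route, const std::string& spec) const
    {
      std::printf("payload for %s -> route %zu\n", spec.c_str(), route.value);
    }

    void snapshot(const std::string& spec) const
    {
      RouteIndex route = matchDataHeader(spec); // matched once ...
      addPartToContext(route, spec);            // ... and reused for the payload part
    }
  };

  int main()
  {
    Allocator a{{"clusters", "tracks"}};
    a.snapshot("tracks");
  }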
--- Framework/Core/include/Framework/DataAllocator.h | 4 ++-- Framework/Core/src/DataAllocator.cxx | 8 +++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index f80d6d0836e28..23b9c62458b5e 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -399,7 +399,7 @@ class DataAllocator "\n - std::vector of messageable structures or pointers to those" "\n - types with ROOT dictionary and implementing ROOT ClassDef interface"); } - addPartToContext(std::move(payloadMessage), spec, serializationType); + addPartToContext(routeIndex, std::move(payloadMessage), spec, serializationType); } /// Take a snapshot of a raw data array which can be either POD or may contain a serialized @@ -527,7 +527,7 @@ class DataAllocator size_t payloadSize); // Output getOutputByBind(OutputRef&& ref); - void addPartToContext(fair::mq::MessagePtr&& payload, + void addPartToContext(RouteIndex routeIndex, fair::mq::MessagePtr&& payload, const Output& spec, o2::header::SerializationMethod serializationMethod); }; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index dbeeadad68b2b..474be319b4857 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -119,11 +119,9 @@ fair::mq::MessagePtr DataAllocator::headerMessageFromOutput(Output const& spec, return o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph, spec.metaHeader}); } -void DataAllocator::addPartToContext(fair::mq::MessagePtr&& payloadMessage, const Output& spec, +void DataAllocator::addPartToContext(RouteIndex routeIndex, fair::mq::MessagePtr&& payloadMessage, const Output& spec, o2::header::SerializationMethod serializationMethod) { - auto& timingInfo = mRegistry.get(); - RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); auto headerMessage = headerMessageFromOutput(spec, routeIndex, serializationMethod, 0); // FIXME: this is kind of ugly, we know that we can change the content of the @@ -284,7 +282,7 @@ void DataAllocator::snapshot(const Output& spec, const char* payload, size_t pay fair::mq::MessagePtr payloadMessage(proxy.createOutputMessage(routeIndex, payloadSize)); memcpy(payloadMessage->GetData(), payload, payloadSize); - addPartToContext(std::move(payloadMessage), spec, serializationMethod); + addPartToContext(routeIndex, std::move(payloadMessage), spec, serializationMethod); } Output DataAllocator::getOutputByBind(OutputRef&& ref) @@ -348,7 +346,7 @@ void DataAllocator::cookDeadBeef(const Output& spec) auto deadBeefOutput = Output{spec.origin, spec.description, 0xdeadbeef}; auto headerMessage = headerMessageFromOutput(deadBeefOutput, routeIndex, header::gSerializationMethodNone, 0); - addPartToContext(proxy.createOutputMessage(routeIndex, 0), deadBeefOutput, header::gSerializationMethodNone); + addPartToContext(routeIndex, proxy.createOutputMessage(routeIndex, 0), deadBeefOutput, header::gSerializationMethodNone); } } // namespace o2::framework From 54b809640566f568991d3571557f08b9990555a4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 31 Jan 2024 11:47:51 +0100 Subject: [PATCH 291/726] DPL: mark output proxy as resiliant, if actually the case If the only inputs are sporadic, they can be missing by design. If the completion policy is consumeWhenAny, it can for sure survive a crashing upstream device. 
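The decision itself, as a stand-alone sketch (not the real proxy code, the surrounding option handling is assumed):

  #include <cstdlib>
  #include <string>
  #include <vector>

  struct DataProcessorLabel {
    std::string value;
  };

  int main()
  {
    // Assume all inputs of the proxy were switched to Lifetime::Sporadic.
    bool sporadicInputs = true;
    std::vector<DataProcessorLabel> labels;

    // Sporadic-only inputs may legitimately be missing, so the proxy is resilient.
    bool isResilient = sporadicInputs;
    // Consume-when-any mode survives a crashing upstream device as well.
    if (const char* env = std::getenv("DPL_OUTPUT_PROXY_WHENANY"); env != nullptr && std::atoi(env) != 0) {
      isResilient = true;
    }
    if (isResilient) {
      labels.push_back({"resilient"});
    }
    return isResilient ? 0 : 1;
  }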
--- Framework/Utils/src/dpl-output-proxy.cxx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Framework/Utils/src/dpl-output-proxy.cxx b/Framework/Utils/src/dpl-output-proxy.cxx index fb70f7dfdf4ed..85bd869f3351c 100644 --- a/Framework/Utils/src/dpl-output-proxy.cxx +++ b/Framework/Utils/src/dpl-output-proxy.cxx @@ -92,11 +92,13 @@ WorkflowSpec defineDataProcessing(ConfigContext const& config) if (inputs.size() == 0) { throw std::runtime_error("invalid dataspec '" + inputConfig + "'"); } + bool isResilient = false; // we need to set the lifetime of the inputs to sporadic if requested if (sporadicInputs) { for (auto& input : inputs) { input.lifetime = Lifetime::Sporadic; } + isResilient = true; } // we build the default channel configuration from the binding of the first input @@ -133,5 +135,11 @@ WorkflowSpec defineDataProcessing(ConfigContext const& config) std::vector workflow; workflow.emplace_back(std::move(specifyFairMQDeviceOutputProxy(processorName.c_str(), inputs, defaultChannelConfig.c_str()))); + if (getenv("DPL_OUTPUT_PROXY_WHENANY") && atoi(getenv("DPL_OUTPUT_PROXY_WHENANY"))) { + isResilient = true; + } + if (isResilient) { + workflow.back().labels.push_back({"resilient"}); + } return workflow; } From 4c1d3786e3f2d414d9254070ecf60babbfd9038a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 31 Jan 2024 13:58:22 +0100 Subject: [PATCH 292/726] DPL: avoid invoking matchDataHeader twice --- Framework/Core/include/Framework/DataAllocator.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 23b9c62458b5e..8151d2f83c6c6 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -174,10 +174,10 @@ class DataAllocator decltype(auto) make(const Output& spec, Args... args) { auto& timingInfo = mRegistry.get(); - auto routeIndex = matchDataHeader(spec, timingInfo.timeslice); auto& context = mRegistry.get(); if constexpr (is_specialization_v) { + auto routeIndex = matchDataHeader(spec, timingInfo.timeslice); // plain buffer as polymorphic spectator std::vector, which does not run constructors / destructors using ValueType = typename T::value_type; @@ -187,6 +187,7 @@ class DataAllocator std::move(headerMessage), routeIndex, 0, std::forward(args)...) 
.get(); } else if constexpr (is_specialization_v && has_messageable_value_type::value) { + auto routeIndex = matchDataHeader(spec, timingInfo.timeslice); // this catches all std::vector objects with messageable value type before checking if is also // has a root dictionary, so non-serialized transmission is preferred using ValueType = typename T::value_type; @@ -195,6 +196,7 @@ class DataAllocator fair::mq::MessagePtr headerMessage = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodNone, 0); return context.add>(std::move(headerMessage), routeIndex, 0, std::forward(args)...).get(); } else if constexpr (has_root_dictionary::value == true && is_messageable::value == false) { + auto routeIndex = matchDataHeader(spec, timingInfo.timeslice); // Extended support for types implementing the Root ClassDef interface, both TObject // derived types and others if constexpr (enable_root_serialization::value) { @@ -233,6 +235,7 @@ class DataAllocator if constexpr (is_messageable::value == true) { auto [nElements] = std::make_tuple(args...); auto size = nElements * sizeof(T); + auto routeIndex = matchDataHeader(spec, timingInfo.timeslice); fair::mq::MessagePtr headerMessage = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodNone, size); return context.add>(std::move(headerMessage), routeIndex, 0, nElements).get(); From 00e80dec12fa3a43224780642874dda56fea7102 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 31 Jan 2024 20:14:46 +0100 Subject: [PATCH 293/726] DPL: make sure we correctly handle DPL created messages This should fix the issue about the missing DISTSUBTIMEFRAME messages for good. --- .../Core/include/Framework/StreamContext.h | 3 +- Framework/Core/src/CommonServices.cxx | 51 ++++++++++++------- Framework/Core/src/DataAllocator.cxx | 8 ++- 3 files changed, 41 insertions(+), 21 deletions(-) diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h index 8300ce4ce7ac8..79c8ad798836a 100644 --- a/Framework/Core/include/Framework/StreamContext.h +++ b/Framework/Core/include/Framework/StreamContext.h @@ -69,7 +69,8 @@ struct StreamContext { // for a given iteration. // This is in the stream context to allow tracking data creation on a per thread // basis. - std::vector routeUserCreated; + std::vector routeDPLCreated; + std::vector routeCreated; }; } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index e6d7ae7fcd159..fc5127c9b05a3 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -173,9 +173,11 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // the same stream might be referring to different data processors. // We should probably have a context which is per stream of a specific // data processor. 
- stream->routeUserCreated.resize(routes.size()); - // Reset the routeUserCreated at every processing step - std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); }, + stream->routeDPLCreated.resize(routes.size()); + stream->routeCreated.resize(routes.size()); + // Reset the routeDPLCreated at every processing step + std::fill(stream->routeDPLCreated.begin(), stream->routeDPLCreated.end(), false); + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); }, .postProcessing = [](ProcessingContext& processingContext, void* service) { auto* stream = (StreamContext*)service; auto& routes = processingContext.services().get().outputs; @@ -184,44 +186,50 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // Check if we never created any data for this timeslice // if we did not, but we still have didDispatched set to true // it means it was created out of band. - bool didCreate = false; + bool userDidCreate = false; O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); for (size_t ri = 0; ri < routes.size(); ++ri) { - if (stream->routeUserCreated[ri] == true) { - didCreate = true; + if (stream->routeCreated[ri] == true && stream->routeDPLCreated[ri] == false) { + userDidCreate = true; break; } } - if (didCreate == false && messageContext.didDispatch() == true) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band didCreate == %d && messageContext.didDispatch == %d", - didCreate, + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "userDidCreate == %d && didDispatch == %d", + userDidCreate, + messageContext.didDispatch()); + if (userDidCreate == false && messageContext.didDispatch() == true) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band userDidCreate == %d && messageContext.didDispatch == %d", + userDidCreate, messageContext.didDispatch()); return; } - if (didCreate == false && messageContext.didDispatch() == false) { + if (userDidCreate == false && messageContext.didDispatch() == false) { O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "No data created"); + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "No data created."); return; } for (size_t ri = 0; ri < routes.size(); ++ri) { auto &route = routes[ri]; auto &matcher = route.matcher; - if (stream->routeUserCreated[ri] == true) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created by user. ri = %" PRIu64 ", %{public}s", + if (stream->routeDPLCreated[ri] == true) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created by DPL. 
ri = %" PRIu64 ", %{public}s", (uint64_t)ri, DataSpecUtils::describe(matcher).c_str()); continue; } if ((timeslice % route.maxTimeslices) != route.timeslice) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Route ri = %" PRIu64 ", skipped", + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Route ri = %" PRIu64 ", skipped because of pipelining.", (uint64_t)ri); continue; } if (matcher.lifetime == Lifetime::Timeframe) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", + "Expected Lifetime::Timeframe data %{public}s was not created for timeslice %" PRIu64 " and might result in dropped timeframes", + DataSpecUtils::describe(matcher).c_str(), (uint64_t)timeslice); LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); } } }, .preEOS = [](EndOfStreamContext& context, void* service) { - // We need to reset the routeUserCreated because the end of stream + // We need to reset the routeDPLCreated / routeCreated because the end of stream // uses a different context which does not know about the routes. // FIXME: This should be fixed in a different way, but for now it will // allow TPC IDC to work. @@ -231,9 +239,11 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // the same stream might be referring to different data processors. // We should probably have a context which is per stream of a specific // data processor. - stream->routeUserCreated.resize(routes.size()); - // Reset the routeUserCreated at every processing step - std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); }, + stream->routeDPLCreated.resize(routes.size()); + stream->routeCreated.resize(routes.size()); + // Reset the routeCreated / routeDPLCreated at every processing step + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); + std::fill(stream->routeDPLCreated.begin(), stream->routeDPLCreated.end(), false); }, .kind = ServiceKind::Stream}; } @@ -505,7 +515,10 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() stfDist.runNumber = timingInfo.runNumber; // We mark it as not created, because we do should not account for it when // checking if we created all the data for a timeslice. 
- streamContext.routeUserCreated[oi] = false; + O2_SIGNPOST_ID_FROM_POINTER(sid, stream_context, &streamContext); + O2_SIGNPOST_EVENT_EMIT(stream_context, sid, "finaliseOutputs", "Route %" PRIu64 " (%{public}s) was created by DPL.", (uint64_t)oi, + DataSpecUtils::describe(output.matcher).c_str()); + streamContext.routeDPLCreated[oi] = true; } } }, .kind = ServiceKind::Global}; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 474be319b4857..1171caa0b0e23 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -20,6 +20,7 @@ #include "Framework/DataProcessingContext.h" #include "Framework/DeviceSpec.h" #include "Framework/StreamContext.h" +#include "Framework/Signpost.h" #include "Headers/Stack.h" #include @@ -33,6 +34,8 @@ #include +O2_DECLARE_DYNAMIC_LOG(stream_context); + namespace o2::framework { @@ -53,7 +56,10 @@ RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice) for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) { auto& route = allowedOutputRoutes[ri]; if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) { - stream.routeUserCreated.at(ri) = true; + stream.routeCreated.at(ri) = true; + auto sid = _o2_signpost_id_t{(int64_t)&stream}; + O2_SIGNPOST_EVENT_EMIT(stream_context, sid, "data_allocator", "Route %" PRIu64 " (%{public}s) created for timeslice %" PRIu64, + (uint64_t)ri, DataSpecUtils::describe(route.matcher).c_str(), (uint64_t)timeslice); return RouteIndex{ri}; } } From 9cbe4b647a0fe1db279c3051e3a82dd85b795e3e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 1 Feb 2024 13:14:48 +0100 Subject: [PATCH 294/726] DPL: print out how to enable disabled signposts --- Framework/Core/src/runDataProcessing.cxx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index d98d4a5680e8d..9ca46e060b1a2 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -2951,6 +2951,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, } } + static pid_t pid = getpid(); if (varmap.count("signposts")) { auto signpostsToEnable = varmap["signposts"].as(); auto matchingLogEnabler = [](char const* name, void* l, void* context) { @@ -2961,6 +2962,8 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, LOGP(info, "Enabling signposts for {}", *selectedName); _o2_log_set_stacktrace(log, 1); return false; + } else { + LOGP(info, "Signpost stream \"{}\" disabled. Enable it with o2-log -p {} -a {}", name, pid, (void*)&log->stacktrace); } return true; }; @@ -2972,6 +2975,13 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, o2_walk_logs(matchingLogEnabler, token); token = strtok_r(nullptr, ",", &saveptr); } + } else { + auto printAllSignposts = [](char const* name, void* l, void* context) { + auto* log = (_o2_log_t*)l; + LOGP(detail, "Signpost stream {} disabled. 
Enable it with o2-log -p {} -a {}", name, pid, (void*)&log->stacktrace); + return true; + }; + o2_walk_logs(printAllSignposts, nullptr); } auto evaluateBatchOption = [&varmap]() -> bool { From bf896bcfee9a95c15f415422bcad055c9904b472 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 1 Feb 2024 13:26:45 +0100 Subject: [PATCH 295/726] DPL: properly install o2-log --- Framework/Foundation/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Foundation/CMakeLists.txt b/Framework/Foundation/CMakeLists.txt index 8babb4bc9a44e..bcde0f7a69f34 100644 --- a/Framework/Foundation/CMakeLists.txt +++ b/Framework/Foundation/CMakeLists.txt @@ -61,6 +61,7 @@ set_property(TARGET o2-test-framework-SignpostLogger PROPERTY RUNTIME_OUTPUT_DIR set_property(TARGET o2-test-framework-ThreadSanitizer PROPERTY RUNTIME_OUTPUT_DIRECTORY ${outdir}) get_filename_component(bindir ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/../bin ABSOLUTE) set_property(TARGET o2-log PROPERTY RUNTIME_OUTPUT_DIRECTORY ${bindir}) +install(TARGETS o2-log RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) add_test(NAME framework:foundation COMMAND o2-test-framework-foundation) From b14aad9b9f8e4617be75b208d2e83d6aceade0f7 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 1 Feb 2024 15:50:29 +0100 Subject: [PATCH 296/726] DPL: make sure we correctly handle DPL created messages This should fix the issue about the missing DISTSUBTIMEFRAME messages for good. --- .../Core/include/Framework/StreamContext.h | 3 +- Framework/Core/src/CommonServices.cxx | 55 ++++++++++++------- Framework/Core/src/DataAllocator.cxx | 8 ++- 3 files changed, 44 insertions(+), 22 deletions(-) diff --git a/Framework/Core/include/Framework/StreamContext.h b/Framework/Core/include/Framework/StreamContext.h index 8300ce4ce7ac8..79c8ad798836a 100644 --- a/Framework/Core/include/Framework/StreamContext.h +++ b/Framework/Core/include/Framework/StreamContext.h @@ -69,7 +69,8 @@ struct StreamContext { // for a given iteration. // This is in the stream context to allow tracking data creation on a per thread // basis. - std::vector routeUserCreated; + std::vector routeDPLCreated; + std::vector routeCreated; }; } // namespace o2::framework diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index e6d7ae7fcd159..b7fc3410a1931 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -173,9 +173,11 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // the same stream might be referring to different data processors. // We should probably have a context which is per stream of a specific // data processor. 
- stream->routeUserCreated.resize(routes.size()); - // Reset the routeUserCreated at every processing step - std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); }, + stream->routeDPLCreated.resize(routes.size()); + stream->routeCreated.resize(routes.size()); + // Reset the routeDPLCreated at every processing step + std::fill(stream->routeDPLCreated.begin(), stream->routeDPLCreated.end(), false); + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); }, .postProcessing = [](ProcessingContext& processingContext, void* service) { auto* stream = (StreamContext*)service; auto& routes = processingContext.services().get().outputs; @@ -184,44 +186,52 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // Check if we never created any data for this timeslice // if we did not, but we still have didDispatched set to true // it means it was created out of band. - bool didCreate = false; + bool userDidCreate = false; O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); for (size_t ri = 0; ri < routes.size(); ++ri) { - if (stream->routeUserCreated[ri] == true) { - didCreate = true; + if (stream->routeCreated[ri] == true && stream->routeDPLCreated[ri] == false) { + userDidCreate = true; break; } } - if (didCreate == false && messageContext.didDispatch() == true) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band didCreate == %d && messageContext.didDispatch == %d", - didCreate, + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "userDidCreate == %d && didDispatch == %d", + userDidCreate, + messageContext.didDispatch()); + if (userDidCreate == false && messageContext.didDispatch() == true) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created out of band userDidCreate == %d && messageContext.didDispatch == %d", + userDidCreate, messageContext.didDispatch()); return; } - if (didCreate == false && messageContext.didDispatch() == false) { + if (userDidCreate == false && messageContext.didDispatch() == false) { O2_SIGNPOST_ID_FROM_POINTER(cid, stream_context, service); - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "No data created"); + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "No data created."); return; } for (size_t ri = 0; ri < routes.size(); ++ri) { auto &route = routes[ri]; auto &matcher = route.matcher; - if (stream->routeUserCreated[ri] == true) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created by user. ri = %" PRIu64 ", %{public}s", + if (stream->routeDPLCreated[ri] == true) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Data created by DPL. 
ri = %" PRIu64 ", %{public}s", (uint64_t)ri, DataSpecUtils::describe(matcher).c_str()); continue; } - if ((timeslice % route.maxTimeslices) != route.timeslice) { - O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Route ri = %" PRIu64 ", skipped", + if (stream->routeCreated[ri] == true) { + continue; + } if ((timeslice % route.maxTimeslices) != route.timeslice) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", "Route ri = %" PRIu64 ", skipped because of pipelining.", (uint64_t)ri); continue; } if (matcher.lifetime == Lifetime::Timeframe) { + O2_SIGNPOST_EVENT_EMIT(stream_context, cid, "postProcessingCallbacks", + "Expected Lifetime::Timeframe data %{public}s was not created for timeslice %" PRIu64 " and might result in dropped timeframes", + DataSpecUtils::describe(matcher).c_str(), (uint64_t)timeslice); LOGP(error, "Expected Lifetime::Timeframe data {} was not created for timeslice {} and might result in dropped timeframes", DataSpecUtils::describe(matcher), timeslice); } } }, .preEOS = [](EndOfStreamContext& context, void* service) { - // We need to reset the routeUserCreated because the end of stream + // We need to reset the routeDPLCreated / routeCreated because the end of stream // uses a different context which does not know about the routes. // FIXME: This should be fixed in a different way, but for now it will // allow TPC IDC to work. @@ -231,9 +241,11 @@ o2::framework::ServiceSpec CommonServices::streamContextSpec() // the same stream might be referring to different data processors. // We should probably have a context which is per stream of a specific // data processor. - stream->routeUserCreated.resize(routes.size()); - // Reset the routeUserCreated at every processing step - std::fill(stream->routeUserCreated.begin(), stream->routeUserCreated.end(), false); }, + stream->routeDPLCreated.resize(routes.size()); + stream->routeCreated.resize(routes.size()); + // Reset the routeCreated / routeDPLCreated at every processing step + std::fill(stream->routeCreated.begin(), stream->routeCreated.end(), false); + std::fill(stream->routeDPLCreated.begin(), stream->routeDPLCreated.end(), false); }, .kind = ServiceKind::Stream}; } @@ -505,7 +517,10 @@ o2::framework::ServiceSpec CommonServices::ccdbSupportSpec() stfDist.runNumber = timingInfo.runNumber; // We mark it as not created, because we do should not account for it when // checking if we created all the data for a timeslice. 
- streamContext.routeUserCreated[oi] = false; + O2_SIGNPOST_ID_FROM_POINTER(sid, stream_context, &streamContext); + O2_SIGNPOST_EVENT_EMIT(stream_context, sid, "finaliseOutputs", "Route %" PRIu64 " (%{public}s) was created by DPL.", (uint64_t)oi, + DataSpecUtils::describe(output.matcher).c_str()); + streamContext.routeDPLCreated[oi] = true; } } }, .kind = ServiceKind::Global}; diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index 474be319b4857..de500cd6d6c4e 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -20,6 +20,7 @@ #include "Framework/DataProcessingContext.h" #include "Framework/DeviceSpec.h" #include "Framework/StreamContext.h" +#include "Framework/Signpost.h" #include "Headers/Stack.h" #include @@ -33,6 +34,8 @@ #include +O2_DECLARE_DYNAMIC_LOG(stream_context); + namespace o2::framework { @@ -53,7 +56,10 @@ RouteIndex DataAllocator::matchDataHeader(const Output& spec, size_t timeslice) for (auto ri = 0; ri < allowedOutputRoutes.size(); ++ri) { auto& route = allowedOutputRoutes[ri]; if (DataSpecUtils::match(route.matcher, spec.origin, spec.description, spec.subSpec) && ((timeslice % route.maxTimeslices) == route.timeslice)) { - stream.routeUserCreated.at(ri) = true; + stream.routeCreated.at(ri) = true; + auto sid = _o2_signpost_id_t{(int64_t)&stream}; + O2_SIGNPOST_EVENT_EMIT(stream_context, sid, "data_allocator", "Route %" PRIu64 " (%{public}s) created for timeslice %" PRIu64, + (uint64_t)ri, DataSpecUtils::describe(route.matcher).c_str(), (uint64_t)timeslice); return RouteIndex{ri}; } } From bb5c392d97bd97b7d6e117da03140aaa6eb6e558 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 09:03:03 +0100 Subject: [PATCH 297/726] Use C++20 spaceship operator See also: https://en.cppreference.com/w/cpp/language/operator_comparison#Three-way_comparison https://en.cppreference.com/w/cpp/language/default_comparisons --- Detectors/MUON/MID/CTF/include/MIDCTF/CTFHelper.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/Detectors/MUON/MID/CTF/include/MIDCTF/CTFHelper.h b/Detectors/MUON/MID/CTF/include/MIDCTF/CTFHelper.h index 01e519701b501..8ca21658e0942 100644 --- a/Detectors/MUON/MID/CTF/include/MIDCTF/CTFHelper.h +++ b/Detectors/MUON/MID/CTF/include/MIDCTF/CTFHelper.h @@ -131,12 +131,8 @@ class CTFHelper inline friend I operator+(difference_type i, const I& iter) { return iter + i; }; - bool operator!=(const I& other) const noexcept { return mIndex != other.mIndex; } bool operator==(const I& other) const noexcept { return mIndex == other.mIndex; } - bool operator>(const I& other) const noexcept { return mIndex > other.mIndex; } - bool operator<(const I& other) const noexcept { return mIndex < other.mIndex; } - bool operator>=(const I& other) const noexcept { return mIndex >= other.mIndex; } - bool operator<=(const I& other) const noexcept { return mIndex <= other.mIndex; } + bool operator<=>(const I& other) const noexcept { return mIndex <=> other.mIndex; } protected: gsl::span mOrder{}; From cad2296e920e1dd83ef59ed6f888225180a4c6c7 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:26:18 +0100 Subject: [PATCH 298/726] DPL GUI: display labels for the device --- Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx 
b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index f54e74d6ac019..1887a41192bc4 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -354,6 +354,11 @@ void displayDeviceInspector(DeviceSpec const& spec, deviceInfoTable("Outputs:", ProcessingStateId::OUTPUT_MATCHERS, states, metrics); configurationTable(info.currentConfig, info.currentProvenance); optionsTable("Workflow Options", metadata.workflowOptions, control); + if (ImGui::CollapsingHeader("Labels", ImGuiTreeNodeFlags_DefaultOpen)) { + for (auto &label : spec.labels) { + ImGui::Text("%s", label.value.c_str()); + } + } servicesTable("Services", spec.services); if (ImGui::CollapsingHeader("Command line arguments", ImGuiTreeNodeFlags_DefaultOpen)) { static ImGuiTextFilter filter; From 345d0c5e7851c454f009a13bdb13d3bbe4b684da Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:23:30 +0100 Subject: [PATCH 299/726] DPL: fix circular dependency between expendable / resilient tasks and output proxy The output proxy was always sorted last, however this clearly conflicts in the case there is expendable tasks or resilient one, where the behavior than depends on the resiliency of the output proxy itself and the data dependencies. This address the issue and hopefully fixes the problem for good. --- Framework/Core/src/TopologyPolicy.cxx | 58 +++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/TopologyPolicy.cxx b/Framework/Core/src/TopologyPolicy.cxx index fb96eff5af2fc..ebeb70c7950a3 100644 --- a/Framework/Core/src/TopologyPolicy.cxx +++ b/Framework/Core/src/TopologyPolicy.cxx @@ -10,9 +10,12 @@ // or submit itself to any jurisdiction. #include "Framework/DataProcessorSpec.h" #include "Framework/TopologyPolicy.h" +#include "Framework/Signpost.h" #include #include +O2_DECLARE_DYNAMIC_LOG(topology); + namespace o2::framework { @@ -68,15 +71,20 @@ bool dataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) bool expendableDataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) { + O2_SIGNPOST_ID_GENERATE(sid, topology); + O2_SIGNPOST_START(topology, sid, "expendableDataDeps", "Checking if %s depends on %s", a.name.c_str(), b.name.c_str()); // We never put anything behind the dummy sink. if (b.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) { + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. %s is dummy sink and it nothing can depend on it.", b.name.c_str()); return false; } if (a.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) { + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "true. %s is dummy sink and it nothing can depend on it.", a.name.c_str()); return true; } /// If there is an actual dependency between a and b, we return true. if (dataDeps(a, b)) { + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "true. %s has a data dependency on %s", a.name.c_str(), b.name.c_str()); return true; } // If we are here we do not have any data dependency, @@ -101,27 +109,35 @@ bool expendableDataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) // If none is expendable. We simply return false and sort as usual. if (!isAExpendable && !isBExpendable) { - LOGP(debug, "Neither {} nor {} are expendable. No dependency beyond data deps.", a.name, b.name); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. Neither %s nor %s are expendable. 
No dependency beyond data deps.", + a.name.c_str(), b.name.c_str()); return false; } // If both are expendable. We return false and sort as usual. if (isAExpendable && isBExpendable) { - LOGP(debug, "Both {} and {} are expendable. No dependency.", a.name, b.name); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. Both %s and %s are expendable. No dependency.", + a.name.c_str(), b.name.c_str()); return false; } // If b is expendable but b is resilient, we can keep the same order. if (isAExpendable && bResilient) { - LOGP(debug, "{} is expendable but b is resilient, no need to add an unneeded dependency", a.name, a.name, b.name); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. %s is expendable but %s is resilient, no need to add an unneeded dependency", + a.name.c_str(), b.name.c_str()); return false; } // If a is expendable we consider it as if there was a dependency from a to b, // however we still need to check if there is not one already from b to a. if (isAExpendable) { - LOGP(debug, "{} is expendable. Checking if there is a dependency from {} to {}.", a.name, b.name, a.name); - return !dataDeps(b, a); + bool hasDependency = dataDeps(b, a); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "%s is expendable. %s from %s to %s => %s.", + a.name.c_str(), hasDependency ? "There is however an inverse dependency" : "No inverse dependency", b.name.c_str(), a.name.c_str(), + !hasDependency ? "true" : "false"); + return !hasDependency; } // b is expendable and a is not. We are fine with no dependency. + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. %s is expendable but %s is not. No need to add an unneeded dependency.", + b.name.c_str(), a.name.c_str()); return false; }; @@ -135,18 +151,46 @@ TopologyPolicy::DependencyChecker TopologyPolicyHelpers::dataDependency() TopologyPolicy::DependencyChecker TopologyPolicyHelpers::alwaysDependent() { return [](DataProcessorSpec const& dependent, DataProcessorSpec const& ancestor) { + O2_SIGNPOST_ID_GENERATE(sid, topology); + O2_SIGNPOST_START(topology, sid, "alwaysDependent", "Checking if %s depends on %s", dependent.name.c_str(), ancestor.name.c_str()); if (dependent.name == ancestor.name) { + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "false. %s and %s are the same.", dependent.name.c_str(), ancestor.name.c_str()); return false; } if (ancestor.name == "internal-dpl-injected-dummy-sink") { + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "false. %s is a dummy sink.", ancestor.name.c_str()); return false; } const std::regex matcher(".*output-proxy.*"); // Check if regex applies std::cmatch m; - if (std::regex_match(ancestor.name.data(), m, matcher) && std::regex_match(ancestor.name.data(), m, matcher)) { - return dataDeps(dependent, ancestor); + bool isAncestorOutputProxy = std::regex_match(ancestor.name.data(), m, matcher); + // For now dependent is always an output proxy. 
+ assert(std::regex_match(dependent.name.data(), m, matcher)); + bool isAncestorExpendable = std::find_if(ancestor.labels.begin(), ancestor.labels.end(), [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }) != ancestor.labels.end(); + + bool isDependentResilient = std::find_if(dependent.labels.begin(), dependent.labels.end(), [](DataProcessorLabel const& label) { + return label.value == "resilient"; + }) != dependent.labels.end(); + bool isAncestorResilient = std::find_if(ancestor.labels.begin(), ancestor.labels.end(), [](DataProcessorLabel const& label) { + return label.value == "resilient"; + }) != ancestor.labels.end(); + + if (!isDependentResilient && isAncestorExpendable) { + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "false. Ancestor %s is expendable while %s is non-resilient output proxy (dependent).", + ancestor.name.c_str(), dependent.name.c_str()); + return false; + } + + if (isAncestorOutputProxy || (!isDependentResilient && isAncestorResilient)) { + bool hasDependency = dataDeps(dependent, ancestor); + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "%s. Dependent %s %s a dependency on ancestor %s.", + hasDependency ? "true" : "false", dependent.name.c_str(), hasDependency ? "has" : "has not", ancestor.name.c_str()); + return hasDependency; } + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "true by default. Ancestor %s is not an output proxy.", ancestor.name.c_str()); return true; }; } From ef3f58af0917bc72ad1d92b3fb9653f0aad7e58c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:42:03 +0100 Subject: [PATCH 300/726] DPL GUI: display labels for the device --- Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index f54e74d6ac019..eb62fcc09d338 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -354,6 +354,11 @@ void displayDeviceInspector(DeviceSpec const& spec, deviceInfoTable("Outputs:", ProcessingStateId::OUTPUT_MATCHERS, states, metrics); configurationTable(info.currentConfig, info.currentProvenance); optionsTable("Workflow Options", metadata.workflowOptions, control); + if (ImGui::CollapsingHeader("Labels", ImGuiTreeNodeFlags_DefaultOpen)) { + for (auto& label : spec.labels) { + ImGui::Text("%s", label.value.c_str()); + } + } servicesTable("Services", spec.services); if (ImGui::CollapsingHeader("Command line arguments", ImGuiTreeNodeFlags_DefaultOpen)) { static ImGuiTextFilter filter; From 9a6cd8d52688914650dc36942b7748cd7bed96bd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:42:03 +0100 Subject: [PATCH 301/726] DPL: fix circular dependency between expendable / resilient tasks and output proxy The output proxy was always sorted last, however this clearly conflicts in the case there is expendable tasks or resilient one, where the behavior than depends on the resiliency of the output proxy itself and the data dependencies. This address the issue and hopefully fixes the problem for good. 
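For context, a minimal sketch of how the two labels inspected by the policy below get attached to a workflow; the task names here are invented, while the label values are the ones checked in TopologyPolicy.cxx:

  DataProcessorSpec checker{.name = "qc-checker"};     // hypothetical downstream task
  checker.labels.push_back({"expendable"});            // may die without stopping the run
  DataProcessorSpec proxy{.name = "my-output-proxy"};  // hypothetical output proxy
  proxy.labels.push_back({"resilient"});               // tolerates missing or crashed upstream

With these labels the checks below decide whether the proxy still has to be ordered after the other task, or whether the implicit dependency can be dropped to avoid the circularity described above.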
--- Framework/Core/src/TopologyPolicy.cxx | 58 +++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/TopologyPolicy.cxx b/Framework/Core/src/TopologyPolicy.cxx index fb96eff5af2fc..ebeb70c7950a3 100644 --- a/Framework/Core/src/TopologyPolicy.cxx +++ b/Framework/Core/src/TopologyPolicy.cxx @@ -10,9 +10,12 @@ // or submit itself to any jurisdiction. #include "Framework/DataProcessorSpec.h" #include "Framework/TopologyPolicy.h" +#include "Framework/Signpost.h" #include #include +O2_DECLARE_DYNAMIC_LOG(topology); + namespace o2::framework { @@ -68,15 +71,20 @@ bool dataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) bool expendableDataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) { + O2_SIGNPOST_ID_GENERATE(sid, topology); + O2_SIGNPOST_START(topology, sid, "expendableDataDeps", "Checking if %s depends on %s", a.name.c_str(), b.name.c_str()); // We never put anything behind the dummy sink. if (b.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) { + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. %s is dummy sink and it nothing can depend on it.", b.name.c_str()); return false; } if (a.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) { + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "true. %s is dummy sink and it nothing can depend on it.", a.name.c_str()); return true; } /// If there is an actual dependency between a and b, we return true. if (dataDeps(a, b)) { + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "true. %s has a data dependency on %s", a.name.c_str(), b.name.c_str()); return true; } // If we are here we do not have any data dependency, @@ -101,27 +109,35 @@ bool expendableDataDeps(DataProcessorSpec const& a, DataProcessorSpec const& b) // If none is expendable. We simply return false and sort as usual. if (!isAExpendable && !isBExpendable) { - LOGP(debug, "Neither {} nor {} are expendable. No dependency beyond data deps.", a.name, b.name); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. Neither %s nor %s are expendable. No dependency beyond data deps.", + a.name.c_str(), b.name.c_str()); return false; } // If both are expendable. We return false and sort as usual. if (isAExpendable && isBExpendable) { - LOGP(debug, "Both {} and {} are expendable. No dependency.", a.name, b.name); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. Both %s and %s are expendable. No dependency.", + a.name.c_str(), b.name.c_str()); return false; } // If b is expendable but b is resilient, we can keep the same order. if (isAExpendable && bResilient) { - LOGP(debug, "{} is expendable but b is resilient, no need to add an unneeded dependency", a.name, a.name, b.name); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. %s is expendable but %s is resilient, no need to add an unneeded dependency", + a.name.c_str(), b.name.c_str()); return false; } // If a is expendable we consider it as if there was a dependency from a to b, // however we still need to check if there is not one already from b to a. if (isAExpendable) { - LOGP(debug, "{} is expendable. Checking if there is a dependency from {} to {}.", a.name, b.name, a.name); - return !dataDeps(b, a); + bool hasDependency = dataDeps(b, a); + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "%s is expendable. %s from %s to %s => %s.", + a.name.c_str(), hasDependency ? 
"There is however an inverse dependency" : "No inverse dependency", b.name.c_str(), a.name.c_str(), + !hasDependency ? "true" : "false"); + return !hasDependency; } // b is expendable and a is not. We are fine with no dependency. + O2_SIGNPOST_END(topology, sid, "expendableDataDeps", "false. %s is expendable but %s is not. No need to add an unneeded dependency.", + b.name.c_str(), a.name.c_str()); return false; }; @@ -135,18 +151,46 @@ TopologyPolicy::DependencyChecker TopologyPolicyHelpers::dataDependency() TopologyPolicy::DependencyChecker TopologyPolicyHelpers::alwaysDependent() { return [](DataProcessorSpec const& dependent, DataProcessorSpec const& ancestor) { + O2_SIGNPOST_ID_GENERATE(sid, topology); + O2_SIGNPOST_START(topology, sid, "alwaysDependent", "Checking if %s depends on %s", dependent.name.c_str(), ancestor.name.c_str()); if (dependent.name == ancestor.name) { + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "false. %s and %s are the same.", dependent.name.c_str(), ancestor.name.c_str()); return false; } if (ancestor.name == "internal-dpl-injected-dummy-sink") { + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "false. %s is a dummy sink.", ancestor.name.c_str()); return false; } const std::regex matcher(".*output-proxy.*"); // Check if regex applies std::cmatch m; - if (std::regex_match(ancestor.name.data(), m, matcher) && std::regex_match(ancestor.name.data(), m, matcher)) { - return dataDeps(dependent, ancestor); + bool isAncestorOutputProxy = std::regex_match(ancestor.name.data(), m, matcher); + // For now dependent is always an output proxy. + assert(std::regex_match(dependent.name.data(), m, matcher)); + bool isAncestorExpendable = std::find_if(ancestor.labels.begin(), ancestor.labels.end(), [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }) != ancestor.labels.end(); + + bool isDependentResilient = std::find_if(dependent.labels.begin(), dependent.labels.end(), [](DataProcessorLabel const& label) { + return label.value == "resilient"; + }) != dependent.labels.end(); + bool isAncestorResilient = std::find_if(ancestor.labels.begin(), ancestor.labels.end(), [](DataProcessorLabel const& label) { + return label.value == "resilient"; + }) != ancestor.labels.end(); + + if (!isDependentResilient && isAncestorExpendable) { + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "false. Ancestor %s is expendable while %s is non-resilient output proxy (dependent).", + ancestor.name.c_str(), dependent.name.c_str()); + return false; + } + + if (isAncestorOutputProxy || (!isDependentResilient && isAncestorResilient)) { + bool hasDependency = dataDeps(dependent, ancestor); + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "%s. Dependent %s %s a dependency on ancestor %s.", + hasDependency ? "true" : "false", dependent.name.c_str(), hasDependency ? "has" : "has not", ancestor.name.c_str()); + return hasDependency; } + O2_SIGNPOST_END(topology, sid, "alwaysDependent", "true by default. Ancestor %s is not an output proxy.", ancestor.name.c_str()); return true; }; } From 004bc21cd626ed2bb287146068d5d9be771a8d6d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 12:54:02 +0100 Subject: [PATCH 302/726] DPL Analysis: introduce PersistenColumn concept This way the compiler does not need to instanciate the conjuction type and it will point to exactly which column is not persistent, when needed. 
--- Framework/Core/include/Framework/ASoA.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Framework/Core/include/Framework/ASoA.h b/Framework/Core/include/Framework/ASoA.h index 4d07fd124a8fe..4be6455ad3b5e 100644 --- a/Framework/Core/include/Framework/ASoA.h +++ b/Framework/Core/include/Framework/ASoA.h @@ -1775,17 +1775,18 @@ void notBoundTable(const char* tableName); namespace row_helpers { +template +concept PersistentColumn = std::is_same_v; + template -std::array getArrowColumns(arrow::Table* table, framework::pack) +std::array getArrowColumns(arrow::Table* table, framework::pack) requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow columns: only persistent columns accepted (not dynamic and not index ones"); return std::array{o2::soa::getIndexFromLabel(table, Cs::columnLabel())...}; } template -std::array, sizeof...(Cs)> getChunks(arrow::Table* table, framework::pack, uint64_t ci) +std::array, sizeof...(Cs)> getChunks(arrow::Table* table, framework::pack, uint64_t ci) requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow chunks: only persistent columns accepted (not dynamic and not index ones"); return std::array, sizeof...(Cs)>{o2::soa::getIndexFromLabel(table, Cs::columnLabel())->chunk(ci)...}; } From 2489a281e9f1d8d00fa9bce8de2798dc43b87163 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 12:54:37 +0100 Subject: [PATCH 303/726] DPL Analysis: introduce PersistentColumn concept This way the compiler does not need to instanciate the conjuction type and it will point to exactly which column is not persistent, when needed. --- Framework/Core/include/Framework/ASoA.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Framework/Core/include/Framework/ASoA.h b/Framework/Core/include/Framework/ASoA.h index 4d07fd124a8fe..4be6455ad3b5e 100644 --- a/Framework/Core/include/Framework/ASoA.h +++ b/Framework/Core/include/Framework/ASoA.h @@ -1775,17 +1775,18 @@ void notBoundTable(const char* tableName); namespace row_helpers { +template +concept PersistentColumn = std::is_same_v; + template -std::array getArrowColumns(arrow::Table* table, framework::pack) +std::array getArrowColumns(arrow::Table* table, framework::pack) requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow columns: only persistent columns accepted (not dynamic and not index ones"); return std::array{o2::soa::getIndexFromLabel(table, Cs::columnLabel())...}; } template -std::array, sizeof...(Cs)> getChunks(arrow::Table* table, framework::pack, uint64_t ci) +std::array, sizeof...(Cs)> getChunks(arrow::Table* table, framework::pack, uint64_t ci) requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow chunks: only persistent columns accepted (not dynamic and not index ones"); return std::array, sizeof...(Cs)>{o2::soa::getIndexFromLabel(table, Cs::columnLabel())->chunk(ci)...}; } From 29e513b5f284a9986d6fab4ed23618fc4200bb55 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 13:06:51 +0100 Subject: [PATCH 304/726] DPL Analysis: introduce PersistentColumn concept This way the compiler does not need to instanciate the conjuction type and it will point to exactly which column is not persistent, when needed. 
--- Framework/Core/include/Framework/ASoA.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Framework/Core/include/Framework/ASoA.h b/Framework/Core/include/Framework/ASoA.h index 4d07fd124a8fe..07493c6db091d 100644 --- a/Framework/Core/include/Framework/ASoA.h +++ b/Framework/Core/include/Framework/ASoA.h @@ -1775,17 +1775,20 @@ void notBoundTable(const char* tableName); namespace row_helpers { +template +concept PersistentColumn = std::is_same_v; + template std::array getArrowColumns(arrow::Table* table, framework::pack) + requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow columns: only persistent columns accepted (not dynamic and not index ones"); return std::array{o2::soa::getIndexFromLabel(table, Cs::columnLabel())...}; } template std::array, sizeof...(Cs)> getChunks(arrow::Table* table, framework::pack, uint64_t ci) + requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow chunks: only persistent columns accepted (not dynamic and not index ones"); return std::array, sizeof...(Cs)>{o2::soa::getIndexFromLabel(table, Cs::columnLabel())->chunk(ci)...}; } From 78f4a8d7f5a7663ac3ffb305eacd3c9716a919ca Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 2 Feb 2024 13:15:24 +0100 Subject: [PATCH 305/726] DPL: add ability to disable inputs programmatically Disabled inputs will not result in an actual route for the data, however it will be stored in the configuration, so that analysis workflows will not need to have the configuration available at every step. --- Framework/Core/include/Framework/InputSpec.h | 3 ++ Framework/Core/include/Framework/OutputSpec.h | 4 ++ Framework/Core/src/DeviceSpecHelpers.cxx | 15 +++++- Framework/Core/src/WorkflowHelpers.cxx | 12 +++-- .../Core/src/WorkflowSerializationHelpers.cxx | 32 ++++++++++++- .../Core/test/test_FrameworkDataFlowToDDS.cxx | 48 ++++++++++++------- .../Core/test/test_WorkflowSerialization.cxx | 38 +++++++++++++++ 7 files changed, 130 insertions(+), 22 deletions(-) diff --git a/Framework/Core/include/Framework/InputSpec.h b/Framework/Core/include/Framework/InputSpec.h index 9be95e6853da2..7858aacb80664 100644 --- a/Framework/Core/include/Framework/InputSpec.h +++ b/Framework/Core/include/Framework/InputSpec.h @@ -74,6 +74,9 @@ struct InputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not the input is to be considered enabled. + /// Useful to programmatically disable inputs e.g. for the ProcessorOptions. + bool enabled = true; friend std::ostream& operator<<(std::ostream& stream, InputSpec const& arg); bool operator==(InputSpec const& that) const; diff --git a/Framework/Core/include/Framework/OutputSpec.h b/Framework/Core/include/Framework/OutputSpec.h index e92792faa6b14..51527bce4e339 100644 --- a/Framework/Core/include/Framework/OutputSpec.h +++ b/Framework/Core/include/Framework/OutputSpec.h @@ -86,6 +86,10 @@ struct OutputSpec { /// A set of configurables which can be used to customise the InputSpec. std::vector metadata; + /// Wether or not this output is enabled. This is useful to decide programmatically + /// wether or not to produce a given output. 
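As a usage sketch, lifted from the serialization test added at the end of this patch, a workflow customisation can simply flip the flag on the specs it wants to silence:

  DataProcessorSpec spec{
    .name = "A",
    .inputs = {{"clbPayload", "CLP"}, {"clbWrapper", "CLW"}},
    .outputs = {{"CP2", "1"}, {"CL2", "2"}},
  };
  spec.inputs[0].enabled = false;   // no route is created for this input
  spec.outputs[1].enabled = false;  // nor for this output

The flag survives workflow (de)serialization, and DeviceSpecHelpers now throws if a disabled input or output would still end up being used to build a route.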
+ bool enabled = true; + friend std::ostream& operator<<(std::ostream& stream, OutputSpec const& arg); }; diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..152ee24f1520e 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -679,19 +679,27 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(policyPtr != nullptr); if (edge.isForward == false) { + auto& matcher = outputsMatchers[edge.outputGlobalIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } OutputRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], + matcher, channel.name, policyPtr, }; device.outputs.emplace_back(route); } else { + auto& matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex]; + if (matcher.enabled == false) { + throw runtime_error_f("Output %s is disabled but it was still used in topology", DataSpecUtils::describe(matcher).data()); + } ForwardRoute route{ edge.timeIndex, consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], + matcher, channel.name}; device.forwards.emplace_back(route); } @@ -915,6 +923,9 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; auto const& sourceChannel = consumerDevice.inputChannels[ci].name; + if (inputSpec.enabled == false) { + throw runtime_error_f("Input %s is disabled but it was still used in topology", DataSpecUtils::describe(inputSpec).data()); + } InputRoute route{ inputSpec, diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 6b4ad35f826be..f07c556b32a9b 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -841,11 +841,13 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, for (size_t wi = 0; wi < workflow.size(); ++wi) { auto& producer = workflow[wi]; - for (size_t oi = 0; oi < producer.outputs.size(); ++oi) { - auto& out = producer.outputs[oi]; + for (auto& output : producer.outputs) { + if (output.enabled == false) { + continue; + } auto uniqueOutputId = outputs.size(); availableOutputsInfo.emplace_back(LogicalOutputInfo{wi, uniqueOutputId, false}); - outputs.push_back(out); + outputs.push_back(output); } } }; @@ -879,6 +881,10 @@ void WorkflowHelpers::constructGraph(const WorkflowSpec& workflow, std::vector matches(constOutputs.size()); for (size_t consumer = 0; consumer < workflow.size(); ++consumer) { for (size_t input = 0; input < workflow[consumer].inputs.size(); ++input) { + // Skip disabled inputs. 
+ if (workflow[consumer].inputs[input].enabled == false) { + continue; + } forwards.clear(); for (size_t i = 0; i < constOutputs.size(); i++) { matches[i] = DataSpecUtils::match(workflow[consumer].inputs[input], constOutputs[i]); diff --git a/Framework/Core/src/WorkflowSerializationHelpers.cxx b/Framework/Core/src/WorkflowSerializationHelpers.cxx index e20e23f98c90b..56fd43df4ca54 100644 --- a/Framework/Core/src/WorkflowSerializationHelpers.cxx +++ b/Framework/Core/src/WorkflowSerializationHelpers.cxx @@ -61,6 +61,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_INPUT_ORIGIN, IN_INPUT_DESCRIPTION, IN_INPUT_SUBSPEC, + IN_INPUT_ENABLED, IN_INPUT_ORIGIN_REF, IN_INPUT_DESCRIPTION_REF, IN_INPUT_SUBSPEC_REF, @@ -75,6 +76,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, IN_OUTPUT_BINDING, IN_OUTPUT_ORIGIN, IN_OUTPUT_DESCRIPTION, + IN_OUTPUT_ENABLED, IN_OUTPUT_SUBSPEC, IN_OUTPUT_LIFETIME, IN_OUTPUT_OPTIONS, @@ -167,6 +169,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_INPUT_SUBSPEC: s << "IN_INPUT_SUBSPEC"; break; + case State::IN_INPUT_ENABLED: + s << "IN_INPUT_ENABLED"; + break; case State::IN_INPUT_ORIGIN_REF: s << "IN_INPUT_ORIGIN_REF"; break; @@ -218,6 +223,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, case State::IN_OUTPUT_OPTIONS: s << "IN_OUTPUT_OPTIONS"; break; + case WorkflowImporter::State::IN_OUTPUT_ENABLED: + s << "IN_OUTPUT_ENABLED"; + break; case State::IN_OPTION: s << "IN_OPTION"; break; @@ -302,6 +310,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.push_back(DataProcessorSpec{}); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; inputMatcherNodes.clear(); } else if (in(State::IN_INPUT_MATCHER)) { // start a new embedded matcher @@ -313,6 +322,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, // will be merged into the parent matcher } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -370,9 +380,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().inputs.push_back(InputSpec({binding}, std::move(*matcher), lifetime, inputOptions)); } + dataProcessors.back().inputs.back().enabled = enabled; inputMatcherNodes.clear(); inputOptions.clear(); - } else if (in(State::IN_INPUT_MATCHER) && inputMatcherNodes.size() > 1) { data_matcher::Node child = std::move(inputMatcherNodes.back()); inputMatcherNodes.pop_back(); @@ -433,6 +443,7 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, } else { dataProcessors.back().outputs.push_back(OutputSpec({binding}, {origin, description}, lifetime)); } + dataProcessors.back().outputs.back().enabled = enabled; outputHasSubSpec = false; } else if (in(State::IN_OPTION)) { std::unique_ptr opt{nullptr}; @@ -545,12 +556,14 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_DATAPROCESSORS); } else if (in(State::IN_INPUTS)) { push(State::IN_INPUT); + enabled = true; } else if (in(State::IN_INPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUT_OPTIONS)) { push(State::IN_OPTION); } else if (in(State::IN_OUTPUTS)) { push(State::IN_OUTPUT); + enabled = true; outputHasSubSpec = false; } else if (in(State::IN_OPTIONS)) { push(State::IN_OPTION); @@ -602,6 +615,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_INPUT_DESCRIPTION_REF); } else if 
(in(State::IN_INPUT) && strncmp(str, "subspecRef", length) == 0) { push(State::IN_INPUT_SUBSPEC_REF); + } else if (in(State::IN_INPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_INPUT_ENABLED); } else if (in(State::IN_INPUT) && strncmp(str, "matcher", length) == 0) { // the outermost matcher is starting here // we create a placeholder which is being updated later @@ -664,6 +679,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, push(State::IN_OUTPUT_LIFETIME); } else if (in(State::IN_OUTPUT) && strncmp(str, "metadata", length) == 0) { push(State::IN_OUTPUT_OPTIONS); + } else if (in(State::IN_OUTPUT) && strncmp(str, "enabled", length) == 0) { + push(State::IN_OUTPUT_ENABLED); } else if (in(State::IN_DATAPROCESSOR) && strncmp(str, "name", length) == 0) { push(State::IN_DATAPROCESSOR_NAME); } else if (in(State::IN_DATAPROCESSOR) && strncmp(str, "ranks", length) == 0) { @@ -734,6 +751,8 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, metadata.back().executable = s; } else if (in(State::IN_INPUT_BINDING)) { binding = s; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (s == "true"); } else if (in(State::IN_INPUT_ORIGIN)) { origin.runtimeInit(s.c_str(), std::min(s.size(), 4UL)); std::string v(s.c_str(), std::min(s.size(), 4UL)); @@ -841,6 +860,10 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, dataProcessors.back().inputTimeSliceId = i; } else if (in(State::IN_DATAPROCESSOR_MAX_TIMESLICES)) { dataProcessors.back().maxInputTimeslices = i; + } else if (in(State::IN_INPUT_ENABLED)) { + enabled = (i == 1); + } else if (in(State::IN_OUTPUT_ENABLED)) { + enabled = (i == 1); } pop(); return true; @@ -915,6 +938,9 @@ struct WorkflowImporter : public rapidjson::BaseReaderHandler, header::DataDescription description; size_t subspec; size_t ref; + // Keep track of the enabled state of the input/output + // Unless specified, inputs are enabled by default. 
+ bool enabled = true; Lifetime lifetime; std::string metadatumKey; std::string metadatumValue; @@ -1103,6 +1129,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)input.lifetime); + w.Key("enabled"); + w.Uint((int)input.enabled); if (input.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); @@ -1152,6 +1180,8 @@ void WorkflowSerializationHelpers::dump(std::ostream& out, } w.Key("lifetime"); w.Uint((int)output.lifetime); + w.Key("enabled"); + w.Uint((int)output.enabled); if (output.metadata.empty() == false) { w.Key("metadata"); w.StartArray(); diff --git a/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx b/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx index 49420e4bd99ad..1a2fed675536a 100644 --- a/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx +++ b/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx @@ -169,14 +169,16 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "TST/A2/0", "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -195,7 +197,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -204,7 +207,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -223,7 +227,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -232,7 +237,8 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -251,14 +257,16 @@ TEST_CASE("TestDDS") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "y", "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [], @@ -433,14 +441,16 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "TST/A2/0", "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -459,7 +469,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "A1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -468,7 +479,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -487,7 +499,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "A2", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [ @@ -496,7 +509,8 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "options": [], @@ -515,14 +529,16 @@ TEST_CASE("TestDDSExpendable") "origin": "TST", "description": "B1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 }, { "binding": "y", "origin": "TST", "description": "C1", "subspec": 0, - "lifetime": 0 + "lifetime": 0, + "enabled": 1 } ], "outputs": [], diff --git a/Framework/Core/test/test_WorkflowSerialization.cxx b/Framework/Core/test/test_WorkflowSerialization.cxx index 6e541f7d22f07..e7f39e7e38cbd 100644 --- 
a/Framework/Core/test/test_WorkflowSerialization.cxx +++ b/Framework/Core/test/test_WorkflowSerialization.cxx @@ -120,3 +120,41 @@ TEST_CASE("TestVerifyWildcard") // also check if the conversion to ConcreteDataMatcher is working at import // REQUIRE(std::get_if(&w1[0].inputs[0].matcher) != nullptr);; } + +/// Verify that disabled inputs / outputs are serialised correctly +TEST_CASE("TestDisabled") +{ + using namespace o2::framework; + WorkflowSpec w0{ + DataProcessorSpec{ + .name = "A", + .inputs = {{"clbPayload", "CLP"}, {"clbWrapper", "CLW"}}, + .outputs = {{"CP2", "1"}, {"CL2", "2"}}, + }}; + + w0[0].inputs[0].enabled = false; + w0[0].outputs[1].enabled = false; + + std::vector dataProcessorInfoOut{ + {"A", "test_Framework_test_SerializationWorkflow", {}}, + }; + + CommandInfo commandInfoOut{"o2-dpl-workflow -b --option 1 --option 2"}; + + std::vector dataProcessorInfoIn{}; + CommandInfo commandInfoIn; + + std::ostringstream firstDump; + WorkflowSerializationHelpers::dump(firstDump, w0, dataProcessorInfoOut, commandInfoOut); + std::istringstream is; + is.str(firstDump.str()); + WorkflowSpec w1; + WorkflowSerializationHelpers::import(is, w1, dataProcessorInfoIn, commandInfoIn); + REQUIRE(w1.size() == 1); + REQUIRE(w1[0].inputs.size() == 2); + REQUIRE(w1[0].inputs[0].enabled == false); + REQUIRE(w1[0].inputs[1].enabled == true); + REQUIRE(w1[0].outputs.size() == 2); + REQUIRE(w1[0].outputs[0].enabled == true); + REQUIRE(w1[0].outputs[1].enabled == false); +} From 0ebe458f7b5d5ca58439c06333bff523a8139328 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sun, 4 Feb 2024 15:46:21 +0100 Subject: [PATCH 306/726] DPL: move stuff out of line --- Framework/Core/CMakeLists.txt | 1 + .../Core/include/Framework/AlgorithmSpec.h | 19 ++--------- Framework/Core/src/AlgorithmSpec.cxx | 32 +++++++++++++++++++ 3 files changed, 36 insertions(+), 16 deletions(-) create mode 100644 Framework/Core/src/AlgorithmSpec.cxx diff --git a/Framework/Core/CMakeLists.txt b/Framework/Core/CMakeLists.txt index 25f2e60f402c4..05bcb1bd53083 100644 --- a/Framework/Core/CMakeLists.txt +++ b/Framework/Core/CMakeLists.txt @@ -11,6 +11,7 @@ o2_add_library(Framework SOURCES src/AODReaderHelpers.cxx + src/AlgorithmSpec.cxx src/ArrowSupport.cxx src/ArrowTableSlicingCache.cxx src/AnalysisDataModel.cxx diff --git a/Framework/Core/include/Framework/AlgorithmSpec.h b/Framework/Core/include/Framework/AlgorithmSpec.h index 544e6a4840548..98f7f18e0d24d 100644 --- a/Framework/Core/include/Framework/AlgorithmSpec.h +++ b/Framework/Core/include/Framework/AlgorithmSpec.h @@ -46,22 +46,9 @@ struct AlgorithmSpec { using ErrorCallback = std::function; using InitErrorCallback = std::function; - static AlgorithmSpec dummyAlgorithm() - { - return AlgorithmSpec{ProcessCallback{nullptr}}; - } - - static ErrorCallback& emptyErrorCallback() - { - static ErrorCallback callback = nullptr; - return callback; - } - - static InitErrorCallback& emptyInitErrorCallback() - { - static InitErrorCallback callback = nullptr; - return callback; - } + static AlgorithmSpec dummyAlgorithm(); + static ErrorCallback& emptyErrorCallback(); + static InitErrorCallback& emptyInitErrorCallback(); AlgorithmSpec() = default; diff --git a/Framework/Core/src/AlgorithmSpec.cxx b/Framework/Core/src/AlgorithmSpec.cxx new file mode 100644 index 0000000000000..218083c80947e --- /dev/null +++ b/Framework/Core/src/AlgorithmSpec.cxx @@ -0,0 +1,32 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. 
+// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +#include "Framework/AlgorithmSpec.h" + +namespace o2::framework +{ +AlgorithmSpec AlgorithmSpec::dummyAlgorithm() +{ + return AlgorithmSpec{ProcessCallback{nullptr}}; +} + +AlgorithmSpec::ErrorCallback& AlgorithmSpec::emptyErrorCallback() +{ + static ErrorCallback callback = nullptr; + return callback; +} + +AlgorithmSpec::InitErrorCallback& AlgorithmSpec::emptyInitErrorCallback() +{ + static InitErrorCallback callback = nullptr; + return callback; +} +} // namespace o2::framework From ec23d755b304618b35f3c48fe9094039ab2b9cfd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sun, 4 Feb 2024 15:48:58 +0100 Subject: [PATCH 307/726] DPL Analysis: introduce PersistentColumn concept This way the compiler does not need to instanciate the conjuction type and it will point to exactly which column is not persistent, when needed. --- Framework/Core/include/Framework/ASoA.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Framework/Core/include/Framework/ASoA.h b/Framework/Core/include/Framework/ASoA.h index 4d07fd124a8fe..07493c6db091d 100644 --- a/Framework/Core/include/Framework/ASoA.h +++ b/Framework/Core/include/Framework/ASoA.h @@ -1775,17 +1775,20 @@ void notBoundTable(const char* tableName); namespace row_helpers { +template +concept PersistentColumn = std::is_same_v; + template std::array getArrowColumns(arrow::Table* table, framework::pack) + requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow columns: only persistent columns accepted (not dynamic and not index ones"); return std::array{o2::soa::getIndexFromLabel(table, Cs::columnLabel())...}; } template std::array, sizeof...(Cs)> getChunks(arrow::Table* table, framework::pack, uint64_t ci) + requires(PersistentColumn && ...) { - static_assert(std::conjunction_v, "Arrow chunks: only persistent columns accepted (not dynamic and not index ones"); return std::array, sizeof...(Cs)>{o2::soa::getIndexFromLabel(table, Cs::columnLabel())->chunk(ci)...}; } From 0da3c1ee8a93ffc83bf7917d9d96f2985d21e4f2 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 5 Feb 2024 13:12:27 +0100 Subject: [PATCH 308/726] DPL: reduce helpers for makeHolder / makeHolderTypes C++20 allows simplifying the enumeration. 
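
[Editor's note, not part of the patch: a minimal, self-contained sketch of the C++20 idiom this commit relies on. Names and types here are illustrative only, not the actual HolderTrait/arrow machinery; it shows how an immediately invoked lambda with an explicit template parameter list absorbs the separate *Impl(std::index_sequence<Is...>) helper overloads that the diff below removes.]

// Sketch, assuming only the standard library (C++20).
#include <cstddef>
#include <tuple>
#include <utility>
#include <vector>

template <typename... ARGS>
auto makeVectors()
{
  // Pre-C++20 this needed a separate makeVectorsImpl(std::index_sequence<Is...>)
  // overload; the template lambda carries the index pack directly.
  return []<std::size_t... Is>(std::index_sequence<Is...>) {
    // Is gives each pack element its positional index, as the old Impl helper did.
    return std::tuple(std::vector<ARGS>(Is, ARGS{})...);
  }(std::make_index_sequence<sizeof...(ARGS)>{});
}

int main()
{
  auto t = makeVectors<int, float, double>(); // tuple of three vectors, sizes 0, 1, 2
  return std::get<2>(t).size() == 2 ? 0 : 1;
}
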
--- .../Core/include/Framework/TableBuilder.h | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/Framework/Core/include/Framework/TableBuilder.h b/Framework/Core/include/Framework/TableBuilder.h index 7be20edf2bd93..2b19b43785d70 100644 --- a/Framework/Core/include/Framework/TableBuilder.h +++ b/Framework/Core/include/Framework/TableBuilder.h @@ -641,44 +641,24 @@ auto constexpr to_tuple(T&& object) noexcept } } -template -constexpr auto makeHolderType(framework::pack&&) -{ - return typename HolderTrait>>::Holder{arrow::default_memory_pool()}; -} - -template -auto makeHolder(arrow::MemoryPool* pool, size_t nRows, framework::pack&&) -{ - return typename HolderTrait>>::Holder(pool, nRows); -} - -template -constexpr auto makeHolderTypesImpl(std::index_sequence, framework::pack&& pack) -{ - return std::tuple(makeHolderType(std::forward>(pack))...); -} - -template -auto makeHoldersImpl(arrow::MemoryPool* pool, size_t nRows, std::index_sequence, framework::pack&& pack) -{ - return new std::tuple(makeHolder(pool, nRows, std::forward>(pack))...); -} - template -constexpr auto makeHolderTypes(framework::pack&& pack) +constexpr auto makeHolderTypes() { - return makeHolderTypesImpl(std::make_index_sequence{}, std::forward>(pack)); + return [](std::index_sequence) { + return std::tuple(typename HolderTrait::Holder(arrow::default_memory_pool())...); + }(std::make_index_sequence{}); } template -auto makeHolders(arrow::MemoryPool* pool, size_t nRows, framework::pack&& pack) +auto makeHolders(arrow::MemoryPool* pool, size_t nRows) { - return makeHoldersImpl(pool, nRows, std::make_index_sequence{}, std::forward>(pack)); + return [pool, nRows](std::index_sequence) { + return new std::tuple(typename HolderTrait::Holder(pool, nRows)...); + }(std::make_index_sequence{}); } template -using IndexedHoldersTuple = decltype(makeHolderTypes(framework::pack{})); +using IndexedHoldersTuple = decltype(makeHolderTypes()); /// Helper class which creates a lambda suitable for building /// an arrow table from a tuple. 
This can be used, for example @@ -691,7 +671,7 @@ class TableBuilder using HoldersTuple = typename std::tuple::Holder...>; template - using HoldersTupleIndexed = decltype(makeHolderTypes(framework::pack{})); + using HoldersTupleIndexed = decltype(makeHolderTypes()); /// Get the builders, assumning they were created with a given pack /// of basic types @@ -708,7 +688,7 @@ class TableBuilder { mSchema = std::make_shared(TableBuilderHelpers::makeFields(columnNames)); - mHolders = makeHolders(mMemoryPool, nRows, framework::pack{}); + mHolders = makeHolders(mMemoryPool, nRows); mFinalizer = [](std::vector>& arrays, void* holders) -> bool { return TableBuilderHelpers::finalize(arrays, *(HoldersTupleIndexed*)holders); }; From 4623a2f7f3b1544acd5bc1de060ccbcafa280f58 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 7 Feb 2024 08:54:50 +0100 Subject: [PATCH 309/726] DPL Analysis: avoid extra cursorHelper methods --- .../Core/include/Framework/TableBuilder.h | 23 +++++-------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/Framework/Core/include/Framework/TableBuilder.h b/Framework/Core/include/Framework/TableBuilder.h index 2b19b43785d70..7de45f58d2e62 100644 --- a/Framework/Core/include/Framework/TableBuilder.h +++ b/Framework/Core/include/Framework/TableBuilder.h @@ -795,13 +795,17 @@ class TableBuilder template auto cursor() { - return cursorHelper(typename T::table_t::persistent_columns_t{}); + return [this](pack) { + return this->template persist({Cs::columnLabel()...}); + }(typename T::table_t::persistent_columns_t{}); } template auto cursor() { - return cursorHelper2(typename T::table_t::persistent_columns_t{}); + return [this](pack) { + return this->template persist({Cs::columnLabel()...}); + }(typename T::table_t::persistent_columns_t{}); } template @@ -861,21 +865,6 @@ class TableBuilder std::shared_ptr finalize(); private: - /// Helper which actually creates the insertion cursor. Notice that the - /// template argument T is a o2::soa::Table which contains only the - /// persistent columns. 
- template - auto cursorHelper(framework::pack) - { - return this->template persist({Cs::columnLabel()...}); - } - - template - auto cursorHelper2(framework::pack) - { - return this->template persist({Cs::columnLabel()...}); - } - bool (*mFinalizer)(std::vector>& arrays, void* holders); void (*mDestructor)(void* holders); void* mHolders; From 34bb338e82d72b0ccab05ba5faecda72f06bb220 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:40:53 +0100 Subject: [PATCH 310/726] DPL: use the SendingPolicy for every kind of message sending --- .../Core/include/Framework/ChannelInfo.h | 3 ++ .../include/Framework/DataProcessingHelpers.h | 22 +++++----- .../Core/include/Framework/ForwardRoute.h | 9 ++-- Framework/Core/src/CommonServices.cxx | 10 ++--- Framework/Core/src/DataProcessingDevice.cxx | 22 +++------- Framework/Core/src/DataProcessingHelpers.cxx | 41 ++++++++----------- Framework/Core/src/DeviceSpecHelpers.cxx | 20 +++++---- Framework/Core/src/FairMQDeviceProxy.cxx | 3 +- .../src/DataInspectorService.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 3 +- 10 files changed, 63 insertions(+), 72 deletions(-) diff --git a/Framework/Core/include/Framework/ChannelInfo.h b/Framework/Core/include/Framework/ChannelInfo.h index 0ebc4a8506301..582ef72fec75d 100644 --- a/Framework/Core/include/Framework/ChannelInfo.h +++ b/Framework/Core/include/Framework/ChannelInfo.h @@ -74,6 +74,7 @@ struct OutputChannelInfo { ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct OutputChannelState { @@ -89,6 +90,8 @@ struct ForwardChannelInfo { /// Wether or not it's a DPL internal channel. ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; + SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct ForwardChannelState { diff --git a/Framework/Core/include/Framework/DataProcessingHelpers.h b/Framework/Core/include/Framework/DataProcessingHelpers.h index 35f78f12cc405..7a05ddf25c46c 100644 --- a/Framework/Core/include/Framework/DataProcessingHelpers.h +++ b/Framework/Core/include/Framework/DataProcessingHelpers.h @@ -11,14 +11,16 @@ #ifndef O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ #define O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ -#include "Framework/TimesliceIndex.h" -#include +#include namespace o2::framework { - +struct ServiceRegistryRef; +struct ForwardChannelInfo; +struct ForwardChannelState; +struct OutputChannelInfo; struct OutputChannelSpec; -class FairMQDeviceProxy; +struct OutputChannelState; /// Generic helpers for DataProcessing releated functions. struct DataProcessingHelpers { @@ -26,15 +28,13 @@ struct DataProcessingHelpers { /// @param device the fair::mq::Device which needs to send the EndOfStream message /// @param channel the OutputChannelSpec of the channel which needs to be signaled /// for EndOfStream - static void sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel); + static void sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel); /// @returns true if we did send the oldest possible timeslice message, false otherwise. 
- static bool sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); - static void broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice); - - private: - static void sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); + /// Broadcast the oldest possible timeslice to all channels in output + static void broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/ForwardRoute.h b/Framework/Core/include/Framework/ForwardRoute.h index bae2eaacf1a44..40a76e3a9cce5 100644 --- a/Framework/Core/include/Framework/ForwardRoute.h +++ b/Framework/Core/include/Framework/ForwardRoute.h @@ -15,11 +15,11 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { +struct SendingPolicy; + /// This uniquely identifies a route to be forwarded by the device if /// the InputSpec @a matcher matches an input which should also go to /// @a channel @@ -28,8 +28,9 @@ struct ForwardRoute { size_t maxTimeslices; InputSpec matcher; std::string channel; + // The policy to use to send to on this route. + SendingPolicy const* policy; }; -} // namespace framework } // namespace o2 #endif // FRAMEWORK_FORWARDROUTE_H diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b7fc3410a1931..a0f7fcdc88201 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -588,7 +588,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() timesliceIndex.rescan(); } } - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -598,7 +598,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ctx.services(), info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); @@ -646,7 +646,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( - queue, 
decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + queue, decongestion.oldestPossibleTimesliceTask, [ref = services, oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, @@ -655,7 +655,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", (uint64_t)oldestPossibleOutput.timeslice.value); - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ref, oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -665,7 +665,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 8c914e201a479..941da7bc968c4 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -680,19 +680,11 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, if (forwardedParts[fi].Size() == 0) { continue; } - auto channel = proxy.getForwardChannel(ChannelIndex{fi}); - LOG(debug) << "Forwarding to " << channel->GetName() << " " << fi; + ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi}); + LOG(debug) << "Forwarding to " << info.name << " " << fi; // in DPL we are using subchannel 0 only auto& parts = forwardedParts[fi]; - int timeout = 30000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); - channel->Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t)fair::mq::TransferCode::error) { - LOGP(fatal, "Error while sending on channel {}", channel->GetName()); - } + info.policy->send(parts, ChannelIndex{fi}, registry); } auto& asyncQueue = registry.get(); @@ -713,7 +705,7 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, LOG(debug) << "Skipping channel"; continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestTimeslice.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(registry, info, state, oldestTimeslice.timeslice.value)) { LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestTimeslice.timeslice.value); } } @@ -1678,8 +1670,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) for (auto& channel : spec.outputChannels) { LOGP(detail, "Sending end of stream to {}", channel.name); - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. relayer.clear(); @@ -2460,8 +2451,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (state.streaming == StreamingState::EndOfStreaming) { LOGP(detail, "Broadcasting end of stream"); for (auto& channel : spec.outputChannels) { - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } switchState(StreamingState::Idle); } diff --git a/Framework/Core/src/DataProcessingHelpers.cxx b/Framework/Core/src/DataProcessingHelpers.cxx index d424a53a52bd9..6dc3379d41cf8 100644 --- a/Framework/Core/src/DataProcessingHelpers.cxx +++ b/Framework/Core/src/DataProcessingHelpers.cxx @@ -18,79 +18,74 @@ #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "Framework/Logger.h" +#include "Framework/SendingPolicy.h" +#include "Framework/RawDeviceService.h" #include #include namespace o2::framework { -void DataProcessingHelpers::sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel) +void DataProcessingHelpers::sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel) { + fair::mq::Device* device = ref.get().device(); fair::mq::Parts parts; - fair::mq::MessagePtr payload(device.NewMessage()); + fair::mq::MessagePtr payload(device->NewMessage()); SourceInfoHeader sih; sih.state = InputChannelState::Completed; - auto channelAlloc = o2::pmr::getTransportAllocator(device.GetChannel(channel.name, 0).Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(device->GetChannel(channel.name, 0).Transport()); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. 
parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - device.Send(parts, channel.name, 0); + device->Send(parts, channel.name, 0); LOGP(info, "Sending end-of-stream message to channel {}", channel.name); } -void DataProcessingHelpers::sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice) +void doSendOldestPossibleTimeframe(ServiceRegistryRef ref, fair::mq::TransportFactory *transport, ChannelIndex index, SendingPolicy::SendingCallback const& callback, size_t timeslice) { fair::mq::Parts parts; - fair::mq::MessagePtr payload(channel.Transport()->CreateMessage()); + fair::mq::MessagePtr payload(transport->CreateMessage()); o2::framework::DomainInfoHeader dih; dih.oldestPossibleTimeslice = timeslice; - auto channelAlloc = o2::pmr::getTransportAllocator(channel.Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - auto timeout = 1000; - auto res = channel.Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending oldest possible timeslice after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel.GetName()); - channel.Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel.GetName()); - } - if (res < (size_t)fair::mq::TransferCode::success) { - LOGP(fatal, "Error sending oldest possible timeframe {} on channel {} (code {})", timeslice, channel.GetName(), res); - } + callback(parts, index, ref); } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -void DataProcessingHelpers::broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice) +void DataProcessingHelpers::broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice) { + auto &proxy = ref.get(); for (int ci = 0; ci < proxy.getNumOutputChannels(); ++ci) { auto& info = proxy.getOutputChannelInfo({ci}); auto& state = proxy.getOutputChannelState({ci}); - sendOldestPossibleTimeframe(info, state, timeslice); + sendOldestPossibleTimeframe(ref, info, state, timeslice); } } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..90b4f8c0c5226 100644 --- 
a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -680,19 +680,21 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext if (edge.isForward == false) { OutputRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], - channel.name, - policyPtr, + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = outputsMatchers[edge.outputGlobalIndex], + .channel = channel.name, + .policy = policyPtr, }; device.outputs.emplace_back(route); } else { ForwardRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], - channel.name}; + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex], + .channel = channel.name, + .policy = policyPtr, + }; device.forwards.emplace_back(route); } }; diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index c25a58e47ef2d..bdffddd5a4d1a 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -267,6 +267,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, + .index = channelIndex, }; mOutputChannelInfos.push_back(info); mOutputChannelStates.push_back({0}); @@ -345,7 +346,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto LOGP(fatal, "Expected channel {} not configured.", route.channel); } ChannelAccountingType dplChannel = (route.channel.rfind("from_", 0) == 0) ? ChannelAccountingType::DPL : ChannelAccountingType::RAWFMQ; - mForwardChannelInfos.push_back(ForwardChannelInfo{route.channel, dplChannel, channel->second.at(0)}); + mForwardChannelInfos.push_back(ForwardChannelInfo{.name = route.channel, .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, .index = channelIndex}); mForwardChannelStates.push_back(ForwardChannelState{0}); channelNameToChannel[route.channel] = channelIndex; LOGP(detail, "Binding forward channel {} to channel index {}", route.channel, channelIndex.value); diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..712bf10b6fb2e 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -245,4 +245,4 @@ ServiceSpec* DIServicePlugin::create() } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index de78993ff083e..28ff4d5568da9 100644 --- a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -125,10 +125,9 @@ void Dispatcher::run(ProcessingContext& ctx) if (ctx.inputs().isValid("timer-stats")) { reportStats(ctx.services().get()); } - auto& proxy = ctx.services().get(); auto& relayer = ctx.services().get(); auto timeslice = relayer.getOldestPossibleOutput().timeslice.value; - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, timeslice); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), timeslice); } void Dispatcher::reportStats(Monitoring& monitoring) const From d723ff0539287817f1b0605557bd5dcc0a3fbd8d Mon Sep 
17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:40:53 +0100 Subject: [PATCH 311/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..e8eb0f25914d5 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From 454be2b9ae2dc29e7447dd6380fe7ead4a86afc4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:46:55 +0100 Subject: [PATCH 312/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..e8eb0f25914d5 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From f76051457b59c5134945afa228927eb1e44867bc Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:46:55 +0100 Subject: [PATCH 313/726] DPL: use the SendingPolicy for every kind of message sending --- .../Core/include/Framework/ChannelInfo.h | 3 ++ .../include/Framework/DataProcessingHelpers.h | 22 +++++----- .../Core/include/Framework/ForwardRoute.h | 9 ++-- Framework/Core/src/CommonServices.cxx | 10 ++--- Framework/Core/src/DataProcessingDevice.cxx | 22 +++------- Framework/Core/src/DataProcessingHelpers.cxx | 41 ++++++++----------- Framework/Core/src/DeviceSpecHelpers.cxx | 20 +++++---- Framework/Core/src/FairMQDeviceProxy.cxx | 3 +- .../src/DataInspectorService.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 3 +- 10 files changed, 63 insertions(+), 72 deletions(-) diff --git a/Framework/Core/include/Framework/ChannelInfo.h b/Framework/Core/include/Framework/ChannelInfo.h index 0ebc4a8506301..582ef72fec75d 100644 --- a/Framework/Core/include/Framework/ChannelInfo.h +++ b/Framework/Core/include/Framework/ChannelInfo.h @@ -74,6 +74,7 @@ struct OutputChannelInfo { ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct OutputChannelState { @@ -89,6 +90,8 @@ struct ForwardChannelInfo { /// Wether or not it's a DPL internal channel. ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; + SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct ForwardChannelState { diff --git a/Framework/Core/include/Framework/DataProcessingHelpers.h b/Framework/Core/include/Framework/DataProcessingHelpers.h index 35f78f12cc405..7a05ddf25c46c 100644 --- a/Framework/Core/include/Framework/DataProcessingHelpers.h +++ b/Framework/Core/include/Framework/DataProcessingHelpers.h @@ -11,14 +11,16 @@ #ifndef O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ #define O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ -#include "Framework/TimesliceIndex.h" -#include +#include namespace o2::framework { - +struct ServiceRegistryRef; +struct ForwardChannelInfo; +struct ForwardChannelState; +struct OutputChannelInfo; struct OutputChannelSpec; -class FairMQDeviceProxy; +struct OutputChannelState; /// Generic helpers for DataProcessing releated functions. struct DataProcessingHelpers { @@ -26,15 +28,13 @@ struct DataProcessingHelpers { /// @param device the fair::mq::Device which needs to send the EndOfStream message /// @param channel the OutputChannelSpec of the channel which needs to be signaled /// for EndOfStream - static void sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel); + static void sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel); /// @returns true if we did send the oldest possible timeslice message, false otherwise. 
- static bool sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); - static void broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice); - - private: - static void sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); + /// Broadcast the oldest possible timeslice to all channels in output + static void broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/ForwardRoute.h b/Framework/Core/include/Framework/ForwardRoute.h index bae2eaacf1a44..40a76e3a9cce5 100644 --- a/Framework/Core/include/Framework/ForwardRoute.h +++ b/Framework/Core/include/Framework/ForwardRoute.h @@ -15,11 +15,11 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { +struct SendingPolicy; + /// This uniquely identifies a route to be forwarded by the device if /// the InputSpec @a matcher matches an input which should also go to /// @a channel @@ -28,8 +28,9 @@ struct ForwardRoute { size_t maxTimeslices; InputSpec matcher; std::string channel; + // The policy to use to send to on this route. + SendingPolicy const* policy; }; -} // namespace framework } // namespace o2 #endif // FRAMEWORK_FORWARDROUTE_H diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b7fc3410a1931..a0f7fcdc88201 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -588,7 +588,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() timesliceIndex.rescan(); } } - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -598,7 +598,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ctx.services(), info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); @@ -646,7 +646,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( - queue, 
decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + queue, decongestion.oldestPossibleTimesliceTask, [ref = services, oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, @@ -655,7 +655,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", (uint64_t)oldestPossibleOutput.timeslice.value); - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ref, oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -665,7 +665,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 8c914e201a479..0305946116b9b 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -680,19 +680,11 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, if (forwardedParts[fi].Size() == 0) { continue; } - auto channel = proxy.getForwardChannel(ChannelIndex{fi}); - LOG(debug) << "Forwarding to " << channel->GetName() << " " << fi; + ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi}); + LOG(debug) << "Forwarding to " << info.name << " " << fi; // in DPL we are using subchannel 0 only auto& parts = forwardedParts[fi]; - int timeout = 30000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); - channel->Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t)fair::mq::TransferCode::error) { - LOGP(fatal, "Error while sending on channel {}", channel->GetName()); - } + info.policy->send(parts, ChannelIndex{fi}, registry); } auto& asyncQueue = registry.get(); @@ -713,7 +705,7 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, LOG(debug) << "Skipping channel"; continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestTimeslice.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(registry, info, state, oldestTimeslice.timeslice.value)) { LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestTimeslice.timeslice.value); } } @@ -1678,8 +1670,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) for (auto& channel : spec.outputChannels) { LOGP(detail, "Sending end of stream to {}", channel.name); - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. relayer.clear(); @@ -2460,8 +2451,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (state.streaming == StreamingState::EndOfStreaming) { LOGP(detail, "Broadcasting end of stream"); for (auto& channel : spec.outputChannels) { - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } switchState(StreamingState::Idle); } diff --git a/Framework/Core/src/DataProcessingHelpers.cxx b/Framework/Core/src/DataProcessingHelpers.cxx index d424a53a52bd9..6dc3379d41cf8 100644 --- a/Framework/Core/src/DataProcessingHelpers.cxx +++ b/Framework/Core/src/DataProcessingHelpers.cxx @@ -18,79 +18,74 @@ #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "Framework/Logger.h" +#include "Framework/SendingPolicy.h" +#include "Framework/RawDeviceService.h" #include #include namespace o2::framework { -void DataProcessingHelpers::sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel) +void DataProcessingHelpers::sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel) { + fair::mq::Device* device = ref.get().device(); fair::mq::Parts parts; - fair::mq::MessagePtr payload(device.NewMessage()); + fair::mq::MessagePtr payload(device->NewMessage()); SourceInfoHeader sih; sih.state = InputChannelState::Completed; - auto channelAlloc = o2::pmr::getTransportAllocator(device.GetChannel(channel.name, 0).Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(device->GetChannel(channel.name, 0).Transport()); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. 
parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - device.Send(parts, channel.name, 0); + device->Send(parts, channel.name, 0); LOGP(info, "Sending end-of-stream message to channel {}", channel.name); } -void DataProcessingHelpers::sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice) +void doSendOldestPossibleTimeframe(ServiceRegistryRef ref, fair::mq::TransportFactory *transport, ChannelIndex index, SendingPolicy::SendingCallback const& callback, size_t timeslice) { fair::mq::Parts parts; - fair::mq::MessagePtr payload(channel.Transport()->CreateMessage()); + fair::mq::MessagePtr payload(transport->CreateMessage()); o2::framework::DomainInfoHeader dih; dih.oldestPossibleTimeslice = timeslice; - auto channelAlloc = o2::pmr::getTransportAllocator(channel.Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - auto timeout = 1000; - auto res = channel.Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending oldest possible timeslice after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel.GetName()); - channel.Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel.GetName()); - } - if (res < (size_t)fair::mq::TransferCode::success) { - LOGP(fatal, "Error sending oldest possible timeframe {} on channel {} (code {})", timeslice, channel.GetName(), res); - } + callback(parts, index, ref); } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -void DataProcessingHelpers::broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice) +void DataProcessingHelpers::broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice) { + auto &proxy = ref.get(); for (int ci = 0; ci < proxy.getNumOutputChannels(); ++ci) { auto& info = proxy.getOutputChannelInfo({ci}); auto& state = proxy.getOutputChannelState({ci}); - sendOldestPossibleTimeframe(info, state, timeslice); + sendOldestPossibleTimeframe(ref, info, state, timeslice); } } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..90b4f8c0c5226 100644 --- 
a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -680,19 +680,21 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext if (edge.isForward == false) { OutputRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], - channel.name, - policyPtr, + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = outputsMatchers[edge.outputGlobalIndex], + .channel = channel.name, + .policy = policyPtr, }; device.outputs.emplace_back(route); } else { ForwardRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], - channel.name}; + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex], + .channel = channel.name, + .policy = policyPtr, + }; device.forwards.emplace_back(route); } }; diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index c25a58e47ef2d..bdffddd5a4d1a 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -267,6 +267,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, + .index = channelIndex, }; mOutputChannelInfos.push_back(info); mOutputChannelStates.push_back({0}); @@ -345,7 +346,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto LOGP(fatal, "Expected channel {} not configured.", route.channel); } ChannelAccountingType dplChannel = (route.channel.rfind("from_", 0) == 0) ? ChannelAccountingType::DPL : ChannelAccountingType::RAWFMQ; - mForwardChannelInfos.push_back(ForwardChannelInfo{route.channel, dplChannel, channel->second.at(0)}); + mForwardChannelInfos.push_back(ForwardChannelInfo{.name = route.channel, .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, .index = channelIndex}); mForwardChannelStates.push_back(ForwardChannelState{0}); channelNameToChannel[route.channel] = channelIndex; LOGP(detail, "Binding forward channel {} to channel index {}", route.channel, channelIndex.value); diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..712bf10b6fb2e 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -245,4 +245,4 @@ ServiceSpec* DIServicePlugin::create() } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index de78993ff083e..28ff4d5568da9 100644 --- a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -125,10 +125,9 @@ void Dispatcher::run(ProcessingContext& ctx) if (ctx.inputs().isValid("timer-stats")) { reportStats(ctx.services().get()); } - auto& proxy = ctx.services().get(); auto& relayer = ctx.services().get(); auto timeslice = relayer.getOldestPossibleOutput().timeslice.value; - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, timeslice); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), timeslice); } void Dispatcher::reportStats(Monitoring& monitoring) const From 0e96e7b0b4f2323e1787307fe6db298a5733ba56 Mon Sep 
17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:49:06 +0100 Subject: [PATCH 314/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..21e60dbcfa32e 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -29,7 +29,7 @@ std::vector SendingPolicy::createDefaultPolicies() { return {SendingPolicy{ .name = "dispatcher", - .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { if (source.name == "Dispatcher") { return true; } @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From 87ac28589a34ac8bb68ca86c7269d1b93ea9863f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:57:26 +0100 Subject: [PATCH 315/726] DPL: use the SendingPolicy for every kind of message sending --- .../Core/include/Framework/ChannelInfo.h | 3 ++ .../include/Framework/DataProcessingHelpers.h | 22 +++++----- .../Core/include/Framework/ForwardRoute.h | 9 ++-- Framework/Core/src/CommonServices.cxx | 10 ++--- Framework/Core/src/DataProcessingDevice.cxx | 22 +++------- Framework/Core/src/DataProcessingHelpers.cxx | 41 ++++++++----------- Framework/Core/src/DeviceSpecHelpers.cxx | 20 +++++---- Framework/Core/src/FairMQDeviceProxy.cxx | 3 +- .../src/DataInspectorService.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 3 +- 10 files changed, 63 insertions(+), 72 deletions(-) diff --git a/Framework/Core/include/Framework/ChannelInfo.h b/Framework/Core/include/Framework/ChannelInfo.h index 0ebc4a8506301..582ef72fec75d 100644 --- a/Framework/Core/include/Framework/ChannelInfo.h +++ b/Framework/Core/include/Framework/ChannelInfo.h @@ -74,6 +74,7 @@ struct OutputChannelInfo { ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct OutputChannelState { @@ -89,6 +90,8 @@ struct ForwardChannelInfo { /// Wether or not it's a DPL internal channel. ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; + SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct ForwardChannelState { diff --git a/Framework/Core/include/Framework/DataProcessingHelpers.h b/Framework/Core/include/Framework/DataProcessingHelpers.h index 35f78f12cc405..7a05ddf25c46c 100644 --- a/Framework/Core/include/Framework/DataProcessingHelpers.h +++ b/Framework/Core/include/Framework/DataProcessingHelpers.h @@ -11,14 +11,16 @@ #ifndef O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ #define O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ -#include "Framework/TimesliceIndex.h" -#include +#include namespace o2::framework { - +struct ServiceRegistryRef; +struct ForwardChannelInfo; +struct ForwardChannelState; +struct OutputChannelInfo; struct OutputChannelSpec; -class FairMQDeviceProxy; +struct OutputChannelState; /// Generic helpers for DataProcessing releated functions. struct DataProcessingHelpers { @@ -26,15 +28,13 @@ struct DataProcessingHelpers { /// @param device the fair::mq::Device which needs to send the EndOfStream message /// @param channel the OutputChannelSpec of the channel which needs to be signaled /// for EndOfStream - static void sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel); + static void sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel); /// @returns true if we did send the oldest possible timeslice message, false otherwise. 
- static bool sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); - static void broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice); - - private: - static void sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); + /// Broadcast the oldest possible timeslice to all channels in output + static void broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/ForwardRoute.h b/Framework/Core/include/Framework/ForwardRoute.h index bae2eaacf1a44..40a76e3a9cce5 100644 --- a/Framework/Core/include/Framework/ForwardRoute.h +++ b/Framework/Core/include/Framework/ForwardRoute.h @@ -15,11 +15,11 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { +struct SendingPolicy; + /// This uniquely identifies a route to be forwarded by the device if /// the InputSpec @a matcher matches an input which should also go to /// @a channel @@ -28,8 +28,9 @@ struct ForwardRoute { size_t maxTimeslices; InputSpec matcher; std::string channel; + // The policy to use to send to on this route. + SendingPolicy const* policy; }; -} // namespace framework } // namespace o2 #endif // FRAMEWORK_FORWARDROUTE_H diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b7fc3410a1931..a0f7fcdc88201 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -588,7 +588,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() timesliceIndex.rescan(); } } - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -598,7 +598,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ctx.services(), info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); @@ -646,7 +646,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( - queue, 
decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + queue, decongestion.oldestPossibleTimesliceTask, [ref = services, oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, @@ -655,7 +655,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", (uint64_t)oldestPossibleOutput.timeslice.value); - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ref, oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -665,7 +665,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 8c914e201a479..0305946116b9b 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -680,19 +680,11 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, if (forwardedParts[fi].Size() == 0) { continue; } - auto channel = proxy.getForwardChannel(ChannelIndex{fi}); - LOG(debug) << "Forwarding to " << channel->GetName() << " " << fi; + ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi}); + LOG(debug) << "Forwarding to " << info.name << " " << fi; // in DPL we are using subchannel 0 only auto& parts = forwardedParts[fi]; - int timeout = 30000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); - channel->Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t)fair::mq::TransferCode::error) { - LOGP(fatal, "Error while sending on channel {}", channel->GetName()); - } + info.policy->send(parts, ChannelIndex{fi}, registry); } auto& asyncQueue = registry.get(); @@ -713,7 +705,7 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, LOG(debug) << "Skipping channel"; continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestTimeslice.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(registry, info, state, oldestTimeslice.timeslice.value)) { LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestTimeslice.timeslice.value); } } @@ -1678,8 +1670,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) for (auto& channel : spec.outputChannels) { LOGP(detail, "Sending end of stream to {}", channel.name); - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. relayer.clear(); @@ -2460,8 +2451,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (state.streaming == StreamingState::EndOfStreaming) { LOGP(detail, "Broadcasting end of stream"); for (auto& channel : spec.outputChannels) { - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } switchState(StreamingState::Idle); } diff --git a/Framework/Core/src/DataProcessingHelpers.cxx b/Framework/Core/src/DataProcessingHelpers.cxx index d424a53a52bd9..d3b6f9d0a99fa 100644 --- a/Framework/Core/src/DataProcessingHelpers.cxx +++ b/Framework/Core/src/DataProcessingHelpers.cxx @@ -18,79 +18,74 @@ #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "Framework/Logger.h" +#include "Framework/SendingPolicy.h" +#include "Framework/RawDeviceService.h" #include #include namespace o2::framework { -void DataProcessingHelpers::sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel) +void DataProcessingHelpers::sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel) { + fair::mq::Device* device = ref.get().device(); fair::mq::Parts parts; - fair::mq::MessagePtr payload(device.NewMessage()); + fair::mq::MessagePtr payload(device->NewMessage()); SourceInfoHeader sih; sih.state = InputChannelState::Completed; - auto channelAlloc = o2::pmr::getTransportAllocator(device.GetChannel(channel.name, 0).Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(device->GetChannel(channel.name, 0).Transport()); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. 
parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - device.Send(parts, channel.name, 0); + device->Send(parts, channel.name, 0); LOGP(info, "Sending end-of-stream message to channel {}", channel.name); } -void DataProcessingHelpers::sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice) +void doSendOldestPossibleTimeframe(ServiceRegistryRef ref, fair::mq::TransportFactory* transport, ChannelIndex index, SendingPolicy::SendingCallback const& callback, size_t timeslice) { fair::mq::Parts parts; - fair::mq::MessagePtr payload(channel.Transport()->CreateMessage()); + fair::mq::MessagePtr payload(transport->CreateMessage()); o2::framework::DomainInfoHeader dih; dih.oldestPossibleTimeslice = timeslice; - auto channelAlloc = o2::pmr::getTransportAllocator(channel.Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - auto timeout = 1000; - auto res = channel.Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending oldest possible timeslice after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel.GetName()); - channel.Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel.GetName()); - } - if (res < (size_t)fair::mq::TransferCode::success) { - LOGP(fatal, "Error sending oldest possible timeframe {} on channel {} (code {})", timeslice, channel.GetName(), res); - } + callback(parts, index, ref); } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -void DataProcessingHelpers::broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice) +void DataProcessingHelpers::broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice) { + auto& proxy = ref.get(); for (int ci = 0; ci < proxy.getNumOutputChannels(); ++ci) { auto& info = proxy.getOutputChannelInfo({ci}); auto& state = proxy.getOutputChannelState({ci}); - sendOldestPossibleTimeframe(info, state, timeslice); + sendOldestPossibleTimeframe(ref, info, state, timeslice); } } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..90b4f8c0c5226 100644 --- 
a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -680,19 +680,21 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext if (edge.isForward == false) { OutputRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], - channel.name, - policyPtr, + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = outputsMatchers[edge.outputGlobalIndex], + .channel = channel.name, + .policy = policyPtr, }; device.outputs.emplace_back(route); } else { ForwardRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], - channel.name}; + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex], + .channel = channel.name, + .policy = policyPtr, + }; device.forwards.emplace_back(route); } }; diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index c25a58e47ef2d..bdffddd5a4d1a 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -267,6 +267,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, + .index = channelIndex, }; mOutputChannelInfos.push_back(info); mOutputChannelStates.push_back({0}); @@ -345,7 +346,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto LOGP(fatal, "Expected channel {} not configured.", route.channel); } ChannelAccountingType dplChannel = (route.channel.rfind("from_", 0) == 0) ? ChannelAccountingType::DPL : ChannelAccountingType::RAWFMQ; - mForwardChannelInfos.push_back(ForwardChannelInfo{route.channel, dplChannel, channel->second.at(0)}); + mForwardChannelInfos.push_back(ForwardChannelInfo{.name = route.channel, .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, .index = channelIndex}); mForwardChannelStates.push_back(ForwardChannelState{0}); channelNameToChannel[route.channel] = channelIndex; LOGP(detail, "Binding forward channel {} to channel index {}", route.channel, channelIndex.value); diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..712bf10b6fb2e 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -245,4 +245,4 @@ ServiceSpec* DIServicePlugin::create() } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index de78993ff083e..28ff4d5568da9 100644 --- a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -125,10 +125,9 @@ void Dispatcher::run(ProcessingContext& ctx) if (ctx.inputs().isValid("timer-stats")) { reportStats(ctx.services().get()); } - auto& proxy = ctx.services().get(); auto& relayer = ctx.services().get(); auto timeslice = relayer.getOldestPossibleOutput().timeslice.value; - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, timeslice); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), timeslice); } void Dispatcher::reportStats(Monitoring& monitoring) const From c614f05a69948d22c9b14e938f60d717fe6b1e1b Mon Sep 
17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 8 Feb 2024 13:57:26 +0100 Subject: [PATCH 316/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..21e60dbcfa32e 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -29,7 +29,7 @@ std::vector SendingPolicy::createDefaultPolicies() { return {SendingPolicy{ .name = "dispatcher", - .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { if (source.name == "Dispatcher") { return true; } @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From b868bb65bc115e7cbcf2e12a2822568c686b1a47 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 9 Feb 2024 21:01:27 +0100 Subject: [PATCH 317/726] DPL: use the SendingPolicy for every kind of message sending --- .../Core/include/Framework/ChannelInfo.h | 4 ++ .../include/Framework/DataProcessingHelpers.h | 22 +++--- Framework/Core/include/Framework/DriverInfo.h | 3 + .../Core/include/Framework/ForwardRoute.h | 9 +-- .../Core/include/Framework/SendingPolicy.h | 8 +++ Framework/Core/src/CommonServices.cxx | 10 +-- Framework/Core/src/DataProcessingDevice.cxx | 22 ++---- Framework/Core/src/DataProcessingHelpers.cxx | 41 +++++------ Framework/Core/src/DeviceSpecHelpers.cxx | 35 ++++++--- Framework/Core/src/DeviceSpecHelpers.h | 5 +- Framework/Core/src/FairMQDeviceProxy.cxx | 3 +- Framework/Core/src/SendingPolicy.cxx | 72 +++++++++++++++++++ Framework/Core/src/runDataProcessing.cxx | 3 + Framework/Core/test/test_DeviceSpec.cxx | 6 +- .../src/DataInspectorService.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 3 +- 16 files changed, 171 insertions(+), 77 deletions(-) diff --git a/Framework/Core/include/Framework/ChannelInfo.h b/Framework/Core/include/Framework/ChannelInfo.h index 0ebc4a8506301..1f4bd8bc9e90e 100644 --- a/Framework/Core/include/Framework/ChannelInfo.h +++ b/Framework/Core/include/Framework/ChannelInfo.h @@ -67,6 +67,7 @@ struct InputChannelInfo { }; struct SendingPolicy; +struct ForwardingPolicy; /// Output channel information struct OutputChannelInfo { @@ -74,6 +75,7 @@ struct OutputChannelInfo { ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct OutputChannelState { @@ -89,6 +91,8 @@ struct ForwardChannelInfo { /// Wether or not it's a DPL internal channel. ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; + ForwardingPolicy const* policy; + ChannelIndex index = {-1}; }; struct ForwardChannelState { diff --git a/Framework/Core/include/Framework/DataProcessingHelpers.h b/Framework/Core/include/Framework/DataProcessingHelpers.h index 35f78f12cc405..7a05ddf25c46c 100644 --- a/Framework/Core/include/Framework/DataProcessingHelpers.h +++ b/Framework/Core/include/Framework/DataProcessingHelpers.h @@ -11,14 +11,16 @@ #ifndef O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ #define O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ -#include "Framework/TimesliceIndex.h" -#include +#include namespace o2::framework { - +struct ServiceRegistryRef; +struct ForwardChannelInfo; +struct ForwardChannelState; +struct OutputChannelInfo; struct OutputChannelSpec; -class FairMQDeviceProxy; +struct OutputChannelState; /// Generic helpers for DataProcessing releated functions. 
struct DataProcessingHelpers { @@ -26,15 +28,13 @@ struct DataProcessingHelpers { /// @param device the fair::mq::Device which needs to send the EndOfStream message /// @param channel the OutputChannelSpec of the channel which needs to be signaled /// for EndOfStream - static void sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel); + static void sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); - static void broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice); - - private: - static void sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); + /// Broadcast the oldest possible timeslice to all channels in output + static void broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DriverInfo.h b/Framework/Core/include/Framework/DriverInfo.h index 043a689087325..45c4c85a32d01 100644 --- a/Framework/Core/include/Framework/DriverInfo.h +++ b/Framework/Core/include/Framework/DriverInfo.h @@ -111,6 +111,9 @@ struct DriverInfo { /// These are the policies which can be applied to decide how /// we send data. std::vector sendingPolicies; + /// These are the policies which can be applied to decide how + /// we forward data. + std::vector forwardingPolicies; /// The argc with which the driver was started. int argc; /// The argv with which the driver was started. diff --git a/Framework/Core/include/Framework/ForwardRoute.h b/Framework/Core/include/Framework/ForwardRoute.h index bae2eaacf1a44..0e3d4d12da24a 100644 --- a/Framework/Core/include/Framework/ForwardRoute.h +++ b/Framework/Core/include/Framework/ForwardRoute.h @@ -15,11 +15,11 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { +struct ForwardingPolicy; + /// This uniquely identifies a route to be forwarded by the device if /// the InputSpec @a matcher matches an input which should also go to /// @a channel @@ -28,8 +28,9 @@ struct ForwardRoute { size_t maxTimeslices; InputSpec matcher; std::string channel; + // The policy to use to send to on this route. 
+ ForwardingPolicy const* policy; }; -} // namespace framework } // namespace o2 #endif // FRAMEWORK_FORWARDROUTE_H diff --git a/Framework/Core/include/Framework/SendingPolicy.h b/Framework/Core/include/Framework/SendingPolicy.h index 1d68412d46db8..2f9472d1d8607 100644 --- a/Framework/Core/include/Framework/SendingPolicy.h +++ b/Framework/Core/include/Framework/SendingPolicy.h @@ -32,6 +32,14 @@ struct SendingPolicy { static std::vector createDefaultPolicies(); }; +struct ForwardingPolicy { + using ForwardingCallback = std::function; + std::string name = "invalid"; + EdgeMatcher matcher = nullptr; + ForwardingCallback forward = nullptr; + static std::vector createDefaultPolicies(); +}; + } // namespace o2::framework #endif // O2_FRAMEWORK_SENDINGPOLICY_H_ diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b7fc3410a1931..a0f7fcdc88201 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -588,7 +588,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() timesliceIndex.rescan(); } } - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -598,7 +598,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ctx.services(), info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); @@ -646,7 +646,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( - queue, decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + queue, decongestion.oldestPossibleTimesliceTask, [ref = services, oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, @@ -655,7 +655,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", (uint64_t)oldestPossibleOutput.timeslice.value); - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ref, oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { 
auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -665,7 +665,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 8c914e201a479..3bcb8e4c8ef89 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -680,19 +680,11 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, if (forwardedParts[fi].Size() == 0) { continue; } - auto channel = proxy.getForwardChannel(ChannelIndex{fi}); - LOG(debug) << "Forwarding to " << channel->GetName() << " " << fi; + ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi}); + LOG(debug) << "Forwarding to " << info.name << " " << fi; // in DPL we are using subchannel 0 only auto& parts = forwardedParts[fi]; - int timeout = 30000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); - channel->Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t)fair::mq::TransferCode::error) { - LOGP(fatal, "Error while sending on channel {}", channel->GetName()); - } + info.policy->forward(parts, ChannelIndex{fi}, registry); } auto& asyncQueue = registry.get(); @@ -713,7 +705,7 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, LOG(debug) << "Skipping channel"; continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestTimeslice.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(registry, info, state, oldestTimeslice.timeslice.value)) { LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestTimeslice.timeslice.value); } } @@ -1678,8 +1670,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) for (auto& channel : spec.outputChannels) { LOGP(detail, "Sending end of stream to {}", channel.name); - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. 
relayer.clear(); @@ -2460,8 +2451,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (state.streaming == StreamingState::EndOfStreaming) { LOGP(detail, "Broadcasting end of stream"); for (auto& channel : spec.outputChannels) { - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } switchState(StreamingState::Idle); } diff --git a/Framework/Core/src/DataProcessingHelpers.cxx b/Framework/Core/src/DataProcessingHelpers.cxx index d424a53a52bd9..58fda5a13bae9 100644 --- a/Framework/Core/src/DataProcessingHelpers.cxx +++ b/Framework/Core/src/DataProcessingHelpers.cxx @@ -18,79 +18,74 @@ #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "Framework/Logger.h" +#include "Framework/SendingPolicy.h" +#include "Framework/RawDeviceService.h" #include #include namespace o2::framework { -void DataProcessingHelpers::sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel) +void DataProcessingHelpers::sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel) { + fair::mq::Device* device = ref.get().device(); fair::mq::Parts parts; - fair::mq::MessagePtr payload(device.NewMessage()); + fair::mq::MessagePtr payload(device->NewMessage()); SourceInfoHeader sih; sih.state = InputChannelState::Completed; - auto channelAlloc = o2::pmr::getTransportAllocator(device.GetChannel(channel.name, 0).Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(device->GetChannel(channel.name, 0).Transport()); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - device.Send(parts, channel.name, 0); + device->Send(parts, channel.name, 0); LOGP(info, "Sending end-of-stream message to channel {}", channel.name); } -void DataProcessingHelpers::sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice) +void doSendOldestPossibleTimeframe(ServiceRegistryRef ref, fair::mq::TransportFactory* transport, ChannelIndex index, SendingPolicy::SendingCallback const& callback, size_t timeslice) { fair::mq::Parts parts; - fair::mq::MessagePtr payload(channel.Transport()->CreateMessage()); + fair::mq::MessagePtr payload(transport->CreateMessage()); o2::framework::DomainInfoHeader dih; dih.oldestPossibleTimeslice = timeslice; - auto channelAlloc = o2::pmr::getTransportAllocator(channel.Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - auto timeout = 1000; - auto res = channel.Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending oldest possible timeslice after {}s. 
Downstream backpressure detected on {}.", timeout / 1000, channel.GetName()); - channel.Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel.GetName()); - } - if (res < (size_t)fair::mq::TransferCode::success) { - LOGP(fatal, "Error sending oldest possible timeframe {} on channel {} (code {})", timeslice, channel.GetName(), res); - } + callback(parts, index, ref); } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->forward, timeslice); state.oldestForChannel = {timeslice}; return true; } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -void DataProcessingHelpers::broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice) +void DataProcessingHelpers::broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice) { + auto& proxy = ref.get(); for (int ci = 0; ci < proxy.getNumOutputChannels(); ++ci) { auto& info = proxy.getOutputChannelInfo({ci}); auto& state = proxy.getOutputChannelState({ci}); - sendOldestPossibleTimeframe(info, state, timeslice); + sendOldestPossibleTimeframe(ref, info, state, timeslice); } } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..e9df320fe8b95 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -493,6 +493,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext const std::vector& outputsMatchers, const std::vector& channelPolicies, const std::vector& sendingPolicies, + const std::vector& forwardingPolicies, std::string const& channelPrefix, ComputingOffer const& defaultOffer, OverrideServiceSpecs const& overrideServices) @@ -656,7 +657,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext // whether this is a real OutputRoute or if it's a forward from // a previous consumer device. // FIXME: where do I find the InputSpec for the forward? 
- auto appendOutputRouteToSourceDeviceChannel = [&outputsMatchers, &workflow, &devices, &logicalEdges, &sendingPolicies, &configContext]( + auto appendOutputRouteToSourceDeviceChannel = [&outputsMatchers, &workflow, &devices, &logicalEdges, &sendingPolicies, &forwardingPolicies, &configContext]( size_t ei, size_t di, size_t ci) { assert(ei < logicalEdges.size()); assert(di < devices.size()); @@ -670,29 +671,40 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(edge.outputGlobalIndex < outputsMatchers.size()); // Iterate over all the policies and apply the first one that matches. SendingPolicy const* policyPtr = nullptr; + ForwardingPolicy const* forwardPolicyPtr = nullptr; for (auto& policy : sendingPolicies) { if (policy.matcher(producer, consumer, configContext)) { policyPtr = &policy; break; } } + assert(forwardingPolicies.empty() == false); + for (auto& policy : forwardingPolicies) { + if (policy.matcher(producer, consumer, configContext)) { + forwardPolicyPtr = &policy; + break; + } + } assert(policyPtr != nullptr); + assert(forwardPolicyPtr != nullptr); if (edge.isForward == false) { OutputRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], - channel.name, - policyPtr, + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = outputsMatchers[edge.outputGlobalIndex], + .channel = channel.name, + .policy = policyPtr, }; device.outputs.emplace_back(route); } else { ForwardRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], - channel.name}; + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex], + .channel = channel.name, + .policy = forwardPolicyPtr, + }; device.forwards.emplace_back(route); } }; @@ -1051,6 +1063,7 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf std::vector const& resourcePolicies, std::vector const& callbacksPolicies, std::vector const& sendingPolicies, + std::vector const& forwardingPolicies, std::vector& devices, ResourceManager& resourceManager, std::string const& uniqueWorkflowId, @@ -1111,7 +1124,7 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf defaultOffer.memory /= deviceCount + 1; processOutEdgeActions(configContext, devices, deviceIndex, connections, resourceManager, outEdgeIndex, logicalEdges, - outActions, workflow, outputs, channelPolicies, sendingPolicies, channelPrefix, defaultOffer, overrideServices); + outActions, workflow, outputs, channelPolicies, sendingPolicies, forwardingPolicies, channelPrefix, defaultOffer, overrideServices); // FIXME: is this not the case??? 
std::sort(connections.begin(), connections.end()); diff --git a/Framework/Core/src/DeviceSpecHelpers.h b/Framework/Core/src/DeviceSpecHelpers.h index 1fb1866ae2dc3..17601c81342c1 100644 --- a/Framework/Core/src/DeviceSpecHelpers.h +++ b/Framework/Core/src/DeviceSpecHelpers.h @@ -53,6 +53,7 @@ struct DeviceSpecHelpers { std::vector const& resourcePolicies, std::vector const& callbacksPolicies, std::vector const& sendingPolicy, + std::vector const& forwardingPolicies, std::vector& devices, ResourceManager& resourceManager, std::string const& uniqueWorkflowId, @@ -80,9 +81,10 @@ struct DeviceSpecHelpers { std::vector dispatchPolicies = DispatchPolicy::createDefaultPolicies(); std::vector resourcePolicies = ResourcePolicy::createDefaultPolicies(); std::vector sendingPolicies = SendingPolicy::createDefaultPolicies(); + std::vector forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); dataProcessorSpecs2DeviceSpecs(workflow, channelPolicies, completionPolicies, dispatchPolicies, resourcePolicies, callbacksPolicies, - sendingPolicies, devices, + sendingPolicies, forwardingPolicies, devices, resourceManager, uniqueWorkflowId, configContext, optimizeTopology, resourcesMonitoringInterval, channelPrefix, overrideServices); } @@ -150,6 +152,7 @@ struct DeviceSpecHelpers { const std::vector& outputs, std::vector const& channelPolicies, std::vector const& sendingPolicies, + std::vector const& forwardingPolicies, std::string const& channelPrefix, ComputingOffer const& defaultOffer, OverrideServiceSpecs const& overrideServices = {}); diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index c25a58e47ef2d..bdffddd5a4d1a 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -267,6 +267,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, + .index = channelIndex, }; mOutputChannelInfos.push_back(info); mOutputChannelStates.push_back({0}); @@ -345,7 +346,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto LOGP(fatal, "Expected channel {} not configured.", route.channel); } ChannelAccountingType dplChannel = (route.channel.rfind("from_", 0) == 0) ? 
ChannelAccountingType::DPL : ChannelAccountingType::RAWFMQ; - mForwardChannelInfos.push_back(ForwardChannelInfo{route.channel, dplChannel, channel->second.at(0)}); + mForwardChannelInfos.push_back(ForwardChannelInfo{.name = route.channel, .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, .index = channelIndex}); mForwardChannelStates.push_back(ForwardChannelState{0}); channelNameToChannel[route.channel] = channelIndex; LOGP(detail, "Binding forward channel {} to channel index {}", route.channel, channelIndex.value); diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..7c3ea631d072c 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -119,4 +119,76 @@ std::vector SendingPolicy::createDefaultPolicies() LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}}; } + +std::vector ForwardingPolicy::createDefaultPolicies() +{ + return {ForwardingPolicy{ + .name = "profiling", + .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return getenv("DPL_DEBUG_MESSAGE_SIZE"); }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + int count = 0; + auto& relayer = registry.get(); + for (auto& part : parts) { + auto* dh = o2::header::get(part->GetData()); + if (dh == nullptr) { + // This is a payload. + continue; + } + LOGP(info, "Sent {}/{}/{} for a total of {} bytes", dh->dataOrigin, dh->dataDescription, dh->subSpecification, dh->payloadSize); + count+= dh->payloadSize; + auto* dph = o2::header::get(part->GetData()); + if (dph == nullptr) { + // This is a payload. + continue; + } + auto oldestPossibleOutput = relayer.getOldestPossibleOutput(); + if ((size_t)dph->startTime < oldestPossibleOutput.timeslice.value) { + LOGP(error, "Sent startTime {} while oldestPossibleOutput is {}. This should not be possible.", dph->startTime, oldestPossibleOutput.timeslice.value); + } + } + LOGP(info, "Sent {} parts for a total of {} bytes", parts.Size(), count); + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + channel->Send(parts); + LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + } }}, + ForwardingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, + ForwardingPolicy{ + .name = "default", + .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + channel->Send(parts); + LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + } }}}; +} } // namespace o2::framework diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 9ca46e060b1a2..cfbb616b0d0a5 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1650,6 +1650,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, driverInfo.resourcePolicies, driverInfo.callbacksPolicies, driverInfo.sendingPolicies, + driverInfo.forwardingPolicies, runningWorkflow.devices, *resourceManager, driverInfo.uniqueWorkflowId, @@ -2644,6 +2645,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, { std::vector currentArgs; std::vector plugins; + std::vector forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); for (int ai = 1; ai < argc; ++ai) { currentArgs.emplace_back(argv[ai]); @@ -3001,6 +3003,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, }; DriverInfo driverInfo{ .sendingPolicies = sendingPolicies, + .forwardingPolicies = forwardingPolicies, .callbacksPolicies = callbacksPolicies}; driverInfo.states.reserve(10); driverInfo.sigintRequested = false; diff --git a/Framework/Core/test/test_DeviceSpec.cxx b/Framework/Core/test/test_DeviceSpec.cxx index 99494d5f8ea4f..72b19da542a76 100644 --- a/Framework/Core/test/test_DeviceSpec.cxx +++ b/Framework/Core/test/test_DeviceSpec.cxx @@ -421,6 +421,7 @@ TEST_CASE("TestOutEdgeProcessingHelpers") auto configContext = makeEmptyConfigContext(); auto channelPolicies = ChannelConfigurationPolicy::createDefaultPolicies(*configContext); auto sendingPolicies = SendingPolicy::createDefaultPolicies(); + auto forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); std::vector resources{ComputingResourceHelpers::getLocalhostResource()}; SimpleResourceManager rm(resources); @@ -429,7 +430,7 @@ TEST_CASE("TestOutEdgeProcessingHelpers") defaultOffer.memory = 0.01; DeviceSpecHelpers::processOutEdgeActions(*configContext, devices, deviceIndex, connections, rm, edgeOutIndex, logicalEdges, - actions, workflow, globalOutputs, channelPolicies, sendingPolicies, "", defaultOffer); + actions, workflow, globalOutputs, channelPolicies, sendingPolicies, forwardingPolicies, "", defaultOffer); std::vector expectedDeviceIndex = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {1, 0, 1}, {1, 0, 1}, {1, 1, 2}, {1, 1, 2}, {1, 2, 3}, {1, 2, 3}}; REQUIRE(devices.size() == 4); @@ -713,6 +714,7 @@ TEST_CASE("TestSimpleWildcard") auto configContext = 
makeEmptyConfigContext(); auto channelPolicies = ChannelConfigurationPolicy::createDefaultPolicies(*configContext); auto sendingPolicies = SendingPolicy::createDefaultPolicies(); + auto forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); std::vector devices; std::vector deviceIndex; @@ -751,7 +753,7 @@ TEST_CASE("TestSimpleWildcard") defaultOffer.memory = 0.01; DeviceSpecHelpers::processOutEdgeActions(*configContext, devices, deviceIndex, connections, rm, edgeOutIndex, logicalEdges, - outActions, workflow, globalOutputs, channelPolicies, sendingPolicies, "", defaultOffer); + outActions, workflow, globalOutputs, channelPolicies, sendingPolicies, forwardingPolicies, "", defaultOffer); REQUIRE(devices.size() == 2); ; // Two devices have outputs: A and Timer diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..712bf10b6fb2e 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -245,4 +245,4 @@ ServiceSpec* DIServicePlugin::create() } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index de78993ff083e..28ff4d5568da9 100644 --- a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -125,10 +125,9 @@ void Dispatcher::run(ProcessingContext& ctx) if (ctx.inputs().isValid("timer-stats")) { reportStats(ctx.services().get()); } - auto& proxy = ctx.services().get(); auto& relayer = ctx.services().get(); auto timeslice = relayer.getOldestPossibleOutput().timeslice.value; - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, timeslice); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), timeslice); } void Dispatcher::reportStats(Monitoring& monitoring) const From 29ce029734b3e1d4096821e231628b0434bbbc21 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 9 Feb 2024 21:01:27 +0100 Subject: [PATCH 318/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 7c3ea631d072c..b572fe8b86911 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -29,7 +29,7 @@ std::vector SendingPolicy::createDefaultPolicies() { return {SendingPolicy{ .name = "dispatcher", - .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { if (source.name == "Dispatcher") { return true; } @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex 
channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From eb9aadcf7ff44f8d0dd25c8f2192212cdbf9f3a7 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 00:32:25 +0100 Subject: [PATCH 319/726] DPL: use the SendingPolicy for every kind of message sending --- .../Core/include/Framework/ChannelInfo.h | 4 + .../include/Framework/DataProcessingHelpers.h | 22 +++--- Framework/Core/include/Framework/DriverInfo.h | 3 + .../Core/include/Framework/ForwardRoute.h | 9 ++- .../Core/include/Framework/SendingPolicy.h | 9 +++ Framework/Core/src/CommonServices.cxx | 10 +-- Framework/Core/src/DataProcessingDevice.cxx | 24 +++--- Framework/Core/src/DataProcessingHelpers.cxx | 41 +++++----- Framework/Core/src/DeviceSpecHelpers.cxx | 35 ++++++--- Framework/Core/src/DeviceSpecHelpers.h | 5 +- .../Core/src/ExternalFairMQDeviceProxy.cxx | 9 ++- Framework/Core/src/FairMQDeviceProxy.cxx | 3 +- Framework/Core/src/SendingPolicy.cxx | 77 +++++++++++++++++++ Framework/Core/src/runDataProcessing.cxx | 3 + Framework/Core/test/test_DeviceSpec.cxx | 6 +- .../src/DataInspectorService.cxx | 2 +- Utilities/DataSampling/src/Dispatcher.cxx | 3 +- 17 files changed, 188 insertions(+), 77 deletions(-) diff --git a/Framework/Core/include/Framework/ChannelInfo.h b/Framework/Core/include/Framework/ChannelInfo.h index 0ebc4a8506301..1f4bd8bc9e90e 100644 --- a/Framework/Core/include/Framework/ChannelInfo.h +++ b/Framework/Core/include/Framework/ChannelInfo.h @@ -67,6 +67,7 @@ struct InputChannelInfo { }; struct SendingPolicy; +struct ForwardingPolicy; /// Output channel information struct OutputChannelInfo { @@ -74,6 +75,7 @@ struct OutputChannelInfo { ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct OutputChannelState { @@ -89,6 +91,8 @@ struct ForwardChannelInfo { /// Wether or not it's a DPL internal channel. ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; + ForwardingPolicy const* policy; + ChannelIndex index = {-1}; }; struct ForwardChannelState { diff --git a/Framework/Core/include/Framework/DataProcessingHelpers.h b/Framework/Core/include/Framework/DataProcessingHelpers.h index 35f78f12cc405..7a05ddf25c46c 100644 --- a/Framework/Core/include/Framework/DataProcessingHelpers.h +++ b/Framework/Core/include/Framework/DataProcessingHelpers.h @@ -11,14 +11,16 @@ #ifndef O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ #define O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ -#include "Framework/TimesliceIndex.h" -#include +#include namespace o2::framework { - +struct ServiceRegistryRef; +struct ForwardChannelInfo; +struct ForwardChannelState; +struct OutputChannelInfo; struct OutputChannelSpec; -class FairMQDeviceProxy; +struct OutputChannelState; /// Generic helpers for DataProcessing releated functions. 
struct DataProcessingHelpers { @@ -26,15 +28,13 @@ struct DataProcessingHelpers { /// @param device the fair::mq::Device which needs to send the EndOfStream message /// @param channel the OutputChannelSpec of the channel which needs to be signaled /// for EndOfStream - static void sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel); + static void sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); - static void broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice); - - private: - static void sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); + /// Broadcast the oldest possible timeslice to all channels in output + static void broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DriverInfo.h b/Framework/Core/include/Framework/DriverInfo.h index 043a689087325..45c4c85a32d01 100644 --- a/Framework/Core/include/Framework/DriverInfo.h +++ b/Framework/Core/include/Framework/DriverInfo.h @@ -111,6 +111,9 @@ struct DriverInfo { /// These are the policies which can be applied to decide how /// we send data. std::vector sendingPolicies; + /// These are the policies which can be applied to decide how + /// we forward data. + std::vector forwardingPolicies; /// The argc with which the driver was started. int argc; /// The argv with which the driver was started. diff --git a/Framework/Core/include/Framework/ForwardRoute.h b/Framework/Core/include/Framework/ForwardRoute.h index bae2eaacf1a44..0e3d4d12da24a 100644 --- a/Framework/Core/include/Framework/ForwardRoute.h +++ b/Framework/Core/include/Framework/ForwardRoute.h @@ -15,11 +15,11 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { +struct ForwardingPolicy; + /// This uniquely identifies a route to be forwarded by the device if /// the InputSpec @a matcher matches an input which should also go to /// @a channel @@ -28,8 +28,9 @@ struct ForwardRoute { size_t maxTimeslices; InputSpec matcher; std::string channel; + // The policy to use to send to on this route. 
+ ForwardingPolicy const* policy; }; -} // namespace framework } // namespace o2 #endif // FRAMEWORK_FORWARDROUTE_H diff --git a/Framework/Core/include/Framework/SendingPolicy.h b/Framework/Core/include/Framework/SendingPolicy.h index 1d68412d46db8..7cc5c16e2c923 100644 --- a/Framework/Core/include/Framework/SendingPolicy.h +++ b/Framework/Core/include/Framework/SendingPolicy.h @@ -32,6 +32,15 @@ struct SendingPolicy { static std::vector createDefaultPolicies(); }; +struct ForwardingPolicy { + using ForwardingCallback = std::function; + std::string name = "invalid"; + EdgeMatcher matcher = nullptr; + ForwardingCallback forward = nullptr; + static std::vector createDefaultPolicies(); + static ForwardingPolicy createDefaultForwardingPolicy(); +}; + } // namespace o2::framework #endif // O2_FRAMEWORK_SENDINGPOLICY_H_ diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b7fc3410a1931..a0f7fcdc88201 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -588,7 +588,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() timesliceIndex.rescan(); } } - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -598,7 +598,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ctx.services(), info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); @@ -646,7 +646,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( - queue, decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + queue, decongestion.oldestPossibleTimesliceTask, [ref = services, oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, &decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, @@ -655,7 +655,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", (uint64_t)oldestPossibleOutput.timeslice.value); - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ref, oldestPossibleOutput.timeslice.value); 
for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -665,7 +665,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 8c914e201a479..50934bd59a729 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -680,19 +680,15 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, if (forwardedParts[fi].Size() == 0) { continue; } - auto channel = proxy.getForwardChannel(ChannelIndex{fi}); - LOG(debug) << "Forwarding to " << channel->GetName() << " " << fi; + ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi}); + LOG(debug) << "Forwarding to " << info.name << " " << fi; // in DPL we are using subchannel 0 only auto& parts = forwardedParts[fi]; - int timeout = 30000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); - channel->Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t)fair::mq::TransferCode::error) { - LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + if (info.policy == nullptr) { + LOG(error) << "Forwarding to " << info.name << " " << fi << " has no policy"; + continue; } + info.policy->forward(parts, ChannelIndex{fi}, registry); } auto& asyncQueue = registry.get(); @@ -713,7 +709,7 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, LOG(debug) << "Skipping channel"; continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestTimeslice.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(registry, info, state, oldestTimeslice.timeslice.value)) { LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestTimeslice.timeslice.value); } } @@ -1678,8 +1674,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) for (auto& channel : spec.outputChannels) { LOGP(detail, "Sending end of stream to {}", channel.name); - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. 
relayer.clear(); @@ -2460,8 +2455,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (state.streaming == StreamingState::EndOfStreaming) { LOGP(detail, "Broadcasting end of stream"); for (auto& channel : spec.outputChannels) { - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } switchState(StreamingState::Idle); } diff --git a/Framework/Core/src/DataProcessingHelpers.cxx b/Framework/Core/src/DataProcessingHelpers.cxx index d424a53a52bd9..58fda5a13bae9 100644 --- a/Framework/Core/src/DataProcessingHelpers.cxx +++ b/Framework/Core/src/DataProcessingHelpers.cxx @@ -18,79 +18,74 @@ #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "Framework/Logger.h" +#include "Framework/SendingPolicy.h" +#include "Framework/RawDeviceService.h" #include #include namespace o2::framework { -void DataProcessingHelpers::sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel) +void DataProcessingHelpers::sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel) { + fair::mq::Device* device = ref.get().device(); fair::mq::Parts parts; - fair::mq::MessagePtr payload(device.NewMessage()); + fair::mq::MessagePtr payload(device->NewMessage()); SourceInfoHeader sih; sih.state = InputChannelState::Completed; - auto channelAlloc = o2::pmr::getTransportAllocator(device.GetChannel(channel.name, 0).Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(device->GetChannel(channel.name, 0).Transport()); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - device.Send(parts, channel.name, 0); + device->Send(parts, channel.name, 0); LOGP(info, "Sending end-of-stream message to channel {}", channel.name); } -void DataProcessingHelpers::sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice) +void doSendOldestPossibleTimeframe(ServiceRegistryRef ref, fair::mq::TransportFactory* transport, ChannelIndex index, SendingPolicy::SendingCallback const& callback, size_t timeslice) { fair::mq::Parts parts; - fair::mq::MessagePtr payload(channel.Transport()->CreateMessage()); + fair::mq::MessagePtr payload(transport->CreateMessage()); o2::framework::DomainInfoHeader dih; dih.oldestPossibleTimeslice = timeslice; - auto channelAlloc = o2::pmr::getTransportAllocator(channel.Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - auto timeout = 1000; - auto res = channel.Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending oldest possible timeslice after {}s. 
Downstream backpressure detected on {}.", timeout / 1000, channel.GetName()); - channel.Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel.GetName()); - } - if (res < (size_t)fair::mq::TransferCode::success) { - LOGP(fatal, "Error sending oldest possible timeframe {} on channel {} (code {})", timeslice, channel.GetName(), res); - } + callback(parts, index, ref); } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->forward, timeslice); state.oldestForChannel = {timeslice}; return true; } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -void DataProcessingHelpers::broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice) +void DataProcessingHelpers::broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice) { + auto& proxy = ref.get(); for (int ci = 0; ci < proxy.getNumOutputChannels(); ++ci) { auto& info = proxy.getOutputChannelInfo({ci}); auto& state = proxy.getOutputChannelState({ci}); - sendOldestPossibleTimeframe(info, state, timeslice); + sendOldestPossibleTimeframe(ref, info, state, timeslice); } } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..e9df320fe8b95 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -493,6 +493,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext const std::vector& outputsMatchers, const std::vector& channelPolicies, const std::vector& sendingPolicies, + const std::vector& forwardingPolicies, std::string const& channelPrefix, ComputingOffer const& defaultOffer, OverrideServiceSpecs const& overrideServices) @@ -656,7 +657,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext // whether this is a real OutputRoute or if it's a forward from // a previous consumer device. // FIXME: where do I find the InputSpec for the forward? 
- auto appendOutputRouteToSourceDeviceChannel = [&outputsMatchers, &workflow, &devices, &logicalEdges, &sendingPolicies, &configContext]( + auto appendOutputRouteToSourceDeviceChannel = [&outputsMatchers, &workflow, &devices, &logicalEdges, &sendingPolicies, &forwardingPolicies, &configContext]( size_t ei, size_t di, size_t ci) { assert(ei < logicalEdges.size()); assert(di < devices.size()); @@ -670,29 +671,40 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(edge.outputGlobalIndex < outputsMatchers.size()); // Iterate over all the policies and apply the first one that matches. SendingPolicy const* policyPtr = nullptr; + ForwardingPolicy const* forwardPolicyPtr = nullptr; for (auto& policy : sendingPolicies) { if (policy.matcher(producer, consumer, configContext)) { policyPtr = &policy; break; } } + assert(forwardingPolicies.empty() == false); + for (auto& policy : forwardingPolicies) { + if (policy.matcher(producer, consumer, configContext)) { + forwardPolicyPtr = &policy; + break; + } + } assert(policyPtr != nullptr); + assert(forwardPolicyPtr != nullptr); if (edge.isForward == false) { OutputRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], - channel.name, - policyPtr, + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = outputsMatchers[edge.outputGlobalIndex], + .channel = channel.name, + .policy = policyPtr, }; device.outputs.emplace_back(route); } else { ForwardRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], - channel.name}; + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex], + .channel = channel.name, + .policy = forwardPolicyPtr, + }; device.forwards.emplace_back(route); } }; @@ -1051,6 +1063,7 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf std::vector const& resourcePolicies, std::vector const& callbacksPolicies, std::vector const& sendingPolicies, + std::vector const& forwardingPolicies, std::vector& devices, ResourceManager& resourceManager, std::string const& uniqueWorkflowId, @@ -1111,7 +1124,7 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf defaultOffer.memory /= deviceCount + 1; processOutEdgeActions(configContext, devices, deviceIndex, connections, resourceManager, outEdgeIndex, logicalEdges, - outActions, workflow, outputs, channelPolicies, sendingPolicies, channelPrefix, defaultOffer, overrideServices); + outActions, workflow, outputs, channelPolicies, sendingPolicies, forwardingPolicies, channelPrefix, defaultOffer, overrideServices); // FIXME: is this not the case??? 
std::sort(connections.begin(), connections.end()); diff --git a/Framework/Core/src/DeviceSpecHelpers.h b/Framework/Core/src/DeviceSpecHelpers.h index 1fb1866ae2dc3..17601c81342c1 100644 --- a/Framework/Core/src/DeviceSpecHelpers.h +++ b/Framework/Core/src/DeviceSpecHelpers.h @@ -53,6 +53,7 @@ struct DeviceSpecHelpers { std::vector const& resourcePolicies, std::vector const& callbacksPolicies, std::vector const& sendingPolicy, + std::vector const& forwardingPolicies, std::vector& devices, ResourceManager& resourceManager, std::string const& uniqueWorkflowId, @@ -80,9 +81,10 @@ struct DeviceSpecHelpers { std::vector dispatchPolicies = DispatchPolicy::createDefaultPolicies(); std::vector resourcePolicies = ResourcePolicy::createDefaultPolicies(); std::vector sendingPolicies = SendingPolicy::createDefaultPolicies(); + std::vector forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); dataProcessorSpecs2DeviceSpecs(workflow, channelPolicies, completionPolicies, dispatchPolicies, resourcePolicies, callbacksPolicies, - sendingPolicies, devices, + sendingPolicies, forwardingPolicies, devices, resourceManager, uniqueWorkflowId, configContext, optimizeTopology, resourcesMonitoringInterval, channelPrefix, overrideServices); } @@ -150,6 +152,7 @@ struct DeviceSpecHelpers { const std::vector& outputs, std::vector const& channelPolicies, std::vector const& sendingPolicies, + std::vector const& forwardingPolicies, std::string const& channelPrefix, ComputingOffer const& defaultOffer, OverrideServiceSpecs const& overrideServices = {}); diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 0f769ae2dc850..b2957b8f3953c 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -29,6 +29,7 @@ #include "Framework/TimingInfo.h" #include "Framework/DeviceState.h" #include "Framework/Monitoring.h" +#include "Framework/SendingPolicy.h" #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "DecongestionService.h" @@ -1010,7 +1011,13 @@ DataProcessorSpec specifyFairMQDeviceMultiOutputProxy(char const* name, if (device->GetChannels().count(channel) == 0) { throw std::runtime_error("no corresponding output channel found for input '" + channel + "'"); } - ForwardRoute route{0, 1, spec, channel}; + static auto policy = ForwardingPolicy::createDefaultForwardingPolicy(); + ForwardRoute route{ + .timeslice = 0, + .maxTimeslices = 1, + .matcher = spec, + .channel = channel, + .policy = &policy}; // this we will try to fix on the framework level, there will be an API to // set external routes. Basically, this has to be added while setting up the // workflow. After that, the actual spec provided by the service is supposed diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index c25a58e47ef2d..bdffddd5a4d1a 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -267,6 +267,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, + .index = channelIndex, }; mOutputChannelInfos.push_back(info); mOutputChannelStates.push_back({0}); @@ -345,7 +346,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto LOGP(fatal, "Expected channel {} not configured.", route.channel); } ChannelAccountingType dplChannel = (route.channel.rfind("from_", 0) == 0) ? 
ChannelAccountingType::DPL : ChannelAccountingType::RAWFMQ; - mForwardChannelInfos.push_back(ForwardChannelInfo{route.channel, dplChannel, channel->second.at(0)}); + mForwardChannelInfos.push_back(ForwardChannelInfo{.name = route.channel, .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, .index = channelIndex}); mForwardChannelStates.push_back(ForwardChannelState{0}); channelNameToChannel[route.channel] = channelIndex; LOGP(detail, "Binding forward channel {} to channel index {}", route.channel, channelIndex.value); diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..3d65975919676 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -119,4 +119,81 @@ std::vector SendingPolicy::createDefaultPolicies() LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}}; } + +ForwardingPolicy ForwardingPolicy::createDefaultForwardingPolicy() +{ + return ForwardingPolicy{ + .name = "default", + .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + channel->Send(parts); + LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + } }}; +} + +std::vector ForwardingPolicy::createDefaultPolicies() +{ + return {ForwardingPolicy{ + .name = "profiling", + .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return getenv("DPL_DEBUG_MESSAGE_SIZE"); }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + int count = 0; + auto& relayer = registry.get(); + for (auto& part : parts) { + auto* dh = o2::header::get(part->GetData()); + if (dh == nullptr) { + // This is a payload. + continue; + } + LOGP(info, "Sent {}/{}/{} for a total of {} bytes", dh->dataOrigin, dh->dataDescription, dh->subSpecification, dh->payloadSize); + count+= dh->payloadSize; + auto* dph = o2::header::get(part->GetData()); + if (dph == nullptr) { + // This is a payload. + continue; + } + auto oldestPossibleOutput = relayer.getOldestPossibleOutput(); + if ((size_t)dph->startTime < oldestPossibleOutput.timeslice.value) { + LOGP(error, "Sent startTime {} while oldestPossibleOutput is {}. This should not be possible.", dph->startTime, oldestPossibleOutput.timeslice.value); + } + } + LOGP(info, "Sent {} parts for a total of {} bytes", parts.Size(), count); + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + channel->Send(parts); + LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + } }}, + ForwardingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, + createDefaultForwardingPolicy()}; +} } // namespace o2::framework diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 9ca46e060b1a2..cfbb616b0d0a5 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1650,6 +1650,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, driverInfo.resourcePolicies, driverInfo.callbacksPolicies, driverInfo.sendingPolicies, + driverInfo.forwardingPolicies, runningWorkflow.devices, *resourceManager, driverInfo.uniqueWorkflowId, @@ -2644,6 +2645,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, { std::vector currentArgs; std::vector plugins; + std::vector forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); for (int ai = 1; ai < argc; ++ai) { currentArgs.emplace_back(argv[ai]); @@ -3001,6 +3003,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, }; DriverInfo driverInfo{ .sendingPolicies = sendingPolicies, + .forwardingPolicies = forwardingPolicies, .callbacksPolicies = callbacksPolicies}; driverInfo.states.reserve(10); driverInfo.sigintRequested = false; diff --git a/Framework/Core/test/test_DeviceSpec.cxx b/Framework/Core/test/test_DeviceSpec.cxx index 99494d5f8ea4f..72b19da542a76 100644 --- a/Framework/Core/test/test_DeviceSpec.cxx +++ b/Framework/Core/test/test_DeviceSpec.cxx @@ -421,6 +421,7 @@ TEST_CASE("TestOutEdgeProcessingHelpers") auto configContext = makeEmptyConfigContext(); auto channelPolicies = ChannelConfigurationPolicy::createDefaultPolicies(*configContext); auto sendingPolicies = SendingPolicy::createDefaultPolicies(); + auto forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); std::vector resources{ComputingResourceHelpers::getLocalhostResource()}; SimpleResourceManager rm(resources); @@ -429,7 +430,7 @@ TEST_CASE("TestOutEdgeProcessingHelpers") defaultOffer.memory = 0.01; DeviceSpecHelpers::processOutEdgeActions(*configContext, devices, deviceIndex, connections, rm, edgeOutIndex, logicalEdges, - actions, workflow, globalOutputs, channelPolicies, sendingPolicies, "", defaultOffer); + actions, workflow, globalOutputs, channelPolicies, sendingPolicies, forwardingPolicies, "", defaultOffer); std::vector 
expectedDeviceIndex = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {1, 0, 1}, {1, 0, 1}, {1, 1, 2}, {1, 1, 2}, {1, 2, 3}, {1, 2, 3}}; REQUIRE(devices.size() == 4); @@ -713,6 +714,7 @@ TEST_CASE("TestSimpleWildcard") auto configContext = makeEmptyConfigContext(); auto channelPolicies = ChannelConfigurationPolicy::createDefaultPolicies(*configContext); auto sendingPolicies = SendingPolicy::createDefaultPolicies(); + auto forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); std::vector devices; std::vector deviceIndex; @@ -751,7 +753,7 @@ TEST_CASE("TestSimpleWildcard") defaultOffer.memory = 0.01; DeviceSpecHelpers::processOutEdgeActions(*configContext, devices, deviceIndex, connections, rm, edgeOutIndex, logicalEdges, - outActions, workflow, globalOutputs, channelPolicies, sendingPolicies, "", defaultOffer); + outActions, workflow, globalOutputs, channelPolicies, sendingPolicies, forwardingPolicies, "", defaultOffer); REQUIRE(devices.size() == 2); ; // Two devices have outputs: A and Timer diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..712bf10b6fb2e 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -245,4 +245,4 @@ ServiceSpec* DIServicePlugin::create() } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index de78993ff083e..28ff4d5568da9 100644 --- a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -125,10 +125,9 @@ void Dispatcher::run(ProcessingContext& ctx) if (ctx.inputs().isValid("timer-stats")) { reportStats(ctx.services().get()); } - auto& proxy = ctx.services().get(); auto& relayer = ctx.services().get(); auto timeslice = relayer.getOldestPossibleOutput().timeslice.value; - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, timeslice); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), timeslice); } void Dispatcher::reportStats(Monitoring& monitoring) const From 7897c3742059165a7cf6be3264f5f2641206753e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 00:32:25 +0100 Subject: [PATCH 320/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3d65975919676..3aeccb680c150 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -29,7 +29,7 @@ std::vector SendingPolicy::createDefaultPolicies() { return {SendingPolicy{ .name = "dispatcher", - .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { if (source.name == "Dispatcher") { return true; } @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = 
[](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From f3fcdd4dbff0adc793cf82ac84e6cfa9c20adf39 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 01:13:36 +0100 Subject: [PATCH 321/726] DPL: keep code checker happy --- Framework/DataInspector/src/DataInspector.cxx | 4 ++-- Framework/DataInspector/src/DataInspector.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Framework/DataInspector/src/DataInspector.cxx b/Framework/DataInspector/src/DataInspector.cxx index a38b173cdd22b..b948d969f4d10 100644 --- a/Framework/DataInspector/src/DataInspector.cxx +++ b/Framework/DataInspector/src/DataInspector.cxx @@ -44,7 +44,7 @@ using namespace rapidjson; -namespace o2::framework::DataInspector +namespace o2::framework::data_inspector { #if BOOST_ENDIAN_BIG_BYTE static const auto endianness = "BIG"; @@ -147,4 +147,4 @@ std::vector serializeO2Messages(const std::vector& refs, con return messages; } -} // namespace o2::framework::DataInspector +} // namespace o2::framework::data_inspector diff --git a/Framework/DataInspector/src/DataInspector.h b/Framework/DataInspector/src/DataInspector.h index 27c9987a45081..5e28f6bb3853b 100644 --- a/Framework/DataInspector/src/DataInspector.h +++ b/Framework/DataInspector/src/DataInspector.h @@ -20,7 +20,7 @@ #include -namespace o2::framework::DataInspector +namespace o2::framework::data_inspector { inline bool isNonInternalDevice(const DeviceSpec& spec) { @@ -28,6 +28,6 @@ inline bool isNonInternalDevice(const DeviceSpec& spec) } std::vector serializeO2Messages(const std::vector& refs, const std::string& deviceName); -} // namespace o2::framework::DataInspector +} // namespace o2::framework::data_inspector #endif // O2_DATAINSPECTOR_H From d37ae9a6cfc3e605747f82f2b10bc43b5234d686 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 01:17:27 +0100 Subject: [PATCH 322/726] DPL: keep code checker happy --- Framework/DataInspector/src/DataInspector.cxx | 4 ++-- Framework/DataInspector/src/DataInspector.h | 4 ++-- Framework/DataInspector/src/DataInspectorService.cxx | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Framework/DataInspector/src/DataInspector.cxx b/Framework/DataInspector/src/DataInspector.cxx index a38b173cdd22b..b948d969f4d10 100644 --- a/Framework/DataInspector/src/DataInspector.cxx +++ b/Framework/DataInspector/src/DataInspector.cxx @@ -44,7 +44,7 @@ using namespace rapidjson; -namespace o2::framework::DataInspector +namespace o2::framework::data_inspector { #if BOOST_ENDIAN_BIG_BYTE static const auto endianness = "BIG"; @@ -147,4 +147,4 @@ std::vector 
serializeO2Messages(const std::vector& refs, con return messages; } -} // namespace o2::framework::DataInspector +} // namespace o2::framework::data_inspector diff --git a/Framework/DataInspector/src/DataInspector.h b/Framework/DataInspector/src/DataInspector.h index 27c9987a45081..5e28f6bb3853b 100644 --- a/Framework/DataInspector/src/DataInspector.h +++ b/Framework/DataInspector/src/DataInspector.h @@ -20,7 +20,7 @@ #include -namespace o2::framework::DataInspector +namespace o2::framework::data_inspector { inline bool isNonInternalDevice(const DeviceSpec& spec) { @@ -28,6 +28,6 @@ inline bool isNonInternalDevice(const DeviceSpec& spec) } std::vector serializeO2Messages(const std::vector& refs, const std::string& deviceName); -} // namespace o2::framework::DataInspector +} // namespace o2::framework::data_inspector #endif // O2_DATAINSPECTOR_H diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..b24abd27d3c6a 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -205,7 +205,7 @@ ServiceSpec* DIServicePlugin::create() std::string runId = std::getenv("O2_DATAINSPECTOR_ID"); const auto& spec = services.get(); - if (DataInspector::isNonInternalDevice(spec)) { + if (data_inspector::isNonInternalDevice(spec)) { auto* diService = new DataInspectorProxyService(services, spec, proxyAddress, proxyPort, runId); return ServiceHandle{TypeIdHelpers::uniqueId(), diService}; } else { @@ -238,11 +238,11 @@ ServiceSpec* DIServicePlugin::create() } // Send copy to proxy - auto proxyMessages = DataInspector::serializeO2Messages(refs, registry.get < DeviceSpec const>().name); + auto proxyMessages = data_inspector::serializeO2Messages(refs, registry.get < DeviceSpec const>().name); for (auto &proxyMessage: proxyMessages) { diService.send(std::move(proxyMessage)); } } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework From bd6037f96dd90aa6901a3c73933892277cd04da1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 01:17:29 +0100 Subject: [PATCH 323/726] DPL: use the SendingPolicy for every kind of message sending --- .../Core/include/Framework/ChannelInfo.h | 4 + .../include/Framework/DataProcessingHelpers.h | 22 +++--- Framework/Core/include/Framework/DriverInfo.h | 3 + .../Core/include/Framework/ForwardRoute.h | 9 ++- .../Core/include/Framework/SendingPolicy.h | 9 +++ Framework/Core/src/CommonServices.cxx | 10 +-- Framework/Core/src/DataProcessingDevice.cxx | 24 +++--- Framework/Core/src/DataProcessingHelpers.cxx | 41 +++++----- Framework/Core/src/DeviceSpecHelpers.cxx | 35 ++++++--- Framework/Core/src/DeviceSpecHelpers.h | 5 +- .../Core/src/ExternalFairMQDeviceProxy.cxx | 17 +++- Framework/Core/src/FairMQDeviceProxy.cxx | 3 +- Framework/Core/src/SendingPolicy.cxx | 77 +++++++++++++++++++ Framework/Core/src/runDataProcessing.cxx | 3 + Framework/Core/test/test_DeviceSpec.cxx | 6 +- Utilities/DataSampling/src/Dispatcher.cxx | 3 +- 16 files changed, 194 insertions(+), 77 deletions(-) diff --git a/Framework/Core/include/Framework/ChannelInfo.h b/Framework/Core/include/Framework/ChannelInfo.h index 0ebc4a8506301..1f4bd8bc9e90e 100644 --- a/Framework/Core/include/Framework/ChannelInfo.h +++ b/Framework/Core/include/Framework/ChannelInfo.h @@ -67,6 +67,7 @@ struct InputChannelInfo { }; struct SendingPolicy; +struct 
ForwardingPolicy; /// Output channel information struct OutputChannelInfo { @@ -74,6 +75,7 @@ struct OutputChannelInfo { ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; SendingPolicy const* policy; + ChannelIndex index = {-1}; }; struct OutputChannelState { @@ -89,6 +91,8 @@ struct ForwardChannelInfo { /// Wether or not it's a DPL internal channel. ChannelAccountingType channelType = ChannelAccountingType::DPL; fair::mq::Channel& channel; + ForwardingPolicy const* policy; + ChannelIndex index = {-1}; }; struct ForwardChannelState { diff --git a/Framework/Core/include/Framework/DataProcessingHelpers.h b/Framework/Core/include/Framework/DataProcessingHelpers.h index 35f78f12cc405..7a05ddf25c46c 100644 --- a/Framework/Core/include/Framework/DataProcessingHelpers.h +++ b/Framework/Core/include/Framework/DataProcessingHelpers.h @@ -11,14 +11,16 @@ #ifndef O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ #define O2_FRAMEWORK_DATAPROCESSINGHELPERS_H_ -#include "Framework/TimesliceIndex.h" -#include +#include namespace o2::framework { - +struct ServiceRegistryRef; +struct ForwardChannelInfo; +struct ForwardChannelState; +struct OutputChannelInfo; struct OutputChannelSpec; -class FairMQDeviceProxy; +struct OutputChannelState; /// Generic helpers for DataProcessing releated functions. struct DataProcessingHelpers { @@ -26,15 +28,13 @@ struct DataProcessingHelpers { /// @param device the fair::mq::Device which needs to send the EndOfStream message /// @param channel the OutputChannelSpec of the channel which needs to be signaled /// for EndOfStream - static void sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel); + static void sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice); /// @returns true if we did send the oldest possible timeslice message, false otherwise. - static bool sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); - static void broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice); - - private: - static void sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice); + static bool sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice); + /// Broadcast the oldest possible timeslice to all channels in output + static void broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DriverInfo.h b/Framework/Core/include/Framework/DriverInfo.h index 043a689087325..45c4c85a32d01 100644 --- a/Framework/Core/include/Framework/DriverInfo.h +++ b/Framework/Core/include/Framework/DriverInfo.h @@ -111,6 +111,9 @@ struct DriverInfo { /// These are the policies which can be applied to decide how /// we send data. std::vector sendingPolicies; + /// These are the policies which can be applied to decide how + /// we forward data. + std::vector forwardingPolicies; /// The argc with which the driver was started. int argc; /// The argv with which the driver was started. 
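(Aside for readers of this series: ForwardingPolicy mirrors SendingPolicy, but it is consulted when a device forwards messages it merely relayed, rather than when it sends its own outputs. The sketch below is not part of the patch; it only illustrates the shape of such a policy. The helper name, the "best-effort" label, the header paths and the template argument of registry.get<FairMQDeviceProxy>() are assumptions inferred from the hunks in this patch, and note that the driver in this series installs only the policies returned by ForwardingPolicy::createDefaultPolicies().)

// Hypothetical example: forward with a single blocking Send and no retry,
// matching only consumers that carry an (assumed) "best-effort" label.
#include "Framework/SendingPolicy.h"
#include "Framework/FairMQDeviceProxy.h"
#include "Framework/DataProcessorSpec.h"
#include "Framework/ConfigContext.h"
#include "Framework/ServiceRegistryRef.h"
#include <fairmq/Parts.h>
#include <fairmq/Channel.h>

using namespace o2::framework;

ForwardingPolicy makeBestEffortForwardingPolicy()
{
  return ForwardingPolicy{
    .name = "best-effort",
    .matcher = [](DataProcessorSpec const&, DataProcessorSpec const& consumer, ConfigContext const&) {
      // Match consumers explicitly labelled "best-effort" (label name is an assumption).
      for (auto const& label : consumer.labels) {
        if (label.value == "best-effort") {
          return true;
        }
      }
      return false;
    },
    .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) {
      // Resolve the forward channel bound to this route and hand the parts over as-is.
      auto& proxy = registry.get<FairMQDeviceProxy>();
      auto* channel = proxy.getForwardChannel(channelIndex);
      channel->Send(parts);
    }};
}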
diff --git a/Framework/Core/include/Framework/ForwardRoute.h b/Framework/Core/include/Framework/ForwardRoute.h index bae2eaacf1a44..0e3d4d12da24a 100644 --- a/Framework/Core/include/Framework/ForwardRoute.h +++ b/Framework/Core/include/Framework/ForwardRoute.h @@ -15,11 +15,11 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { +struct ForwardingPolicy; + /// This uniquely identifies a route to be forwarded by the device if /// the InputSpec @a matcher matches an input which should also go to /// @a channel @@ -28,8 +28,9 @@ struct ForwardRoute { size_t maxTimeslices; InputSpec matcher; std::string channel; + // The policy to use to send to on this route. + ForwardingPolicy const* policy; }; -} // namespace framework } // namespace o2 #endif // FRAMEWORK_FORWARDROUTE_H diff --git a/Framework/Core/include/Framework/SendingPolicy.h b/Framework/Core/include/Framework/SendingPolicy.h index 1d68412d46db8..7cc5c16e2c923 100644 --- a/Framework/Core/include/Framework/SendingPolicy.h +++ b/Framework/Core/include/Framework/SendingPolicy.h @@ -32,6 +32,15 @@ struct SendingPolicy { static std::vector createDefaultPolicies(); }; +struct ForwardingPolicy { + using ForwardingCallback = std::function; + std::string name = "invalid"; + EdgeMatcher matcher = nullptr; + ForwardingCallback forward = nullptr; + static std::vector createDefaultPolicies(); + static ForwardingPolicy createDefaultForwardingPolicy(); +}; + } // namespace o2::framework #endif // O2_FRAMEWORK_SENDINGPOLICY_H_ diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index b7fc3410a1931..a0f7fcdc88201 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -588,7 +588,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() timesliceIndex.rescan(); } } - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -598,7 +598,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ctx.services(), info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); @@ -646,7 +646,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Queueing oldest possible timeslice %" PRIu64 " propagation for execution.", (uint64_t)oldestPossibleOutput.timeslice.value); AsyncQueueHelpers::post( - queue, decongestion.oldestPossibleTimesliceTask, [oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { + queue, decongestion.oldestPossibleTimesliceTask, [ref = services, oldestPossibleOutput, &decongestion, &proxy, &spec, device, ×liceIndex]() { O2_SIGNPOST_ID_FROM_POINTER(cid, data_processor_context, 
&decongestion); if (decongestion.lastTimeslice >= oldestPossibleOutput.timeslice.value) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Not sending already sent value: %" PRIu64 "> %" PRIu64, @@ -655,7 +655,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() } O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Running oldest possible timeslice %" PRIu64 " propagation.", (uint64_t)oldestPossibleOutput.timeslice.value); - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, oldestPossibleOutput.timeslice.value); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ref, oldestPossibleOutput.timeslice.value); for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) { auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi}); @@ -665,7 +665,7 @@ o2::framework::ServiceSpec CommonServices::decongestionSpec() O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Skipping channel %{public}s", info.name.c_str()); continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestPossibleOutput.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestPossibleOutput.timeslice.value)) { O2_SIGNPOST_EVENT_EMIT(data_processor_context, cid, "oldest_possible_timeslice", "Forwarding to channel %{public}s oldest possible timeslice %" PRIu64 ", priority %d", info.name.c_str(), (uint64_t)oldestPossibleOutput.timeslice.value, 20); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 8c914e201a479..50934bd59a729 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -680,19 +680,15 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, if (forwardedParts[fi].Size() == 0) { continue; } - auto channel = proxy.getForwardChannel(ChannelIndex{fi}); - LOG(debug) << "Forwarding to " << channel->GetName() << " " << fi; + ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi}); + LOG(debug) << "Forwarding to " << info.name << " " << fi; // in DPL we are using subchannel 0 only auto& parts = forwardedParts[fi]; - int timeout = 30000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); - channel->Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t)fair::mq::TransferCode::error) { - LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + if (info.policy == nullptr) { + LOG(error) << "Forwarding to " << info.name << " " << fi << " has no policy"; + continue; } + info.policy->forward(parts, ChannelIndex{fi}, registry); } auto& asyncQueue = registry.get(); @@ -713,7 +709,7 @@ static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, LOG(debug) << "Skipping channel"; continue; } - if (DataProcessingHelpers::sendOldestPossibleTimeframe(info, state, oldestTimeslice.timeslice.value)) { + if (DataProcessingHelpers::sendOldestPossibleTimeframe(registry, info, state, oldestTimeslice.timeslice.value)) { LOGP(debug, "Forwarding to channel {} oldest possible timeslice {}, prio 20", info.name, oldestTimeslice.timeslice.value); } } @@ -1678,8 +1674,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) for (auto& channel : spec.outputChannels) { LOGP(detail, "Sending end of stream to {}", channel.name); - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. relayer.clear(); @@ -2460,8 +2455,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (state.streaming == StreamingState::EndOfStreaming) { LOGP(detail, "Broadcasting end of stream"); for (auto& channel : spec.outputChannels) { - auto& rawDevice = ref.get(); - DataProcessingHelpers::sendEndOfStream(*rawDevice.device(), channel); + DataProcessingHelpers::sendEndOfStream(ref, channel); } switchState(StreamingState::Idle); } diff --git a/Framework/Core/src/DataProcessingHelpers.cxx b/Framework/Core/src/DataProcessingHelpers.cxx index d424a53a52bd9..58fda5a13bae9 100644 --- a/Framework/Core/src/DataProcessingHelpers.cxx +++ b/Framework/Core/src/DataProcessingHelpers.cxx @@ -18,79 +18,74 @@ #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "Framework/Logger.h" +#include "Framework/SendingPolicy.h" +#include "Framework/RawDeviceService.h" #include #include namespace o2::framework { -void DataProcessingHelpers::sendEndOfStream(fair::mq::Device& device, OutputChannelSpec const& channel) +void DataProcessingHelpers::sendEndOfStream(ServiceRegistryRef const& ref, OutputChannelSpec const& channel) { + fair::mq::Device* device = ref.get().device(); fair::mq::Parts parts; - fair::mq::MessagePtr payload(device.NewMessage()); + fair::mq::MessagePtr payload(device->NewMessage()); SourceInfoHeader sih; sih.state = InputChannelState::Completed; - auto channelAlloc = o2::pmr::getTransportAllocator(device.GetChannel(channel.name, 0).Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(device->GetChannel(channel.name, 0).Transport()); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, sih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. 
parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - device.Send(parts, channel.name, 0); + device->Send(parts, channel.name, 0); LOGP(info, "Sending end-of-stream message to channel {}", channel.name); } -void DataProcessingHelpers::sendOldestPossibleTimeframe(fair::mq::Channel& channel, size_t timeslice) +void doSendOldestPossibleTimeframe(ServiceRegistryRef ref, fair::mq::TransportFactory* transport, ChannelIndex index, SendingPolicy::SendingCallback const& callback, size_t timeslice) { fair::mq::Parts parts; - fair::mq::MessagePtr payload(channel.Transport()->CreateMessage()); + fair::mq::MessagePtr payload(transport->CreateMessage()); o2::framework::DomainInfoHeader dih; dih.oldestPossibleTimeslice = timeslice; - auto channelAlloc = o2::pmr::getTransportAllocator(channel.Transport()); + auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dih}); // sigh... See if we can avoid having it const by not // exposing it to the user in the first place. parts.AddPart(std::move(header)); parts.AddPart(std::move(payload)); - auto timeout = 1000; - auto res = channel.Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending oldest possible timeslice after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel.GetName()); - channel.Send(parts); - LOGP(info, "Downstream backpressure on {} recovered.", channel.GetName()); - } - if (res < (size_t)fair::mq::TransferCode::success) { - LOGP(fatal, "Error sending oldest possible timeframe {} on channel {} (code {})", timeslice, channel.GetName(), res); - } + callback(parts, index, ref); } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, ForwardChannelInfo const& info, ForwardChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->forward, timeslice); state.oldestForChannel = {timeslice}; return true; } -bool DataProcessingHelpers::sendOldestPossibleTimeframe(OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) +bool DataProcessingHelpers::sendOldestPossibleTimeframe(ServiceRegistryRef const& ref, OutputChannelInfo const& info, OutputChannelState& state, size_t timeslice) { if (state.oldestForChannel.value >= timeslice) { return false; } - sendOldestPossibleTimeframe(info.channel, timeslice); + doSendOldestPossibleTimeframe(ref, info.channel.Transport(), info.index, info.policy->send, timeslice); state.oldestForChannel = {timeslice}; return true; } -void DataProcessingHelpers::broadcastOldestPossibleTimeslice(FairMQDeviceProxy& proxy, size_t timeslice) +void DataProcessingHelpers::broadcastOldestPossibleTimeslice(ServiceRegistryRef const& ref, size_t timeslice) { + auto& proxy = ref.get(); for (int ci = 0; ci < proxy.getNumOutputChannels(); ++ci) { auto& info = proxy.getOutputChannelInfo({ci}); auto& state = proxy.getOutputChannelState({ci}); - sendOldestPossibleTimeframe(info, state, timeslice); + sendOldestPossibleTimeframe(ref, info, state, timeslice); } } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index b1681a4779ceb..e9df320fe8b95 100644 --- 
a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -493,6 +493,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext const std::vector& outputsMatchers, const std::vector& channelPolicies, const std::vector& sendingPolicies, + const std::vector& forwardingPolicies, std::string const& channelPrefix, ComputingOffer const& defaultOffer, OverrideServiceSpecs const& overrideServices) @@ -656,7 +657,7 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext // whether this is a real OutputRoute or if it's a forward from // a previous consumer device. // FIXME: where do I find the InputSpec for the forward? - auto appendOutputRouteToSourceDeviceChannel = [&outputsMatchers, &workflow, &devices, &logicalEdges, &sendingPolicies, &configContext]( + auto appendOutputRouteToSourceDeviceChannel = [&outputsMatchers, &workflow, &devices, &logicalEdges, &sendingPolicies, &forwardingPolicies, &configContext]( size_t ei, size_t di, size_t ci) { assert(ei < logicalEdges.size()); assert(di < devices.size()); @@ -670,29 +671,40 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext assert(edge.outputGlobalIndex < outputsMatchers.size()); // Iterate over all the policies and apply the first one that matches. SendingPolicy const* policyPtr = nullptr; + ForwardingPolicy const* forwardPolicyPtr = nullptr; for (auto& policy : sendingPolicies) { if (policy.matcher(producer, consumer, configContext)) { policyPtr = &policy; break; } } + assert(forwardingPolicies.empty() == false); + for (auto& policy : forwardingPolicies) { + if (policy.matcher(producer, consumer, configContext)) { + forwardPolicyPtr = &policy; + break; + } + } assert(policyPtr != nullptr); + assert(forwardPolicyPtr != nullptr); if (edge.isForward == false) { OutputRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - outputsMatchers[edge.outputGlobalIndex], - channel.name, - policyPtr, + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = outputsMatchers[edge.outputGlobalIndex], + .channel = channel.name, + .policy = policyPtr, }; device.outputs.emplace_back(route); } else { ForwardRoute route{ - edge.timeIndex, - consumer.maxInputTimeslices, - workflow[edge.consumer].inputs[edge.consumerInputIndex], - channel.name}; + .timeslice = edge.timeIndex, + .maxTimeslices = consumer.maxInputTimeslices, + .matcher = workflow[edge.consumer].inputs[edge.consumerInputIndex], + .channel = channel.name, + .policy = forwardPolicyPtr, + }; device.forwards.emplace_back(route); } }; @@ -1051,6 +1063,7 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf std::vector const& resourcePolicies, std::vector const& callbacksPolicies, std::vector const& sendingPolicies, + std::vector const& forwardingPolicies, std::vector& devices, ResourceManager& resourceManager, std::string const& uniqueWorkflowId, @@ -1111,7 +1124,7 @@ void DeviceSpecHelpers::dataProcessorSpecs2DeviceSpecs(const WorkflowSpec& workf defaultOffer.memory /= deviceCount + 1; processOutEdgeActions(configContext, devices, deviceIndex, connections, resourceManager, outEdgeIndex, logicalEdges, - outActions, workflow, outputs, channelPolicies, sendingPolicies, channelPrefix, defaultOffer, overrideServices); + outActions, workflow, outputs, channelPolicies, sendingPolicies, forwardingPolicies, channelPrefix, defaultOffer, overrideServices); // FIXME: is this not the case??? 
std::sort(connections.begin(), connections.end()); diff --git a/Framework/Core/src/DeviceSpecHelpers.h b/Framework/Core/src/DeviceSpecHelpers.h index 1fb1866ae2dc3..17601c81342c1 100644 --- a/Framework/Core/src/DeviceSpecHelpers.h +++ b/Framework/Core/src/DeviceSpecHelpers.h @@ -53,6 +53,7 @@ struct DeviceSpecHelpers { std::vector const& resourcePolicies, std::vector const& callbacksPolicies, std::vector const& sendingPolicy, + std::vector const& forwardingPolicies, std::vector& devices, ResourceManager& resourceManager, std::string const& uniqueWorkflowId, @@ -80,9 +81,10 @@ struct DeviceSpecHelpers { std::vector dispatchPolicies = DispatchPolicy::createDefaultPolicies(); std::vector resourcePolicies = ResourcePolicy::createDefaultPolicies(); std::vector sendingPolicies = SendingPolicy::createDefaultPolicies(); + std::vector forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); dataProcessorSpecs2DeviceSpecs(workflow, channelPolicies, completionPolicies, dispatchPolicies, resourcePolicies, callbacksPolicies, - sendingPolicies, devices, + sendingPolicies, forwardingPolicies, devices, resourceManager, uniqueWorkflowId, configContext, optimizeTopology, resourcesMonitoringInterval, channelPrefix, overrideServices); } @@ -150,6 +152,7 @@ struct DeviceSpecHelpers { const std::vector& outputs, std::vector const& channelPolicies, std::vector const& sendingPolicies, + std::vector const& forwardingPolicies, std::string const& channelPrefix, ComputingOffer const& defaultOffer, OverrideServiceSpecs const& overrideServices = {}); diff --git a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx index 0f769ae2dc850..4846363efd93b 100644 --- a/Framework/Core/src/ExternalFairMQDeviceProxy.cxx +++ b/Framework/Core/src/ExternalFairMQDeviceProxy.cxx @@ -29,6 +29,7 @@ #include "Framework/TimingInfo.h" #include "Framework/DeviceState.h" #include "Framework/Monitoring.h" +#include "Framework/SendingPolicy.h" #include "Headers/DataHeader.h" #include "Headers/Stack.h" #include "DecongestionService.h" @@ -923,10 +924,16 @@ DataProcessorSpec specifyFairMQDeviceOutputProxy(char const* name, auto lastDataProcessingHeader = std::make_shared(0, 0); auto& spec = const_cast(deviceSpec); + static auto policy = ForwardingPolicy::createDefaultForwardingPolicy(); for (auto const& inputSpec : inputSpecs) { // this is a prototype, in principle we want to have all spec objects const // and so only the const object can be retrieved from service registry - ForwardRoute route{0, 1, inputSpec, outputChannelName}; + ForwardRoute route{ + .timeslice = 0, + .maxTimeslices = 1, + .matcher = inputSpec, + .channel = outputChannelName, + .policy = &policy}; spec.forwards.emplace_back(route); } @@ -1010,7 +1017,13 @@ DataProcessorSpec specifyFairMQDeviceMultiOutputProxy(char const* name, if (device->GetChannels().count(channel) == 0) { throw std::runtime_error("no corresponding output channel found for input '" + channel + "'"); } - ForwardRoute route{0, 1, spec, channel}; + static auto policy = ForwardingPolicy::createDefaultForwardingPolicy(); + ForwardRoute route{ + .timeslice = 0, + .maxTimeslices = 1, + .matcher = spec, + .channel = channel, + .policy = &policy}; // this we will try to fix on the framework level, there will be an API to // set external routes. Basically, this has to be added while setting up the // workflow. 
After that, the actual spec provided by the service is supposed diff --git a/Framework/Core/src/FairMQDeviceProxy.cxx b/Framework/Core/src/FairMQDeviceProxy.cxx index c25a58e47ef2d..bdffddd5a4d1a 100644 --- a/Framework/Core/src/FairMQDeviceProxy.cxx +++ b/Framework/Core/src/FairMQDeviceProxy.cxx @@ -267,6 +267,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, + .index = channelIndex, }; mOutputChannelInfos.push_back(info); mOutputChannelStates.push_back({0}); @@ -345,7 +346,7 @@ void FairMQDeviceProxy::bind(std::vector const& outputs, std::vecto LOGP(fatal, "Expected channel {} not configured.", route.channel); } ChannelAccountingType dplChannel = (route.channel.rfind("from_", 0) == 0) ? ChannelAccountingType::DPL : ChannelAccountingType::RAWFMQ; - mForwardChannelInfos.push_back(ForwardChannelInfo{route.channel, dplChannel, channel->second.at(0)}); + mForwardChannelInfos.push_back(ForwardChannelInfo{.name = route.channel, .channelType = dplChannel, .channel = channel->second.at(0), .policy = route.policy, .index = channelIndex}); mForwardChannelStates.push_back(ForwardChannelState{0}); channelNameToChannel[route.channel] = channelIndex; LOGP(detail, "Binding forward channel {} to channel index {}", route.channel, channelIndex.value); diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 17faef6dbd3d0..3d65975919676 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -119,4 +119,81 @@ std::vector SendingPolicy::createDefaultPolicies() LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}}; } + +ForwardingPolicy ForwardingPolicy::createDefaultForwardingPolicy() +{ + return ForwardingPolicy{ + .name = "default", + .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + channel->Send(parts); + LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + } }}; +} + +std::vector ForwardingPolicy::createDefaultPolicies() +{ + return {ForwardingPolicy{ + .name = "profiling", + .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return getenv("DPL_DEBUG_MESSAGE_SIZE"); }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + int count = 0; + auto& relayer = registry.get(); + for (auto& part : parts) { + auto* dh = o2::header::get(part->GetData()); + if (dh == nullptr) { + // This is a payload. + continue; + } + LOGP(info, "Sent {}/{}/{} for a total of {} bytes", dh->dataOrigin, dh->dataDescription, dh->subSpecification, dh->payloadSize); + count+= dh->payloadSize; + auto* dph = o2::header::get(part->GetData()); + if (dph == nullptr) { + // This is a payload. 
+ continue; + } + auto oldestPossibleOutput = relayer.getOldestPossibleOutput(); + if ((size_t)dph->startTime < oldestPossibleOutput.timeslice.value) { + LOGP(error, "Sent startTime {} while oldestPossibleOutput is {}. This should not be possible.", dph->startTime, oldestPossibleOutput.timeslice.value); + } + } + LOGP(info, "Sent {} parts for a total of {} bytes", parts.Size(), count); + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + channel->Send(parts); + LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(fatal, "Error while sending on channel {}", channel->GetName()); + } }}, + ForwardingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getForwardChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, + createDefaultForwardingPolicy()}; +} } // namespace o2::framework diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 9ca46e060b1a2..cfbb616b0d0a5 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1650,6 +1650,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, driverInfo.resourcePolicies, driverInfo.callbacksPolicies, driverInfo.sendingPolicies, + driverInfo.forwardingPolicies, runningWorkflow.devices, *resourceManager, driverInfo.uniqueWorkflowId, @@ -2644,6 +2645,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, { std::vector currentArgs; std::vector plugins; + std::vector forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); for (int ai = 1; ai < argc; ++ai) { currentArgs.emplace_back(argv[ai]); @@ -3001,6 +3003,7 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& workflow, }; DriverInfo driverInfo{ .sendingPolicies = sendingPolicies, + .forwardingPolicies = forwardingPolicies, .callbacksPolicies = callbacksPolicies}; driverInfo.states.reserve(10); driverInfo.sigintRequested = false; diff --git a/Framework/Core/test/test_DeviceSpec.cxx b/Framework/Core/test/test_DeviceSpec.cxx index 99494d5f8ea4f..72b19da542a76 100644 --- a/Framework/Core/test/test_DeviceSpec.cxx +++ b/Framework/Core/test/test_DeviceSpec.cxx @@ -421,6 +421,7 @@ TEST_CASE("TestOutEdgeProcessingHelpers") auto configContext = makeEmptyConfigContext(); auto channelPolicies = ChannelConfigurationPolicy::createDefaultPolicies(*configContext); auto sendingPolicies = SendingPolicy::createDefaultPolicies(); + auto forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); std::vector 
resources{ComputingResourceHelpers::getLocalhostResource()}; SimpleResourceManager rm(resources); @@ -429,7 +430,7 @@ TEST_CASE("TestOutEdgeProcessingHelpers") defaultOffer.memory = 0.01; DeviceSpecHelpers::processOutEdgeActions(*configContext, devices, deviceIndex, connections, rm, edgeOutIndex, logicalEdges, - actions, workflow, globalOutputs, channelPolicies, sendingPolicies, "", defaultOffer); + actions, workflow, globalOutputs, channelPolicies, sendingPolicies, forwardingPolicies, "", defaultOffer); std::vector expectedDeviceIndex = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {1, 0, 1}, {1, 0, 1}, {1, 1, 2}, {1, 1, 2}, {1, 2, 3}, {1, 2, 3}}; REQUIRE(devices.size() == 4); @@ -713,6 +714,7 @@ TEST_CASE("TestSimpleWildcard") auto configContext = makeEmptyConfigContext(); auto channelPolicies = ChannelConfigurationPolicy::createDefaultPolicies(*configContext); auto sendingPolicies = SendingPolicy::createDefaultPolicies(); + auto forwardingPolicies = ForwardingPolicy::createDefaultPolicies(); std::vector devices; std::vector deviceIndex; @@ -751,7 +753,7 @@ TEST_CASE("TestSimpleWildcard") defaultOffer.memory = 0.01; DeviceSpecHelpers::processOutEdgeActions(*configContext, devices, deviceIndex, connections, rm, edgeOutIndex, logicalEdges, - outActions, workflow, globalOutputs, channelPolicies, sendingPolicies, "", defaultOffer); + outActions, workflow, globalOutputs, channelPolicies, sendingPolicies, forwardingPolicies, "", defaultOffer); REQUIRE(devices.size() == 2); ; // Two devices have outputs: A and Timer diff --git a/Utilities/DataSampling/src/Dispatcher.cxx b/Utilities/DataSampling/src/Dispatcher.cxx index de78993ff083e..28ff4d5568da9 100644 --- a/Utilities/DataSampling/src/Dispatcher.cxx +++ b/Utilities/DataSampling/src/Dispatcher.cxx @@ -125,10 +125,9 @@ void Dispatcher::run(ProcessingContext& ctx) if (ctx.inputs().isValid("timer-stats")) { reportStats(ctx.services().get()); } - auto& proxy = ctx.services().get(); auto& relayer = ctx.services().get(); auto timeslice = relayer.getOldestPossibleOutput().timeslice.value; - DataProcessingHelpers::broadcastOldestPossibleTimeslice(proxy, timeslice); + DataProcessingHelpers::broadcastOldestPossibleTimeslice(ctx.services(), timeslice); } void Dispatcher::reportStats(Monitoring& monitoring) const From 37b4c4302e6c90e4b01413d3896a150426e57481 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 01:17:29 +0100 Subject: [PATCH 324/726] DPL: add SendingPolicy for the case destination is expendable --- Framework/Core/src/SendingPolicy.cxx | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3d65975919676..3aeccb680c150 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -29,7 +29,7 @@ std::vector SendingPolicy::createDefaultPolicies() { return {SendingPolicy{ .name = "dispatcher", - .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { if (source.name == "Dispatcher") { return true; } @@ -103,6 +103,23 @@ std::vector SendingPolicy::createDefaultPolicies() } else if (res == (size_t) fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, + SendingPolicy{ + .name = "expendable", + .matcher = [](DataProcessorSpec const& source, DataProcessorSpec 
const& dest, ConfigContext const&) { + auto has_label = [](DataProcessorLabel const& label) { + return label.value == "expendable"; + }; + return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, + .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { + auto &proxy = registry.get(); + auto *channel = proxy.getOutputChannel(channelIndex); + auto timeout = 1000; + auto res = channel->Send(parts, timeout); + if (res == (size_t)fair::mq::TransferCode::timeout) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + } else if (res == (size_t) fair::mq::TransferCode::error) { + LOGP(info, "Error while sending on channel {}", channel->GetName()); + } }}, SendingPolicy{ .name = "default", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return true; }, From dca1c3af1f24062b13444cccf781b4102efe1e9b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 10 Feb 2024 09:25:41 +0100 Subject: [PATCH 325/726] DPL: keep code checker happy --- Framework/DataInspector/src/DataInspector.cxx | 4 ++-- Framework/DataInspector/src/DataInspector.h | 4 ++-- Framework/DataInspector/src/DataInspectorService.cxx | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Framework/DataInspector/src/DataInspector.cxx b/Framework/DataInspector/src/DataInspector.cxx index a38b173cdd22b..b948d969f4d10 100644 --- a/Framework/DataInspector/src/DataInspector.cxx +++ b/Framework/DataInspector/src/DataInspector.cxx @@ -44,7 +44,7 @@ using namespace rapidjson; -namespace o2::framework::DataInspector +namespace o2::framework::data_inspector { #if BOOST_ENDIAN_BIG_BYTE static const auto endianness = "BIG"; @@ -147,4 +147,4 @@ std::vector serializeO2Messages(const std::vector& refs, con return messages; } -} // namespace o2::framework::DataInspector +} // namespace o2::framework::data_inspector diff --git a/Framework/DataInspector/src/DataInspector.h b/Framework/DataInspector/src/DataInspector.h index 27c9987a45081..5e28f6bb3853b 100644 --- a/Framework/DataInspector/src/DataInspector.h +++ b/Framework/DataInspector/src/DataInspector.h @@ -20,7 +20,7 @@ #include -namespace o2::framework::DataInspector +namespace o2::framework::data_inspector { inline bool isNonInternalDevice(const DeviceSpec& spec) { @@ -28,6 +28,6 @@ inline bool isNonInternalDevice(const DeviceSpec& spec) } std::vector serializeO2Messages(const std::vector& refs, const std::string& deviceName); -} // namespace o2::framework::DataInspector +} // namespace o2::framework::data_inspector #endif // O2_DATAINSPECTOR_H diff --git a/Framework/DataInspector/src/DataInspectorService.cxx b/Framework/DataInspector/src/DataInspectorService.cxx index 40399a366a303..661065da35ff6 100644 --- a/Framework/DataInspector/src/DataInspectorService.cxx +++ b/Framework/DataInspector/src/DataInspectorService.cxx @@ -205,7 +205,7 @@ ServiceSpec* DIServicePlugin::create() std::string runId = std::getenv("O2_DATAINSPECTOR_ID"); const auto& spec = services.get(); - if (DataInspector::isNonInternalDevice(spec)) { + if (data_inspector::isNonInternalDevice(spec)) { auto* diService = new DataInspectorProxyService(services, spec, proxyAddress, proxyPort, runId); return ServiceHandle{TypeIdHelpers::uniqueId(), diService}; } else { @@ -238,11 +238,11 @@ ServiceSpec* DIServicePlugin::create() } // Send copy to proxy - auto proxyMessages = 
DataInspector::serializeO2Messages(refs, registry.get < DeviceSpec const>().name); + auto proxyMessages = data_inspector::serializeO2Messages(refs, registry.get().name); for (auto &proxyMessage: proxyMessages) { diService.send(std::move(proxyMessage)); } } }, .kind = ServiceKind::Global}; }; -} // namespace o2::framework \ No newline at end of file +} // namespace o2::framework From eb0e5147a4fafcb0298736f3f5117ab38c24997d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 12 Feb 2024 10:44:18 +0100 Subject: [PATCH 326/726] DPL: hide exception handling from header --- .../include/Framework/runDataProcessing.h | 43 ++----------------- Framework/Core/src/runDataProcessing.cxx | 39 +++++++++++++++++ 2 files changed, 42 insertions(+), 40 deletions(-) diff --git a/Framework/Core/include/Framework/runDataProcessing.h b/Framework/Core/include/Framework/runDataProcessing.h index bfda47d07971c..186d76499c1d6 100644 --- a/Framework/Core/include/Framework/runDataProcessing.h +++ b/Framework/Core/include/Framework/runDataProcessing.h @@ -25,7 +25,6 @@ #include "Framework/CustomWorkflowTerminationHook.h" #include "Framework/CommonServices.h" #include "Framework/WorkflowCustomizationHelpers.h" -#include "Framework/RuntimeError.h" #include "Framework/ResourcePolicyHelpers.h" #include "Framework/Logger.h" #include "Framework/CheckTypes.h" @@ -33,12 +32,6 @@ #include #include -#include - -namespace boost -{ -class exception; -} namespace o2::framework { @@ -147,9 +140,6 @@ int doMain(int argc, char** argv, o2::framework::WorkflowSpec const& specs, std::vector const& workflowOptions, o2::framework::ConfigContext& configContext); -void doBoostException(boost::exception& e, const char*); -void doDPLException(o2::framework::RuntimeErrorRef& ref, char const*); -void doUnknownException(std::string const& s, char const*); void doDefaultWorkflowTerminationHook(); template @@ -220,41 +210,14 @@ int mainNoCatch(int argc, char** argv) resourcePolicies, callbacksPolicies, sendingPolicies, workflowOptions, configContext); } +int callMain(int argc, char** argv, int (*)(int, char**)); + int main(int argc, char** argv) { using namespace o2::framework; using namespace boost::program_options; - static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0"); - int result = 1; - if (noCatch) { - try { - result = mainNoCatch(argc, argv); - } catch (o2::framework::RuntimeErrorRef& ref) { - doDPLException(ref, argv[0]); - throw; - } - } else { - try { - // The 0 here is an int, therefore having the template matching in the - // SFINAE expression above fit better the version which invokes user code over - // the default one. - // The default policy is a catch all pub/sub setup to be consistent with the past. - result = mainNoCatch(argc, argv); - } catch (boost::exception& e) { - doBoostException(e, argv[0]); - throw; - } catch (std::exception const& error) { - doUnknownException(error.what(), argv[0]); - throw; - } catch (o2::framework::RuntimeErrorRef& ref) { - doDPLException(ref, argv[0]); - throw; - } catch (...) 
{ - doUnknownException("", argv[0]); - throw; - } - } + int result = callMain(argc, argv, mainNoCatch); char* idstring = nullptr; for (int argi = 0; argi < argc; argi++) { diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index cfbb616b0d0a5..64c034ebb8533 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -177,6 +177,45 @@ bpo::options_description gHiddenDeviceOptions("Hidden child options"); O2_DECLARE_DYNAMIC_LOG(driver); +void doBoostException(boost::exception& e, const char*); +void doDPLException(o2::framework::RuntimeErrorRef& ref, char const*); +void doUnknownException(std::string const& s, char const*); + +int callMain(int argc, char** argv, int (*mainNoCatch)(int, char**)) +{ + static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0"); + int result = 1; + if (noCatch) { + try { + result = mainNoCatch(argc, argv); + } catch (o2::framework::RuntimeErrorRef& ref) { + doDPLException(ref, argv[0]); + throw; + } + } else { + try { + // The 0 here is an int, therefore having the template matching in the + // SFINAE expression above fit better the version which invokes user code over + // the default one. + // The default policy is a catch all pub/sub setup to be consistent with the past. + result = mainNoCatch(argc, argv); + } catch (boost::exception& e) { + doBoostException(e, argv[0]); + throw; + } catch (std::exception const& error) { + doUnknownException(error.what(), argv[0]); + throw; + } catch (o2::framework::RuntimeErrorRef& ref) { + doDPLException(ref, argv[0]); + throw; + } catch (...) { + doUnknownException("", argv[0]); + throw; + } + } + return result; +} + // Read from a given fd and print it. // return true if we can still read from it, // return false if we need to close the input pipe. 
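The net effect of the callMain change is header hygiene: runDataProcessing.h no longer drags in <boost/exception> or RuntimeError.h, and the catch-all cascade is compiled exactly once in runDataProcessing.cxx instead of in every workflow translation unit. A simplified standalone sketch of the same pattern, keeping only the std::exception and catch-all branches (the real implementation above also handles boost::exception and o2::framework::RuntimeErrorRef):

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <exception>

// Sketch only: user code sees just the declaration
//   int callMain(int, char**, int (*)(int, char**));
// in the header, while the exception handling lives in one .cxx file.
int callMain(int argc, char** argv, int (*mainNoCatch)(int, char**))
{
  // O2_NO_CATCHALL_EXCEPTIONS=1 disables the catch-all, preserving the original
  // throw site for debuggers and core dumps.
  static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
  if (noCatch) {
    return mainNoCatch(argc, argv);
  }
  try {
    return mainNoCatch(argc, argv);
  } catch (std::exception const& e) {
    std::fprintf(stderr, "%s: %s\n", argv[0], e.what());
    throw;
  } catch (...) {
    std::fprintf(stderr, "%s: unknown exception\n", argv[0]);
    throw;
  }
}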
From fe24d077b30ebd5d5acd8fbbcdc100013b5ef1e3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 12 Feb 2024 17:42:23 +0100 Subject: [PATCH 327/726] DPL: apparently we also need non-critical --- Framework/Core/src/SendingPolicy.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..4b98a87052225 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -107,7 +107,7 @@ std::vector SendingPolicy::createDefaultPolicies() .name = "expendable", .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { auto has_label = [](DataProcessorLabel const& label) { - return label.value == "expendable"; + return label.value == "expendable" || label.value == "non-critical"; }; return std::find_if(dest.labels.begin(), dest.labels.end(), has_label) != dest.labels.end(); }, .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { From e4a6f20db541626565dce37f66a8ba0e0cabb4ab Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 11:03:52 +0100 Subject: [PATCH 328/726] DPL: fix non lossy dispatcher --- Framework/Core/src/SendingPolicy.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..b24761dd728ff 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -61,7 +61,7 @@ std::vector SendingPolicy::createDefaultPolicies() if (state.droppedMessages > 10) { timeout = 0; } - size_t result = info.channel.Send(parts, timeout); + int64_t result = info.channel.Send(parts, timeout); if (result > 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { From f264f26638bda1bb537e1337d9a9914ef414f842 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 11:42:41 +0100 Subject: [PATCH 329/726] DPL: fix non lossy dispatcher --- Framework/Core/src/SendingPolicy.cxx | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..28fb1a7524da3 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -61,7 +61,7 @@ std::vector SendingPolicy::createDefaultPolicies() if (state.droppedMessages > 10) { timeout = 0; } - size_t result = info.channel.Send(parts, timeout); + int64_t result = info.channel.Send(parts, timeout); if (result > 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { @@ -113,12 +113,18 @@ std::vector SendingPolicy::createDefaultPolicies() .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getOutputChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; + if (state.droppedMessages > 0) { + timeout = 0; + } auto res = channel->Send(parts, timeout); if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + state.droppedMessages++; } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + LOGP(warning, "Error while sending on expendable channel {}. Switching to lossy mode for that channel.", channel->GetName()); + state.droppedMessages++; } }}, SendingPolicy{ .name = "default", @@ -204,12 +210,18 @@ std::vector ForwardingPolicy::createDefaultPolicies() .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getForwardChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; + if (state.droppedMessages > 0) { + timeout = 0; + } auto res = channel->Send(parts, timeout); if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + state.droppedMessages++; } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + LOGP(warning, "Error sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + state.droppedMessages++; } }}, createDefaultForwardingPolicy()}; } From 6a985cea11eae89a3a977fa815fa273eb0da3d6d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 13:40:00 +0100 Subject: [PATCH 330/726] DPL: fix non lossy dispatcher --- Framework/Core/src/SendingPolicy.cxx | 49 ++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..2c99382873789 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -56,13 +56,16 @@ std::vector SendingPolicy::createDefaultPolicies() // non-blocking approach. int64_t timeout = 10; if (state.droppedMessages == 10 + 1) { - LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode"; + LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; + } + if (state.droppedMessages == 0) { + timeout = 10; } if (state.droppedMessages > 10) { timeout = 0; } - size_t result = info.channel.Send(parts, timeout); - if (result > 0) { + int64_t result = info.channel.Send(parts, timeout); + if (result >= 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { state.droppedMessages++; @@ -113,12 +116,22 @@ std::vector SendingPolicy::createDefaultPolicies() .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getOutputChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, SendingPolicy{ .name = "default", @@ -204,12 +217,22 @@ std::vector ForwardingPolicy::createDefaultPolicies() .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getForwardChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, createDefaultForwardingPolicy()}; } From 0a5cde1678491a4994867cb53faa0c9fb69d04b4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 13:43:36 +0100 Subject: [PATCH 331/726] DPL: fix lossy sending / forwarding policies --- Framework/Core/src/SendingPolicy.cxx | 49 ++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..2c99382873789 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -56,13 +56,16 @@ std::vector SendingPolicy::createDefaultPolicies() // non-blocking approach. 
int64_t timeout = 10; if (state.droppedMessages == 10 + 1) { - LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode"; + LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; + } + if (state.droppedMessages == 0) { + timeout = 10; } if (state.droppedMessages > 10) { timeout = 0; } - size_t result = info.channel.Send(parts, timeout); - if (result > 0) { + int64_t result = info.channel.Send(parts, timeout); + if (result >= 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { state.droppedMessages++; @@ -113,12 +116,22 @@ std::vector SendingPolicy::createDefaultPolicies() .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getOutputChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, SendingPolicy{ .name = "default", @@ -204,12 +217,22 @@ std::vector ForwardingPolicy::createDefaultPolicies() .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getForwardChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. 
Switching to dropping mode.", timeout/1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, createDefaultForwardingPolicy()}; } From 280a47a1d763a5a07fe64529e0388eb070dd3334 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:39:06 +0100 Subject: [PATCH 332/726] DPL: fix lossy sending / forwarding policies --- Framework/Core/src/SendingPolicy.cxx | 53 ++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..da83b644efe60 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -56,13 +56,16 @@ std::vector SendingPolicy::createDefaultPolicies() // non-blocking approach. int64_t timeout = 10; if (state.droppedMessages == 10 + 1) { - LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode"; + LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; + } + if (state.droppedMessages == 0) { + timeout = 10; } if (state.droppedMessages > 10) { timeout = 0; } - size_t result = info.channel.Send(parts, timeout); - if (result > 0) { + int64_t result = info.channel.Send(parts, timeout); + if (result >= 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { state.droppedMessages++; @@ -97,10 +100,10 @@ std::vector SendingPolicy::createDefaultPolicies() LOGP(info, "Sent {} parts for a total of {} bytes", parts.Size(), count); auto res = channel->Send(parts, timeout); if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); channel->Send(parts); LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { + } else if (res == (size_t)fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, SendingPolicy{ @@ -113,12 +116,22 @@ std::vector SendingPolicy::createDefaultPolicies() .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getOutputChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. 
Switching to dropping mode.", timeout / 1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, SendingPolicy{ .name = "default", @@ -204,12 +217,22 @@ std::vector ForwardingPolicy::createDefaultPolicies() .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getForwardChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout/1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, createDefaultForwardingPolicy()}; } From 0026e429e25a006f1884bd16c1470e940fb29da5 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:39:58 +0100 Subject: [PATCH 333/726] DPL: drop reference to the now obsolete non-critical label --- Framework/Core/src/SendingPolicy.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index da83b644efe60..3956ff0e99899 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -39,9 +39,9 @@ std::vector SendingPolicy::createDefaultPolicies() return true; } } - // Check if any of the destination's labels is "expendable" or "non-critical" + // Check if any of the destination's labels is "expendable" for (auto const& label : dest.labels) { - if (label.value == "expendable" || label.value == "non-critical") { + if (label.value == "expendable") { return true; } } From 623613eed9ebd9c3db3f4d9d0e97f39b7ec1e292 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:42:07 +0100 Subject: [PATCH 334/726] DPL: fix lossy sending / forwarding policies --- Framework/Core/src/SendingPolicy.cxx | 53 ++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3aeccb680c150..52123378e1e46 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -56,13 +56,16 @@ std::vector SendingPolicy::createDefaultPolicies() // non-blocking approach. 
int64_t timeout = 10; if (state.droppedMessages == 10 + 1) { - LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode"; + LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; + } + if (state.droppedMessages == 0) { + timeout = 10; } if (state.droppedMessages > 10) { timeout = 0; } - size_t result = info.channel.Send(parts, timeout); - if (result > 0) { + int64_t result = info.channel.Send(parts, timeout); + if (result >= 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { state.droppedMessages++; @@ -97,10 +100,10 @@ std::vector SendingPolicy::createDefaultPolicies() LOGP(info, "Sent {} parts for a total of {} bytes", parts.Size(), count); auto res = channel->Send(parts, timeout); if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout/1000, channel->GetName()); + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on {}.", timeout / 1000, channel->GetName()); channel->Send(parts); LOGP(info, "Downstream backpressure on {} recovered.", channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { + } else if (res == (size_t)fair::mq::TransferCode::error) { LOGP(fatal, "Error while sending on channel {}", channel->GetName()); } }}, SendingPolicy{ @@ -113,12 +116,22 @@ std::vector SendingPolicy::createDefaultPolicies() .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getOutputChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout / 1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, SendingPolicy{ .name = "default", @@ -204,12 +217,22 @@ std::vector ForwardingPolicy::createDefaultPolicies() .forward = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { auto &proxy = registry.get(); auto *channel = proxy.getForwardChannel(channelIndex); + OutputChannelState& state = proxy.getOutputChannelState(channelIndex); auto timeout = 1000; - auto res = channel->Send(parts, timeout); - if (res == (size_t)fair::mq::TransferCode::timeout) { - LOGP(warning, "Timed out sending after {}s. Downstream backpressure detected on expendable channel {}.", timeout/1000, channel->GetName()); - } else if (res == (size_t) fair::mq::TransferCode::error) { - LOGP(info, "Error while sending on channel {}", channel->GetName()); + if (state.droppedMessages > 0) { + timeout = 0; + } + if (state.droppedMessages == 1) { + LOGP(warning, "Timed out sending after {}s. 
Downstream backpressure detected on expendable channel {}. Switching to dropping mode.", timeout / 1000, channel->GetName()); + } + if (state.droppedMessages == 0) { + timeout = 1000; + } + int64_t res = channel->Send(parts, timeout); + if (res >= 0) { + state.droppedMessages = 0; + } else { + state.droppedMessages++; } }}, createDefaultForwardingPolicy()}; } From 466f501fa1bfa1520ed92a075aaf60ccbb6914cb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:42:07 +0100 Subject: [PATCH 335/726] DPL: drop reference to the now obsolete non-critical label --- Framework/Core/src/SendingPolicy.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 52123378e1e46..3dbbc451915b0 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -39,9 +39,9 @@ std::vector SendingPolicy::createDefaultPolicies() return true; } } - // Check if any of the destination's labels is "expendable" or "non-critical" + // Check if any of the destination's labels is "expendable" for (auto const& label : dest.labels) { - if (label.value == "expendable" || label.value == "non-critical") { + if (label.value == "expendable") { return true; } } From 6f6ec56bc0a79cb82525033ff2bab9a3a1b1313f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 16:17:20 +0100 Subject: [PATCH 336/726] DPL: make sure homogeneous_apply_ref complains when we are missing an explicit constructor --- Framework/Foundation/include/Framework/StructToTuple.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index d94c0e3aaae03..dbfccbcb95acc 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -135,10 +135,13 @@ struct UniversalType { template consteval auto brace_constructible_size(auto... Members) { - if constexpr (requires { T{Members...}; } == false) + if constexpr (requires { T{Members...}; } == false) { + static_assert(sizeof...(Members) != 0, "You need to make sure that you have implicit constructors or that you call the explicit constructor correctly."); return sizeof...(Members) - 1; - else + } + else { return brace_constructible_size(Members..., UniversalType{}); + } } #else template @@ -282,6 +285,7 @@ auto homogeneous_apply_refs(L l, T&& object) using type = std::decay_t; constexpr int nesting = B ? 
1 : 0; constexpr unsigned long numElements = brace_constructible_size() - nesting; + static_assert(numElements > 99, "Too many elements in the struct"); // clang-format off if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 9) else if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 8) From a643e97a4ac25c13523acd1b901a33158606c82f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 16:31:57 +0100 Subject: [PATCH 337/726] DPL: make sure homogeneous_apply_ref complains when we are missing an explicit constructor --- Framework/Foundation/include/Framework/StructToTuple.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index d94c0e3aaae03..b8b0c0d5800fe 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -135,10 +135,13 @@ struct UniversalType { template consteval auto brace_constructible_size(auto... Members) { - if constexpr (requires { T{Members...}; } == false) + if constexpr (requires { T{Members...}; } == false) { + static_assert(sizeof...(Members) != 0, "You need to make sure that you have implicit constructors or that you call the explicit constructor correctly."); return sizeof...(Members) - 1; - else + } + else { return brace_constructible_size(Members..., UniversalType{}); + } } #else template @@ -282,6 +285,7 @@ auto homogeneous_apply_refs(L l, T&& object) using type = std::decay_t; constexpr int nesting = B ? 1 : 0; constexpr unsigned long numElements = brace_constructible_size() - nesting; + static_assert(numElements < 99, "Too many elements in the struct"); // clang-format off if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 9) else if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 8) From 025c3aebd0e1ae92e83271a38706d66d0e2ce62a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 13 Feb 2024 17:19:21 +0100 Subject: [PATCH 338/726] DPL: make sure homogeneous_apply_ref complains when we are missing an explicit constructor --- Framework/Foundation/include/Framework/StructToTuple.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index d94c0e3aaae03..0ef3be4efcb3c 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -135,10 +135,12 @@ struct UniversalType { template consteval auto brace_constructible_size(auto... Members) { - if constexpr (requires { T{Members...}; } == false) + if constexpr (requires { T{Members...}; } == false) { + static_assert(sizeof...(Members) != 0, "You need to make sure that you have implicit constructors or that you call the explicit constructor correctly."); return sizeof...(Members) - 1; - else + } else { return brace_constructible_size(Members..., UniversalType{}); + } } #else template @@ -282,6 +284,7 @@ auto homogeneous_apply_refs(L l, T&& object) using type = std::decay_t; constexpr int nesting = B ? 
1 : 0; constexpr unsigned long numElements = brace_constructible_size() - nesting; + static_assert(numElements < 99, "Too many elements in the struct"); // clang-format off if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 9) else if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 8) From 7940f6c9c315c908147a2e280a19670b8aa12290 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 14 Feb 2024 08:50:56 +0100 Subject: [PATCH 339/726] DPL: make sure homogeneous_apply_ref complains when we are missing an explicit constructor --- Framework/Foundation/include/Framework/StructToTuple.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index d94c0e3aaae03..6d9a6a2ef305a 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -135,10 +135,12 @@ struct UniversalType { template consteval auto brace_constructible_size(auto... Members) { - if constexpr (requires { T{Members...}; } == false) + if constexpr (requires { T{Members...}; } == false) { + static_assert(sizeof...(Members) != 0, "You need to make sure that you have implicit constructors or that you call the explicit constructor correctly."); return sizeof...(Members) - 1; - else + } else { return brace_constructible_size(Members..., UniversalType{}); + } } #else template From 983f5e4a130027a838ef1e02c06cf4c1f2dc03e6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 14 Feb 2024 08:50:56 +0100 Subject: [PATCH 340/726] DPL: check for the maximum number of members supported for tasks --- Framework/Foundation/include/Framework/StructToTuple.h | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Foundation/include/Framework/StructToTuple.h b/Framework/Foundation/include/Framework/StructToTuple.h index 6d9a6a2ef305a..0ef3be4efcb3c 100644 --- a/Framework/Foundation/include/Framework/StructToTuple.h +++ b/Framework/Foundation/include/Framework/StructToTuple.h @@ -284,6 +284,7 @@ auto homogeneous_apply_refs(L l, T&& object) using type = std::decay_t; constexpr int nesting = B ? 1 : 0; constexpr unsigned long numElements = brace_constructible_size() - nesting; + static_assert(numElements < 99, "Too many elements in the struct"); // clang-format off if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 9) else if DPL_HOMOGENEOUS_APPLY_ENTRY (9, 8) From fc2a17a575f2551f163cb60f605dca6a46ed4fd2 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 14 Feb 2024 16:40:15 +0100 Subject: [PATCH 341/726] DPL: allow extracting data from processing context directly using arguments --- .../Core/include/Framework/AlgorithmSpec.h | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/Framework/Core/include/Framework/AlgorithmSpec.h b/Framework/Core/include/Framework/AlgorithmSpec.h index 98f7f18e0d24d..e98a3abefbce1 100644 --- a/Framework/Core/include/Framework/AlgorithmSpec.h +++ b/Framework/Core/include/Framework/AlgorithmSpec.h @@ -84,6 +84,33 @@ struct AlgorithmPlugin { virtual AlgorithmSpec create() = 0; }; +template +struct StringLiteral { + constexpr StringLiteral(const char (&str)[N]) + { + std::copy_n(str, N, value); + } + + char value[N]; +}; + +// Allow fetching inputs from the context using a string literal. +template +struct Input { + // The size of the string is available as a constant expression. 
+ static constexpr auto size = sizeof(lit.value); + // and so is the string's content. + static constexpr auto contents = lit.value; + ProcessingContext& ctx; + Input(ProcessingContext& c) : ctx(c) + { + } + operator T const&() const + { + return ctx.inputs().template get(lit.value); + } +}; + template > struct ContextElementTraits { static decltype(auto) get(ProcessingContext& ctx) @@ -137,6 +164,22 @@ struct ContextElementTraits { } }; +template +struct ContextElementTraits const> { + static Input get(ProcessingContext& ctx) + { + return Input{ctx}; + } +}; + +template +struct ContextElementTraits> { + static Input get(ProcessingContext& ctx) + { + static_assert(always_static_assert_v>, "Should be Input const&"); + } +}; + template AlgorithmSpec::ProcessCallback adaptStatelessF(std::function callback) { @@ -177,6 +220,11 @@ AlgorithmSpec::ProcessCallback adaptStatelessP(R (*callback)(ARGS...)) /// inputs.get("someInt"); /// }} /// +/// and if you have C++20 enabled you can also do: +/// +/// AlgorithmSpec{[](Input<"someInt", int> someInt){ +/// someInt.value; // do something with the inputs +/// } /// Notice you can specify in any order any of InputRecord, DataAllocator, /// ConfigParamRegistry or any of the services which are usually hanging /// from the ServiceRegistry, e.g. ControlService. From abeda3ba165b0bbc0df6daeea73d3886a9087c22 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 14 Feb 2024 16:53:51 +0100 Subject: [PATCH 342/726] DPL: calibration workflow example --- Framework/TestWorkflows/CMakeLists.txt | 4 + .../src/o2DummyCalibrationWorkflow.cxx | 73 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index c2c89e19b894e..848337200a4d2 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -33,6 +33,10 @@ o2_add_dpl_workflow(diamond-workflow SOURCES src/o2DiamondWorkflow.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(dummy-calibration-workflow + SOURCES src/o2DummyCalibrationWorkflow.cxx + COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(diamond-workflow-leaky SOURCES src/o2DiamondWorkflowLeaky.cxx COMPONENT_NAME TestWorkflows) diff --git a/Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx b/Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx new file mode 100644 index 0000000000000..94916cfa36984 --- /dev/null +++ b/Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx @@ -0,0 +1,73 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/CallbackService.h" +#include "Framework/EndOfStreamContext.h" +#include + +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "counter", + .outputs = {OutputSpec{{"counter"}, "TST", "A1"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, ProcessingContext& pcx) { + static int counter = 0; + auto& aData = outputs.make(OutputRef{"counter"}); + aData = counter++; + if (counter == 10) { + pcx.services().get().endOfStream(); + } + })}, + }; + + DataProcessorSpec b{ + .name = "aggregator", + .inputs = {InputSpec{"x", "TST", "A1", Lifetime::Timeframe}}, + .outputs = {OutputSpec{{"average"}, "TST", "B1", Lifetime::Sporadic}}, + .algorithm = adaptStateful([](CallbackService& callbacks) { + static int sum = 0; + auto eosCallback = [](EndOfStreamContext &ctx) { + auto& aData = ctx.outputs().make(OutputRef{"average"}); + aData = sum; + ctx.services().get().endOfStream(); + }; + callbacks.set(eosCallback); + return adaptStateless([](Input<"x", int> const& x) + { + sum += x; + std::cout << "Sum: " << sum << std::endl; + }); })}; + + DataProcessorSpec c{.name = "publisher", + .inputs = {InputSpec{"average", "TST", "B1", Lifetime::Sporadic}}, + .algorithm = adaptStateless([](Input<"average", int> const& counter) { + std::cout << "Counter to publish: " << counter << std::endl; + })}; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{b}, + WorkflowSpec{c}); +} From a29393538806fdfa497239168460da9876bd0f07 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 15 Feb 2024 10:44:11 +0100 Subject: [PATCH 343/726] DPL: allow extracting data from processing context directly using arguments --- .../TPC/workflow/src/FileReaderWorkflow.cxx | 20 ++++---- .../Core/include/Framework/AlgorithmSpec.h | 48 +++++++++++++++++++ 2 files changed, 58 insertions(+), 10 deletions(-) diff --git a/Detectors/TPC/workflow/src/FileReaderWorkflow.cxx b/Detectors/TPC/workflow/src/FileReaderWorkflow.cxx index c50b5685cdcd0..091674b447105 100644 --- a/Detectors/TPC/workflow/src/FileReaderWorkflow.cxx +++ b/Detectors/TPC/workflow/src/FileReaderWorkflow.cxx @@ -38,13 +38,13 @@ void customize(std::vector& workflowOptions) using namespace o2::framework; -enum struct Input { Clusters, - Tracks +enum struct InputType { Clusters, + Tracks }; -const std::unordered_map InputMap{ - {"clusters", Input::Clusters}, - {"tracks", Input::Tracks}}; +const std::unordered_map InputMap{ + {"clusters", InputType::Clusters}, + {"tracks", InputType::Tracks}}; /// MC info is processed by default, disabled by using command line option `--disable-mc` /// @@ -56,24 +56,24 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) auto inputType = cfgc.options().get("input-type"); bool doMC = not cfgc.options().get("disable-mc"); - std::vector inputTypes; + std::vector inputTypes; try { - inputTypes = o2::RangeTokenizer::tokenize(inputType, [](std::string const& token) { return InputMap.at(token); }); + inputTypes = o2::RangeTokenizer::tokenize(inputType, [](std::string 
const& token) { return InputMap.at(token); }); } catch (std::out_of_range&) { throw std::invalid_argument(std::string("invalid input type: ") + inputType); } - auto isEnabled = [&inputTypes](Input type) { + auto isEnabled = [&inputTypes](InputType type) { return std::find(inputTypes.begin(), inputTypes.end(), type) != inputTypes.end(); }; - if (isEnabled(Input::Clusters)) { + if (isEnabled(InputType::Clusters)) { specs.emplace_back(o2::tpc::getClusterReaderSpec(doMC)); if (!getenv("DPL_DISABLE_TPC_TRIGGER_READER") || atoi(getenv("DPL_DISABLE_TPC_TRIGGER_READER")) != 1) { specs.emplace_back(o2::tpc::getTPCTriggerReaderSpec()); } } - if (isEnabled(Input::Tracks)) { + if (isEnabled(InputType::Tracks)) { specs.push_back(o2::tpc::getTPCTrackReaderSpec(doMC)); } diff --git a/Framework/Core/include/Framework/AlgorithmSpec.h b/Framework/Core/include/Framework/AlgorithmSpec.h index 98f7f18e0d24d..e98a3abefbce1 100644 --- a/Framework/Core/include/Framework/AlgorithmSpec.h +++ b/Framework/Core/include/Framework/AlgorithmSpec.h @@ -84,6 +84,33 @@ struct AlgorithmPlugin { virtual AlgorithmSpec create() = 0; }; +template +struct StringLiteral { + constexpr StringLiteral(const char (&str)[N]) + { + std::copy_n(str, N, value); + } + + char value[N]; +}; + +// Allow fetching inputs from the context using a string literal. +template +struct Input { + // The size of the string is available as a constant expression. + static constexpr auto size = sizeof(lit.value); + // and so is the string's content. + static constexpr auto contents = lit.value; + ProcessingContext& ctx; + Input(ProcessingContext& c) : ctx(c) + { + } + operator T const&() const + { + return ctx.inputs().template get(lit.value); + } +}; + template > struct ContextElementTraits { static decltype(auto) get(ProcessingContext& ctx) @@ -137,6 +164,22 @@ struct ContextElementTraits { } }; +template +struct ContextElementTraits const> { + static Input get(ProcessingContext& ctx) + { + return Input{ctx}; + } +}; + +template +struct ContextElementTraits> { + static Input get(ProcessingContext& ctx) + { + static_assert(always_static_assert_v>, "Should be Input const&"); + } +}; + template AlgorithmSpec::ProcessCallback adaptStatelessF(std::function callback) { @@ -177,6 +220,11 @@ AlgorithmSpec::ProcessCallback adaptStatelessP(R (*callback)(ARGS...)) /// inputs.get("someInt"); /// }} /// +/// and if you have C++20 enabled you can also do: +/// +/// AlgorithmSpec{[](Input<"someInt", int> someInt){ +/// someInt.value; // do something with the inputs +/// } /// Notice you can specify in any order any of InputRecord, DataAllocator, /// ConfigParamRegistry or any of the services which are usually hanging /// from the ServiceRegistry, e.g. ControlService. 
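For orientation, the change above lets a stateless process callback pull a named input directly from its lambda signature. Below is a minimal sketch of a consumer written that way, modelled on the dummy calibration workflow earlier in this series; the "x" binding and the TST/A1 data description are illustrative and not part of this patch:

```cpp
// Sketch only: relies on the Input<"binding", T> helper added to AlgorithmSpec.h above.
DataProcessorSpec consumer{
  .name = "consumer",
  .inputs = {InputSpec{"x", "TST", "A1", Lifetime::Timeframe}},
  .algorithm = AlgorithmSpec{adaptStateless(
    [](Input<"x", int> const& x) {
      // Input<"x", int> converts implicitly to int const&, so x can be used
      // directly as the value of the "x" input for this timeslice.
      std::cout << "received " << x << std::endl;
    })},
};
```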
From 0266b45c904e4169b28cf56b197bd8a7278576a3 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 15 Feb 2024 10:44:11 +0100 Subject: [PATCH 344/726] DPL: calibration workflow example --- Framework/TestWorkflows/CMakeLists.txt | 4 + .../src/o2DummyCalibrationWorkflow.cxx | 73 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index c2c89e19b894e..848337200a4d2 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -33,6 +33,10 @@ o2_add_dpl_workflow(diamond-workflow SOURCES src/o2DiamondWorkflow.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(dummy-calibration-workflow + SOURCES src/o2DummyCalibrationWorkflow.cxx + COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(diamond-workflow-leaky SOURCES src/o2DiamondWorkflowLeaky.cxx COMPONENT_NAME TestWorkflows) diff --git a/Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx b/Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx new file mode 100644 index 0000000000000..94916cfa36984 --- /dev/null +++ b/Framework/TestWorkflows/src/o2DummyCalibrationWorkflow.cxx @@ -0,0 +1,73 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/CallbackService.h" +#include "Framework/EndOfStreamContext.h" +#include + +#include +#include + +using namespace o2::framework; + +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec a{ + .name = "counter", + .outputs = {OutputSpec{{"counter"}, "TST", "A1"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, ProcessingContext& pcx) { + static int counter = 0; + auto& aData = outputs.make(OutputRef{"counter"}); + aData = counter++; + if (counter == 10) { + pcx.services().get().endOfStream(); + } + })}, + }; + + DataProcessorSpec b{ + .name = "aggregator", + .inputs = {InputSpec{"x", "TST", "A1", Lifetime::Timeframe}}, + .outputs = {OutputSpec{{"average"}, "TST", "B1", Lifetime::Sporadic}}, + .algorithm = adaptStateful([](CallbackService& callbacks) { + static int sum = 0; + auto eosCallback = [](EndOfStreamContext &ctx) { + auto& aData = ctx.outputs().make(OutputRef{"average"}); + aData = sum; + ctx.services().get().endOfStream(); + }; + callbacks.set(eosCallback); + return adaptStateless([](Input<"x", int> const& x) + { + sum += x; + std::cout << "Sum: " << sum << std::endl; + }); })}; + + DataProcessorSpec c{.name = "publisher", + .inputs = {InputSpec{"average", "TST", "B1", Lifetime::Sporadic}}, + .algorithm = adaptStateless([](Input<"average", int> const& counter) { + std::cout << "Counter to publish: " << counter << std::endl; + })}; + + return workflow::concat(WorkflowSpec{a}, + WorkflowSpec{b}, + WorkflowSpec{c}); +} From ac00ed6b19021bc9d5e765aa02559cda30cf1515 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:28:17 +0100 Subject: [PATCH 345/726] DPL: allow Signposts to work as LOGF(level) replacement This introduces a specialised API to emit signposts which can be used in place of LOG(info), LOG(warning) , LOG(error). Using it, errors and warning messages will appear as signposts as well (matching the correct interval and so on), while the info messages will appear as signposts if the signpost is enabled, without duplication, simplifying grepping. --- .../Foundation/include/Framework/Signpost.h | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 6dad1ed9f6341..7750687088a1d 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -114,6 +114,21 @@ o2_log_handle_t* o2_walk_logs(bool (*callback)(char const* name, void* log, void #define O2_LOG_ENABLED(log) false #endif +#if !defined(O2_LOG_MACRO_RAW) && __has_include("Framework/Logger.h") +#include "Framework/Logger.h" +#define O2_LOG_MACRO_RAW(level, ...) LOGF(level, __VA_ARGS__) +#elif !defined(O2_LOG_MACRO_RAW) +// If we do not have the fairlogger, we simply print out the signposts to the console. +// This is useful for things like the tests, which this way do not need to depend on the FairLogger. +#define O2_LOG_MACRO_RAW(level, format, ...) 
\ + do { \ + printf(#level ":" #format, __VA_ARGS__); \ + printf("\n"); \ + } while (0) +#else +#define O2_LOG_MACRO_RAW(...) +#endif // O2_LOG_MACRO_RAW + #if !defined(O2_LOG_MACRO) && __has_include("Framework/Logger.h") #include "Framework/Logger.h" #define O2_LOG_MACRO(...) LOGF(info, __VA_ARGS__) @@ -480,6 +495,38 @@ void o2_debug_log_set_stacktrace(_o2_log_t* log, int stacktrace) _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ } \ }) + +// Similar to the above, however it will print a normal info message if the signpost is not enabled. +#define O2_SIGNPOST_EVENT_EMIT_INFO(log, id, name, format, ...) __extension__({ \ + if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) { \ + O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) { \ + _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } else { \ + O2_LOG_MACRO_RAW(info, format, ##__VA_ARGS__); \ + } \ +}) + +// Similar to the above, however it will always print a normal error message regardless of the signpost being enabled or not. +#define O2_SIGNPOST_EVENT_EMIT_ERROR(log, id, name, format, ...) __extension__({ \ + if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) { \ + O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) { \ + _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } \ + O2_LOG_MACRO_RAW(error, format, ##__VA_ARGS__); \ +}) + +// Similar to the above, however it will also print a normal warning message regardless of the signpost being enabled or not. +#define O2_SIGNPOST_EVENT_EMIT_WARN(log, id, name, format, ...) __extension__({ \ + if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) { \ + O2_SIGNPOST_EVENT_EMIT_MAC(log, id, name, format, ##__VA_ARGS__); \ + } else if (O2_BUILTIN_UNLIKELY(private_o2_log_##log->stacktrace)) { \ + _o2_signpost_event_emit(private_o2_log_##log, id, name, remove_engineering_type(format).data(), ##__VA_ARGS__); \ + } \ + O2_RAW_LOG_RAW(warn, ##__VA_ARGS__); \ +}) + #define O2_SIGNPOST_START(log, id, name, format, ...) 
\ if (O2_BUILTIN_UNLIKELY(O2_SIGNPOST_ENABLED_MAC(log))) { \ O2_SIGNPOST_START_MAC(log, id, name, format, ##__VA_ARGS__); \ From eeb81b848f0cdb610be7994585d2e7af445787a1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:28:17 +0100 Subject: [PATCH 346/726] DPL: allow to enable signposts for a given data processor --- Framework/Core/src/DeviceSpecHelpers.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index e9df320fe8b95..43ba0f2d250e5 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1497,6 +1497,7 @@ void DeviceSpecHelpers::prepareArguments(bool defaultQuiet, bool defaultStopped, realOdesc.add_options()("network-interface", bpo::value()); realOdesc.add_options()("early-forward-policy", bpo::value()); realOdesc.add_options()("session", bpo::value()); + realOdesc.add_options()("signposts", bpo::value()); filterArgsFct(expansions.we_wordc, expansions.we_wordv, realOdesc); wordfree(&expansions); return; From fba4259e31e071c81df3d4f3adbf594b426097a1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:35:24 +0100 Subject: [PATCH 347/726] DPL: improve Signposts documentation --- Framework/Core/COOKBOOK.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/Framework/Core/COOKBOOK.md b/Framework/Core/COOKBOOK.md index 1a27079c3dc13..c24bb1bdbf542 100644 --- a/Framework/Core/COOKBOOK.md +++ b/Framework/Core/COOKBOOK.md @@ -543,12 +543,11 @@ and then you can either upload it to https://www.speedscope.app or use chrome:// Debug log entries for several DPL components are now provided via the Signpost API. -Such streams can be enabled using the "Signpost" part of the Device Inspector GUI. +Such streams can be enabled using the "Signpost" part of the Device Inspector GUI or alternatively +from the command line, by specifying the `--signposts ,...` option or exporting the variable `DPL_SIGNPOSTS=,...`). -Alternatively for the case were the GUI is not available, you can attach to a process via GDB / LLDB and invoke the `o2_debug_log_set_stacktrace` function, passing it the log you want to enable and the amount of stacktrace you want to see. E.g. to enable a log created with `O2_DECLARE_DYNAMIC_LOG(completion)` you can use: +Streams can be explicitly enabled or disabled in code using the `O2_SIGNPOST_ENABLE()` and `O2_SIGNPOST_DISABLE` macros. -``` -p o2_debug_log_set_stacktrace(private_o2_log_completion, 1) -``` +If a process is already running and you wish to enable one or more of its signposts logs, you can do so using the `o2-log` utility, passing the address of the log to enable and the PID of the running process. E.g. `o2-log -p -a `. -If you are on a mac, you can also use Instruments to visualise your Signpost, just like any other macOS application. In order to do so you need to enable the "Signpost" instrument, making sure you add `ch.cern.aliceo2.completion` to the list of loggers to watch. +Finally, on macOS, you can also use Instruments to visualise your Signpost, just like any other macOS application. In order to do so you need to enable the "Signpost" instrument, making sure you add `ch.cern.aliceo2.completion` to the list of loggers to watch. 
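For illustration, a minimal sketch of the emitting side that the cookbook text above refers to. The my_component log name, the processChunk function and the messages are invented for the example; only the macros come from Framework/Signpost.h:

```cpp
#include "Framework/Signpost.h"

// One log per stream of related events. It stays silent until enabled at
// runtime as described above (Device Inspector GUI, --signposts / DPL_SIGNPOSTS, or o2-log).
O2_DECLARE_DYNAMIC_LOG(my_component);

void processChunk(void* chunk, size_t nBytes)
{
  // Derive the interval id from the object it describes, so START and END match up.
  O2_SIGNPOST_ID_FROM_POINTER(sid, my_component, chunk);
  O2_SIGNPOST_START(my_component, sid, "processing", "Start processing %zu bytes", nBytes);
  // ... the actual work on the chunk would go here ...
  O2_SIGNPOST_EVENT_EMIT(my_component, sid, "processing", "Intermediate step done for chunk %p", chunk);
  O2_SIGNPOST_END(my_component, sid, "processing", "Done with chunk %p", chunk);
}
```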
From 679a07c24f3d9b98d4bb0aebf3de7f454300d817 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:36:18 +0100 Subject: [PATCH 348/726] DPL: Improve Signposts --- Framework/CHANGELOG.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md index 2973174f81d28..1134cfd0ee83d 100644 --- a/Framework/CHANGELOG.md +++ b/Framework/CHANGELOG.md @@ -1,4 +1,14 @@ -* Improved C++20 support. Most of the macros which were failing when C++20 - support is enabled now seem to work fine. The issue seems to be related to - some forward declaration logic which seems to be not working correctly in - ROOT 6.30.01. The issue is discussed in and it seems to be not trivial to fix with the current ROOT version. +# 2024-02-16: Improved Signposts. + +In particular: + +* New API so that Signposts can now act as a replacement of LOGF(info), LOGF(error), LOGF(warn). +* Improved documentation, including some hints about how to use `o2-log`. +* Bug fix to get `--signposts` work on a per device basis. + +# 2024-01-10: Improved C++20 support. + +Most of the macros which were failing when C++20 +support is enabled now seem to work fine. The issue seems to be related to +some forward declaration logic which seems to be not working correctly in +ROOT 6.30.01. The issue is discussed in and it seems to be not trivial to fix with the current ROOT version. From 4e66c5407b5a9a43d5647d892bca634fa1b2cd4c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:48:45 +0100 Subject: [PATCH 349/726] DPL: improve Signposts documentation --- Framework/Core/COOKBOOK.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/Framework/Core/COOKBOOK.md b/Framework/Core/COOKBOOK.md index 1a27079c3dc13..d92006e63d650 100644 --- a/Framework/Core/COOKBOOK.md +++ b/Framework/Core/COOKBOOK.md @@ -543,12 +543,10 @@ and then you can either upload it to https://www.speedscope.app or use chrome:// Debug log entries for several DPL components are now provided via the Signpost API. -Such streams can be enabled using the "Signpost" part of the Device Inspector GUI. +Such streams can be enabled using the "Signpost" part of the Device Inspector GUI or alternatively from the command line, by specifying the `--signposts ,...` option or exporting the variable `DPL_SIGNPOSTS=,...`). -Alternatively for the case were the GUI is not available, you can attach to a process via GDB / LLDB and invoke the `o2_debug_log_set_stacktrace` function, passing it the log you want to enable and the amount of stacktrace you want to see. E.g. to enable a log created with `O2_DECLARE_DYNAMIC_LOG(completion)` you can use: +Streams can be explicitly enabled or disabled in code using the `O2_SIGNPOST_ENABLE()` and `O2_SIGNPOST_DISABLE` macros. -``` -p o2_debug_log_set_stacktrace(private_o2_log_completion, 1) -``` +If a process is already running and you wish to enable one or more of its signposts logs, you can do so using the `o2-log` utility, passing the address of the log to enable and the PID of the running process. E.g. `o2-log -p -a `. -If you are on a mac, you can also use Instruments to visualise your Signpost, just like any other macOS application. In order to do so you need to enable the "Signpost" instrument, making sure you add `ch.cern.aliceo2.completion` to the list of loggers to watch. 
+Finally, on macOS, you can also use Instruments to visualise your Signpost, just like any other macOS application. In order to do so you need to enable the "Signpost" instrument, making sure you add `ch.cern.aliceo2.completion` to the list of loggers to watch. From 1d21b4b9df67042f908d1f277a5cb3d105b0ecfe Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:48:45 +0100 Subject: [PATCH 350/726] DPL: Improve Signposts --- Framework/CHANGELOG.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md index 2973174f81d28..e7278733de246 100644 --- a/Framework/CHANGELOG.md +++ b/Framework/CHANGELOG.md @@ -1,4 +1,13 @@ -* Improved C++20 support. Most of the macros which were failing when C++20 - support is enabled now seem to work fine. The issue seems to be related to - some forward declaration logic which seems to be not working correctly in - ROOT 6.30.01. The issue is discussed in and it seems to be not trivial to fix with the current ROOT version. +# 2024-02-16: Improved Signposts. + +In particular: + +* New API so that Signposts can now act as a replacement of LOGF(info), LOGF(error), LOGF(warn). +* Improved documentation, including some hints about how to use `o2-log`. +* Bug fix to get `--signposts` work on a per device basis. + +# 2024-01-10: Improved C++20 support. + +Most of the macros which were failing when C++20 support is enabled now seem to work fine. The issue seems to be related to +some forward declaration logic which seems to be not working correctly in +ROOT 6.30.01. The issue is discussed in and it seems to be not trivial to fix with the current ROOT version. From 671e9dddbeb7faea12c76d8fe3aee640dfb1eea0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:48:45 +0100 Subject: [PATCH 351/726] DPL: introduce flag to signal that data should not be dropped at EoS --- .../Headers/include/Headers/DataHeader.h | 5 +++-- .../include/Framework/DataProcessingHeader.h | 3 +++ .../Core/include/Framework/DeviceContext.h | 10 ++++++++++ .../Core/include/Framework/DeviceStateEnums.h | 2 ++ Framework/Core/include/Framework/TimingInfo.h | 4 +++- Framework/Core/src/DataAllocator.cxx | 1 + Framework/Core/src/DataProcessingDevice.cxx | 20 ++++++++++++++----- Framework/Core/src/DeviceSpecHelpers.cxx | 4 +++- Framework/Core/src/O2ControlHelpers.cxx | 2 ++ Framework/Core/src/runDataProcessing.cxx | 2 ++ 10 files changed, 44 insertions(+), 9 deletions(-) diff --git a/DataFormats/Headers/include/Headers/DataHeader.h b/DataFormats/Headers/include/Headers/DataHeader.h index c37eff9b34f20..e4ddaded20aba 100644 --- a/DataFormats/Headers/include/Headers/DataHeader.h +++ b/DataFormats/Headers/include/Headers/DataHeader.h @@ -372,8 +372,9 @@ struct BaseHeader { union { uint32_t flags; struct { - uint32_t flagsNextHeader : 1, // do we have a next header after this one? - flagsUnused : 31; // currently unused + uint32_t flagsNextHeader : 1, // do we have a next header after this one? 
+ flagsReserved : 15, // reserved for future use + flagsDerivedHeader : 16; // reserved for usage by the derived header }; }; diff --git a/Framework/Core/include/Framework/DataProcessingHeader.h b/Framework/Core/include/Framework/DataProcessingHeader.h index 5c068b4e4179a..484dbb9d51a8e 100644 --- a/Framework/Core/include/Framework/DataProcessingHeader.h +++ b/Framework/Core/include/Framework/DataProcessingHeader.h @@ -42,6 +42,9 @@ namespace o2::framework /// @ingroup aliceo2_dataformats_dataheader struct DataProcessingHeader : public header::BaseHeader { static constexpr uint64_t DUMMY_CREATION_TIME_OFFSET = 0x8000000000000000; + // The following flags are used to indicate the behavior of the data processing + static constexpr int32_t KEEP_AT_EOS_FLAG = 1; + /// We return some number of milliseconds, offsetting int by 0x8000000000000000 /// to make sure we can understand when the dummy constructor of DataProcessingHeader was /// used without overriding it with an actual real time from epoch. diff --git a/Framework/Core/include/Framework/DeviceContext.h b/Framework/Core/include/Framework/DeviceContext.h index 3777e7f608b75..04cf663d5e276 100644 --- a/Framework/Core/include/Framework/DeviceContext.h +++ b/Framework/Core/include/Framework/DeviceContext.h @@ -28,8 +28,18 @@ struct ComputingQuotaStats; struct DeviceContext { ComputingQuotaStats* quotaStats = nullptr; uv_timer_t* gracePeriodTimer = nullptr; + uv_timer_t* dataProcessingGracePeriodTimer = nullptr; uv_signal_t* sigusr1Handle = nullptr; int expectedRegionCallbacks = 0; + // The timeout for the data processing to stop on this device. + // After this is reached, incoming data not marked to be kept will + // be dropped and the data processing will be stopped. However the + // calibrations will still be done and objects resulting from calibrations + // will be marked to be kept. + int dataProcessingTimeout = 0; + // The timeout for the whole processing to stop on this device. + // This includes the grace period for processing and the time + // for the calibrations to be done. int exitTransitionTimeout = 0; }; diff --git a/Framework/Core/include/Framework/DeviceStateEnums.h b/Framework/Core/include/Framework/DeviceStateEnums.h index 291faac0ac982..a4c02c70c2bf6 100644 --- a/Framework/Core/include/Framework/DeviceStateEnums.h +++ b/Framework/Core/include/Framework/DeviceStateEnums.h @@ -30,6 +30,8 @@ enum struct TransitionHandlingState { NoTransition, /// A transition was notified to be requested Requested, + /// Only calibrations can be done + DataProcessingExpired, /// A transition needs to be fullfilled ASAP Expired }; diff --git a/Framework/Core/include/Framework/TimingInfo.h b/Framework/Core/include/Framework/TimingInfo.h index 84f3971ad3a4a..1734da8dd3941 100644 --- a/Framework/Core/include/Framework/TimingInfo.h +++ b/Framework/Core/include/Framework/TimingInfo.h @@ -36,9 +36,11 @@ struct TimingInfo { /// from a new run, as being processed by the current stream. /// FIXME: for now this is the same as the above. bool streamRunNumberChanged = false; + /// Wether this kind of data should be flushed during end of stream. 
+ bool keepAtEndOfStream = false; static bool timesliceIsTimer(size_t timeslice) { return timeslice > 1652945069870351; } - bool isTimer() const { return timesliceIsTimer(timeslice); }; + [[nodiscard]] bool isTimer() const { return timesliceIsTimer(timeslice); }; }; } // namespace o2::framework diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index de500cd6d6c4e..157e530392d4e 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -118,6 +118,7 @@ fair::mq::MessagePtr DataAllocator::headerMessageFromOutput(Output const& spec, dh.runNumber = timingInfo.runNumber; DataProcessingHeader dph{timingInfo.timeslice, 1, timingInfo.creation}; + static_cast(dph).flagsDerivedHeader |= timingInfo.keepAtEndOfStream ? DataProcessingHeader::KEEP_AT_EOS_FLAG : 0; auto& proxy = mRegistry.get(); auto* transport = proxy.getOutputTransport(routeIndex); diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 50934bd59a729..6225001aba828 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -969,6 +969,7 @@ void DataProcessingDevice::InitTask() } deviceContext.expectedRegionCallbacks = std::stoi(fConfig->GetValue("expected-region-callbacks")); + deviceContext.dataProcessingTimeout = std::stoi(fConfig->GetValue("data-processing-timeout")); deviceContext.exitTransitionTimeout = std::stoi(fConfig->GetValue("exit-transition-timeout")); for (auto& channel : GetChannels()) { @@ -1611,9 +1612,13 @@ void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) void DataProcessingDevice::doRun(ServiceRegistryRef ref) { auto& context = ref.get(); + O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context); auto switchState = [ref](StreamingState newState) { auto& state = ref.get(); - LOG(detail) << "New state " << (int)newState << " old state " << (int)state.streaming; + auto& context = ref.get(); + O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context); + O2_SIGNPOST_END(device, dpid, "state", "End of processing state %d", (int)state.streaming); + O2_SIGNPOST_START(device, dpid, "state", "Starting processing state %d", (int)newState); state.streaming = newState; ref.get().notifyStreamingState(state.streaming); }; @@ -1652,7 +1657,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) } if (state.streaming == StreamingState::EndOfStreaming) { - LOGP(detail, "We are in EndOfStreaming. Flushing queues."); + O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Flushing queues."); // We keep processing data until we are Idle. // FIXME: not sure this is the correct way to drain the queues, but // I guess we will see. @@ -1663,6 +1668,11 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) while (DataProcessingDevice::tryDispatchComputation(ref, context.completed) && hasOnlyGenerated == false) { relayer.processDanglingInputs(context.expirationHandlers, *context.registry, false); } + + auto &timingInfo = ref.get(); + // We should keep the data generated at end of stream only for those + // which are not sources. 
+ timingInfo.keepAtEndOfStream = (hasOnlyGenerated == false); EndOfStreamContext eosContext{*context.registry, ref.get()}; context.preEOSCallbacks(eosContext); @@ -1673,7 +1683,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) context.postEOSCallbacks(eosContext); for (auto& channel : spec.outputChannels) { - LOGP(detail, "Sending end of stream to {}", channel.name); + O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Sending end of stream to %s.", channel.name.c_str()); DataProcessingHelpers::sendEndOfStream(ref, channel); } // This is needed because the transport is deleted before the device. @@ -1685,7 +1695,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) *context.wasActive = true; } // On end of stream we shut down all output pollers. - LOGP(detail, "Shutting down output pollers"); + O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Shutting down output pollers."); for (auto& poller : state.activeOutputPollers) { uv_poll_stop(poller); } @@ -1694,7 +1704,7 @@ void DataProcessingDevice::doRun(ServiceRegistryRef ref) if (state.streaming == StreamingState::Idle) { // On end of stream we shut down all output pollers. - LOGP(detail, "We are in Idle. Shutting down output pollers."); + O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Shutting down output pollers."); for (auto& poller : state.activeOutputPollers) { uv_poll_stop(poller); } diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 43ba0f2d250e5..526719907519f 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -1475,6 +1475,7 @@ void DeviceSpecHelpers::prepareArguments(bool defaultQuiet, bool defaultStopped, realOdesc.add_options()("child-driver", bpo::value()); realOdesc.add_options()("rate", bpo::value()); realOdesc.add_options()("exit-transition-timeout", bpo::value()); + realOdesc.add_options()("data-processing-timeout", bpo::value()); realOdesc.add_options()("expected-region-callbacks", bpo::value()); realOdesc.add_options()("timeframes-rate-limit", bpo::value()); realOdesc.add_options()("environment", bpo::value()); @@ -1659,6 +1660,7 @@ boost::program_options::options_description DeviceSpecHelpers::getForwardedDevic ("control-port", bpo::value(), "Utility port to be used by O2 Control") // ("rate", bpo::value(), "rate for a data source device (Hz)") // ("exit-transition-timeout", bpo::value(), "timeout before switching to READY state") // + ("data-processing-timeout", bpo::value(), "timeout before switching to calibration processing mode") // ("expected-region-callbacks", bpo::value(), "region callbacks to expect before starting") // ("timeframes-rate-limit", bpo::value()->default_value("0"), "how many timeframes can be in fly") // ("shm-monitor", bpo::value(), "whether to use the shared memory monitor") // @@ -1689,7 +1691,7 @@ boost::program_options::options_description DeviceSpecHelpers::getForwardedDevic ("infologger-mode", bpo::value(), "O2_INFOLOGGER_MODE override") // ("infologger-severity", bpo::value(), "minimun FairLogger severity which goes to info logger") // ("dpl-tracing-flags", bpo::value(), "pipe separated list of events to trace") // - ("signposts", bpo::value(), // + ("signposts", bpo::value()->default_value(defaultSignposts), // "comma separated list of signposts to enable (any of `completion`, `data_processor_context`, `stream_context`, `device`, `monitoring_service`)") // ("child-driver", bpo::value(), "external driver to start childs with (e.g. 
valgrind)"); // diff --git a/Framework/Core/src/O2ControlHelpers.cxx b/Framework/Core/src/O2ControlHelpers.cxx index 88132572a6210..e19e4a995d505 100644 --- a/Framework/Core/src/O2ControlHelpers.cxx +++ b/Framework/Core/src/O2ControlHelpers.cxx @@ -261,6 +261,8 @@ void dumpCommand(std::ostream& dumpOut, const DeviceExecution& execution, std::s dumpOut << indLevel << indScheme << "- \"-b\"\n"; dumpOut << indLevel << indScheme << "- \"--exit-transition-timeout\"\n"; dumpOut << indLevel << indScheme << "- \"'{{ exit_transition_timeout }}'\"\n"; + dumpOut << indLevel << indScheme << "- \"--data-processing-timeout\"\n"; + dumpOut << indLevel << indScheme << "- \"'{{ data_processing_timeout }}'\"\n"; dumpOut << indLevel << indScheme << "- \"--monitoring-backend\"\n"; dumpOut << indLevel << indScheme << "- \"'{{ monitoring_dpl_url }}'\"\n"; dumpOut << indLevel << indScheme << "- \"--session\"\n"; diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 64c034ebb8533..0ef7aa8bffb50 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -1006,6 +1006,7 @@ int doChild(int argc, char** argv, ServiceRegistry& serviceRegistry, // declared in the workflow definition are allowed. runner.AddHook([&spec, driverConfig, defaultDriverClient](fair::mq::DeviceRunner& r) { std::string defaultExitTransitionTimeout = "0"; + std::string defaultDataProcessingTimeout = "0"; std::string defaultInfologgerMode = ""; o2::framework::DeploymentMode deploymentMode = o2::framework::DefaultsHelpers::deploymentMode(); if (deploymentMode == o2::framework::DeploymentMode::OnlineDDS) { @@ -1024,6 +1025,7 @@ int doChild(int argc, char** argv, ServiceRegistry& serviceRegistry, ("signposts", bpo::value()->default_value(defaultSignposts ? 
defaultSignposts : ""), "comma separated list of signposts to enable") // ("expected-region-callbacks", bpo::value()->default_value("0"), "how many region callbacks we are expecting") // ("exit-transition-timeout", bpo::value()->default_value(defaultExitTransitionTimeout), "how many second to wait before switching from RUN to READY") // + ("data-processing-timeout", bpo::value()->default_value(defaultDataProcessingTimeout), "how many second to wait before switching from RUN to CALIBRATION") // ("timeframes-rate-limit", bpo::value()->default_value("0"), "how many timeframe can be in fly at the same moment (0 disables)") // ("configuration,cfg", bpo::value()->default_value("command-line"), "configuration backend") // ("infologger-mode", bpo::value()->default_value(defaultInfologgerMode), "O2_INFOLOGGER_MODE override"); From aca46d026ad3649cfdd67ed69404432dcf0fdb03 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:48:45 +0100 Subject: [PATCH 352/726] DPL: attempt to enable calibration mode --- Framework/Core/src/DataProcessingDevice.cxx | 84 ++++++++++++++------- 1 file changed, 55 insertions(+), 29 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 6225001aba828..0710a6e4232fe 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -120,12 +120,21 @@ void on_idle_timer(uv_timer_t* handle) void on_transition_requested_expired(uv_timer_t* handle) { ZoneScopedN("Transition expired"); - auto* state = (DeviceState*)handle->data; + auto *state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; - LOGP(info, "Timer expired. Forcing transition to READY"); + LOGP(info, "Grace period for Data Processing & calibration expired."); state->transitionHandling = TransitionHandlingState::Expired; } +void on_data_processing_grace_expired(uv_timer_t* handle) +{ + ZoneScopedN("Transition expired"); + auto *state = (DeviceState*)handle->data; + state->loopReason |= DeviceState::TIMER_EXPIRED; + LOGP(info, "Grace period for Data Processing Expired. Waiting for calibration."); + state->transitionHandling = TransitionHandlingState::DataProcessingExpired; +} + void on_communication_requested(uv_async_t* s) { auto* state = (DeviceState*)s->data; @@ -219,12 +228,15 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi // one with the thread id. For the moment we simply use the first one. void run_callback(uv_work_t* handle) { - ZoneScopedN("run_callback"); auto* task = (TaskStreamInfo*)handle->data; auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)}; + // We create a new signpost interval for this specific data processor. Same id, same data processor. + auto& dataProcessorContext = ref.get(); + O2_SIGNPOST_ID_FROM_POINTER(sid, device, &dataProcessorContext); + O2_SIGNPOST_START(device, sid, "run_callback", "Starting run callback on stream %d", task->id.index); DataProcessingDevice::doPrepare(ref); DataProcessingDevice::doRun(ref); - // FrameMark; + O2_SIGNPOST_END(device, sid, "run_callback", "Done processing data for stream %d", task->id.index); } // Once the processing in a thread is done, this is executed on the main thread. 
@@ -1228,6 +1240,8 @@ void DataProcessingDevice::Run() auto& state = ref.get(); state.loopReason = DeviceState::LoopReason::FIRST_LOOP; bool firstLoop = true; + O2_SIGNPOST_ID_FROM_POINTER(lid, device, state.loop); + O2_SIGNPOST_START(device, lid, "device_state", "First iteration of the device loop"); while (state.transitionHandling != TransitionHandlingState::Expired) { if (state.nextFairMQState.empty() == false) { (void)this->ChangeState(state.nextFairMQState.back()); @@ -1259,13 +1273,13 @@ void DataProcessingDevice::Run() state.loopReason |= DeviceState::LoopReason::PREVIOUSLY_ACTIVE; } if (NewStatePending()) { + O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "New state pending. Waiting for it to be handled."); shouldNotWait = true; state.loopReason |= DeviceState::LoopReason::NEW_STATE_PENDING; } if (state.transitionHandling == TransitionHandlingState::NoTransition && NewStatePending()) { state.transitionHandling = TransitionHandlingState::Requested; auto& deviceContext = ref.get(); - auto timeout = deviceContext.exitTransitionTimeout; // Check if we only have timers bool onlyTimers = true; auto& spec = ref.get(); @@ -1278,31 +1292,40 @@ void DataProcessingDevice::Run() if (onlyTimers) { state.streaming = StreamingState::EndOfStreaming; } - if (timeout != 0 && state.streaming != StreamingState::Idle) { + + if (deviceContext.exitTransitionTimeout != 0 && state.streaming != StreamingState::Idle) { state.transitionHandling = TransitionHandlingState::Requested; ref.get().call(ServiceRegistryRef{ref}); uv_update_time(state.loop); - uv_timer_start(deviceContext.gracePeriodTimer, on_transition_requested_expired, timeout * 1000, 0); + uv_timer_start(deviceContext.gracePeriodTimer, on_transition_requested_expired, deviceContext.exitTransitionTimeout * 1000, 0); + // In case we have a calibration grace period it will always be longer than the data processing timeout + if (deviceContext.dataProcessingTimeout != deviceContext.exitTransitionTimeout) { + uv_timer_start(deviceContext.dataProcessingGracePeriodTimer, on_data_processing_grace_expired, deviceContext.dataProcessingTimeout * 1000, 0); + } else { + deviceContext.dataProcessingGracePeriodTimer = nullptr; + } if (mProcessingPolicies.termination == TerminationPolicy::QUIT) { - LOGP(info, "New state requested. Waiting for {} seconds before quitting.", timeout); + O2_SIGNPOST_EVENT_EMIT_INFO(device, lid, "run_loop", "New state requested. Waiting for %d seconds before quitting.", (int)deviceContext.exitTransitionTimeout); } else { - LOGP(info, "New state requested. Waiting for {} seconds before switching to READY state.", timeout); + O2_SIGNPOST_EVENT_EMIT_INFO(device, lid, "run_loop", "New state requested. Waiting for %d seconds before switching to READY state.", (int)deviceContext.exitTransitionTimeout); } } else { state.transitionHandling = TransitionHandlingState::Expired; - if (timeout == 0 && mProcessingPolicies.termination == TerminationPolicy::QUIT) { - LOGP(info, "New state requested. No timeout set, quitting immediately as per --completion-policy"); - } else if (timeout == 0 && mProcessingPolicies.termination != TerminationPolicy::QUIT) { - LOGP(info, "New state requested. No timeout set, switching to READY state immediately"); + if (deviceContext.exitTransitionTimeout == 0 && mProcessingPolicies.termination == TerminationPolicy::QUIT) { + O2_SIGNPOST_EVENT_EMIT_INFO(device, lid, "run_loop", "New state requested. 
No timeout set, quitting immediately as per --completion-policy"); + } else if (deviceContext.exitTransitionTimeout == 0 && mProcessingPolicies.termination != TerminationPolicy::QUIT) { + O2_SIGNPOST_EVENT_EMIT_INFO(device, lid, "run_loop", "New state requested. No timeout set, switching to READY state immediately"); } else if (mProcessingPolicies.termination == TerminationPolicy::QUIT) { - LOGP(info, "New state pending and we are already idle, quitting immediately as per --completion-policy"); + O2_SIGNPOST_EVENT_EMIT_INFO(device, lid, "run_loop", "New state pending and we are already idle, quitting immediately as per --completion-policy"); } else { - LOGP(info, "New state pending and we are already idle, switching to READY immediately."); + O2_SIGNPOST_EVENT_EMIT_INFO(device, lid, "runb_loop", "New state pending and we are already idle, switching to READY immediately."); } } } - // If we are Idle, we can then consider the transition to be expired. - if (state.transitionHandling == TransitionHandlingState::Requested && state.streaming == StreamingState::Idle) { + // If we are Idle, we can then consider the transition to be expired when it was requested or when the data processing timeout expired. + if ((state.transitionHandling == TransitionHandlingState::Requested || state.transitionHandling == TransitionHandlingState::DataProcessingExpired) + && state.streaming == StreamingState::Idle) { + O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "State transition requested and we are now in Idle. We can consider it to be completed."); state.transitionHandling = TransitionHandlingState::Expired; } TracyPlot("shouldNotWait", (int)shouldNotWait); @@ -1323,8 +1346,8 @@ void DataProcessingDevice::Run() // - we can trigger further events from the queue // - we can guarantee this is the last thing we do in the loop ( // assuming no one else is adding to the queue before this point). - auto onDrop = [®istry = mServiceRegistry](TimesliceSlot slot, std::vector& dropped, TimesliceIndex::OldestOutputInfo oldestOutputInfo) { - LOGP(debug, "Dropping message from slot {}. Forwarding as needed.", slot.index); + auto onDrop = [®istry = mServiceRegistry, lid](TimesliceSlot slot, std::vector& dropped, TimesliceIndex::OldestOutputInfo oldestOutputInfo) { + O2_SIGNPOST_START(device, lid, "run_loop", "Dropping message from slot %" PRIu64 ". Forwarding as needed.", (uint64_t)slot.index); ServiceRegistryRef ref{registry}; ref.get(); ref.get(); @@ -1343,7 +1366,9 @@ void DataProcessingDevice::Run() auto& dpContext = ref.get(); dpContext.preLoopCallbacks(ref); } + O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. %{}s", shouldNotWait ? "Will immediately schedule a new one" : "Waiting for next event."); uv_run(state.loop, shouldNotWait ? UV_RUN_NOWAIT : UV_RUN_ONCE); + O2_SIGNPOST_START(device, lid, "run_loop", "Run loop started. 
Loop reason %d.", state.loopReason); if ((state.loopReason & state.tracingFlags) != 0) { state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity()); fair::Logger::SetConsoleSeverity(fair::Severity::trace); @@ -1351,17 +1376,15 @@ void DataProcessingDevice::Run() fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); } - TracyPlot("loopReason", (int64_t)(uint64_t)state.loopReason); - LOGP(debug, "Loop reason mask {:b} & {:b} = {:b}", - state.loopReason, state.tracingFlags, - state.loopReason & state.tracingFlags); + O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Loop reason mask %x & %x = %x", state.loopReason, state.tracingFlags, state.loopReason & state.tracingFlags); if ((state.loopReason & DeviceState::LoopReason::OOB_ACTIVITY) != 0) { - LOGP(debug, "We were awakened by a OOB event. Rescanning everything."); + O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Out of band activity detected. Rescanning everything."); relayer.rescan(); } if (!state.pendingOffers.empty()) { + O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Pending %" PRIu64 " offers. updating the ComputingQuotaEvaluator.", (uint64_t)state.pendingOffers.size()); ref.get().updateOffers(state.pendingOffers, uv_now(state.loop)); } } @@ -1435,8 +1458,9 @@ void DataProcessingDevice::Run() } else { mWasActive = false; } - FrameMark; } + + O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. Transition handling state %d.", state.transitionHandling); auto& spec = ref.get(); /// Cleanup messages which are still pending on exit. for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) { @@ -1450,8 +1474,9 @@ void DataProcessingDevice::Run() /// non-data triggers like those which are time based. void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) { - ZoneScopedN("DataProcessingDevice::doPrepare"); auto& context = ref.get(); + O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context); + O2_SIGNPOST_START(device, dpid, "do_prepare", "Starting DataProcessorContext::doPrepare."); *context.wasActive = false; { @@ -1479,7 +1504,7 @@ void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) }); // Whether or not all the channels are completed - LOGP(debug, "Processing {} input channels.", spec.inputChannels.size()); + O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Processing %zu input channels.", spec.inputChannels.size()); /// Sort channels by oldest possible timeframe and /// process them in such order. static std::vector pollOrder; @@ -1491,13 +1516,14 @@ void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) // Nothing to poll... if (pollOrder.empty()) { + O2_SIGNPOST_END(device, dpid, "do_prepare", "Nothing to poll. 
Waiting for next iteration."); return; } auto currentOldest = state.inputChannelInfos[pollOrder.front()].oldestForChannel; auto currentNewest = state.inputChannelInfos[pollOrder.back()].oldestForChannel; auto delta = currentNewest.value - currentOldest.value; - LOGP(debug, "oldest possible timeframe range {}, {} => {} delta", currentOldest.value, currentNewest.value, - delta); + O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Oldest possible timeframe range %" PRIu64 " => %" PRIu64 " delta %" PRIu64, + (int64_t)currentOldest.value, (int64_t)currentNewest.value, (int64_t)delta); auto& infos = state.inputChannelInfos; if (context.balancingInputs) { From 7d0324bb31589f2133b0275ff7c937b3f2dea049 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 12:50:35 +0100 Subject: [PATCH 353/726] [DRAFT] DPL: Introduce calibration mode --- Framework/CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md index e7278733de246..f220e02b87dc7 100644 --- a/Framework/CHANGELOG.md +++ b/Framework/CHANGELOG.md @@ -1,3 +1,8 @@ +# 2024-02-17: Introduce calibration mode + +This introduces a new --data-processing-timeout `` option which can be used to specify +that data processing should finish after `` seconds. After it, only the messages which are produced on EndOfStream will be send. + # 2024-02-16: Improved Signposts. In particular: From c03d18f398e6812c1b17267bb4f318c94ada7189 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 16 Feb 2024 20:41:35 +0100 Subject: [PATCH 354/726] DPL: make sure O2_SIGNPOST_ID_FROM_POINTER allows for const pointers --- Framework/Foundation/include/Framework/Signpost.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 7750687088a1d..9ea8aea1192a9 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -202,8 +202,6 @@ struct _o2_log_t { bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool spin = false); bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin = false); -//_o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log); -//_o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); void* _o2_log_create(char const* name, int stacktrace); void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); @@ -224,7 +222,7 @@ inline _o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log) // Generate a unique id for a signpost. Do not use this directly, use O2_SIGNPOST_ID_FROM_POINTER instead. // Notice that this will fail for pointers to bytes as it might overlap with the id above. 
-inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer) +inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void const* pointer) { assert(((int64_t)pointer & 1) != 1); _o2_signpost_id_t uniqueId{(int64_t)pointer}; From fcf7b808eb1aaf7ab94f0df9ca0e48c50a7dfdd4 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 355/726] DPL: replace Tracy allocation profiler with Signposts --- Framework/Core/src/DataAllocator.cxx | 7 ++++ Framework/Core/src/DataProcessingDevice.cxx | 42 +++++++++++++++------ Framework/Core/src/LifetimeHelpers.cxx | 8 +++- 3 files changed, 44 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index de500cd6d6c4e..fe38283d5e2de 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -35,6 +35,7 @@ #include O2_DECLARE_DYNAMIC_LOG(stream_context); +O2_DECLARE_DYNAMIC_LOG(parts); namespace o2::framework { @@ -129,6 +130,8 @@ void DataAllocator::addPartToContext(RouteIndex routeIndex, fair::mq::MessagePtr o2::header::SerializationMethod serializationMethod) { auto headerMessage = headerMessageFromOutput(spec, routeIndex, serializationMethod, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerMessage->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "addPartToContext %p", headerMessage->GetData()); // FIXME: this is kind of ugly, we know that we can change the content of the // header message because we have just created it, but the API declares it const @@ -150,6 +153,8 @@ void DataAllocator::adopt(const Output& spec, std::string* ptr) // the correct payload size is set later when sending the // StringContext, see DataProcessor::doSend auto header = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodNone, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, header->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "addPartToContext %p", header->GetData()); mRegistry.get().addString(std::move(header), std::move(payload), routeIndex); assert(payload.get() == nullptr); } @@ -206,6 +211,8 @@ void DataAllocator::adopt(const Output& spec, LifetimeHolder& tb) auto& timingInfo = mRegistry.get(); RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); auto header = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodArrow, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, header->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "adopt %p", header->GetData()); auto& context = mRegistry.get(); auto creator = [transport = context.proxy().getOutputTransport(routeIndex)](size_t s) -> std::unique_ptr { diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 50934bd59a729..1fd35471c8fc1 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -92,7 +92,10 @@ struct formatter : ostream_format }; } // namespace fmt +// A log to use for general device logging O2_DECLARE_DYNAMIC_LOG(device); +// Special log to keep track of the lifetime of the parts +O2_DECLARE_DYNAMIC_LOG(parts); using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; @@ -1804,9 +1807,14 @@ void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& LOGP(error, "DataHeader payloadSize mismatch"); continue; } - TracyPlot("payload size", 
(int64_t)dh->payloadSize); auto dph = o2::header::get(headerData); - TracyAlloc(parts.At(pi + 1)->GetData(), parts.At(pi + 1)->GetSize()); + // We only deal with the tracking of parts if the log is enabled. + // This is because in principle we should track the size of each of + // the parts and sum it up. Not for now. + if (O2_LOG_ENABLED(parts) == true) { + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerData); + O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader with splitPayloadParts %d and splitPayloadIndex %d", dh->splitPayloadParts, dh->splitPayloadIndex); + } if (!dph) { insertInputInfo(pi, 2, InputType::Invalid); LOGP(error, "Header stack does not contain DataProcessingHeader"); @@ -2180,10 +2188,12 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // Function to cleanup record. For the moment we // simply use it to keep track of input messages // which are not needed, to display them in the GUI. -#ifdef TRACY_ENABLE auto cleanupRecord = [](InputRecord& record) { - for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) { - DataRef input = record.getByPos(ii); + if (O2_LOG_ENABLED(parts) == false) { + return; + } + for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) { + DataRef input = record.getByPos(pi); if (input.header == nullptr) { continue; } @@ -2196,10 +2206,21 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (!dh) { continue; } - TracyFree(input.payload); + // We use the address of the first header of a split payload + // to identify the interval. + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, dh); + O2_SIGNPOST_END(parts, pid, "parts", "Cleaning up parts associated to %p", dh); + + // No split parts, we simply skip the payload + if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) { + // this is indicating a sequence of payloads following the header + // FIXME: we will probably also set the DataHeader version + pi += dh->splitPayloadParts - 1; + } else { + size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2; + } } }; -#endif auto switchState = [ref](StreamingState newState) { auto& control = ref.get(); @@ -2432,6 +2453,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // We forward inputs only when we consume them. If we simply Process them, // we keep them for next message arriving. 
if (action.op == CompletionPolicy::CompletionOp::Consume) { + cleanupRecord(record); context.postDispatchingCallbacks(processContext); ref.get().call(o2::framework::ServiceRegistryRef{ref}); } @@ -2441,11 +2463,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false, action.op == CompletionPolicy::CompletionOp::Consume); } context.postForwardingCallbacks(processContext); - if (action.op == CompletionPolicy::CompletionOp::Consume) { -#ifdef TRACY_ENABLE - cleanupRecord(record); -#endif - } else if (action.op == CompletionPolicy::CompletionOp::Process) { + if (action.op == CompletionPolicy::CompletionOp::Process) { cleanTimers(action.slot, record); } } diff --git a/Framework/Core/src/LifetimeHelpers.cxx b/Framework/Core/src/LifetimeHelpers.cxx index 1aa53fa0493ca..8c8605d41849c 100644 --- a/Framework/Core/src/LifetimeHelpers.cxx +++ b/Framework/Core/src/LifetimeHelpers.cxx @@ -25,6 +25,7 @@ #include "Framework/FairMQDeviceProxy.h" #include "Framework/Formatters.h" #include "Framework/DeviceState.h" +#include "Framework/Signpost.h" #include "Headers/DataHeader.h" #include "Headers/DataHeaderHelpers.h" @@ -44,6 +45,8 @@ using namespace o2::header; using namespace fair; +O2_DECLARE_DYNAMIC_LOG(parts); + namespace o2::framework { @@ -411,7 +414,6 @@ ExpirationHandler::Handler LifetimeHelpers::enumerate(ConcreteDataMatcher const& assert(!ref.payload); auto timestamp = VariableContextHelpers::getTimeslice(variables).value; - LOGP(debug, "Enumerating record"); DataHeader dh; dh.dataOrigin = matcher.origin; dh.dataDescription = matcher.description; @@ -432,6 +434,8 @@ ExpirationHandler::Handler LifetimeHelpers::enumerate(ConcreteDataMatcher const& auto&& transport = deviceProxy.getInputChannel(channelIndex)->Transport(); auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph}); + O2_SIGNPOST_ID_FROM_POINTER(hid, parts, header->GetData()); + O2_SIGNPOST_START(parts, hid, "parts", "Enumerating part %p with timestamp %zu", header->GetData(), timestamp); ref.header = std::move(header); auto payload = transport->CreateMessage(sizeof(counter_t)); @@ -486,6 +490,8 @@ ExpirationHandler::Handler LifetimeHelpers::dummy(ConcreteDataMatcher const& mat auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph}); ref.header = std::move(header); + O2_SIGNPOST_ID_FROM_POINTER(hid, parts, header->GetData()); + O2_SIGNPOST_START(parts, hid, "parts", "Enumerating part %p with timestamp %zu", header->GetData(), timestamp); auto payload = transport->CreateMessage(0); ref.payload = std::move(payload); }; From 29bae99542b44a261a6e60a1620dfddb8b967f50 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 356/726] DPL: use signposts for OOB callback --- Framework/Core/src/DataProcessingDevice.cxx | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 1fd35471c8fc1..ce58903553524 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -318,7 +318,7 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) O2_SIGNPOST_END(device, sid, "socket_state", "Socket 
disconnected in context %{public}s", context->name); } break; case UV_PRIORITIZED: { - O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name); + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Socket prioritized for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() @@ -326,6 +326,7 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) void on_out_of_band_polled(uv_poll_t* poller, int status, int events) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); auto* context = (PollerContext*)poller->data; context->state->loopReason |= DeviceState::OOB_ACTIVITY; if (status < 0) { @@ -334,32 +335,27 @@ void on_out_of_band_polled(uv_poll_t* poller, int status, int events) } switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_INCOMING; assert(context->channelInfo); - LOGP(debug, "oob socket {} polled UV_READABLE.", - context->name, - context->channelInfo->hasPendingEvents); context->channelInfo->readPolled = true; } break; case UV_WRITABLE: { - ZoneScopedN("socket writeable"); + O2_SIGNPOST_END(device, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name); if (context->read) { - LOG(debug) << "socket polled UV_CONNECT" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name); uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_out_of_band_polled); } else { - LOG(debug) << "socket polled UV_WRITABLE" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_OUTGOING; } } break; case UV_DISCONNECT: { - ZoneScopedN("socket disconnect"); - LOG(debug) << "socket polled UV_DISCONNECT"; + O2_SIGNPOST_END(device, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name); uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled); } break; case UV_PRIORITIZED: { - ZoneScopedN("socket prioritized"); - LOG(debug) << "socket polled UV_PRIORITIZED"; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() From e482dad4508fc99b3be2a4d681c015bf522af1eb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 357/726] DPL: rename TracyLockable to O2_LOCKABLE --- .../Core/include/Framework/DataRelayer.h | 2 +- Framework/Core/include/Framework/DataSender.h | 2 +- Framework/Core/src/DataRelayer.cxx | 32 +++++++++---------- Framework/Core/src/DataSender.cxx | 2 +- Framework/Core/src/ServiceRegistry.cxx | 6 ++-- .../Foundation/include/Framework/Tracing.h | 4 +-- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Framework/Core/include/Framework/DataRelayer.h b/Framework/Core/include/Framework/DataRelayer.h index 8fdedbdd1119e..1ebcf549d6a29 100644 --- a/Framework/Core/include/Framework/DataRelayer.h +++ b/Framework/Core/include/Framework/DataRelayer.h @@ -194,7 +194,7 @@ class DataRelayer std::vector mPruneOps; size_t mMaxLanes; - 
TracyLockableN(std::recursive_mutex, mMutex, "data relayer mutex"); + O2_LOCKABLE_NAMED(std::recursive_mutex, mMutex, "data relayer mutex"); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DataSender.h b/Framework/Core/include/Framework/DataSender.h index 7cdc0ea537bea..2937bd80f78f8 100644 --- a/Framework/Core/include/Framework/DataSender.h +++ b/Framework/Core/include/Framework/DataSender.h @@ -61,7 +61,7 @@ class DataSender std::vector mPresent; std::vector mPresentDefaults; - TracyLockableN(std::recursive_mutex, mMutex, "data relayer mutex"); + O2_LOCKABLE_NAMED(std::recursive_mutex, mMutex, "data relayer mutex"); }; } // namespace o2::framework diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index 0b459dd155a21..1daf4bbd2a20b 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -70,7 +70,7 @@ DataRelayer::DataRelayer(const CompletionPolicy& policy, mInputMatchers{DataRelayerHelpers::createInputMatchers(routes)}, mMaxLanes{InputRouteHelpers::maxLanes(routes)} { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); if (policy.configureRelayer == nullptr) { static int pipelineLength = DefaultsHelpers::pipelineLength(); @@ -100,7 +100,7 @@ DataRelayer::DataRelayer(const CompletionPolicy& policy, TimesliceId DataRelayer::getTimesliceForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto& variables = mTimesliceIndex.getVariablesForSlot(slot); return VariableContextHelpers::getTimeslice(variables); } @@ -109,7 +109,7 @@ DataRelayer::ActivityStats DataRelayer::processDanglingInputs(std::vector lock(mMutex); + std::scoped_lock lock(mMutex); auto& deviceProxy = services.get(); ActivityStats activity; @@ -394,7 +394,7 @@ DataRelayer::RelayChoice size_t nPayloads, std::function&, TimesliceIndex::OldestOutputInfo)> onDrop) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); DataProcessingHeader const* dph = o2::header::get(rawHeader); // IMPLEMENTATION DETAILS // @@ -616,7 +616,7 @@ DataRelayer::RelayChoice void DataRelayer::getReadyToProcess(std::vector& completed) { LOGP(debug, "DataRelayer::getReadyToProcess"); - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); // THE STATE const auto& cache = mCache; @@ -749,7 +749,7 @@ void DataRelayer::getReadyToProcess(std::vector& comp void DataRelayer::updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStatus, CacheEntryStatus newStatus) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); auto markInputDone = [&cachedStateMetrics = mCachedStateMetrics, @@ -767,7 +767,7 @@ void DataRelayer::updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStat std::vector DataRelayer::consumeAllInputsForTimeslice(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); // State of the computation @@ -821,7 +821,7 @@ std::vector DataRelayer::consumeAllInputsForTimeslice std::vector DataRelayer::consumeExistingInputsForTimeslice(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); // State of the computation @@ -864,7 +864,7 @@ std::vector DataRelayer::consumeExistingInputsForTime void DataRelayer::clear() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); for (auto& cache : mCache) { cache.clear(); @@ 
-886,7 +886,7 @@ size_t /// the time pipelining. void DataRelayer::setPipelineLength(size_t s) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); mTimesliceIndex.resize(s); mVariableContextes.resize(s); @@ -895,7 +895,7 @@ void DataRelayer::setPipelineLength(size_t s) void DataRelayer::publishMetrics() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto numInputTypes = mDistinctRoutesIndex.size(); // FIXME: many of the DataRelayer function rely on allocated cache, so its @@ -932,31 +932,31 @@ void DataRelayer::publishMetrics() uint32_t DataRelayer::getFirstTFOrbitForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getFirstTFOrbit(mTimesliceIndex.getVariablesForSlot(slot)); } uint32_t DataRelayer::getFirstTFCounterForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getFirstTFCounter(mTimesliceIndex.getVariablesForSlot(slot)); } uint32_t DataRelayer::getRunNumberForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getRunNumber(mTimesliceIndex.getVariablesForSlot(slot)); } uint64_t DataRelayer::getCreationTimeForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getCreationTime(mTimesliceIndex.getVariablesForSlot(slot)); } void DataRelayer::sendContextState() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto& states = mContext.get(); for (size_t ci = 0; ci < mTimesliceIndex.size(); ++ci) { auto slot = TimesliceSlot{ci}; diff --git a/Framework/Core/src/DataSender.cxx b/Framework/Core/src/DataSender.cxx index f0753102e2d40..3acce1c4b9704 100644 --- a/Framework/Core/src/DataSender.cxx +++ b/Framework/Core/src/DataSender.cxx @@ -51,7 +51,7 @@ DataSender::DataSender(ServiceRegistryRef registry) mSpec{registry.get()}, mDistinctRoutesIndex{createDistinctOutputRouteIndex(mSpec.outputs)} { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto numInputTypes = mDistinctRoutesIndex.size(); auto& routes = mSpec.outputs; diff --git a/Framework/Core/src/ServiceRegistry.cxx b/Framework/Core/src/ServiceRegistry.cxx index 7b77c521fe6a7..09921f6b4a7d4 100644 --- a/Framework/Core/src/ServiceRegistry.cxx +++ b/Framework/Core/src/ServiceRegistry.cxx @@ -271,7 +271,7 @@ void ServiceRegistry::postRenderGUICallbacks(ServiceRegistryRef ref) void ServiceRegistry::bindService(ServiceRegistry::Salt salt, ServiceSpec const& spec, void* service) const { - static TracyLockableN(std::mutex, bindMutex, "bind mutex"); + static O2_LOCKABLE_NAMED(std::mutex, bindMutex, "bind mutex"); // Stream services need to store their callbacks in the stream context. // This is to make sure we invoke the correct callback only once per // stream, since they could bind multiple times. 
@@ -280,12 +280,12 @@ void ServiceRegistry::bindService(ServiceRegistry::Salt salt, ServiceSpec const& if (spec.kind == ServiceKind::Stream) { ServiceRegistryRef ref{const_cast(*this), salt}; auto& streamContext = ref.get(); - std::scoped_lock lock(bindMutex); + std::scoped_lock lock(bindMutex); auto& dataProcessorContext = ref.get(); ContextHelpers::bindStreamService(dataProcessorContext, streamContext, spec, service); } else { ServiceRegistryRef ref{const_cast(*this), salt}; - std::scoped_lock lock(bindMutex); + std::scoped_lock lock(bindMutex); if (ref.active()) { auto& dataProcessorContext = ref.get(); ContextHelpers::bindProcessorService(dataProcessorContext, spec, service); diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index f5bcecd3889d7..6c9c65f4423e5 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -39,8 +39,8 @@ #define TracyAppInfo(...) \ while (false) { \ } -#define TracyLockableN(T, V, N) T V -#define LockableBase(T) T +#define O2_LOCKABLE_NAMED(T, V, N) T V +#define O2_LOCKABLE(T) T #endif #endif // O2_FRAMEWORK_TRACING_H_ From 56a82107e0c74b8ef8ed7c37fcd6bd1c0f80c27d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 358/726] DPL: do not use Tracy anymore Tracy usage never took off, so I am removing all the explicit calls to it and I will replace them with Signpost usage, where it still makes sense. --- .../Core/src/CommonMessageBackendsHelpers.h | 1 - Framework/Core/src/CommonServices.cxx | 1 - Framework/Core/src/DataProcessingDevice.cxx | 29 ------------------- 3 files changed, 31 deletions(-) diff --git a/Framework/Core/src/CommonMessageBackendsHelpers.h b/Framework/Core/src/CommonMessageBackendsHelpers.h index 10d8a9c5d347a..a3684906ff838 100644 --- a/Framework/Core/src/CommonMessageBackendsHelpers.h +++ b/Framework/Core/src/CommonMessageBackendsHelpers.h @@ -38,7 +38,6 @@ struct CommonMessageBackendsHelpers { static ServiceProcessingCallback sendCallback() { return [](ProcessingContext& ctx, void* service) { - ZoneScopedN("send message callback"); T* context = reinterpret_cast(service); DataProcessor::doSend(ctx.services().get(), *context, ctx.services()); }; diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index a0f7fcdc88201..0d0828729ef42 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -741,7 +741,6 @@ auto sendRelayerMetrics(ServiceRegistryRef registry, DataProcessingStats& stats) stats.updateStats({static_cast(static_cast(ProcessingStatsId::AVAILABLE_MANAGED_SHM_BASE) + (runningWorkflow.shmSegmentId % 512)), DataProcessingStats::Op::SetIfPositive, freeMemory}); } - ZoneScopedN("send metrics"); auto device = registry.get().device(); int64_t totalBytesIn = 0; diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index ce58903553524..0a887f40efdcf 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -115,14 +115,12 @@ struct ServiceKindExtractor { /// Watching stdin for commands probably a better approach. 
void on_idle_timer(uv_timer_t* handle) { - ZoneScopedN("Idle timer"); auto* state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; } void on_transition_requested_expired(uv_timer_t* handle) { - ZoneScopedN("Transition expired"); auto* state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; LOGP(info, "Timer expired. Forcing transition to READY"); @@ -222,12 +220,10 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi // one with the thread id. For the moment we simply use the first one. void run_callback(uv_work_t* handle) { - ZoneScopedN("run_callback"); auto* task = (TaskStreamInfo*)handle->data; auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)}; DataProcessingDevice::doPrepare(ref); DataProcessingDevice::doRun(ref); - // FrameMark; } // Once the processing in a thread is done, this is executed on the main thread. @@ -268,7 +264,6 @@ void run_completion(uv_work_t* handle, int status) quotaEvaluator.handleExpired(reportExpiredOffer); quotaEvaluator.dispose(task->id.index); task->running = false; - ZoneScopedN("run_completion"); } // Context for polling @@ -377,8 +372,6 @@ void DataProcessingDevice::Init() context.statefulProcess = nullptr; context.error = spec.algorithm.onError; context.initError = spec.algorithm.onInitError; - TracyAppInfo(spec.name.data(), spec.name.size()); - ZoneScopedN("DataProcessingDevice::Init"); auto configStore = DeviceConfigurationHelpers::getConfiguration(mServiceRegistry, spec.name.c_str(), spec.options); if (configStore == nullptr) { @@ -412,7 +405,6 @@ void DataProcessingDevice::Init() if (context.initError) { context.initErrorHandling = [&errorCallback = context.initError, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e) { - ZoneScopedN("Error handling"); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. ServiceRegistryRef ref{serviceRegistry, ServiceRegistry::globalDeviceSalt()}; @@ -426,7 +418,6 @@ void DataProcessingDevice::Init() }; } else { context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) { - ZoneScopedN("Error handling"); auto& err = error_from_ref(e); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. @@ -449,7 +440,6 @@ void DataProcessingDevice::Init() try { context.statefulProcess = context.init(initContext); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); if (context.initErrorHandling) { (context.initErrorHandling)(e); } @@ -458,14 +448,12 @@ void DataProcessingDevice::Init() try { context.statefulProcess = context.init(initContext); } catch (std::exception& ex) { - ZoneScopedN("error handling"); /// Convert a standard exception to a RuntimeErrorRef /// Notice how this will lose the backtrace information /// and report the exception coming from here. 
auto e = runtime_error(ex.what()); (context.initErrorHandling)(e); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.initErrorHandling)(e); } } @@ -621,7 +609,6 @@ static auto toBeforwardedMessageSet = [](std::vector& cachedForwar // FIXME: do it in a smarter way than O(N^2) static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector& currentSetOfInputs, TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) { - ZoneScopedN("forward inputs"); auto& proxy = registry.get(); // we collect all messages per forward in a map and send them together std::vector forwardedParts; @@ -1078,7 +1065,6 @@ void DataProcessingDevice::fillContext(DataProcessorContext& context, DeviceCont if (context.error != nullptr) { context.errorHandling = [&errorCallback = context.error, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) { - ZoneScopedN("Error handling"); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. ServiceRegistryRef ref{serviceRegistry, ServiceRegistry::globalDeviceSalt()}; @@ -1093,7 +1079,6 @@ void DataProcessingDevice::fillContext(DataProcessorContext& context, DeviceCont } else { context.errorHandling = [&errorPolicy = mProcessingPolicies.error, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) { - ZoneScopedN("Error handling"); auto& err = error_from_ref(e); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. @@ -1242,8 +1227,6 @@ void DataProcessingDevice::Run() // so that devices which do not have a timer can still start an // enumeration. { - ZoneScopedN("uv idle"); - TracyPlot("past activity", (int64_t)mWasActive); ServiceRegistryRef ref{mServiceRegistry}; ref.get().flushPending(mServiceRegistry); auto shouldNotWait = (mWasActive && @@ -1303,7 +1286,6 @@ void DataProcessingDevice::Run() if (state.transitionHandling == TransitionHandlingState::Requested && state.streaming == StreamingState::Idle) { state.transitionHandling = TransitionHandlingState::Expired; } - TracyPlot("shouldNotWait", (int)shouldNotWait); if (state.severityStack.empty() == false) { fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); @@ -1349,7 +1331,6 @@ void DataProcessingDevice::Run() fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); } - TracyPlot("loopReason", (int64_t)(uint64_t)state.loopReason); LOGP(debug, "Loop reason mask {:b} & {:b} = {:b}", state.loopReason, state.tracingFlags, state.loopReason & state.tracingFlags); @@ -1433,7 +1414,6 @@ void DataProcessingDevice::Run() } else { mWasActive = false; } - FrameMark; } auto& spec = ref.get(); /// Cleanup messages which are still pending on exit. @@ -1448,12 +1428,10 @@ void DataProcessingDevice::Run() /// non-data triggers like those which are time based. void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) { - ZoneScopedN("DataProcessingDevice::doPrepare"); auto& context = ref.get(); *context.wasActive = false; { - ZoneScopedN("CallbackService::Id::ClockTick"); ref.get().call(); } // Whether or not we had something to do. 
@@ -1734,7 +1712,6 @@ struct WaitBackpressurePolicy { void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& info) { auto& context = ref.get(); - ZoneScopedN("DataProcessingDevice::handleData"); enum struct InputType : int { Invalid = 0, @@ -1763,7 +1740,6 @@ void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& auto& parts = info.parts; stats.updateStats({(int)ProcessingStatsId::TOTAL_INPUTS, DataProcessingStats::Op::Set, (int64_t)parts.Size()}); - TracyPlot("messages received", (int64_t)parts.Size()); std::vector results; // we can reserve the upper limit results.reserve(parts.Size() / 2); @@ -2060,7 +2036,6 @@ void update_maximum(std::atomic& maximum_value, T const& value) noexcept bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::vector& completed) { auto& context = ref.get(); - ZoneScopedN("DataProcessingDevice::tryDispatchComputation"); LOGP(debug, "DataProcessingDevice::tryDispatchComputation"); // This is the actual hidden state for the outer loop. In case we decide we // want to support multithreaded dispatching of operations, I can simply @@ -2135,7 +2110,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void { auto& relayer = ref.get(); auto& timingInfo = ref.get(); - ZoneScopedN("DataProcessingDevice::prepareForCurrentTimeslice"); auto timeslice = relayer.getTimesliceForSlot(i); timingInfo.timeslice = timeslice.value; @@ -2422,21 +2396,18 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v try { runNoCatch(action); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.errorHandling)(e, record); } } else { try { runNoCatch(action); } catch (std::exception& ex) { - ZoneScopedN("error handling"); /// Convert a standard exception to a RuntimeErrorRef /// Notice how this will lose the backtrace information /// and report the exception coming from here. auto e = runtime_error(ex.what()); (context.errorHandling)(e, record); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.errorHandling)(e, record); } } From b5d41a03d914d482eeee4776b5ff012a06765493 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 359/726] DPL: drop unused Tracy APIs ZoneScoped*, TracyAppInfo, FrameMark, TracyAlloc, TracyPlot were all removed and will be replaced by Signposts as needed. --- .../Foundation/include/Framework/Tracing.h | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index 6c9c65f4423e5..91c3b99eb8dd0 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -15,30 +15,6 @@ #define DPL_HAS_TRACING #include #else -#define ZoneScoped \ - while (false) { \ - } -#define FrameMark \ - while (false) { \ - } -#define TracyPlot(...) \ - while (false) { \ - } -#define ZoneScopedN(...) \ - while (false) { \ - } -#define ZoneScopedNS(...) \ - while (false) { \ - } -#define TracyAlloc(...) \ - while (false) { \ - } -#define TracyFree(...) \ - while (false) { \ - } -#define TracyAppInfo(...) 
\ - while (false) { \ - } #define O2_LOCKABLE_NAMED(T, V, N) T V #define O2_LOCKABLE(T) T #endif From 73e5911bda21e3f7079c3c759d748373dea11232 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 360/726] DPL: drop tracy support from the GUI No one is using it and Signposts are probably a better alternative. --- .../GUISupport/src/FrameworkGUIDeviceInspector.cxx | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index eb62fcc09d338..3ef8b99ed26bb 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -261,9 +261,6 @@ void displayDeviceInspector(DeviceSpec const& spec, ImGui::Text("Pid: %d (exit status: %d)", info.pid, info.exitStatus); } ImGui::Text("Device state: %s", info.deviceState.data()); -#ifdef DPL_ENABLE_TRACING - ImGui::Text("Tracy Port: %d", info.tracyPort); -#endif ImGui::Text("Rank: %zu/%zu%%%zu/%zu", spec.rank, spec.nSlots, spec.inputTimesliceId, spec.maxInputTimeslices); if (ImGui::Button(ICON_FA_BUG "Attach debugger")) { @@ -324,16 +321,6 @@ void displayDeviceInspector(DeviceSpec const& spec, } #endif -#if DPL_ENABLE_TRACING - ImGui::SameLine(); - if (ImGui::Button("Tracy")) { - std::string tracyPort = std::to_string(info.tracyPort); - auto cmd = fmt::format("tracy-profiler -p {} -a 127.0.0.1 &", info.tracyPort); - LOG(debug) << cmd; - int retVal = system(cmd.c_str()); - (void)retVal; - } -#endif if (control.controller) { if (ImGui::Button("Offer SHM")) { control.controller->write("/shm-offer 1000", strlen("/shm-offer 1000")); From 653ecf58f80efd07ff7a2c040e869be8d6a27716 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:10:16 +0100 Subject: [PATCH 361/726] DPL: drop tracy support for good --- Framework/Core/src/DataProcessingDevice.cxx | 4 ---- Framework/Foundation/include/Framework/Tracing.h | 7 ++++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 0a887f40efdcf..c175e4e47e0c9 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -8,10 +8,6 @@ // In applying this license CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-#ifdef DPL_ENABLE_TRACING
-#define TRACY_ENABLE
-#include 
-#endif
 #include "Framework/AsyncQueue.h"
 #include "Framework/DataProcessingDevice.h"
 #include "Framework/ChannelMatching.h"
diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h
index 91c3b99eb8dd0..72c052ca6c161 100644
--- a/Framework/Foundation/include/Framework/Tracing.h
+++ b/Framework/Foundation/include/Framework/Tracing.h
@@ -11,9 +11,10 @@
 #ifndef O2_FRAMEWORK_TRACING_H_
 #define O2_FRAMEWORK_TRACING_H_
 
-#if DPL_ENABLE_TRACING && __has_include()
-#define DPL_HAS_TRACING
-#include 
+#if DPL_ENABLE_TRACING
+// FIXME: not implemented yet in terms of Signposts
+#define O2_LOCKABLE_NAMED(T, V, N) T V
+#define O2_LOCKABLE(T) T
 #else
 #define O2_LOCKABLE_NAMED(T, V, N) T V
 #define O2_LOCKABLE(T) T

From a75ef55efc126b0afecbf4cbbea7acd3ce605ddf Mon Sep 17 00:00:00 2001
From: Giulio Eulisse <10544+ktf@users.noreply.github.com>
Date: Thu, 22 Feb 2024 12:10:16 +0100
Subject: [PATCH 362/726] [FEAT] Drop support for Tracy

---
 Framework/CHANGELOG.md | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md
index e7278733de246..044274d9515c4 100644
--- a/Framework/CHANGELOG.md
+++ b/Framework/CHANGELOG.md
@@ -1,3 +1,12 @@
+# 2024-02-22: Drop Tracy support
+
+Tracy support never took off, so I am dropping it. This was mostly because people did not know about it, and having a per-process profiling GUI was rather impractical. Moreover, needing an extra compile-time flag meant one most likely did not have the support compiled in when it was needed.
+
+I have therefore decided to replace it with signposts, which hopefully will see better adoption thanks
+to the integration with Instruments on macOS and the easy way they can be enabled dynamically.
+
+We could then reintroduce Tracy support as a hook on top of signposts, if really needed.
+
 # 2024-02-16: Improved Signposts.
In particular: From eba163e2b62d29bd153eaaab9c30dff02286aeea Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 363/726] DPL: replace Tracy allocation profiler with Signposts --- Framework/Core/src/DataAllocator.cxx | 7 ++++ Framework/Core/src/DataProcessingDevice.cxx | 42 +++++++++++++++------ Framework/Core/src/LifetimeHelpers.cxx | 8 +++- 3 files changed, 44 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index de500cd6d6c4e..fe38283d5e2de 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -35,6 +35,7 @@ #include O2_DECLARE_DYNAMIC_LOG(stream_context); +O2_DECLARE_DYNAMIC_LOG(parts); namespace o2::framework { @@ -129,6 +130,8 @@ void DataAllocator::addPartToContext(RouteIndex routeIndex, fair::mq::MessagePtr o2::header::SerializationMethod serializationMethod) { auto headerMessage = headerMessageFromOutput(spec, routeIndex, serializationMethod, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerMessage->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "addPartToContext %p", headerMessage->GetData()); // FIXME: this is kind of ugly, we know that we can change the content of the // header message because we have just created it, but the API declares it const @@ -150,6 +153,8 @@ void DataAllocator::adopt(const Output& spec, std::string* ptr) // the correct payload size is set later when sending the // StringContext, see DataProcessor::doSend auto header = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodNone, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, header->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "addPartToContext %p", header->GetData()); mRegistry.get().addString(std::move(header), std::move(payload), routeIndex); assert(payload.get() == nullptr); } @@ -206,6 +211,8 @@ void DataAllocator::adopt(const Output& spec, LifetimeHolder& tb) auto& timingInfo = mRegistry.get(); RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); auto header = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodArrow, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, header->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "adopt %p", header->GetData()); auto& context = mRegistry.get(); auto creator = [transport = context.proxy().getOutputTransport(routeIndex)](size_t s) -> std::unique_ptr { diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 50934bd59a729..387a9ab753575 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -92,7 +92,10 @@ struct formatter : ostream_format }; } // namespace fmt +// A log to use for general device logging O2_DECLARE_DYNAMIC_LOG(device); +// Special log to keep track of the lifetime of the parts +O2_DECLARE_DYNAMIC_LOG(parts); using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; @@ -1804,9 +1807,14 @@ void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& LOGP(error, "DataHeader payloadSize mismatch"); continue; } - TracyPlot("payload size", (int64_t)dh->payloadSize); auto dph = o2::header::get(headerData); - TracyAlloc(parts.At(pi + 1)->GetData(), parts.At(pi + 1)->GetSize()); + // We only deal with the tracking of parts if the log is enabled. 
+ // This is because in principle we should track the size of each of + // the parts and sum it up. Not for now. + if (O2_LOG_ENABLED(parts) == true) { + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerData); + O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader with splitPayloadParts %d and splitPayloadIndex %d", dh->splitPayloadParts, dh->splitPayloadIndex); + } if (!dph) { insertInputInfo(pi, 2, InputType::Invalid); LOGP(error, "Header stack does not contain DataProcessingHeader"); @@ -2180,10 +2188,12 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // Function to cleanup record. For the moment we // simply use it to keep track of input messages // which are not needed, to display them in the GUI. -#ifdef TRACY_ENABLE auto cleanupRecord = [](InputRecord& record) { - for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) { - DataRef input = record.getByPos(ii); + if (O2_LOG_ENABLED(parts) == false) { + return; + } + for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) { + DataRef input = record.getByPos(pi); if (input.header == nullptr) { continue; } @@ -2196,10 +2206,21 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (!dh) { continue; } - TracyFree(input.payload); + // We use the address of the first header of a split payload + // to identify the interval. + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, dh); + O2_SIGNPOST_END(parts, pid, "parts", "Cleaning up parts associated to %p", dh); + + // No split parts, we simply skip the payload + if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) { + // this is indicating a sequence of payloads following the header + // FIXME: we will probably also set the DataHeader version + pi += dh->splitPayloadParts - 1; + } else { + size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2; + } } }; -#endif auto switchState = [ref](StreamingState newState) { auto& control = ref.get(); @@ -2432,6 +2453,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // We forward inputs only when we consume them. If we simply Process them, // we keep them for next message arriving. 
if (action.op == CompletionPolicy::CompletionOp::Consume) { + cleanupRecord(record); context.postDispatchingCallbacks(processContext); ref.get().call(o2::framework::ServiceRegistryRef{ref}); } @@ -2441,11 +2463,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false, action.op == CompletionPolicy::CompletionOp::Consume); } context.postForwardingCallbacks(processContext); - if (action.op == CompletionPolicy::CompletionOp::Consume) { -#ifdef TRACY_ENABLE - cleanupRecord(record); -#endif - } else if (action.op == CompletionPolicy::CompletionOp::Process) { + if (action.op == CompletionPolicy::CompletionOp::Process) { cleanTimers(action.slot, record); } } diff --git a/Framework/Core/src/LifetimeHelpers.cxx b/Framework/Core/src/LifetimeHelpers.cxx index 1aa53fa0493ca..8c8605d41849c 100644 --- a/Framework/Core/src/LifetimeHelpers.cxx +++ b/Framework/Core/src/LifetimeHelpers.cxx @@ -25,6 +25,7 @@ #include "Framework/FairMQDeviceProxy.h" #include "Framework/Formatters.h" #include "Framework/DeviceState.h" +#include "Framework/Signpost.h" #include "Headers/DataHeader.h" #include "Headers/DataHeaderHelpers.h" @@ -44,6 +45,8 @@ using namespace o2::header; using namespace fair; +O2_DECLARE_DYNAMIC_LOG(parts); + namespace o2::framework { @@ -411,7 +414,6 @@ ExpirationHandler::Handler LifetimeHelpers::enumerate(ConcreteDataMatcher const& assert(!ref.payload); auto timestamp = VariableContextHelpers::getTimeslice(variables).value; - LOGP(debug, "Enumerating record"); DataHeader dh; dh.dataOrigin = matcher.origin; dh.dataDescription = matcher.description; @@ -432,6 +434,8 @@ ExpirationHandler::Handler LifetimeHelpers::enumerate(ConcreteDataMatcher const& auto&& transport = deviceProxy.getInputChannel(channelIndex)->Transport(); auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph}); + O2_SIGNPOST_ID_FROM_POINTER(hid, parts, header->GetData()); + O2_SIGNPOST_START(parts, hid, "parts", "Enumerating part %p with timestamp %zu", header->GetData(), timestamp); ref.header = std::move(header); auto payload = transport->CreateMessage(sizeof(counter_t)); @@ -486,6 +490,8 @@ ExpirationHandler::Handler LifetimeHelpers::dummy(ConcreteDataMatcher const& mat auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph}); ref.header = std::move(header); + O2_SIGNPOST_ID_FROM_POINTER(hid, parts, header->GetData()); + O2_SIGNPOST_START(parts, hid, "parts", "Enumerating part %p with timestamp %zu", header->GetData(), timestamp); auto payload = transport->CreateMessage(0); ref.payload = std::move(payload); }; From 02954611aaf64aeb8d2619c28cc7c40ccb0adc44 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 364/726] DPL: use signposts for OOB callback --- Framework/Core/src/DataProcessingDevice.cxx | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 387a9ab753575..175fc8ab50f6e 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -318,7 +318,7 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) O2_SIGNPOST_END(device, sid, "socket_state", "Socket 
disconnected in context %{public}s", context->name); } break; case UV_PRIORITIZED: { - O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name); + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Socket prioritized for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() @@ -326,6 +326,7 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) void on_out_of_band_polled(uv_poll_t* poller, int status, int events) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); auto* context = (PollerContext*)poller->data; context->state->loopReason |= DeviceState::OOB_ACTIVITY; if (status < 0) { @@ -334,32 +335,27 @@ void on_out_of_band_polled(uv_poll_t* poller, int status, int events) } switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_INCOMING; assert(context->channelInfo); - LOGP(debug, "oob socket {} polled UV_READABLE.", - context->name, - context->channelInfo->hasPendingEvents); context->channelInfo->readPolled = true; } break; case UV_WRITABLE: { - ZoneScopedN("socket writeable"); + O2_SIGNPOST_END(device, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name); if (context->read) { - LOG(debug) << "socket polled UV_CONNECT" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name); uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_out_of_band_polled); } else { - LOG(debug) << "socket polled UV_WRITABLE" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_OUTGOING; } } break; case UV_DISCONNECT: { - ZoneScopedN("socket disconnect"); - LOG(debug) << "socket polled UV_DISCONNECT"; + O2_SIGNPOST_END(device, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name); uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled); } break; case UV_PRIORITIZED: { - ZoneScopedN("socket prioritized"); - LOG(debug) << "socket polled UV_PRIORITIZED"; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() From a83b94be6c9d2d1ea14aab7537418995228ab793 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 365/726] DPL: rename TracyLockable to O2_LOCKABLE --- .../Core/include/Framework/DataRelayer.h | 2 +- Framework/Core/include/Framework/DataSender.h | 2 +- Framework/Core/src/DataRelayer.cxx | 32 +++++++++---------- Framework/Core/src/DataSender.cxx | 2 +- Framework/Core/src/ServiceRegistry.cxx | 6 ++-- .../Foundation/include/Framework/Tracing.h | 4 +-- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Framework/Core/include/Framework/DataRelayer.h b/Framework/Core/include/Framework/DataRelayer.h index 8fdedbdd1119e..1ebcf549d6a29 100644 --- a/Framework/Core/include/Framework/DataRelayer.h +++ b/Framework/Core/include/Framework/DataRelayer.h @@ -194,7 +194,7 @@ class DataRelayer std::vector mPruneOps; size_t mMaxLanes; - 
TracyLockableN(std::recursive_mutex, mMutex, "data relayer mutex"); + O2_LOCKABLE_NAMED(std::recursive_mutex, mMutex, "data relayer mutex"); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DataSender.h b/Framework/Core/include/Framework/DataSender.h index 7cdc0ea537bea..2937bd80f78f8 100644 --- a/Framework/Core/include/Framework/DataSender.h +++ b/Framework/Core/include/Framework/DataSender.h @@ -61,7 +61,7 @@ class DataSender std::vector mPresent; std::vector mPresentDefaults; - TracyLockableN(std::recursive_mutex, mMutex, "data relayer mutex"); + O2_LOCKABLE_NAMED(std::recursive_mutex, mMutex, "data relayer mutex"); }; } // namespace o2::framework diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index 0b459dd155a21..1daf4bbd2a20b 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -70,7 +70,7 @@ DataRelayer::DataRelayer(const CompletionPolicy& policy, mInputMatchers{DataRelayerHelpers::createInputMatchers(routes)}, mMaxLanes{InputRouteHelpers::maxLanes(routes)} { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); if (policy.configureRelayer == nullptr) { static int pipelineLength = DefaultsHelpers::pipelineLength(); @@ -100,7 +100,7 @@ DataRelayer::DataRelayer(const CompletionPolicy& policy, TimesliceId DataRelayer::getTimesliceForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto& variables = mTimesliceIndex.getVariablesForSlot(slot); return VariableContextHelpers::getTimeslice(variables); } @@ -109,7 +109,7 @@ DataRelayer::ActivityStats DataRelayer::processDanglingInputs(std::vector lock(mMutex); + std::scoped_lock lock(mMutex); auto& deviceProxy = services.get(); ActivityStats activity; @@ -394,7 +394,7 @@ DataRelayer::RelayChoice size_t nPayloads, std::function&, TimesliceIndex::OldestOutputInfo)> onDrop) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); DataProcessingHeader const* dph = o2::header::get(rawHeader); // IMPLEMENTATION DETAILS // @@ -616,7 +616,7 @@ DataRelayer::RelayChoice void DataRelayer::getReadyToProcess(std::vector& completed) { LOGP(debug, "DataRelayer::getReadyToProcess"); - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); // THE STATE const auto& cache = mCache; @@ -749,7 +749,7 @@ void DataRelayer::getReadyToProcess(std::vector& comp void DataRelayer::updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStatus, CacheEntryStatus newStatus) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); auto markInputDone = [&cachedStateMetrics = mCachedStateMetrics, @@ -767,7 +767,7 @@ void DataRelayer::updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStat std::vector DataRelayer::consumeAllInputsForTimeslice(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); // State of the computation @@ -821,7 +821,7 @@ std::vector DataRelayer::consumeAllInputsForTimeslice std::vector DataRelayer::consumeExistingInputsForTimeslice(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); // State of the computation @@ -864,7 +864,7 @@ std::vector DataRelayer::consumeExistingInputsForTime void DataRelayer::clear() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); for (auto& cache : mCache) { cache.clear(); @@ 
-886,7 +886,7 @@ size_t /// the time pipelining. void DataRelayer::setPipelineLength(size_t s) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); mTimesliceIndex.resize(s); mVariableContextes.resize(s); @@ -895,7 +895,7 @@ void DataRelayer::setPipelineLength(size_t s) void DataRelayer::publishMetrics() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto numInputTypes = mDistinctRoutesIndex.size(); // FIXME: many of the DataRelayer function rely on allocated cache, so its @@ -932,31 +932,31 @@ void DataRelayer::publishMetrics() uint32_t DataRelayer::getFirstTFOrbitForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getFirstTFOrbit(mTimesliceIndex.getVariablesForSlot(slot)); } uint32_t DataRelayer::getFirstTFCounterForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getFirstTFCounter(mTimesliceIndex.getVariablesForSlot(slot)); } uint32_t DataRelayer::getRunNumberForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getRunNumber(mTimesliceIndex.getVariablesForSlot(slot)); } uint64_t DataRelayer::getCreationTimeForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getCreationTime(mTimesliceIndex.getVariablesForSlot(slot)); } void DataRelayer::sendContextState() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto& states = mContext.get(); for (size_t ci = 0; ci < mTimesliceIndex.size(); ++ci) { auto slot = TimesliceSlot{ci}; diff --git a/Framework/Core/src/DataSender.cxx b/Framework/Core/src/DataSender.cxx index f0753102e2d40..3acce1c4b9704 100644 --- a/Framework/Core/src/DataSender.cxx +++ b/Framework/Core/src/DataSender.cxx @@ -51,7 +51,7 @@ DataSender::DataSender(ServiceRegistryRef registry) mSpec{registry.get()}, mDistinctRoutesIndex{createDistinctOutputRouteIndex(mSpec.outputs)} { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto numInputTypes = mDistinctRoutesIndex.size(); auto& routes = mSpec.outputs; diff --git a/Framework/Core/src/ServiceRegistry.cxx b/Framework/Core/src/ServiceRegistry.cxx index 7b77c521fe6a7..09921f6b4a7d4 100644 --- a/Framework/Core/src/ServiceRegistry.cxx +++ b/Framework/Core/src/ServiceRegistry.cxx @@ -271,7 +271,7 @@ void ServiceRegistry::postRenderGUICallbacks(ServiceRegistryRef ref) void ServiceRegistry::bindService(ServiceRegistry::Salt salt, ServiceSpec const& spec, void* service) const { - static TracyLockableN(std::mutex, bindMutex, "bind mutex"); + static O2_LOCKABLE_NAMED(std::mutex, bindMutex, "bind mutex"); // Stream services need to store their callbacks in the stream context. // This is to make sure we invoke the correct callback only once per // stream, since they could bind multiple times. 
@@ -280,12 +280,12 @@ void ServiceRegistry::bindService(ServiceRegistry::Salt salt, ServiceSpec const& if (spec.kind == ServiceKind::Stream) { ServiceRegistryRef ref{const_cast(*this), salt}; auto& streamContext = ref.get(); - std::scoped_lock lock(bindMutex); + std::scoped_lock lock(bindMutex); auto& dataProcessorContext = ref.get(); ContextHelpers::bindStreamService(dataProcessorContext, streamContext, spec, service); } else { ServiceRegistryRef ref{const_cast(*this), salt}; - std::scoped_lock lock(bindMutex); + std::scoped_lock lock(bindMutex); if (ref.active()) { auto& dataProcessorContext = ref.get(); ContextHelpers::bindProcessorService(dataProcessorContext, spec, service); diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index f5bcecd3889d7..6c9c65f4423e5 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -39,8 +39,8 @@ #define TracyAppInfo(...) \ while (false) { \ } -#define TracyLockableN(T, V, N) T V -#define LockableBase(T) T +#define O2_LOCKABLE_NAMED(T, V, N) T V +#define O2_LOCKABLE(T) T #endif #endif // O2_FRAMEWORK_TRACING_H_ From c5f7aaf11e502e2bf70e0ea74f79936e771848ac Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 366/726] DPL: do not use Tracy anymore Tracy usage never took off, so I am removing all the explicit calls to it and I will replace them with Signpost usage, where it still makes sense. --- .../Core/src/CommonMessageBackendsHelpers.h | 1 - Framework/Core/src/CommonServices.cxx | 1 - Framework/Core/src/DataProcessingDevice.cxx | 29 ------------------- 3 files changed, 31 deletions(-) diff --git a/Framework/Core/src/CommonMessageBackendsHelpers.h b/Framework/Core/src/CommonMessageBackendsHelpers.h index 10d8a9c5d347a..a3684906ff838 100644 --- a/Framework/Core/src/CommonMessageBackendsHelpers.h +++ b/Framework/Core/src/CommonMessageBackendsHelpers.h @@ -38,7 +38,6 @@ struct CommonMessageBackendsHelpers { static ServiceProcessingCallback sendCallback() { return [](ProcessingContext& ctx, void* service) { - ZoneScopedN("send message callback"); T* context = reinterpret_cast(service); DataProcessor::doSend(ctx.services().get(), *context, ctx.services()); }; diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index a0f7fcdc88201..0d0828729ef42 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -741,7 +741,6 @@ auto sendRelayerMetrics(ServiceRegistryRef registry, DataProcessingStats& stats) stats.updateStats({static_cast(static_cast(ProcessingStatsId::AVAILABLE_MANAGED_SHM_BASE) + (runningWorkflow.shmSegmentId % 512)), DataProcessingStats::Op::SetIfPositive, freeMemory}); } - ZoneScopedN("send metrics"); auto device = registry.get().device(); int64_t totalBytesIn = 0; diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 175fc8ab50f6e..75ff3df50c3ea 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -115,14 +115,12 @@ struct ServiceKindExtractor { /// Watching stdin for commands probably a better approach. 
void on_idle_timer(uv_timer_t* handle) { - ZoneScopedN("Idle timer"); auto* state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; } void on_transition_requested_expired(uv_timer_t* handle) { - ZoneScopedN("Transition expired"); auto* state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; LOGP(info, "Timer expired. Forcing transition to READY"); @@ -222,12 +220,10 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi // one with the thread id. For the moment we simply use the first one. void run_callback(uv_work_t* handle) { - ZoneScopedN("run_callback"); auto* task = (TaskStreamInfo*)handle->data; auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)}; DataProcessingDevice::doPrepare(ref); DataProcessingDevice::doRun(ref); - // FrameMark; } // Once the processing in a thread is done, this is executed on the main thread. @@ -268,7 +264,6 @@ void run_completion(uv_work_t* handle, int status) quotaEvaluator.handleExpired(reportExpiredOffer); quotaEvaluator.dispose(task->id.index); task->running = false; - ZoneScopedN("run_completion"); } // Context for polling @@ -377,8 +372,6 @@ void DataProcessingDevice::Init() context.statefulProcess = nullptr; context.error = spec.algorithm.onError; context.initError = spec.algorithm.onInitError; - TracyAppInfo(spec.name.data(), spec.name.size()); - ZoneScopedN("DataProcessingDevice::Init"); auto configStore = DeviceConfigurationHelpers::getConfiguration(mServiceRegistry, spec.name.c_str(), spec.options); if (configStore == nullptr) { @@ -412,7 +405,6 @@ void DataProcessingDevice::Init() if (context.initError) { context.initErrorHandling = [&errorCallback = context.initError, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e) { - ZoneScopedN("Error handling"); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. ServiceRegistryRef ref{serviceRegistry, ServiceRegistry::globalDeviceSalt()}; @@ -426,7 +418,6 @@ void DataProcessingDevice::Init() }; } else { context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) { - ZoneScopedN("Error handling"); auto& err = error_from_ref(e); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. @@ -449,7 +440,6 @@ void DataProcessingDevice::Init() try { context.statefulProcess = context.init(initContext); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); if (context.initErrorHandling) { (context.initErrorHandling)(e); } @@ -458,14 +448,12 @@ void DataProcessingDevice::Init() try { context.statefulProcess = context.init(initContext); } catch (std::exception& ex) { - ZoneScopedN("error handling"); /// Convert a standard exception to a RuntimeErrorRef /// Notice how this will lose the backtrace information /// and report the exception coming from here. 
auto e = runtime_error(ex.what()); (context.initErrorHandling)(e); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.initErrorHandling)(e); } } @@ -621,7 +609,6 @@ static auto toBeforwardedMessageSet = [](std::vector& cachedForwar // FIXME: do it in a smarter way than O(N^2) static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector& currentSetOfInputs, TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) { - ZoneScopedN("forward inputs"); auto& proxy = registry.get(); // we collect all messages per forward in a map and send them together std::vector forwardedParts; @@ -1078,7 +1065,6 @@ void DataProcessingDevice::fillContext(DataProcessorContext& context, DeviceCont if (context.error != nullptr) { context.errorHandling = [&errorCallback = context.error, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) { - ZoneScopedN("Error handling"); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. ServiceRegistryRef ref{serviceRegistry, ServiceRegistry::globalDeviceSalt()}; @@ -1093,7 +1079,6 @@ void DataProcessingDevice::fillContext(DataProcessorContext& context, DeviceCont } else { context.errorHandling = [&errorPolicy = mProcessingPolicies.error, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) { - ZoneScopedN("Error handling"); auto& err = error_from_ref(e); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. @@ -1242,8 +1227,6 @@ void DataProcessingDevice::Run() // so that devices which do not have a timer can still start an // enumeration. { - ZoneScopedN("uv idle"); - TracyPlot("past activity", (int64_t)mWasActive); ServiceRegistryRef ref{mServiceRegistry}; ref.get().flushPending(mServiceRegistry); auto shouldNotWait = (mWasActive && @@ -1303,7 +1286,6 @@ void DataProcessingDevice::Run() if (state.transitionHandling == TransitionHandlingState::Requested && state.streaming == StreamingState::Idle) { state.transitionHandling = TransitionHandlingState::Expired; } - TracyPlot("shouldNotWait", (int)shouldNotWait); if (state.severityStack.empty() == false) { fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); @@ -1349,7 +1331,6 @@ void DataProcessingDevice::Run() fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); } - TracyPlot("loopReason", (int64_t)(uint64_t)state.loopReason); LOGP(debug, "Loop reason mask {:b} & {:b} = {:b}", state.loopReason, state.tracingFlags, state.loopReason & state.tracingFlags); @@ -1433,7 +1414,6 @@ void DataProcessingDevice::Run() } else { mWasActive = false; } - FrameMark; } auto& spec = ref.get(); /// Cleanup messages which are still pending on exit. @@ -1448,12 +1428,10 @@ void DataProcessingDevice::Run() /// non-data triggers like those which are time based. void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) { - ZoneScopedN("DataProcessingDevice::doPrepare"); auto& context = ref.get(); *context.wasActive = false; { - ZoneScopedN("CallbackService::Id::ClockTick"); ref.get().call(); } // Whether or not we had something to do. 
@@ -1734,7 +1712,6 @@ struct WaitBackpressurePolicy { void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& info) { auto& context = ref.get(); - ZoneScopedN("DataProcessingDevice::handleData"); enum struct InputType : int { Invalid = 0, @@ -1763,7 +1740,6 @@ void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& auto& parts = info.parts; stats.updateStats({(int)ProcessingStatsId::TOTAL_INPUTS, DataProcessingStats::Op::Set, (int64_t)parts.Size()}); - TracyPlot("messages received", (int64_t)parts.Size()); std::vector results; // we can reserve the upper limit results.reserve(parts.Size() / 2); @@ -2060,7 +2036,6 @@ void update_maximum(std::atomic& maximum_value, T const& value) noexcept bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::vector& completed) { auto& context = ref.get(); - ZoneScopedN("DataProcessingDevice::tryDispatchComputation"); LOGP(debug, "DataProcessingDevice::tryDispatchComputation"); // This is the actual hidden state for the outer loop. In case we decide we // want to support multithreaded dispatching of operations, I can simply @@ -2135,7 +2110,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void { auto& relayer = ref.get(); auto& timingInfo = ref.get(); - ZoneScopedN("DataProcessingDevice::prepareForCurrentTimeslice"); auto timeslice = relayer.getTimesliceForSlot(i); timingInfo.timeslice = timeslice.value; @@ -2422,21 +2396,18 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v try { runNoCatch(action); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.errorHandling)(e, record); } } else { try { runNoCatch(action); } catch (std::exception& ex) { - ZoneScopedN("error handling"); /// Convert a standard exception to a RuntimeErrorRef /// Notice how this will lose the backtrace information /// and report the exception coming from here. auto e = runtime_error(ex.what()); (context.errorHandling)(e, record); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.errorHandling)(e, record); } } From 23d38302df6414abe92559ca3a6badafb109434c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 367/726] DPL: drop unused Tracy APIs ZoneScoped*, TracyAppInfo, FrameMark, TracyAlloc, TracyPlot were all removed and will be replaced by Signposts as needed. --- .../Foundation/include/Framework/Tracing.h | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index 6c9c65f4423e5..91c3b99eb8dd0 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -15,30 +15,6 @@ #define DPL_HAS_TRACING #include #else -#define ZoneScoped \ - while (false) { \ - } -#define FrameMark \ - while (false) { \ - } -#define TracyPlot(...) \ - while (false) { \ - } -#define ZoneScopedN(...) \ - while (false) { \ - } -#define ZoneScopedNS(...) \ - while (false) { \ - } -#define TracyAlloc(...) \ - while (false) { \ - } -#define TracyFree(...) \ - while (false) { \ - } -#define TracyAppInfo(...) 
\ - while (false) { \ - } #define O2_LOCKABLE_NAMED(T, V, N) T V #define O2_LOCKABLE(T) T #endif From 196f66c99375ea0bc7292ecaef191d7cfc503508 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 368/726] DPL: drop tracy support from the GUI No one is using it and Signposts are probably a better alternative. --- .../GUISupport/src/FrameworkGUIDeviceInspector.cxx | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index eb62fcc09d338..3ef8b99ed26bb 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -261,9 +261,6 @@ void displayDeviceInspector(DeviceSpec const& spec, ImGui::Text("Pid: %d (exit status: %d)", info.pid, info.exitStatus); } ImGui::Text("Device state: %s", info.deviceState.data()); -#ifdef DPL_ENABLE_TRACING - ImGui::Text("Tracy Port: %d", info.tracyPort); -#endif ImGui::Text("Rank: %zu/%zu%%%zu/%zu", spec.rank, spec.nSlots, spec.inputTimesliceId, spec.maxInputTimeslices); if (ImGui::Button(ICON_FA_BUG "Attach debugger")) { @@ -324,16 +321,6 @@ void displayDeviceInspector(DeviceSpec const& spec, } #endif -#if DPL_ENABLE_TRACING - ImGui::SameLine(); - if (ImGui::Button("Tracy")) { - std::string tracyPort = std::to_string(info.tracyPort); - auto cmd = fmt::format("tracy-profiler -p {} -a 127.0.0.1 &", info.tracyPort); - LOG(debug) << cmd; - int retVal = system(cmd.c_str()); - (void)retVal; - } -#endif if (control.controller) { if (ImGui::Button("Offer SHM")) { control.controller->write("/shm-offer 1000", strlen("/shm-offer 1000")); From 20fada9f64355bf67f0ad2caba88997641e623bd Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 369/726] DPL: drop tracy support for good --- Framework/Core/src/DataProcessingDevice.cxx | 4 ---- Framework/Foundation/include/Framework/Tracing.h | 7 ++++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 75ff3df50c3ea..4a9f5009b342e 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -8,10 +8,6 @@ // In applying this license CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-#ifdef DPL_ENABLE_TRACING -#define TRACY_ENABLE -#include -#endif #include "Framework/AsyncQueue.h" #include "Framework/DataProcessingDevice.h" #include "Framework/ChannelMatching.h" diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index 91c3b99eb8dd0..72c052ca6c161 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -11,9 +11,10 @@ #ifndef O2_FRAMEWORK_TRACING_H_ #define O2_FRAMEWORK_TRACING_H_ -#if DPL_ENABLE_TRACING && __has_include() -#define DPL_HAS_TRACING -#include +#if DPL_ENABLE_TRACING +// FIXME: not implemented yet in terms of Signposts +#define O2_LOCKABLE_NAMED(T, V, N) T V +#define O2_LOCKABLE(T) T #else #define O2_LOCKABLE_NAMED(T, V, N) T V #define O2_LOCKABLE(T) T From 46d28a27e2472cf3e7215e7c467b2aa7d3cffbfb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 22 Feb 2024 12:19:01 +0100 Subject: [PATCH 370/726] [FEAT] Drop support for Tracy --- Framework/CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md index e7278733de246..044274d9515c4 100644 --- a/Framework/CHANGELOG.md +++ b/Framework/CHANGELOG.md @@ -1,3 +1,12 @@ +# 2024-02-22: Drop Tracy support + +Tracy support never took off, so I am dropping it. This was mostly because people do not know about it and having a per process profile GUI was way unpractical. Moreover, needing an extra compile time flag meant one most likely did not have the support compiled in when needed. + +I have therefore decided to replace it with signposts, which hopefully will see better adoption thanks +to the integration with Instruments on mac and the easy way they can be enabled dynamically. + +We could then reintroduce Tracy support as a hook on top of signposts, if really needed. + # 2024-02-16: Improved Signposts. In particular: From cf44ce925ac12722847fae3dd0d54d52c91e80c5 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 06:56:31 +0100 Subject: [PATCH 371/726] [O2-4660] DPL: increase Dispatcher sending policy timeout Now to 50 ms * 10 before switching to lossy. --- Framework/Core/src/SendingPolicy.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index 3dbbc451915b0..bf97d56b4a5b8 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -50,16 +50,16 @@ std::vector SendingPolicy::createDefaultPolicies() auto &proxy = registry.get(); OutputChannelInfo const& info = proxy.getOutputChannelInfo(channelIndex); OutputChannelState& state = proxy.getOutputChannelState(channelIndex); - // Default timeout is 10ms. + // Default timeout is 50ms. // We count the number of consecutively dropped messages. // If we have more than 10, we switch to a completely // non-blocking approach. 
- int64_t timeout = 10; + int64_t timeout = 50; if (state.droppedMessages == 10 + 1) { LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; } if (state.droppedMessages == 0) { - timeout = 10; + timeout = 50; } if (state.droppedMessages > 10) { timeout = 0; From 6700ec309da85f324d0738525910850a219ff180 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 08:14:05 +0100 Subject: [PATCH 372/726] DPL: drop support for serialising / deserializing schema --- .../include/Framework/TMessageSerializer.h | 8 --- Framework/Core/src/TMessageSerializer.cxx | 64 ------------------- 2 files changed, 72 deletions(-) diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index b457f0ffc5f99..d9f0ca8edf12e 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -80,14 +80,6 @@ struct TMessageSerializer { template static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); - // load the schema information from a message/buffer - static void loadSchema(const fair::mq::Message& msg); - static void loadSchema(gsl::span buffer); - - // write the schema into an empty message/buffer - static void fillSchema(fair::mq::Message& msg, const StreamerList& streamers); - static void fillSchema(FairTMessage& msg, const StreamerList& streamers); - // get the streamers static StreamerList getStreamers(); diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 6276cd74152cc..f3e8a37a2b8d2 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -17,70 +17,6 @@ using namespace o2::framework; TMessageSerializer::StreamerList TMessageSerializer::sStreamers{}; std::mutex TMessageSerializer::sStreamersLock{}; -void TMessageSerializer::loadSchema(gsl::span buffer) -{ - std::unique_ptr obj = deserialize(buffer); - - TObjArray* pSchemas = dynamic_cast(obj.get()); - if (!pSchemas) { - return; - } - - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - pSchemas->SetOwner(kTRUE); - - for (int i = 0; i < pSchemas->GetEntriesFast(); i++) { - TStreamerInfo* pSchema = dynamic_cast(pSchemas->At(i)); - if (!pSchema) { - continue; - } - int version = pSchema->GetClassVersion(); - TClass* pClass = TClass::GetClass(pSchema->GetName()); - if (!pClass) { - continue; - } - if (pClass->GetClassVersion() == version) { - continue; - } - TObjArray* pInfos = const_cast(pClass->GetStreamerInfos()); - if (!pInfos) { - continue; - } - TVirtualStreamerInfo* pInfo = dynamic_cast(pInfos->At(version)); - if (pInfo) { - continue; - } - pSchema->SetClass(pClass); - pSchema->BuildOld(); - pInfos->AddAtAndExpand(pSchema, version); - pSchemas->Remove(pSchema); - } -} - -void TMessageSerializer::fillSchema(FairTMessage& msg, const StreamerList& streamers) -{ - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. 
- TObjArray infoArray{}; - for (const auto& info : streamers) { - infoArray.Add(info); - } - serialize(msg, &infoArray); -} - -void TMessageSerializer::loadSchema(const fair::mq::Message& msg) { loadSchema(as_span(msg)); } -void TMessageSerializer::fillSchema(fair::mq::Message& msg, const StreamerList& streamers) -{ - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - TObjArray infoArray{}; - for (const auto& info : streamers) { - infoArray.Add(info); - } - Serialize(msg, &infoArray); -} - void TMessageSerializer::updateStreamers(const FairTMessage& message, StreamerList& streamers) { std::lock_guard lock{TMessageSerializer::sStreamersLock}; From 9b7b506da0463f21a1d6db90fe62528b1c626e1e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 08:19:26 +0100 Subject: [PATCH 373/726] [O2-4578] DPL: attempt at removing a race condition in the rate limiting --- Framework/Core/src/CommonDataProcessors.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..05950d9d1e855 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -580,11 +580,11 @@ DataProcessorSpec CommonDataProcessors::getDummySink(std::vector cons auto oldestPossingTimeslice = timesliceIndex.getOldestPossibleOutput().timeslice.value; if (channel != device->GetChannels().end()) { fair::mq::MessagePtr payload(device->NewMessage()); - size_t* consumed = (size_t*)malloc(sizeof(size_t)); + auto* consumed = (size_t*)malloc(sizeof(size_t)); *consumed = oldestPossingTimeslice; if (*consumed != lastTimeslice) { payload->Rebuild(consumed, sizeof(int64_t), nullptr, nullptr); - channel->second[0].Send(payload); + auto result = channel->second[0].Send(payload, 100); lastTimeslice = *consumed; } } From 443c2d7b12d0ca2965fa6e04461f943724a57b1e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 08:14:05 +0100 Subject: [PATCH 374/726] DPL: drop support for serialising / deserializing schema --- .../include/Framework/TMessageSerializer.h | 55 ++---------- Framework/Core/src/TMessageSerializer.cxx | 89 ------------------- 2 files changed, 6 insertions(+), 138 deletions(-) diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index b457f0ffc5f99..52c4de984b221 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -26,9 +26,7 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { class FairTMessage; @@ -51,28 +49,23 @@ class FairTMessage : public TMessage struct TMessageSerializer { using StreamerList = std::vector; using CompressionLevel = int; - enum class CacheStreamers { yes, - no }; static void Serialize(fair::mq::Message& msg, const TObject* input, - CacheStreamers streamers = CacheStreamers::no, CompressionLevel compressionLevel = -1); template static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CacheStreamers streamers = CacheStreamers::no, // CompressionLevel compressionLevel = -1); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); static void serialize(FairTMessage& msg, const TObject* input, 
- CacheStreamers streamers = CacheStreamers::no, CompressionLevel compressionLevel = -1); template static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, CacheStreamers streamers = CacheStreamers::no, // + const TClass* cl, CompressionLevel compressionLevel = -1); template @@ -80,45 +73,22 @@ struct TMessageSerializer { template static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); - // load the schema information from a message/buffer - static void loadSchema(const fair::mq::Message& msg); - static void loadSchema(gsl::span buffer); - - // write the schema into an empty message/buffer - static void fillSchema(fair::mq::Message& msg, const StreamerList& streamers); - static void fillSchema(FairTMessage& msg, const StreamerList& streamers); - // get the streamers static StreamerList getStreamers(); - // update the streamer list with infos appropriate for this type - static void updateStreamers(const TObject* object); - private: - // update the cache of streamer infos for serialized classes - static void updateStreamers(const FairTMessage& message, StreamerList& streamers); - - // for now this is a static, maybe it would be better to move the storage somewhere else? - static StreamerList sStreamers; - static std::mutex sStreamersLock; }; inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CacheStreamers streamers, CompressionLevel compressionLevel) { - return serialize(tm, input, nullptr, streamers, compressionLevel); + return serialize(tm, input, nullptr, compressionLevel); } template inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CacheStreamers streamers, // - CompressionLevel compressionLevel) + const TClass* cl, CompressionLevel compressionLevel) { - if (streamers == CacheStreamers::yes) { - tm.EnableSchemaEvolution(true); - } - if (compressionLevel >= 0) { // if negative, skip to use ROOT default tm.SetCompressionLevel(compressionLevel); @@ -130,10 +100,6 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, } else { tm.WriteObjectAny(input, cl); } - - if (streamers == CacheStreamers::yes) { - updateStreamers(tm, sStreamers); - } } template @@ -172,12 +138,11 @@ inline void FairTMessage::free(void* /*data*/, void* hint) } inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CacheStreamers streamers, TMessageSerializer::CompressionLevel compressionLevel) { std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - serialize(*tm, input, input->Class(), streamers, compressionLevel); + serialize(*tm, input, input->Class(), compressionLevel); msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); tm.release(); @@ -186,12 +151,11 @@ inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* template inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // const TClass* cl, // - TMessageSerializer::CacheStreamers streamers, // TMessageSerializer::CompressionLevel compressionLevel) { std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - serialize(*tm, input, cl, streamers, compressionLevel); + serialize(*tm, input, cl, compressionLevel); msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); tm.release(); @@ -205,12 +169,6 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u output = deserialize(as_span(msg)); } -inline TMessageSerializer::StreamerList 
TMessageSerializer::getStreamers() -{ - std::lock_guard lock{TMessageSerializer::sStreamersLock}; - return sStreamers; -} - // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, // we would probably be fine with e.g. gsl::narrow_cast (or just a static_cast) inline gsl::span as_span(const fair::mq::Message& msg) @@ -224,6 +182,5 @@ inline gsl::span as_span(const FairTMessage& msg) gsl::narrow::size_type>(msg.BufferSize())}; } -} // namespace framework } // namespace o2 #endif // FRAMEWORK_TMESSAGESERIALIZER_H diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 6276cd74152cc..5388a6d716cda 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -13,92 +13,3 @@ #include using namespace o2::framework; - -TMessageSerializer::StreamerList TMessageSerializer::sStreamers{}; -std::mutex TMessageSerializer::sStreamersLock{}; - -void TMessageSerializer::loadSchema(gsl::span buffer) -{ - std::unique_ptr obj = deserialize(buffer); - - TObjArray* pSchemas = dynamic_cast(obj.get()); - if (!pSchemas) { - return; - } - - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - pSchemas->SetOwner(kTRUE); - - for (int i = 0; i < pSchemas->GetEntriesFast(); i++) { - TStreamerInfo* pSchema = dynamic_cast(pSchemas->At(i)); - if (!pSchema) { - continue; - } - int version = pSchema->GetClassVersion(); - TClass* pClass = TClass::GetClass(pSchema->GetName()); - if (!pClass) { - continue; - } - if (pClass->GetClassVersion() == version) { - continue; - } - TObjArray* pInfos = const_cast(pClass->GetStreamerInfos()); - if (!pInfos) { - continue; - } - TVirtualStreamerInfo* pInfo = dynamic_cast(pInfos->At(version)); - if (pInfo) { - continue; - } - pSchema->SetClass(pClass); - pSchema->BuildOld(); - pInfos->AddAtAndExpand(pSchema, version); - pSchemas->Remove(pSchema); - } -} - -void TMessageSerializer::fillSchema(FairTMessage& msg, const StreamerList& streamers) -{ - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - TObjArray infoArray{}; - for (const auto& info : streamers) { - infoArray.Add(info); - } - serialize(msg, &infoArray); -} - -void TMessageSerializer::loadSchema(const fair::mq::Message& msg) { loadSchema(as_span(msg)); } -void TMessageSerializer::fillSchema(fair::mq::Message& msg, const StreamerList& streamers) -{ - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - TObjArray infoArray{}; - for (const auto& info : streamers) { - infoArray.Add(info); - } - Serialize(msg, &infoArray); -} - -void TMessageSerializer::updateStreamers(const FairTMessage& message, StreamerList& streamers) -{ - std::lock_guard lock{TMessageSerializer::sStreamersLock}; - - TIter nextStreamer(message.GetStreamerInfos()); // unfortunately ROOT uses TList* here - // this looks like we could use std::map here. 
- while (TVirtualStreamerInfo* in = static_cast(nextStreamer())) { - auto found = std::find_if(streamers.begin(), streamers.end(), [&](const auto& old) { - return (old->GetName() == in->GetName() && old->GetClassVersion() == in->GetClassVersion()); - }); - if (found == streamers.end()) { - streamers.push_back(in); - } - } -} - -void TMessageSerializer::updateStreamers(const TObject* object) -{ - FairTMessage msg(kMESS_OBJECT); - serialize(msg, object, CacheStreamers::yes, CompressionLevel{0}); -} From fc7baac78d87738867856832de86f701454b97b9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 10:13:36 +0100 Subject: [PATCH 375/726] DPL: drop support for serialising / deserializing schema --- .../include/Framework/TMessageSerializer.h | 72 +++------------ Framework/Core/src/TMessageSerializer.cxx | 89 ------------------- 2 files changed, 12 insertions(+), 149 deletions(-) diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index b457f0ffc5f99..1f08b456c0218 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -26,9 +26,7 @@ #include #include -namespace o2 -{ -namespace framework +namespace o2::framework { class FairTMessage; @@ -51,74 +49,41 @@ class FairTMessage : public TMessage struct TMessageSerializer { using StreamerList = std::vector; using CompressionLevel = int; - enum class CacheStreamers { yes, - no }; static void Serialize(fair::mq::Message& msg, const TObject* input, - CacheStreamers streamers = CacheStreamers::no, CompressionLevel compressionLevel = -1); template static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CacheStreamers streamers = CacheStreamers::no, // CompressionLevel compressionLevel = -1); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); static void serialize(FairTMessage& msg, const TObject* input, - CacheStreamers streamers = CacheStreamers::no, CompressionLevel compressionLevel = -1); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, CacheStreamers streamers = CacheStreamers::no, // + static void serialize(FairTMessage& msg, const T* input, // + const TClass* cl, CompressionLevel compressionLevel = -1); template static std::unique_ptr deserialize(gsl::span buffer); template static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); - - // load the schema information from a message/buffer - static void loadSchema(const fair::mq::Message& msg); - static void loadSchema(gsl::span buffer); - - // write the schema into an empty message/buffer - static void fillSchema(fair::mq::Message& msg, const StreamerList& streamers); - static void fillSchema(FairTMessage& msg, const StreamerList& streamers); - - // get the streamers - static StreamerList getStreamers(); - - // update the streamer list with infos appropriate for this type - static void updateStreamers(const TObject* object); - - private: - // update the cache of streamer infos for serialized classes - static void updateStreamers(const FairTMessage& message, StreamerList& streamers); - - // for now this is a static, maybe it would be better to move the storage somewhere else? 
- static StreamerList sStreamers; - static std::mutex sStreamersLock; }; inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CacheStreamers streamers, CompressionLevel compressionLevel) { - return serialize(tm, input, nullptr, streamers, compressionLevel); + return serialize(tm, input, nullptr, compressionLevel); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CacheStreamers streamers, // - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // + const TClass* cl, CompressionLevel compressionLevel) { - if (streamers == CacheStreamers::yes) { - tm.EnableSchemaEvolution(true); - } - if (compressionLevel >= 0) { // if negative, skip to use ROOT default tm.SetCompressionLevel(compressionLevel); @@ -130,10 +95,6 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, } else { tm.WriteObjectAny(input, cl); } - - if (streamers == CacheStreamers::yes) { - updateStreamers(tm, sStreamers); - } } template @@ -172,26 +133,24 @@ inline void FairTMessage::free(void* /*data*/, void* hint) } inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CacheStreamers streamers, TMessageSerializer::CompressionLevel compressionLevel) { std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - serialize(*tm, input, input->Class(), streamers, compressionLevel); + serialize(*tm, input, input->Class(), compressionLevel); msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); tm.release(); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CacheStreamers streamers, // +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // + const TClass* cl, // TMessageSerializer::CompressionLevel compressionLevel) { std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - serialize(*tm, input, cl, streamers, compressionLevel); + serialize(*tm, input, cl, compressionLevel); msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); tm.release(); @@ -205,12 +164,6 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u output = deserialize(as_span(msg)); } -inline TMessageSerializer::StreamerList TMessageSerializer::getStreamers() -{ - std::lock_guard lock{TMessageSerializer::sStreamersLock}; - return sStreamers; -} - // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, // we would probably be fine with e.g. 
gsl::narrow_cast (or just a static_cast) inline gsl::span as_span(const fair::mq::Message& msg) @@ -221,9 +174,8 @@ inline gsl::span as_span(const fair::mq::Message& msg) inline gsl::span as_span(const FairTMessage& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), - gsl::narrow::size_type>(msg.BufferSize())}; + gsl::narrow::size_type>(msg.BufferSize())}; } -} // namespace framework -} // namespace o2 +} // namespace o2::framework #endif // FRAMEWORK_TMESSAGESERIALIZER_H diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 6276cd74152cc..5388a6d716cda 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -13,92 +13,3 @@ #include using namespace o2::framework; - -TMessageSerializer::StreamerList TMessageSerializer::sStreamers{}; -std::mutex TMessageSerializer::sStreamersLock{}; - -void TMessageSerializer::loadSchema(gsl::span buffer) -{ - std::unique_ptr obj = deserialize(buffer); - - TObjArray* pSchemas = dynamic_cast(obj.get()); - if (!pSchemas) { - return; - } - - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - pSchemas->SetOwner(kTRUE); - - for (int i = 0; i < pSchemas->GetEntriesFast(); i++) { - TStreamerInfo* pSchema = dynamic_cast(pSchemas->At(i)); - if (!pSchema) { - continue; - } - int version = pSchema->GetClassVersion(); - TClass* pClass = TClass::GetClass(pSchema->GetName()); - if (!pClass) { - continue; - } - if (pClass->GetClassVersion() == version) { - continue; - } - TObjArray* pInfos = const_cast(pClass->GetStreamerInfos()); - if (!pInfos) { - continue; - } - TVirtualStreamerInfo* pInfo = dynamic_cast(pInfos->At(version)); - if (pInfo) { - continue; - } - pSchema->SetClass(pClass); - pSchema->BuildOld(); - pInfos->AddAtAndExpand(pSchema, version); - pSchemas->Remove(pSchema); - } -} - -void TMessageSerializer::fillSchema(FairTMessage& msg, const StreamerList& streamers) -{ - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - TObjArray infoArray{}; - for (const auto& info : streamers) { - infoArray.Add(info); - } - serialize(msg, &infoArray); -} - -void TMessageSerializer::loadSchema(const fair::mq::Message& msg) { loadSchema(as_span(msg)); } -void TMessageSerializer::fillSchema(fair::mq::Message& msg, const StreamerList& streamers) -{ - // TODO: this is a bit of a problem in general: non-owning ROOT containers should become - // owners at deserialize, otherwise there is a leak. Switch to a better container. - TObjArray infoArray{}; - for (const auto& info : streamers) { - infoArray.Add(info); - } - Serialize(msg, &infoArray); -} - -void TMessageSerializer::updateStreamers(const FairTMessage& message, StreamerList& streamers) -{ - std::lock_guard lock{TMessageSerializer::sStreamersLock}; - - TIter nextStreamer(message.GetStreamerInfos()); // unfortunately ROOT uses TList* here - // this looks like we could use std::map here. 
- while (TVirtualStreamerInfo* in = static_cast(nextStreamer())) { - auto found = std::find_if(streamers.begin(), streamers.end(), [&](const auto& old) { - return (old->GetName() == in->GetName() && old->GetClassVersion() == in->GetClassVersion()); - }); - if (found == streamers.end()) { - streamers.push_back(in); - } - } -} - -void TMessageSerializer::updateStreamers(const TObject* object) -{ - FairTMessage msg(kMESS_OBJECT); - serialize(msg, object, CacheStreamers::yes, CompressionLevel{0}); -} From af2fb49149c828b2c28c6b67e36b12ebabe4c3d6 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 20:36:58 +0100 Subject: [PATCH 376/726] DPL: avoid TMessage usage TMessage does not allow for non owned buffers, so we end up having an extra buffer in private memory for (de)serializing. Using TBufferFile directly allows to avoid that, so this moves the whole ROOT serialization support in DPL to use it. --- .../src/AODJAlienReaderHelpers.h | 2 + .../Core/include/Framework/DataAllocator.h | 1 + .../Core/include/Framework/DataRefUtils.h | 13 +- .../include/Framework/RootMessageContext.h | 3 + .../Framework/RootSerializationSupport.h | 3 +- .../include/Framework/TMessageSerializer.h | 130 ++++++++---------- Framework/Core/src/CommonDataProcessors.cxx | 4 +- Framework/Core/src/TMessageSerializer.cxx | 31 +++++ Framework/Core/test/test_DataRefUtils.cxx | 28 +++- .../Core/test/test_TMessageSerializer.cxx | 31 +++-- Utilities/Mergers/src/ObjectStore.cxx | 11 +- Utilities/Mergers/test/benchmark_Types.cxx | 11 +- 12 files changed, 167 insertions(+), 101 deletions(-) diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h index 655e4b6c0b439..4b9fd710aca14 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h @@ -16,7 +16,9 @@ #include "Framework/AlgorithmSpec.h" #include "Framework/Logger.h" #include + #include +class TFile; namespace o2::framework::readers { diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 8151d2f83c6c6..029e922aeb90b 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -359,6 +359,7 @@ class DataAllocator } else if constexpr (has_root_dictionary::value == true || is_specialization_v == true) { // Serialize a snapshot of an object with root dictionary payloadMessage = proxy.createOutputMessage(routeIndex); + payloadMessage->Rebuild(4096, {64}); if constexpr (is_specialization_v == true) { // Explicitely ROOT serialize a snapshot of object. 
// An object wrapped into type `ROOTSerialized` is explicitely marked to be ROOT serialized diff --git a/Framework/Core/include/Framework/DataRefUtils.h b/Framework/Core/include/Framework/DataRefUtils.h index defd10244bca5..e59f986f09250 100644 --- a/Framework/Core/include/Framework/DataRefUtils.h +++ b/Framework/Core/include/Framework/DataRefUtils.h @@ -71,12 +71,15 @@ struct DataRefUtils { throw runtime_error("Attempt to extract a TMessage from non-ROOT serialised message"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); - auto* storedClass = ftm.GetClass(); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); auto* requestedClass = RSS::TClass::GetClass(typeid(T)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); // should always have the class description if has_root_dictionary is true assert(requestedClass != nullptr); + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* object = ftm.ReadObjectAny(storedClass); if (object == nullptr) { throw runtime_error_f("Failed to read object with name %s from message using ROOT serialization.", @@ -146,7 +149,11 @@ struct DataRefUtils { throw runtime_error("ROOT serialization not supported, dictionary not found for data type"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); + ftm.InitMap(); + auto *classInfo = ftm.ReadClass(); + ftm.SetBufferOffset(0); + ftm.ResetMap(); result.reset(static_cast(ftm.ReadObjectAny(cl))); if (result.get() == nullptr) { throw runtime_error_f("Unable to extract class %s", cl == nullptr ? "" : cl->GetName()); diff --git a/Framework/Core/include/Framework/RootMessageContext.h b/Framework/Core/include/Framework/RootMessageContext.h index bef60ebbbf9f9..b1124880cf30f 100644 --- a/Framework/Core/include/Framework/RootMessageContext.h +++ b/Framework/Core/include/Framework/RootMessageContext.h @@ -72,6 +72,9 @@ class RootSerializedObject : public MessageContext::ContextObject fair::mq::Parts finalize() final { assert(mParts.Size() == 1); + if (mPayloadMsg->GetSize() < sizeof(char*)) { + mPayloadMsg->Rebuild(4096, {64}); + } TMessageSerializer::Serialize(*mPayloadMsg, mObject.get(), nullptr); mParts.AddPart(std::move(mPayloadMsg)); return ContextObject::finalize(); diff --git a/Framework/Core/include/Framework/RootSerializationSupport.h b/Framework/Core/include/Framework/RootSerializationSupport.h index cbf7408b13c7d..a44093f9c02bf 100644 --- a/Framework/Core/include/Framework/RootSerializationSupport.h +++ b/Framework/Core/include/Framework/RootSerializationSupport.h @@ -21,7 +21,8 @@ namespace o2::framework /// compiler. 
struct RootSerializationSupport { using TClass = ::TClass; - using FairTMessage = o2::framework::FairTMessage; + using FairInputTBuffer = o2::framework::FairInputTBuffer; + using FairOutputBuffer = o2::framework::FairOutputTBuffer; using TObject = ::TObject; }; diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 1f08b456c0218..ca18eb21abfa1 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -16,9 +16,8 @@ #include "Framework/RuntimeError.h" #include -#include +#include #include -#include #include #include #include @@ -28,67 +27,76 @@ namespace o2::framework { -class FairTMessage; +class FairOutputTBuffer; +class FairInputTBuffer; // utilities to produce a span over a byte buffer held by various message types // this is to avoid littering code with casts and conversions (span has a signed index type(!)) -gsl::span as_span(const FairTMessage& msg); +gsl::span as_span(const FairInputTBuffer& msg); +gsl::span as_span(const FairOutputTBuffer& msg); gsl::span as_span(const fair::mq::Message& msg); -class FairTMessage : public TMessage +// A TBufferFile which we can use to serialise data to a FairMQ message. +class FairOutputTBuffer : public TBufferFile { public: - using TMessage::TMessage; - FairTMessage() : TMessage(kMESS_OBJECT) {} - FairTMessage(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - FairTMessage(gsl::span buf) : TMessage(buf.data(), buf.size()) { ResetBit(kIsOwner); } + // This is to serialise data to FairMQ. We embed the pointer to the message + // in the data itself, so that we can use it to reallocate the message if needed. + // The FairMQ message retains ownership of the data. + // When deserialising the root object, keep in mind one needs to skip the 8 bytes + // for the pointer. + FairOutputTBuffer(fair::mq::Message& msg) + : TBufferFile(TBuffer::kWrite, msg.GetSize() - sizeof(char*), embedInItself(msg), false, fairMQrealloc) + { + } + // Helper function to keep track of the FairMQ message that holds the data + // in the data itself. We can use this to make sure the message can be reallocated + // even if we simply have a pointer to the data. Hopefully ROOT will not play dirty + // with us. + void* embedInItself(fair::mq::Message& msg); // helper function to clean up the object holding the data after it is transported. - static void free(void* /*data*/, void* hint); + static char* fairMQrealloc(char* oldData, size_t newSize, size_t oldSize); }; -struct TMessageSerializer { - using StreamerList = std::vector; - using CompressionLevel = int; +class FairInputTBuffer : public TBufferFile +{ + public: + // This is to serialise data to FairMQ. The provided message is expeted to have 8 bytes + // of overhead, where the source embedded the pointer for the reallocation. + // Notice this will break if the sender and receiver are not using the same + // size for a pointer. 
+ FairInputTBuffer(char * data, size_t size) + : TBufferFile(TBuffer::kRead, size-sizeof(char*), data + sizeof(char*), false, nullptr) + { + } +}; - static void Serialize(fair::mq::Message& msg, const TObject* input, - CompressionLevel compressionLevel = -1); +struct TMessageSerializer { + static void Serialize(fair::mq::Message& msg, const TObject* input); template - static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CompressionLevel compressionLevel = -1); + static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); - static void serialize(FairTMessage& msg, const TObject* input, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const TObject* input); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const T* input, const TClass* cl); template - static std::unique_ptr deserialize(gsl::span buffer); - template - static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); + static inline std::unique_ptr deserialize(FairInputTBuffer & buffer); }; -inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const TObject* input) { - return serialize(tm, input, nullptr, compressionLevel); + return serialize(tm, input, nullptr); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const T* input, const TClass* cl) { - if (compressionLevel >= 0) { - // if negative, skip to use ROOT default - tm.SetCompressionLevel(compressionLevel); - } - // TODO: check what WriateObject and WriteObjectAny are doing if (cl == nullptr) { tm.WriteObject(input); @@ -98,7 +106,7 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // } template -inline std::unique_ptr TMessageSerializer::deserialize(gsl::span buffer) +inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer & buffer) { TClass* tgtClass = TClass::GetClass(typeid(T)); if (tgtClass == nullptr) { @@ -107,53 +115,32 @@ inline std::unique_ptr TMessageSerializer::deserialize(gsl::span b // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided - FairTMessage tm(buffer); - TClass* serializedClass = tm.GetClass(); + buffer.InitMap(); + TClass* serializedClass = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); if (serializedClass == nullptr) { throw runtime_error_f("can not read class info from buffer"); } if (tgtClass != serializedClass && serializedClass->GetBaseClass(tgtClass) == nullptr) { throw runtime_error_f("can not convert serialized class %s into target class %s", - tm.GetClass()->GetName(), + serializedClass->GetName(), tgtClass->GetName()); } - return std::unique_ptr(reinterpret_cast(tm.ReadObjectAny(serializedClass))); + return std::unique_ptr(reinterpret_cast(buffer.ReadObjectAny(serializedClass))); } -template -inline std::unique_ptr TMessageSerializer::deserialize(std::byte* buffer, size_t size) +inline void 
TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input) { - return deserialize(gsl::span(buffer, gsl::narrow::size_type>(size))); -} - -inline void FairTMessage::free(void* /*data*/, void* hint) -{ - std::default_delete deleter; - deleter(static_cast(hint)); -} - -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CompressionLevel compressionLevel) -{ - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, input->Class(), compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, input->Class()); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CompressionLevel compressionLevel) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, const TClass* cl) { - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, cl, compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, cl); } template @@ -161,7 +148,8 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u { // we know the message will not be modified by this, // so const_cast should be OK here(IMHO). - output = deserialize(as_span(msg)); + FairInputTBuffer input(static_cast(msg.GetData()), static_cast(msg.GetSize())); + output = deserialize(input); } // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, @@ -171,7 +159,7 @@ inline gsl::span as_span(const fair::mq::Message& msg) return gsl::span{static_cast(msg.GetData()), gsl::narrow::size_type>(msg.GetSize())}; } -inline gsl::span as_span(const FairTMessage& msg) +inline gsl::span as_span(const FairInputTBuffer& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), gsl::narrow::size_type>(msg.BufferSize())}; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..02ef5c7bc5b3c 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -141,9 +141,9 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector(ref.payload), static_cast(datah->payloadSize)); + FairInputTBuffer tm(const_cast(ref.payload), static_cast(datah->payloadSize)); InputObject obj; - obj.kind = tm.GetClass(); + obj.kind = tm.ReadClass(); if (obj.kind == nullptr) { LOG(error) << "Cannot read class info from buffer."; return; diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 5388a6d716cda..9f09c3ade0089 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -9,7 +9,38 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. #include +#include #include #include using namespace o2::framework; + +void* FairOutputTBuffer::embedInItself(fair::mq::Message& msg) { + // The first bytes of the message are used to store the pointer to the message itself + // so that we can reallocate it if needed. 
+ if (sizeof(char*) > msg.GetSize()) { + throw std::runtime_error("Message size too small to embed pointer"); + } + char* data = reinterpret_cast(msg.GetData()); + char* ptr = reinterpret_cast(&msg); + std::memcpy(data, ptr, sizeof(char*)); + return data + sizeof(char*); +} + +// Reallocation function. Get the message pointer from the data and call Rebuild. +char *FairOutputTBuffer::fairMQrealloc(char *oldData, size_t newSize, size_t oldSize) { + auto* msg = reinterpret_cast(oldData - sizeof(char*)); + if (newSize <= msg->GetSize()) { + // no need to reallocate, the message is already big enough + return oldData; + } + // Create a shallow copy of the message + fair::mq::MessagePtr oldMsg = msg->GetTransport()->CreateMessage(); + oldMsg->Copy(*msg); + // Copy the old data while rebuilding. Reference counting should make + // sure the old message is not deleted until the new one is ready. + msg->Rebuild(newSize, fair::mq::Alignment{64}); + memcpy(msg->GetData(), oldMsg->GetData(), oldSize); + + return reinterpret_cast(msg->GetData()) + sizeof(char*); +} diff --git a/Framework/Core/test/test_DataRefUtils.cxx b/Framework/Core/test/test_DataRefUtils.cxx index 37da7912bfe8b..081adc81ebf69 100644 --- a/Framework/Core/test/test_DataRefUtils.cxx +++ b/Framework/Core/test/test_DataRefUtils.cxx @@ -21,17 +21,37 @@ using namespace o2::framework; +TEST_CASE("PureRootTest") { + TBufferFile buffer(TBuffer::kWrite); + TObjString s("test"); + buffer.WriteObject(&s); + + TBufferFile buffer2(TBuffer::kRead, buffer.BufferSize(), buffer.Buffer(), false); + buffer2.SetReadMode(); + buffer2.InitMap(); + TClass *storedClass = buffer2.ReadClass(); + // ReadClass advances the buffer, so we need to reset it. + buffer2.SetBufferOffset(0); + buffer2.ResetMap(); + REQUIRE(storedClass != nullptr); + auto *outS = (TObjString*)buffer2.ReadObjectAny(storedClass); + REQUIRE(outS != nullptr); + REQUIRE(outS->GetString() == "test"); +} + // Simple test to do root deserialization. 
TEST_CASE("TestRootSerialization") { DataRef ref; - TMessage* tm = new TMessage(kMESS_OBJECT); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer tm(*msg); auto sOrig = std::make_unique("test"); - tm->WriteObject(sOrig.get()); + tm << sOrig.get(); o2::header::DataHeader dh; dh.payloadSerializationMethod = o2::header::gSerializationMethodROOT; - ref.payload = tm->Buffer(); - dh.payloadSize = tm->BufferSize(); + ref.payload = (char*)msg->GetData(); + dh.payloadSize = (size_t)msg->GetSize(); ref.header = reinterpret_cast(&dh); // Check by using the same type diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index bc5f817400a44..89de74edc8455 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -11,6 +11,7 @@ #include "Framework/TMessageSerializer.h" #include "Framework/RuntimeError.h" +#include #include "TestClasses.h" #include #include @@ -49,14 +50,14 @@ TEST_CASE("TestTMessageSerializer") array.SetOwner(); array.Add(new TNamed(testname, testtitle)); - FairTMessage msg; - TMessageSerializer::serialize(msg, &array); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); + TMessageSerializer::serialize(buffer, &array); - auto buf = as_span(msg); - REQUIRE(buf.size() == msg.BufferSize()); - REQUIRE(static_cast(buf.data()) == static_cast(msg.Buffer())); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); // test deserialization with TObject as target class (default) - auto out = TMessageSerializer::deserialize(buf); + auto out = TMessageSerializer::deserialize(msg2); auto* outarr = dynamic_cast(out.get()); REQUIRE(out.get() == outarr); @@ -66,9 +67,9 @@ TEST_CASE("TestTMessageSerializer") REQUIRE(named->GetTitle() == std::string(testtitle)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef, + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, ExceptionMatcher("can not convert serialized class TObjArray into target class TNamed")); } @@ -87,18 +88,21 @@ TEST_CASE("TestTMessageSerializer_NonTObject") TClass* cl = TClass::GetClass("std::vector"); REQUIRE(cl != nullptr); - FairTMessage msg; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); char* in = reinterpret_cast(&data); - TMessageSerializer::serialize(msg, in, cl); + TMessageSerializer::serialize(buffer, in, cl); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); - auto out = TMessageSerializer::deserialize>(as_span(msg)); + auto out = TMessageSerializer::deserialize>(msg2); REQUIRE(out); REQUIRE((*out.get()).size() == 2); REQUIRE((*out.get())[0] == o2::test::Polymorphic(0xaffe)); REQUIRE((*out.get())[1] == o2::test::Polymorphic(0xd00f)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(as_span(msg)), RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), 
RuntimeErrorRef); } TEST_CASE("TestTMessageSerializer_InvalidBuffer") @@ -119,5 +123,6 @@ TEST_CASE("TestTMessageSerializer_InvalidBuffer") struct Dummy { }; auto matcher = ExceptionMatcher("class is not ROOT-serializable: ZL22CATCH2_INTERNAL_TEST_4vE5Dummy"); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize((std::byte*)buffer, strlen(buffer)), o2::framework::RuntimeErrorRef, matcher); + FairInputTBuffer msg2((char*)buffer, strlen(buffer)); + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, matcher); } diff --git a/Utilities/Mergers/src/ObjectStore.cxx b/Utilities/Mergers/src/ObjectStore.cxx index e88358507c31e..3bb49f1dfc9d8 100644 --- a/Utilities/Mergers/src/ObjectStore.cxx +++ b/Utilities/Mergers/src/ObjectStore.cxx @@ -38,7 +38,7 @@ static std::string concat(Args&&... arguments) return std::move(ss.str()); } -void* readObject(const TClass* type, o2::framework::FairTMessage& ftm) +void* readObject(const TClass* type, o2::framework::FairInputTBuffer& ftm) { using namespace std::string_view_literals; auto* object = ftm.ReadObjectAny(type); @@ -60,7 +60,7 @@ MergeInterface* castToMergeInterface(bool inheritsFromTObject, void* object, TCl return objectAsMergeInterface; } -std::optional extractVector(o2::framework::FairTMessage& ftm, const TClass* storedClass) +std::optional extractVector(o2::framework::FairInputTBuffer& ftm, const TClass* storedClass) { if (!storedClass->InheritsFrom(TClass::GetClass(typeid(VectorOfRawTObjects)))) { return std::nullopt; @@ -88,11 +88,14 @@ ObjectStore extractObjectFrom(const framework::DataRef& ref) throw std::runtime_error(concat(errorPrefix, "It is not ROOT-serialized"sv)); } - o2::framework::FairTMessage ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); - auto* storedClass = ftm.GetClass(); + o2::framework::FairInputTBuffer ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error(concat(errorPrefix, "Unknown stored class"sv)); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); if (const auto extractedVector = extractVector(ftm, storedClass)) { return extractedVector.value(); diff --git a/Utilities/Mergers/test/benchmark_Types.cxx b/Utilities/Mergers/test/benchmark_Types.cxx index 790fd329185ea..736685c5746b8 100644 --- a/Utilities/Mergers/test/benchmark_Types.cxx +++ b/Utilities/Mergers/test/benchmark_Types.cxx @@ -165,11 +165,16 @@ auto measure = [](Measurement m, auto* o, auto* i) -> double { tm->WriteObject(o); start = std::chrono::high_resolution_clock::now(); - o2::framework::FairTMessage ftm(const_cast(tm->Buffer()), tm->BufferSize()); - auto* storedClass = ftm.GetClass(); + // Needed to take into account that FairInputTBuffer expects the first 8 bytes to be the + // allocator pointer, which is not present in the TMessage buffer. 
+ o2::framework::FairInputTBuffer ftm(const_cast(tm->Buffer() - 8), tm->BufferSize() + 8); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error("Unknown stored class"); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* tObjectClass = TClass::GetClass(typeid(TObject)); if (!storedClass->InheritsFrom(tObjectClass)) { @@ -738,4 +743,4 @@ int main(int argc, const char* argv[]) file.close(); return 0; -} \ No newline at end of file +} From eb215631f99b00576f45fb39889d76b1586ec989 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 23 Feb 2024 20:36:58 +0100 Subject: [PATCH 377/726] DPL: avoid TMessage usage TMessage does not allow for non owned buffers, so we end up having an extra buffer in private memory for (de)serializing. Using TBufferFile directly allows to avoid that, so this moves the whole ROOT serialization support in DPL to use it. --- .../src/AODJAlienReaderHelpers.h | 2 + .../Core/include/Framework/DataAllocator.h | 1 + .../Core/include/Framework/DataRefUtils.h | 13 +- .../include/Framework/RootMessageContext.h | 3 + .../Framework/RootSerializationSupport.h | 3 +- .../include/Framework/TMessageSerializer.h | 130 ++++++++---------- Framework/Core/src/CommonDataProcessors.cxx | 4 +- Framework/Core/src/TMessageSerializer.cxx | 31 +++++ Framework/Core/test/test_DataRefUtils.cxx | 28 +++- .../Core/test/test_TMessageSerializer.cxx | 31 +++-- Framework/Utils/test/test_RootTreeWriter.cxx | 1 + Utilities/Mergers/src/ObjectStore.cxx | 11 +- Utilities/Mergers/test/benchmark_Types.cxx | 11 +- 13 files changed, 168 insertions(+), 101 deletions(-) diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h index 655e4b6c0b439..4b9fd710aca14 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h @@ -16,7 +16,9 @@ #include "Framework/AlgorithmSpec.h" #include "Framework/Logger.h" #include + #include +class TFile; namespace o2::framework::readers { diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 8151d2f83c6c6..029e922aeb90b 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -359,6 +359,7 @@ class DataAllocator } else if constexpr (has_root_dictionary::value == true || is_specialization_v == true) { // Serialize a snapshot of an object with root dictionary payloadMessage = proxy.createOutputMessage(routeIndex); + payloadMessage->Rebuild(4096, {64}); if constexpr (is_specialization_v == true) { // Explicitely ROOT serialize a snapshot of object. 
// An object wrapped into type `ROOTSerialized` is explicitely marked to be ROOT serialized diff --git a/Framework/Core/include/Framework/DataRefUtils.h b/Framework/Core/include/Framework/DataRefUtils.h index defd10244bca5..e59f986f09250 100644 --- a/Framework/Core/include/Framework/DataRefUtils.h +++ b/Framework/Core/include/Framework/DataRefUtils.h @@ -71,12 +71,15 @@ struct DataRefUtils { throw runtime_error("Attempt to extract a TMessage from non-ROOT serialised message"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); - auto* storedClass = ftm.GetClass(); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); auto* requestedClass = RSS::TClass::GetClass(typeid(T)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); // should always have the class description if has_root_dictionary is true assert(requestedClass != nullptr); + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* object = ftm.ReadObjectAny(storedClass); if (object == nullptr) { throw runtime_error_f("Failed to read object with name %s from message using ROOT serialization.", @@ -146,7 +149,11 @@ struct DataRefUtils { throw runtime_error("ROOT serialization not supported, dictionary not found for data type"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); + ftm.InitMap(); + auto *classInfo = ftm.ReadClass(); + ftm.SetBufferOffset(0); + ftm.ResetMap(); result.reset(static_cast(ftm.ReadObjectAny(cl))); if (result.get() == nullptr) { throw runtime_error_f("Unable to extract class %s", cl == nullptr ? "" : cl->GetName()); diff --git a/Framework/Core/include/Framework/RootMessageContext.h b/Framework/Core/include/Framework/RootMessageContext.h index bef60ebbbf9f9..b1124880cf30f 100644 --- a/Framework/Core/include/Framework/RootMessageContext.h +++ b/Framework/Core/include/Framework/RootMessageContext.h @@ -72,6 +72,9 @@ class RootSerializedObject : public MessageContext::ContextObject fair::mq::Parts finalize() final { assert(mParts.Size() == 1); + if (mPayloadMsg->GetSize() < sizeof(char*)) { + mPayloadMsg->Rebuild(4096, {64}); + } TMessageSerializer::Serialize(*mPayloadMsg, mObject.get(), nullptr); mParts.AddPart(std::move(mPayloadMsg)); return ContextObject::finalize(); diff --git a/Framework/Core/include/Framework/RootSerializationSupport.h b/Framework/Core/include/Framework/RootSerializationSupport.h index cbf7408b13c7d..a44093f9c02bf 100644 --- a/Framework/Core/include/Framework/RootSerializationSupport.h +++ b/Framework/Core/include/Framework/RootSerializationSupport.h @@ -21,7 +21,8 @@ namespace o2::framework /// compiler. 
struct RootSerializationSupport { using TClass = ::TClass; - using FairTMessage = o2::framework::FairTMessage; + using FairInputTBuffer = o2::framework::FairInputTBuffer; + using FairOutputBuffer = o2::framework::FairOutputTBuffer; using TObject = ::TObject; }; diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 1f08b456c0218..ca18eb21abfa1 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -16,9 +16,8 @@ #include "Framework/RuntimeError.h" #include -#include +#include #include -#include #include #include #include @@ -28,67 +27,76 @@ namespace o2::framework { -class FairTMessage; +class FairOutputTBuffer; +class FairInputTBuffer; // utilities to produce a span over a byte buffer held by various message types // this is to avoid littering code with casts and conversions (span has a signed index type(!)) -gsl::span as_span(const FairTMessage& msg); +gsl::span as_span(const FairInputTBuffer& msg); +gsl::span as_span(const FairOutputTBuffer& msg); gsl::span as_span(const fair::mq::Message& msg); -class FairTMessage : public TMessage +// A TBufferFile which we can use to serialise data to a FairMQ message. +class FairOutputTBuffer : public TBufferFile { public: - using TMessage::TMessage; - FairTMessage() : TMessage(kMESS_OBJECT) {} - FairTMessage(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - FairTMessage(gsl::span buf) : TMessage(buf.data(), buf.size()) { ResetBit(kIsOwner); } + // This is to serialise data to FairMQ. We embed the pointer to the message + // in the data itself, so that we can use it to reallocate the message if needed. + // The FairMQ message retains ownership of the data. + // When deserialising the root object, keep in mind one needs to skip the 8 bytes + // for the pointer. + FairOutputTBuffer(fair::mq::Message& msg) + : TBufferFile(TBuffer::kWrite, msg.GetSize() - sizeof(char*), embedInItself(msg), false, fairMQrealloc) + { + } + // Helper function to keep track of the FairMQ message that holds the data + // in the data itself. We can use this to make sure the message can be reallocated + // even if we simply have a pointer to the data. Hopefully ROOT will not play dirty + // with us. + void* embedInItself(fair::mq::Message& msg); // helper function to clean up the object holding the data after it is transported. - static void free(void* /*data*/, void* hint); + static char* fairMQrealloc(char* oldData, size_t newSize, size_t oldSize); }; -struct TMessageSerializer { - using StreamerList = std::vector; - using CompressionLevel = int; +class FairInputTBuffer : public TBufferFile +{ + public: + // This is to serialise data to FairMQ. The provided message is expeted to have 8 bytes + // of overhead, where the source embedded the pointer for the reallocation. + // Notice this will break if the sender and receiver are not using the same + // size for a pointer. 
+ FairInputTBuffer(char * data, size_t size) + : TBufferFile(TBuffer::kRead, size-sizeof(char*), data + sizeof(char*), false, nullptr) + { + } +}; - static void Serialize(fair::mq::Message& msg, const TObject* input, - CompressionLevel compressionLevel = -1); +struct TMessageSerializer { + static void Serialize(fair::mq::Message& msg, const TObject* input); template - static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CompressionLevel compressionLevel = -1); + static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); - static void serialize(FairTMessage& msg, const TObject* input, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const TObject* input); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const T* input, const TClass* cl); template - static std::unique_ptr deserialize(gsl::span buffer); - template - static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); + static inline std::unique_ptr deserialize(FairInputTBuffer & buffer); }; -inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const TObject* input) { - return serialize(tm, input, nullptr, compressionLevel); + return serialize(tm, input, nullptr); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const T* input, const TClass* cl) { - if (compressionLevel >= 0) { - // if negative, skip to use ROOT default - tm.SetCompressionLevel(compressionLevel); - } - // TODO: check what WriateObject and WriteObjectAny are doing if (cl == nullptr) { tm.WriteObject(input); @@ -98,7 +106,7 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // } template -inline std::unique_ptr TMessageSerializer::deserialize(gsl::span buffer) +inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer & buffer) { TClass* tgtClass = TClass::GetClass(typeid(T)); if (tgtClass == nullptr) { @@ -107,53 +115,32 @@ inline std::unique_ptr TMessageSerializer::deserialize(gsl::span b // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided - FairTMessage tm(buffer); - TClass* serializedClass = tm.GetClass(); + buffer.InitMap(); + TClass* serializedClass = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); if (serializedClass == nullptr) { throw runtime_error_f("can not read class info from buffer"); } if (tgtClass != serializedClass && serializedClass->GetBaseClass(tgtClass) == nullptr) { throw runtime_error_f("can not convert serialized class %s into target class %s", - tm.GetClass()->GetName(), + serializedClass->GetName(), tgtClass->GetName()); } - return std::unique_ptr(reinterpret_cast(tm.ReadObjectAny(serializedClass))); + return std::unique_ptr(reinterpret_cast(buffer.ReadObjectAny(serializedClass))); } -template -inline std::unique_ptr TMessageSerializer::deserialize(std::byte* buffer, size_t size) +inline void 
TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input) { - return deserialize(gsl::span(buffer, gsl::narrow::size_type>(size))); -} - -inline void FairTMessage::free(void* /*data*/, void* hint) -{ - std::default_delete deleter; - deleter(static_cast(hint)); -} - -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CompressionLevel compressionLevel) -{ - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, input->Class(), compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, input->Class()); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CompressionLevel compressionLevel) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, const TClass* cl) { - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, cl, compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, cl); } template @@ -161,7 +148,8 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u { // we know the message will not be modified by this, // so const_cast should be OK here(IMHO). - output = deserialize(as_span(msg)); + FairInputTBuffer input(static_cast(msg.GetData()), static_cast(msg.GetSize())); + output = deserialize(input); } // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, @@ -171,7 +159,7 @@ inline gsl::span as_span(const fair::mq::Message& msg) return gsl::span{static_cast(msg.GetData()), gsl::narrow::size_type>(msg.GetSize())}; } -inline gsl::span as_span(const FairTMessage& msg) +inline gsl::span as_span(const FairInputTBuffer& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), gsl::narrow::size_type>(msg.BufferSize())}; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..02ef5c7bc5b3c 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -141,9 +141,9 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector(ref.payload), static_cast(datah->payloadSize)); + FairInputTBuffer tm(const_cast(ref.payload), static_cast(datah->payloadSize)); InputObject obj; - obj.kind = tm.GetClass(); + obj.kind = tm.ReadClass(); if (obj.kind == nullptr) { LOG(error) << "Cannot read class info from buffer."; return; diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 5388a6d716cda..9f09c3ade0089 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -9,7 +9,38 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. #include +#include #include #include using namespace o2::framework; + +void* FairOutputTBuffer::embedInItself(fair::mq::Message& msg) { + // The first bytes of the message are used to store the pointer to the message itself + // so that we can reallocate it if needed. 
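+ // The counterpart is fairMQrealloc() below: when ROOT asks for a larger buffer it
+ // recovers the fair::mq::Message* from oldData - sizeof(char*), keeps a shallow copy
+ // of the old payload, rebuilds the message with the requested size and returns the
+ // new data pointer, again offset by the embedded pointer.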
+ if (sizeof(char*) > msg.GetSize()) { + throw std::runtime_error("Message size too small to embed pointer"); + } + char* data = reinterpret_cast(msg.GetData()); + char* ptr = reinterpret_cast(&msg); + std::memcpy(data, ptr, sizeof(char*)); + return data + sizeof(char*); +} + +// Reallocation function. Get the message pointer from the data and call Rebuild. +char *FairOutputTBuffer::fairMQrealloc(char *oldData, size_t newSize, size_t oldSize) { + auto* msg = reinterpret_cast(oldData - sizeof(char*)); + if (newSize <= msg->GetSize()) { + // no need to reallocate, the message is already big enough + return oldData; + } + // Create a shallow copy of the message + fair::mq::MessagePtr oldMsg = msg->GetTransport()->CreateMessage(); + oldMsg->Copy(*msg); + // Copy the old data while rebuilding. Reference counting should make + // sure the old message is not deleted until the new one is ready. + msg->Rebuild(newSize, fair::mq::Alignment{64}); + memcpy(msg->GetData(), oldMsg->GetData(), oldSize); + + return reinterpret_cast(msg->GetData()) + sizeof(char*); +} diff --git a/Framework/Core/test/test_DataRefUtils.cxx b/Framework/Core/test/test_DataRefUtils.cxx index 37da7912bfe8b..081adc81ebf69 100644 --- a/Framework/Core/test/test_DataRefUtils.cxx +++ b/Framework/Core/test/test_DataRefUtils.cxx @@ -21,17 +21,37 @@ using namespace o2::framework; +TEST_CASE("PureRootTest") { + TBufferFile buffer(TBuffer::kWrite); + TObjString s("test"); + buffer.WriteObject(&s); + + TBufferFile buffer2(TBuffer::kRead, buffer.BufferSize(), buffer.Buffer(), false); + buffer2.SetReadMode(); + buffer2.InitMap(); + TClass *storedClass = buffer2.ReadClass(); + // ReadClass advances the buffer, so we need to reset it. + buffer2.SetBufferOffset(0); + buffer2.ResetMap(); + REQUIRE(storedClass != nullptr); + auto *outS = (TObjString*)buffer2.ReadObjectAny(storedClass); + REQUIRE(outS != nullptr); + REQUIRE(outS->GetString() == "test"); +} + // Simple test to do root deserialization. 
TEST_CASE("TestRootSerialization") { DataRef ref; - TMessage* tm = new TMessage(kMESS_OBJECT); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer tm(*msg); auto sOrig = std::make_unique("test"); - tm->WriteObject(sOrig.get()); + tm << sOrig.get(); o2::header::DataHeader dh; dh.payloadSerializationMethod = o2::header::gSerializationMethodROOT; - ref.payload = tm->Buffer(); - dh.payloadSize = tm->BufferSize(); + ref.payload = (char*)msg->GetData(); + dh.payloadSize = (size_t)msg->GetSize(); ref.header = reinterpret_cast(&dh); // Check by using the same type diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index bc5f817400a44..89de74edc8455 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -11,6 +11,7 @@ #include "Framework/TMessageSerializer.h" #include "Framework/RuntimeError.h" +#include #include "TestClasses.h" #include #include @@ -49,14 +50,14 @@ TEST_CASE("TestTMessageSerializer") array.SetOwner(); array.Add(new TNamed(testname, testtitle)); - FairTMessage msg; - TMessageSerializer::serialize(msg, &array); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); + TMessageSerializer::serialize(buffer, &array); - auto buf = as_span(msg); - REQUIRE(buf.size() == msg.BufferSize()); - REQUIRE(static_cast(buf.data()) == static_cast(msg.Buffer())); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); // test deserialization with TObject as target class (default) - auto out = TMessageSerializer::deserialize(buf); + auto out = TMessageSerializer::deserialize(msg2); auto* outarr = dynamic_cast(out.get()); REQUIRE(out.get() == outarr); @@ -66,9 +67,9 @@ TEST_CASE("TestTMessageSerializer") REQUIRE(named->GetTitle() == std::string(testtitle)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef, + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, ExceptionMatcher("can not convert serialized class TObjArray into target class TNamed")); } @@ -87,18 +88,21 @@ TEST_CASE("TestTMessageSerializer_NonTObject") TClass* cl = TClass::GetClass("std::vector"); REQUIRE(cl != nullptr); - FairTMessage msg; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); char* in = reinterpret_cast(&data); - TMessageSerializer::serialize(msg, in, cl); + TMessageSerializer::serialize(buffer, in, cl); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); - auto out = TMessageSerializer::deserialize>(as_span(msg)); + auto out = TMessageSerializer::deserialize>(msg2); REQUIRE(out); REQUIRE((*out.get()).size() == 2); REQUIRE((*out.get())[0] == o2::test::Polymorphic(0xaffe)); REQUIRE((*out.get())[1] == o2::test::Polymorphic(0xd00f)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(as_span(msg)), RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), 
RuntimeErrorRef); } TEST_CASE("TestTMessageSerializer_InvalidBuffer") @@ -119,5 +123,6 @@ TEST_CASE("TestTMessageSerializer_InvalidBuffer") struct Dummy { }; auto matcher = ExceptionMatcher("class is not ROOT-serializable: ZL22CATCH2_INTERNAL_TEST_4vE5Dummy"); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize((std::byte*)buffer, strlen(buffer)), o2::framework::RuntimeErrorRef, matcher); + FairInputTBuffer msg2((char*)buffer, strlen(buffer)); + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, matcher); } diff --git a/Framework/Utils/test/test_RootTreeWriter.cxx b/Framework/Utils/test/test_RootTreeWriter.cxx index 3194508f3d775..62e1eb62cb4f1 100644 --- a/Framework/Utils/test/test_RootTreeWriter.cxx +++ b/Framework/Utils/test/test_RootTreeWriter.cxx @@ -179,6 +179,7 @@ TEST_CASE("test_RootTreeWriter") auto createSerializedMessage = [&transport, &store](DataHeader&& dh, auto& data) { fair::mq::MessagePtr payload = transport->CreateMessage(); + payload->Rebuild(4096, {64}); auto* cl = TClass::GetClass(typeid(decltype(data))); TMessageSerializer().Serialize(*payload, &data, cl); dh.payloadSize = payload->GetSize(); diff --git a/Utilities/Mergers/src/ObjectStore.cxx b/Utilities/Mergers/src/ObjectStore.cxx index e88358507c31e..3bb49f1dfc9d8 100644 --- a/Utilities/Mergers/src/ObjectStore.cxx +++ b/Utilities/Mergers/src/ObjectStore.cxx @@ -38,7 +38,7 @@ static std::string concat(Args&&... arguments) return std::move(ss.str()); } -void* readObject(const TClass* type, o2::framework::FairTMessage& ftm) +void* readObject(const TClass* type, o2::framework::FairInputTBuffer& ftm) { using namespace std::string_view_literals; auto* object = ftm.ReadObjectAny(type); @@ -60,7 +60,7 @@ MergeInterface* castToMergeInterface(bool inheritsFromTObject, void* object, TCl return objectAsMergeInterface; } -std::optional extractVector(o2::framework::FairTMessage& ftm, const TClass* storedClass) +std::optional extractVector(o2::framework::FairInputTBuffer& ftm, const TClass* storedClass) { if (!storedClass->InheritsFrom(TClass::GetClass(typeid(VectorOfRawTObjects)))) { return std::nullopt; @@ -88,11 +88,14 @@ ObjectStore extractObjectFrom(const framework::DataRef& ref) throw std::runtime_error(concat(errorPrefix, "It is not ROOT-serialized"sv)); } - o2::framework::FairTMessage ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); - auto* storedClass = ftm.GetClass(); + o2::framework::FairInputTBuffer ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error(concat(errorPrefix, "Unknown stored class"sv)); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); if (const auto extractedVector = extractVector(ftm, storedClass)) { return extractedVector.value(); diff --git a/Utilities/Mergers/test/benchmark_Types.cxx b/Utilities/Mergers/test/benchmark_Types.cxx index 790fd329185ea..736685c5746b8 100644 --- a/Utilities/Mergers/test/benchmark_Types.cxx +++ b/Utilities/Mergers/test/benchmark_Types.cxx @@ -165,11 +165,16 @@ auto measure = [](Measurement m, auto* o, auto* i) -> double { tm->WriteObject(o); start = std::chrono::high_resolution_clock::now(); - o2::framework::FairTMessage ftm(const_cast(tm->Buffer()), tm->BufferSize()); - auto* storedClass = ftm.GetClass(); + // Needed to take into account that FairInputTBuffer expects the first 8 bytes to be the + // allocator pointer, which is not present in the TMessage buffer. 
+ o2::framework::FairInputTBuffer ftm(const_cast(tm->Buffer() - 8), tm->BufferSize() + 8); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error("Unknown stored class"); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* tObjectClass = TClass::GetClass(typeid(TObject)); if (!storedClass->InheritsFrom(tObjectClass)) { @@ -738,4 +743,4 @@ int main(int argc, const char* argv[]) file.close(); return 0; -} \ No newline at end of file +} From 045edb4da0ebf74d224fe0792f0cd61edbc04870 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 24 Feb 2024 00:21:18 +0100 Subject: [PATCH 378/726] DPL: avoid TMessage usage TMessage does not allow for non owned buffers, so we end up having an extra buffer in private memory for (de)serializing. Using TBufferFile directly allows to avoid that, so this moves the whole ROOT serialization support in DPL to use it. --- .../src/AODJAlienReaderHelpers.h | 2 + .../Core/include/Framework/DataAllocator.h | 1 + .../Core/include/Framework/DataRefUtils.h | 13 +- .../include/Framework/RootMessageContext.h | 3 + .../Framework/RootSerializationSupport.h | 3 +- .../include/Framework/TMessageSerializer.h | 130 ++++++++---------- Framework/Core/src/CommonDataProcessors.cxx | 4 +- Framework/Core/src/TMessageSerializer.cxx | 31 +++++ Framework/Core/test/test_DataRefUtils.cxx | 28 +++- .../Core/test/test_TMessageSerializer.cxx | 34 +++-- Framework/Utils/test/test_RootTreeWriter.cxx | 1 + Utilities/Mergers/src/ObjectStore.cxx | 11 +- Utilities/Mergers/test/benchmark_Types.cxx | 11 +- 13 files changed, 171 insertions(+), 101 deletions(-) diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h index 655e4b6c0b439..4b9fd710aca14 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h @@ -16,7 +16,9 @@ #include "Framework/AlgorithmSpec.h" #include "Framework/Logger.h" #include + #include +class TFile; namespace o2::framework::readers { diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 8151d2f83c6c6..029e922aeb90b 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -359,6 +359,7 @@ class DataAllocator } else if constexpr (has_root_dictionary::value == true || is_specialization_v == true) { // Serialize a snapshot of an object with root dictionary payloadMessage = proxy.createOutputMessage(routeIndex); + payloadMessage->Rebuild(4096, {64}); if constexpr (is_specialization_v == true) { // Explicitely ROOT serialize a snapshot of object. 
// An object wrapped into type `ROOTSerialized` is explicitely marked to be ROOT serialized diff --git a/Framework/Core/include/Framework/DataRefUtils.h b/Framework/Core/include/Framework/DataRefUtils.h index defd10244bca5..e59f986f09250 100644 --- a/Framework/Core/include/Framework/DataRefUtils.h +++ b/Framework/Core/include/Framework/DataRefUtils.h @@ -71,12 +71,15 @@ struct DataRefUtils { throw runtime_error("Attempt to extract a TMessage from non-ROOT serialised message"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); - auto* storedClass = ftm.GetClass(); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); auto* requestedClass = RSS::TClass::GetClass(typeid(T)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); // should always have the class description if has_root_dictionary is true assert(requestedClass != nullptr); + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* object = ftm.ReadObjectAny(storedClass); if (object == nullptr) { throw runtime_error_f("Failed to read object with name %s from message using ROOT serialization.", @@ -146,7 +149,11 @@ struct DataRefUtils { throw runtime_error("ROOT serialization not supported, dictionary not found for data type"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); + ftm.InitMap(); + auto *classInfo = ftm.ReadClass(); + ftm.SetBufferOffset(0); + ftm.ResetMap(); result.reset(static_cast(ftm.ReadObjectAny(cl))); if (result.get() == nullptr) { throw runtime_error_f("Unable to extract class %s", cl == nullptr ? "" : cl->GetName()); diff --git a/Framework/Core/include/Framework/RootMessageContext.h b/Framework/Core/include/Framework/RootMessageContext.h index bef60ebbbf9f9..b1124880cf30f 100644 --- a/Framework/Core/include/Framework/RootMessageContext.h +++ b/Framework/Core/include/Framework/RootMessageContext.h @@ -72,6 +72,9 @@ class RootSerializedObject : public MessageContext::ContextObject fair::mq::Parts finalize() final { assert(mParts.Size() == 1); + if (mPayloadMsg->GetSize() < sizeof(char*)) { + mPayloadMsg->Rebuild(4096, {64}); + } TMessageSerializer::Serialize(*mPayloadMsg, mObject.get(), nullptr); mParts.AddPart(std::move(mPayloadMsg)); return ContextObject::finalize(); diff --git a/Framework/Core/include/Framework/RootSerializationSupport.h b/Framework/Core/include/Framework/RootSerializationSupport.h index cbf7408b13c7d..a44093f9c02bf 100644 --- a/Framework/Core/include/Framework/RootSerializationSupport.h +++ b/Framework/Core/include/Framework/RootSerializationSupport.h @@ -21,7 +21,8 @@ namespace o2::framework /// compiler. 
struct RootSerializationSupport { using TClass = ::TClass; - using FairTMessage = o2::framework::FairTMessage; + using FairInputTBuffer = o2::framework::FairInputTBuffer; + using FairOutputBuffer = o2::framework::FairOutputTBuffer; using TObject = ::TObject; }; diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 1f08b456c0218..ca18eb21abfa1 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -16,9 +16,8 @@ #include "Framework/RuntimeError.h" #include -#include +#include #include -#include #include #include #include @@ -28,67 +27,76 @@ namespace o2::framework { -class FairTMessage; +class FairOutputTBuffer; +class FairInputTBuffer; // utilities to produce a span over a byte buffer held by various message types // this is to avoid littering code with casts and conversions (span has a signed index type(!)) -gsl::span as_span(const FairTMessage& msg); +gsl::span as_span(const FairInputTBuffer& msg); +gsl::span as_span(const FairOutputTBuffer& msg); gsl::span as_span(const fair::mq::Message& msg); -class FairTMessage : public TMessage +// A TBufferFile which we can use to serialise data to a FairMQ message. +class FairOutputTBuffer : public TBufferFile { public: - using TMessage::TMessage; - FairTMessage() : TMessage(kMESS_OBJECT) {} - FairTMessage(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - FairTMessage(gsl::span buf) : TMessage(buf.data(), buf.size()) { ResetBit(kIsOwner); } + // This is to serialise data to FairMQ. We embed the pointer to the message + // in the data itself, so that we can use it to reallocate the message if needed. + // The FairMQ message retains ownership of the data. + // When deserialising the root object, keep in mind one needs to skip the 8 bytes + // for the pointer. + FairOutputTBuffer(fair::mq::Message& msg) + : TBufferFile(TBuffer::kWrite, msg.GetSize() - sizeof(char*), embedInItself(msg), false, fairMQrealloc) + { + } + // Helper function to keep track of the FairMQ message that holds the data + // in the data itself. We can use this to make sure the message can be reallocated + // even if we simply have a pointer to the data. Hopefully ROOT will not play dirty + // with us. + void* embedInItself(fair::mq::Message& msg); // helper function to clean up the object holding the data after it is transported. - static void free(void* /*data*/, void* hint); + static char* fairMQrealloc(char* oldData, size_t newSize, size_t oldSize); }; -struct TMessageSerializer { - using StreamerList = std::vector; - using CompressionLevel = int; +class FairInputTBuffer : public TBufferFile +{ + public: + // This is to serialise data to FairMQ. The provided message is expeted to have 8 bytes + // of overhead, where the source embedded the pointer for the reallocation. + // Notice this will break if the sender and receiver are not using the same + // size for a pointer. 
+ FairInputTBuffer(char * data, size_t size) + : TBufferFile(TBuffer::kRead, size-sizeof(char*), data + sizeof(char*), false, nullptr) + { + } +}; - static void Serialize(fair::mq::Message& msg, const TObject* input, - CompressionLevel compressionLevel = -1); +struct TMessageSerializer { + static void Serialize(fair::mq::Message& msg, const TObject* input); template - static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CompressionLevel compressionLevel = -1); + static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); - static void serialize(FairTMessage& msg, const TObject* input, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const TObject* input); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const T* input, const TClass* cl); template - static std::unique_ptr deserialize(gsl::span buffer); - template - static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); + static inline std::unique_ptr deserialize(FairInputTBuffer & buffer); }; -inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const TObject* input) { - return serialize(tm, input, nullptr, compressionLevel); + return serialize(tm, input, nullptr); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const T* input, const TClass* cl) { - if (compressionLevel >= 0) { - // if negative, skip to use ROOT default - tm.SetCompressionLevel(compressionLevel); - } - // TODO: check what WriateObject and WriteObjectAny are doing if (cl == nullptr) { tm.WriteObject(input); @@ -98,7 +106,7 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // } template -inline std::unique_ptr TMessageSerializer::deserialize(gsl::span buffer) +inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer & buffer) { TClass* tgtClass = TClass::GetClass(typeid(T)); if (tgtClass == nullptr) { @@ -107,53 +115,32 @@ inline std::unique_ptr TMessageSerializer::deserialize(gsl::span b // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided - FairTMessage tm(buffer); - TClass* serializedClass = tm.GetClass(); + buffer.InitMap(); + TClass* serializedClass = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); if (serializedClass == nullptr) { throw runtime_error_f("can not read class info from buffer"); } if (tgtClass != serializedClass && serializedClass->GetBaseClass(tgtClass) == nullptr) { throw runtime_error_f("can not convert serialized class %s into target class %s", - tm.GetClass()->GetName(), + serializedClass->GetName(), tgtClass->GetName()); } - return std::unique_ptr(reinterpret_cast(tm.ReadObjectAny(serializedClass))); + return std::unique_ptr(reinterpret_cast(buffer.ReadObjectAny(serializedClass))); } -template -inline std::unique_ptr TMessageSerializer::deserialize(std::byte* buffer, size_t size) +inline void 
TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input) { - return deserialize(gsl::span(buffer, gsl::narrow::size_type>(size))); -} - -inline void FairTMessage::free(void* /*data*/, void* hint) -{ - std::default_delete deleter; - deleter(static_cast(hint)); -} - -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CompressionLevel compressionLevel) -{ - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, input->Class(), compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, input->Class()); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CompressionLevel compressionLevel) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, const TClass* cl) { - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, cl, compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, cl); } template @@ -161,7 +148,8 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u { // we know the message will not be modified by this, // so const_cast should be OK here(IMHO). - output = deserialize(as_span(msg)); + FairInputTBuffer input(static_cast(msg.GetData()), static_cast(msg.GetSize())); + output = deserialize(input); } // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, @@ -171,7 +159,7 @@ inline gsl::span as_span(const fair::mq::Message& msg) return gsl::span{static_cast(msg.GetData()), gsl::narrow::size_type>(msg.GetSize())}; } -inline gsl::span as_span(const FairTMessage& msg) +inline gsl::span as_span(const FairInputTBuffer& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), gsl::narrow::size_type>(msg.BufferSize())}; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..02ef5c7bc5b3c 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -141,9 +141,9 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector(ref.payload), static_cast(datah->payloadSize)); + FairInputTBuffer tm(const_cast(ref.payload), static_cast(datah->payloadSize)); InputObject obj; - obj.kind = tm.GetClass(); + obj.kind = tm.ReadClass(); if (obj.kind == nullptr) { LOG(error) << "Cannot read class info from buffer."; return; diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 5388a6d716cda..9f09c3ade0089 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -9,7 +9,38 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. #include +#include #include #include using namespace o2::framework; + +void* FairOutputTBuffer::embedInItself(fair::mq::Message& msg) { + // The first bytes of the message are used to store the pointer to the message itself + // so that we can reallocate it if needed. 
+ if (sizeof(char*) > msg.GetSize()) { + throw std::runtime_error("Message size too small to embed pointer"); + } + char* data = reinterpret_cast(msg.GetData()); + char* ptr = reinterpret_cast(&msg); + std::memcpy(data, ptr, sizeof(char*)); + return data + sizeof(char*); +} + +// Reallocation function. Get the message pointer from the data and call Rebuild. +char *FairOutputTBuffer::fairMQrealloc(char *oldData, size_t newSize, size_t oldSize) { + auto* msg = reinterpret_cast(oldData - sizeof(char*)); + if (newSize <= msg->GetSize()) { + // no need to reallocate, the message is already big enough + return oldData; + } + // Create a shallow copy of the message + fair::mq::MessagePtr oldMsg = msg->GetTransport()->CreateMessage(); + oldMsg->Copy(*msg); + // Copy the old data while rebuilding. Reference counting should make + // sure the old message is not deleted until the new one is ready. + msg->Rebuild(newSize, fair::mq::Alignment{64}); + memcpy(msg->GetData(), oldMsg->GetData(), oldSize); + + return reinterpret_cast(msg->GetData()) + sizeof(char*); +} diff --git a/Framework/Core/test/test_DataRefUtils.cxx b/Framework/Core/test/test_DataRefUtils.cxx index 37da7912bfe8b..081adc81ebf69 100644 --- a/Framework/Core/test/test_DataRefUtils.cxx +++ b/Framework/Core/test/test_DataRefUtils.cxx @@ -21,17 +21,37 @@ using namespace o2::framework; +TEST_CASE("PureRootTest") { + TBufferFile buffer(TBuffer::kWrite); + TObjString s("test"); + buffer.WriteObject(&s); + + TBufferFile buffer2(TBuffer::kRead, buffer.BufferSize(), buffer.Buffer(), false); + buffer2.SetReadMode(); + buffer2.InitMap(); + TClass *storedClass = buffer2.ReadClass(); + // ReadClass advances the buffer, so we need to reset it. + buffer2.SetBufferOffset(0); + buffer2.ResetMap(); + REQUIRE(storedClass != nullptr); + auto *outS = (TObjString*)buffer2.ReadObjectAny(storedClass); + REQUIRE(outS != nullptr); + REQUIRE(outS->GetString() == "test"); +} + // Simple test to do root deserialization. 
TEST_CASE("TestRootSerialization") { DataRef ref; - TMessage* tm = new TMessage(kMESS_OBJECT); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer tm(*msg); auto sOrig = std::make_unique("test"); - tm->WriteObject(sOrig.get()); + tm << sOrig.get(); o2::header::DataHeader dh; dh.payloadSerializationMethod = o2::header::gSerializationMethodROOT; - ref.payload = tm->Buffer(); - dh.payloadSize = tm->BufferSize(); + ref.payload = (char*)msg->GetData(); + dh.payloadSize = (size_t)msg->GetSize(); ref.header = reinterpret_cast(&dh); // Check by using the same type diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index bc5f817400a44..395b3779421a2 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -11,6 +11,7 @@ #include "Framework/TMessageSerializer.h" #include "Framework/RuntimeError.h" +#include #include "TestClasses.h" #include #include @@ -49,14 +50,14 @@ TEST_CASE("TestTMessageSerializer") array.SetOwner(); array.Add(new TNamed(testname, testtitle)); - FairTMessage msg; - TMessageSerializer::serialize(msg, &array); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); + TMessageSerializer::serialize(buffer, &array); - auto buf = as_span(msg); - REQUIRE(buf.size() == msg.BufferSize()); - REQUIRE(static_cast(buf.data()) == static_cast(msg.Buffer())); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); // test deserialization with TObject as target class (default) - auto out = TMessageSerializer::deserialize(buf); + auto out = TMessageSerializer::deserialize(msg2); auto* outarr = dynamic_cast(out.get()); REQUIRE(out.get() == outarr); @@ -66,9 +67,9 @@ TEST_CASE("TestTMessageSerializer") REQUIRE(named->GetTitle() == std::string(testtitle)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef, + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, ExceptionMatcher("can not convert serialized class TObjArray into target class TNamed")); } @@ -87,23 +88,29 @@ TEST_CASE("TestTMessageSerializer_NonTObject") TClass* cl = TClass::GetClass("std::vector"); REQUIRE(cl != nullptr); - FairTMessage msg; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); char* in = reinterpret_cast(&data); - TMessageSerializer::serialize(msg, in, cl); + TMessageSerializer::serialize(buffer, in, cl); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); - auto out = TMessageSerializer::deserialize>(as_span(msg)); + auto out = TMessageSerializer::deserialize>(msg2); REQUIRE(out); REQUIRE((*out.get()).size() == 2); REQUIRE((*out.get())[0] == o2::test::Polymorphic(0xaffe)); REQUIRE((*out.get())[1] == o2::test::Polymorphic(0xd00f)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(as_span(msg)), RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), 
RuntimeErrorRef); } TEST_CASE("TestTMessageSerializer_InvalidBuffer") { const char* buffer = "this is for sure not a serialized ROOT object"; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(strlen(buffer) + 8); + memcpy((char*)msg->GetData() + 8, buffer, strlen(buffer)); // test deserialization of invalid buffer and check the exception // FIXME: at the moment, TMessage fails directly with a segfault, which it shouldn't do /* @@ -119,5 +126,6 @@ TEST_CASE("TestTMessageSerializer_InvalidBuffer") struct Dummy { }; auto matcher = ExceptionMatcher("class is not ROOT-serializable: ZL22CATCH2_INTERNAL_TEST_4vE5Dummy"); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize((std::byte*)buffer, strlen(buffer)), o2::framework::RuntimeErrorRef, matcher); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, matcher); } diff --git a/Framework/Utils/test/test_RootTreeWriter.cxx b/Framework/Utils/test/test_RootTreeWriter.cxx index 3194508f3d775..62e1eb62cb4f1 100644 --- a/Framework/Utils/test/test_RootTreeWriter.cxx +++ b/Framework/Utils/test/test_RootTreeWriter.cxx @@ -179,6 +179,7 @@ TEST_CASE("test_RootTreeWriter") auto createSerializedMessage = [&transport, &store](DataHeader&& dh, auto& data) { fair::mq::MessagePtr payload = transport->CreateMessage(); + payload->Rebuild(4096, {64}); auto* cl = TClass::GetClass(typeid(decltype(data))); TMessageSerializer().Serialize(*payload, &data, cl); dh.payloadSize = payload->GetSize(); diff --git a/Utilities/Mergers/src/ObjectStore.cxx b/Utilities/Mergers/src/ObjectStore.cxx index e88358507c31e..3bb49f1dfc9d8 100644 --- a/Utilities/Mergers/src/ObjectStore.cxx +++ b/Utilities/Mergers/src/ObjectStore.cxx @@ -38,7 +38,7 @@ static std::string concat(Args&&... 
arguments) return std::move(ss.str()); } -void* readObject(const TClass* type, o2::framework::FairTMessage& ftm) +void* readObject(const TClass* type, o2::framework::FairInputTBuffer& ftm) { using namespace std::string_view_literals; auto* object = ftm.ReadObjectAny(type); @@ -60,7 +60,7 @@ MergeInterface* castToMergeInterface(bool inheritsFromTObject, void* object, TCl return objectAsMergeInterface; } -std::optional extractVector(o2::framework::FairTMessage& ftm, const TClass* storedClass) +std::optional extractVector(o2::framework::FairInputTBuffer& ftm, const TClass* storedClass) { if (!storedClass->InheritsFrom(TClass::GetClass(typeid(VectorOfRawTObjects)))) { return std::nullopt; @@ -88,11 +88,14 @@ ObjectStore extractObjectFrom(const framework::DataRef& ref) throw std::runtime_error(concat(errorPrefix, "It is not ROOT-serialized"sv)); } - o2::framework::FairTMessage ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); - auto* storedClass = ftm.GetClass(); + o2::framework::FairInputTBuffer ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error(concat(errorPrefix, "Unknown stored class"sv)); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); if (const auto extractedVector = extractVector(ftm, storedClass)) { return extractedVector.value(); diff --git a/Utilities/Mergers/test/benchmark_Types.cxx b/Utilities/Mergers/test/benchmark_Types.cxx index 790fd329185ea..736685c5746b8 100644 --- a/Utilities/Mergers/test/benchmark_Types.cxx +++ b/Utilities/Mergers/test/benchmark_Types.cxx @@ -165,11 +165,16 @@ auto measure = [](Measurement m, auto* o, auto* i) -> double { tm->WriteObject(o); start = std::chrono::high_resolution_clock::now(); - o2::framework::FairTMessage ftm(const_cast(tm->Buffer()), tm->BufferSize()); - auto* storedClass = ftm.GetClass(); + // Needed to take into account that FairInputTBuffer expects the first 8 bytes to be the + // allocator pointer, which is not present in the TMessage buffer. + o2::framework::FairInputTBuffer ftm(const_cast(tm->Buffer() - 8), tm->BufferSize() + 8); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error("Unknown stored class"); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* tObjectClass = TClass::GetClass(typeid(TObject)); if (!storedClass->InheritsFrom(tObjectClass)) { @@ -738,4 +743,4 @@ int main(int argc, const char* argv[]) file.close(); return 0; -} \ No newline at end of file +} From 8e513105561b7b1a03503d0cfa78b4ffae0b7e5a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Sat, 24 Feb 2024 00:21:18 +0100 Subject: [PATCH 379/726] DPL: avoid TMessage usage TMessage does not allow for non owned buffers, so we end up having an extra buffer in private memory for (de)serializing. Using TBufferFile directly allows to avoid that, so this moves the whole ROOT serialization support in DPL to use it. 
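As a usage sketch (illustration only: sendExample, readExample, the channel and the
TObjString payload are invented here; the allocation pattern mirrors the one used in
Detector.cxx below), serialisation now goes straight into the FairMQ message:

    #include <fairmq/Channel.h>
    #include <fairmq/Message.h>
    #include <TObjString.h>
    #include <memory>
    #include "Framework/TMessageSerializer.h"

    void sendExample(fair::mq::Channel& channel)
    {
      TObjString payload("hello");
      // Pre-allocate the message; FairOutputTBuffer stores the message pointer in the
      // first sizeof(char*) bytes and serialises the object right after it. If the
      // object outgrows the initial 4096 bytes, fairMQrealloc() rebuilds the message.
      auto msg = channel.Transport()->CreateMessage(4096, fair::mq::Alignment{64});
      o2::framework::FairOutputTBuffer buffer(*msg);
      o2::framework::TMessageSerializer::serialize(buffer, &payload);
      channel.Send(msg); // no intermediate private buffer involved
    }

    std::unique_ptr<TObjString> readExample(fair::mq::Message& msg)
    {
      // FairInputTBuffer skips the embedded pointer before handing the buffer to ROOT.
      o2::framework::FairInputTBuffer in(reinterpret_cast<char*>(msg.GetData()), msg.GetSize());
      return o2::framework::TMessageSerializer::deserialize<TObjString>(in);
    }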
--- .../Base/include/DetectorsBase/Detector.h | 15 +- Detectors/Base/src/Detector.cxx | 34 ++--- .../src/AODJAlienReaderHelpers.h | 2 + .../Core/include/Framework/DataAllocator.h | 1 + .../Core/include/Framework/DataRefUtils.h | 13 +- .../include/Framework/RootMessageContext.h | 3 + .../Framework/RootSerializationSupport.h | 3 +- .../include/Framework/TMessageSerializer.h | 130 ++++++++---------- Framework/Core/src/CommonDataProcessors.cxx | 4 +- Framework/Core/src/TMessageSerializer.cxx | 31 +++++ Framework/Core/test/test_DataRefUtils.cxx | 28 +++- .../Core/test/test_TMessageSerializer.cxx | 34 +++-- Framework/Utils/test/test_RootTreeWriter.cxx | 1 + Utilities/Mergers/src/ObjectStore.cxx | 11 +- Utilities/Mergers/test/benchmark_Types.cxx | 11 +- 15 files changed, 191 insertions(+), 130 deletions(-) diff --git a/Detectors/Base/include/DetectorsBase/Detector.h b/Detectors/Base/include/DetectorsBase/Detector.h index 6acfa4f5cc46c..4dd0452f2c059 100644 --- a/Detectors/Base/include/DetectorsBase/Detector.h +++ b/Detectors/Base/include/DetectorsBase/Detector.h @@ -29,7 +29,6 @@ #include #include #include -#include #include "CommonUtils/ShmManager.h" #include "CommonUtils/ShmAllocator.h" #include @@ -42,9 +41,7 @@ #include -namespace o2 -{ -namespace base +namespace o2::base { /// This is the basic class for any AliceO2 detector module, whether it is @@ -260,17 +257,12 @@ T decodeShmMessage(fair::mq::Parts& dataparts, int index, bool*& busy) } // this goes into the source -void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, - void* data, size_t size, void (*func_ptr)(void* data, void* hint), void* hint); +void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, void* data, TClass* cl); template void attachTMessage(Container const& hits, fair::mq::Channel& channel, fair::mq::Parts& parts) { - TMessage* tmsg = new TMessage(); - tmsg->WriteObjectAny((void*)&hits, TClass::GetClass(typeid(hits))); - attachMessageBufferToParts( - parts, channel, tmsg->Buffer(), tmsg->BufferSize(), - [](void* data, void* hint) { delete static_cast(hint); }, tmsg); + attachMessageBufferToParts(parts, channel, (void*)&hits, TClass::GetClass(typeid(hits))); } void* decodeTMessageCore(fair::mq::Parts& dataparts, int index); @@ -746,7 +738,6 @@ class DetImpl : public o2::base::Detector ClassDefOverride(DetImpl, 0); }; -} // namespace base } // namespace o2 #endif diff --git a/Detectors/Base/src/Detector.cxx b/Detectors/Base/src/Detector.cxx index 3168e0e84e1f2..3dccf732517b6 100644 --- a/Detectors/Base/src/Detector.cxx +++ b/Detectors/Base/src/Detector.cxx @@ -17,6 +17,7 @@ #include "DetectorsBase/MaterialManager.h" #include "DetectorsCommonDataFormats/DetID.h" #include "Field/MagneticField.h" +#include "Framework/TMessageSerializer.h" #include "TString.h" // for TString #include "TGeoManager.h" @@ -196,16 +197,18 @@ int Detector::registerSensitiveVolumeAndGetVolID(std::string const& name) #include #include #include -namespace o2 -{ -namespace base +namespace o2::base { // this goes into the source -void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, void* data, size_t size, - void (*free_func)(void* data, void* hint), void* hint) -{ - std::unique_ptr message(channel.NewMessage(data, size, free_func, hint)); - parts.AddPart(std::move(message)); +void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, void* data, TClass* cl) { + auto msg = channel.Transport()->CreateMessage(4096, fair::mq::Alignment{64}); + 
// This will serialize the data directly into the message buffer, without any further + // buffer or copying. Notice how the message will have 8 bytes of header and then + // the serialized data as TBufferFile. In principle one could construct a serialized TMessage payload + // however I did not manage to get it to work for every case. + o2::framework::FairOutputTBuffer buffer(*msg); + o2::framework::TMessageSerializer::serialize(buffer, data, cl); + parts.AddPart(std::move(msg)); } void attachDetIDHeaderMessage(int id, fair::mq::Channel& channel, fair::mq::Parts& parts) { @@ -246,17 +249,14 @@ void* decodeShmCore(fair::mq::Parts& dataparts, int index, bool*& busy) void* decodeTMessageCore(fair::mq::Parts& dataparts, int index) { - class TMessageWrapper : public TMessage - { - public: - TMessageWrapper(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - ~TMessageWrapper() override = default; - }; auto rawmessage = std::move(dataparts.At(index)); - auto message = std::make_unique(rawmessage->GetData(), rawmessage->GetSize()); - return message.get()->ReadObjectAny(message.get()->GetClass()); + o2::framework::FairInputTBuffer buffer((char*)rawmessage->GetData(), rawmessage->GetSize()); + buffer.InitMap(); + auto *cl = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); + return buffer.ReadObjectAny(cl); } -} // namespace base } // namespace o2 ClassImp(o2::base::Detector); diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h index 655e4b6c0b439..4b9fd710aca14 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h @@ -16,7 +16,9 @@ #include "Framework/AlgorithmSpec.h" #include "Framework/Logger.h" #include + #include +class TFile; namespace o2::framework::readers { diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 8151d2f83c6c6..029e922aeb90b 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -359,6 +359,7 @@ class DataAllocator } else if constexpr (has_root_dictionary::value == true || is_specialization_v == true) { // Serialize a snapshot of an object with root dictionary payloadMessage = proxy.createOutputMessage(routeIndex); + payloadMessage->Rebuild(4096, {64}); if constexpr (is_specialization_v == true) { // Explicitely ROOT serialize a snapshot of object. 
// An object wrapped into type `ROOTSerialized` is explicitely marked to be ROOT serialized diff --git a/Framework/Core/include/Framework/DataRefUtils.h b/Framework/Core/include/Framework/DataRefUtils.h index defd10244bca5..e59f986f09250 100644 --- a/Framework/Core/include/Framework/DataRefUtils.h +++ b/Framework/Core/include/Framework/DataRefUtils.h @@ -71,12 +71,15 @@ struct DataRefUtils { throw runtime_error("Attempt to extract a TMessage from non-ROOT serialised message"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); - auto* storedClass = ftm.GetClass(); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); auto* requestedClass = RSS::TClass::GetClass(typeid(T)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); // should always have the class description if has_root_dictionary is true assert(requestedClass != nullptr); + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* object = ftm.ReadObjectAny(storedClass); if (object == nullptr) { throw runtime_error_f("Failed to read object with name %s from message using ROOT serialization.", @@ -146,7 +149,11 @@ struct DataRefUtils { throw runtime_error("ROOT serialization not supported, dictionary not found for data type"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); + ftm.InitMap(); + auto *classInfo = ftm.ReadClass(); + ftm.SetBufferOffset(0); + ftm.ResetMap(); result.reset(static_cast(ftm.ReadObjectAny(cl))); if (result.get() == nullptr) { throw runtime_error_f("Unable to extract class %s", cl == nullptr ? "" : cl->GetName()); diff --git a/Framework/Core/include/Framework/RootMessageContext.h b/Framework/Core/include/Framework/RootMessageContext.h index bef60ebbbf9f9..b1124880cf30f 100644 --- a/Framework/Core/include/Framework/RootMessageContext.h +++ b/Framework/Core/include/Framework/RootMessageContext.h @@ -72,6 +72,9 @@ class RootSerializedObject : public MessageContext::ContextObject fair::mq::Parts finalize() final { assert(mParts.Size() == 1); + if (mPayloadMsg->GetSize() < sizeof(char*)) { + mPayloadMsg->Rebuild(4096, {64}); + } TMessageSerializer::Serialize(*mPayloadMsg, mObject.get(), nullptr); mParts.AddPart(std::move(mPayloadMsg)); return ContextObject::finalize(); diff --git a/Framework/Core/include/Framework/RootSerializationSupport.h b/Framework/Core/include/Framework/RootSerializationSupport.h index cbf7408b13c7d..a44093f9c02bf 100644 --- a/Framework/Core/include/Framework/RootSerializationSupport.h +++ b/Framework/Core/include/Framework/RootSerializationSupport.h @@ -21,7 +21,8 @@ namespace o2::framework /// compiler. 
struct RootSerializationSupport { using TClass = ::TClass; - using FairTMessage = o2::framework::FairTMessage; + using FairInputTBuffer = o2::framework::FairInputTBuffer; + using FairOutputBuffer = o2::framework::FairOutputTBuffer; using TObject = ::TObject; }; diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 1f08b456c0218..ca18eb21abfa1 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -16,9 +16,8 @@ #include "Framework/RuntimeError.h" #include -#include +#include #include -#include #include #include #include @@ -28,67 +27,76 @@ namespace o2::framework { -class FairTMessage; +class FairOutputTBuffer; +class FairInputTBuffer; // utilities to produce a span over a byte buffer held by various message types // this is to avoid littering code with casts and conversions (span has a signed index type(!)) -gsl::span as_span(const FairTMessage& msg); +gsl::span as_span(const FairInputTBuffer& msg); +gsl::span as_span(const FairOutputTBuffer& msg); gsl::span as_span(const fair::mq::Message& msg); -class FairTMessage : public TMessage +// A TBufferFile which we can use to serialise data to a FairMQ message. +class FairOutputTBuffer : public TBufferFile { public: - using TMessage::TMessage; - FairTMessage() : TMessage(kMESS_OBJECT) {} - FairTMessage(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - FairTMessage(gsl::span buf) : TMessage(buf.data(), buf.size()) { ResetBit(kIsOwner); } + // This is to serialise data to FairMQ. We embed the pointer to the message + // in the data itself, so that we can use it to reallocate the message if needed. + // The FairMQ message retains ownership of the data. + // When deserialising the root object, keep in mind one needs to skip the 8 bytes + // for the pointer. + FairOutputTBuffer(fair::mq::Message& msg) + : TBufferFile(TBuffer::kWrite, msg.GetSize() - sizeof(char*), embedInItself(msg), false, fairMQrealloc) + { + } + // Helper function to keep track of the FairMQ message that holds the data + // in the data itself. We can use this to make sure the message can be reallocated + // even if we simply have a pointer to the data. Hopefully ROOT will not play dirty + // with us. + void* embedInItself(fair::mq::Message& msg); // helper function to clean up the object holding the data after it is transported. - static void free(void* /*data*/, void* hint); + static char* fairMQrealloc(char* oldData, size_t newSize, size_t oldSize); }; -struct TMessageSerializer { - using StreamerList = std::vector; - using CompressionLevel = int; +class FairInputTBuffer : public TBufferFile +{ + public: + // This is to serialise data to FairMQ. The provided message is expeted to have 8 bytes + // of overhead, where the source embedded the pointer for the reallocation. + // Notice this will break if the sender and receiver are not using the same + // size for a pointer. 
+ FairInputTBuffer(char * data, size_t size) + : TBufferFile(TBuffer::kRead, size-sizeof(char*), data + sizeof(char*), false, nullptr) + { + } +}; - static void Serialize(fair::mq::Message& msg, const TObject* input, - CompressionLevel compressionLevel = -1); +struct TMessageSerializer { + static void Serialize(fair::mq::Message& msg, const TObject* input); template - static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CompressionLevel compressionLevel = -1); + static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); - static void serialize(FairTMessage& msg, const TObject* input, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const TObject* input); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const T* input, const TClass* cl); template - static std::unique_ptr deserialize(gsl::span buffer); - template - static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); + static inline std::unique_ptr deserialize(FairInputTBuffer & buffer); }; -inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const TObject* input) { - return serialize(tm, input, nullptr, compressionLevel); + return serialize(tm, input, nullptr); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const T* input, const TClass* cl) { - if (compressionLevel >= 0) { - // if negative, skip to use ROOT default - tm.SetCompressionLevel(compressionLevel); - } - // TODO: check what WriateObject and WriteObjectAny are doing if (cl == nullptr) { tm.WriteObject(input); @@ -98,7 +106,7 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // } template -inline std::unique_ptr TMessageSerializer::deserialize(gsl::span buffer) +inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer & buffer) { TClass* tgtClass = TClass::GetClass(typeid(T)); if (tgtClass == nullptr) { @@ -107,53 +115,32 @@ inline std::unique_ptr TMessageSerializer::deserialize(gsl::span b // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided - FairTMessage tm(buffer); - TClass* serializedClass = tm.GetClass(); + buffer.InitMap(); + TClass* serializedClass = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); if (serializedClass == nullptr) { throw runtime_error_f("can not read class info from buffer"); } if (tgtClass != serializedClass && serializedClass->GetBaseClass(tgtClass) == nullptr) { throw runtime_error_f("can not convert serialized class %s into target class %s", - tm.GetClass()->GetName(), + serializedClass->GetName(), tgtClass->GetName()); } - return std::unique_ptr(reinterpret_cast(tm.ReadObjectAny(serializedClass))); + return std::unique_ptr(reinterpret_cast(buffer.ReadObjectAny(serializedClass))); } -template -inline std::unique_ptr TMessageSerializer::deserialize(std::byte* buffer, size_t size) +inline void 
TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input) { - return deserialize(gsl::span(buffer, gsl::narrow::size_type>(size))); -} - -inline void FairTMessage::free(void* /*data*/, void* hint) -{ - std::default_delete deleter; - deleter(static_cast(hint)); -} - -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CompressionLevel compressionLevel) -{ - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, input->Class(), compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, input->Class()); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CompressionLevel compressionLevel) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, const TClass* cl) { - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, cl, compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, cl); } template @@ -161,7 +148,8 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u { // we know the message will not be modified by this, // so const_cast should be OK here(IMHO). - output = deserialize(as_span(msg)); + FairInputTBuffer input(static_cast(msg.GetData()), static_cast(msg.GetSize())); + output = deserialize(input); } // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, @@ -171,7 +159,7 @@ inline gsl::span as_span(const fair::mq::Message& msg) return gsl::span{static_cast(msg.GetData()), gsl::narrow::size_type>(msg.GetSize())}; } -inline gsl::span as_span(const FairTMessage& msg) +inline gsl::span as_span(const FairInputTBuffer& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), gsl::narrow::size_type>(msg.BufferSize())}; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..02ef5c7bc5b3c 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -141,9 +141,9 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector(ref.payload), static_cast(datah->payloadSize)); + FairInputTBuffer tm(const_cast(ref.payload), static_cast(datah->payloadSize)); InputObject obj; - obj.kind = tm.GetClass(); + obj.kind = tm.ReadClass(); if (obj.kind == nullptr) { LOG(error) << "Cannot read class info from buffer."; return; diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 5388a6d716cda..9f09c3ade0089 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -9,7 +9,38 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. #include +#include #include #include using namespace o2::framework; + +void* FairOutputTBuffer::embedInItself(fair::mq::Message& msg) { + // The first bytes of the message are used to store the pointer to the message itself + // so that we can reallocate it if needed. 
+ if (sizeof(char*) > msg.GetSize()) { + throw std::runtime_error("Message size too small to embed pointer"); + } + char* data = reinterpret_cast(msg.GetData()); + char* ptr = reinterpret_cast(&msg); + std::memcpy(data, ptr, sizeof(char*)); + return data + sizeof(char*); +} + +// Reallocation function. Get the message pointer from the data and call Rebuild. +char *FairOutputTBuffer::fairMQrealloc(char *oldData, size_t newSize, size_t oldSize) { + auto* msg = reinterpret_cast(oldData - sizeof(char*)); + if (newSize <= msg->GetSize()) { + // no need to reallocate, the message is already big enough + return oldData; + } + // Create a shallow copy of the message + fair::mq::MessagePtr oldMsg = msg->GetTransport()->CreateMessage(); + oldMsg->Copy(*msg); + // Copy the old data while rebuilding. Reference counting should make + // sure the old message is not deleted until the new one is ready. + msg->Rebuild(newSize, fair::mq::Alignment{64}); + memcpy(msg->GetData(), oldMsg->GetData(), oldSize); + + return reinterpret_cast(msg->GetData()) + sizeof(char*); +} diff --git a/Framework/Core/test/test_DataRefUtils.cxx b/Framework/Core/test/test_DataRefUtils.cxx index 37da7912bfe8b..081adc81ebf69 100644 --- a/Framework/Core/test/test_DataRefUtils.cxx +++ b/Framework/Core/test/test_DataRefUtils.cxx @@ -21,17 +21,37 @@ using namespace o2::framework; +TEST_CASE("PureRootTest") { + TBufferFile buffer(TBuffer::kWrite); + TObjString s("test"); + buffer.WriteObject(&s); + + TBufferFile buffer2(TBuffer::kRead, buffer.BufferSize(), buffer.Buffer(), false); + buffer2.SetReadMode(); + buffer2.InitMap(); + TClass *storedClass = buffer2.ReadClass(); + // ReadClass advances the buffer, so we need to reset it. + buffer2.SetBufferOffset(0); + buffer2.ResetMap(); + REQUIRE(storedClass != nullptr); + auto *outS = (TObjString*)buffer2.ReadObjectAny(storedClass); + REQUIRE(outS != nullptr); + REQUIRE(outS->GetString() == "test"); +} + // Simple test to do root deserialization. 
TEST_CASE("TestRootSerialization") { DataRef ref; - TMessage* tm = new TMessage(kMESS_OBJECT); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer tm(*msg); auto sOrig = std::make_unique("test"); - tm->WriteObject(sOrig.get()); + tm << sOrig.get(); o2::header::DataHeader dh; dh.payloadSerializationMethod = o2::header::gSerializationMethodROOT; - ref.payload = tm->Buffer(); - dh.payloadSize = tm->BufferSize(); + ref.payload = (char*)msg->GetData(); + dh.payloadSize = (size_t)msg->GetSize(); ref.header = reinterpret_cast(&dh); // Check by using the same type diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index bc5f817400a44..395b3779421a2 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -11,6 +11,7 @@ #include "Framework/TMessageSerializer.h" #include "Framework/RuntimeError.h" +#include #include "TestClasses.h" #include #include @@ -49,14 +50,14 @@ TEST_CASE("TestTMessageSerializer") array.SetOwner(); array.Add(new TNamed(testname, testtitle)); - FairTMessage msg; - TMessageSerializer::serialize(msg, &array); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); + TMessageSerializer::serialize(buffer, &array); - auto buf = as_span(msg); - REQUIRE(buf.size() == msg.BufferSize()); - REQUIRE(static_cast(buf.data()) == static_cast(msg.Buffer())); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); // test deserialization with TObject as target class (default) - auto out = TMessageSerializer::deserialize(buf); + auto out = TMessageSerializer::deserialize(msg2); auto* outarr = dynamic_cast(out.get()); REQUIRE(out.get() == outarr); @@ -66,9 +67,9 @@ TEST_CASE("TestTMessageSerializer") REQUIRE(named->GetTitle() == std::string(testtitle)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef, + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, ExceptionMatcher("can not convert serialized class TObjArray into target class TNamed")); } @@ -87,23 +88,29 @@ TEST_CASE("TestTMessageSerializer_NonTObject") TClass* cl = TClass::GetClass("std::vector"); REQUIRE(cl != nullptr); - FairTMessage msg; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); char* in = reinterpret_cast(&data); - TMessageSerializer::serialize(msg, in, cl); + TMessageSerializer::serialize(buffer, in, cl); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); - auto out = TMessageSerializer::deserialize>(as_span(msg)); + auto out = TMessageSerializer::deserialize>(msg2); REQUIRE(out); REQUIRE((*out.get()).size() == 2); REQUIRE((*out.get())[0] == o2::test::Polymorphic(0xaffe)); REQUIRE((*out.get())[1] == o2::test::Polymorphic(0xd00f)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(as_span(msg)), RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), 
RuntimeErrorRef); } TEST_CASE("TestTMessageSerializer_InvalidBuffer") { const char* buffer = "this is for sure not a serialized ROOT object"; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(strlen(buffer) + 8); + memcpy((char*)msg->GetData() + 8, buffer, strlen(buffer)); // test deserialization of invalid buffer and check the exception // FIXME: at the moment, TMessage fails directly with a segfault, which it shouldn't do /* @@ -119,5 +126,6 @@ TEST_CASE("TestTMessageSerializer_InvalidBuffer") struct Dummy { }; auto matcher = ExceptionMatcher("class is not ROOT-serializable: ZL22CATCH2_INTERNAL_TEST_4vE5Dummy"); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize((std::byte*)buffer, strlen(buffer)), o2::framework::RuntimeErrorRef, matcher); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, matcher); } diff --git a/Framework/Utils/test/test_RootTreeWriter.cxx b/Framework/Utils/test/test_RootTreeWriter.cxx index 3194508f3d775..62e1eb62cb4f1 100644 --- a/Framework/Utils/test/test_RootTreeWriter.cxx +++ b/Framework/Utils/test/test_RootTreeWriter.cxx @@ -179,6 +179,7 @@ TEST_CASE("test_RootTreeWriter") auto createSerializedMessage = [&transport, &store](DataHeader&& dh, auto& data) { fair::mq::MessagePtr payload = transport->CreateMessage(); + payload->Rebuild(4096, {64}); auto* cl = TClass::GetClass(typeid(decltype(data))); TMessageSerializer().Serialize(*payload, &data, cl); dh.payloadSize = payload->GetSize(); diff --git a/Utilities/Mergers/src/ObjectStore.cxx b/Utilities/Mergers/src/ObjectStore.cxx index e88358507c31e..3bb49f1dfc9d8 100644 --- a/Utilities/Mergers/src/ObjectStore.cxx +++ b/Utilities/Mergers/src/ObjectStore.cxx @@ -38,7 +38,7 @@ static std::string concat(Args&&... 
arguments) return std::move(ss.str()); } -void* readObject(const TClass* type, o2::framework::FairTMessage& ftm) +void* readObject(const TClass* type, o2::framework::FairInputTBuffer& ftm) { using namespace std::string_view_literals; auto* object = ftm.ReadObjectAny(type); @@ -60,7 +60,7 @@ MergeInterface* castToMergeInterface(bool inheritsFromTObject, void* object, TCl return objectAsMergeInterface; } -std::optional extractVector(o2::framework::FairTMessage& ftm, const TClass* storedClass) +std::optional extractVector(o2::framework::FairInputTBuffer& ftm, const TClass* storedClass) { if (!storedClass->InheritsFrom(TClass::GetClass(typeid(VectorOfRawTObjects)))) { return std::nullopt; @@ -88,11 +88,14 @@ ObjectStore extractObjectFrom(const framework::DataRef& ref) throw std::runtime_error(concat(errorPrefix, "It is not ROOT-serialized"sv)); } - o2::framework::FairTMessage ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); - auto* storedClass = ftm.GetClass(); + o2::framework::FairInputTBuffer ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error(concat(errorPrefix, "Unknown stored class"sv)); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); if (const auto extractedVector = extractVector(ftm, storedClass)) { return extractedVector.value(); diff --git a/Utilities/Mergers/test/benchmark_Types.cxx b/Utilities/Mergers/test/benchmark_Types.cxx index 790fd329185ea..736685c5746b8 100644 --- a/Utilities/Mergers/test/benchmark_Types.cxx +++ b/Utilities/Mergers/test/benchmark_Types.cxx @@ -165,11 +165,16 @@ auto measure = [](Measurement m, auto* o, auto* i) -> double { tm->WriteObject(o); start = std::chrono::high_resolution_clock::now(); - o2::framework::FairTMessage ftm(const_cast(tm->Buffer()), tm->BufferSize()); - auto* storedClass = ftm.GetClass(); + // Needed to take into account that FairInputTBuffer expects the first 8 bytes to be the + // allocator pointer, which is not present in the TMessage buffer. 
+ o2::framework::FairInputTBuffer ftm(const_cast(tm->Buffer() - 8), tm->BufferSize() + 8); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error("Unknown stored class"); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* tObjectClass = TClass::GetClass(typeid(TObject)); if (!storedClass->InheritsFrom(tObjectClass)) { @@ -738,4 +743,4 @@ int main(int argc, const char* argv[]) file.close(); return 0; -} \ No newline at end of file +} From 23c82aa37b9f1acfac022ec3c797d669c1c7b056 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 26 Feb 2024 14:40:12 +0100 Subject: [PATCH 380/726] DPL: move StringLiteral to FunctionalHelpers --- Framework/Core/include/Framework/AlgorithmSpec.h | 11 ----------- .../Foundation/include/Framework/FunctionalHelpers.h | 10 ++++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Framework/Core/include/Framework/AlgorithmSpec.h b/Framework/Core/include/Framework/AlgorithmSpec.h index e98a3abefbce1..1f9bb750f0ef3 100644 --- a/Framework/Core/include/Framework/AlgorithmSpec.h +++ b/Framework/Core/include/Framework/AlgorithmSpec.h @@ -83,17 +83,6 @@ struct AlgorithmSpec { struct AlgorithmPlugin { virtual AlgorithmSpec create() = 0; }; - -template -struct StringLiteral { - constexpr StringLiteral(const char (&str)[N]) - { - std::copy_n(str, N, value); - } - - char value[N]; -}; - // Allow fetching inputs from the context using a string literal. template struct Input { diff --git a/Framework/Foundation/include/Framework/FunctionalHelpers.h b/Framework/Foundation/include/Framework/FunctionalHelpers.h index 7e060e8816e3b..12470a7eb0f1c 100644 --- a/Framework/Foundation/include/Framework/FunctionalHelpers.h +++ b/Framework/Foundation/include/Framework/FunctionalHelpers.h @@ -25,6 +25,16 @@ struct memfun_type { }; } // namespace +template +struct StringLiteral { + constexpr StringLiteral(const char (&str)[N]) + { + std::copy_n(str, N, value); + } + + char value[N]; +}; + /// Type helper to hold metadata about a lambda or a class /// method. 
template From 9368e259442fc5ec6521cb4ee9ff992b7ca7fd03 Mon Sep 17 00:00:00 2001 From: Ole Schmidt Date: Mon, 26 Feb 2024 17:07:18 +0100 Subject: [PATCH 381/726] Fix odc_expendable_task requirement in DDS XML --- Framework/Core/src/DDSConfigHelpers.cxx | 4 ++-- Framework/Core/test/test_FrameworkDataFlowToDDS.cxx | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/DDSConfigHelpers.cxx b/Framework/Core/src/DDSConfigHelpers.cxx index e56afe34bc511..d17eeca394520 100644 --- a/Framework/Core/src/DDSConfigHelpers.cxx +++ b/Framework/Core/src/DDSConfigHelpers.cxx @@ -181,7 +181,7 @@ void DDSConfigHelpers::dumpDeviceSpec2DDS(std::ostream& out, out << fmt::format("cat ${{DDS_LOCATION}}/dpl_json{}.asset | o2-dpl-run --driver-mode embedded", workflowSuffix); out << R"()" << "\n" - << " \n" + << " odc_expendable_task\n" << "\n" << "\n"; out << ""; @@ -252,7 +252,7 @@ void DDSConfigHelpers::dumpDeviceSpec2DDS(std::ostream& out, return label.value == "expendable"; }) != spec.labels.end()) { out << " \n"; - out << " \n"; + out << " odc_expendable_task\n"; out << " \n"; } auto& rewriter = rewriters[di]; diff --git a/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx b/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx index 49420e4bd99ad..e2082f13b40df 100644 --- a/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx +++ b/Framework/Core/test/test_FrameworkDataFlowToDDS.cxx @@ -635,7 +635,7 @@ TEST_CASE("TestDDSExpendable") dpl_json cat ${DDS_LOCATION}/dpl_json.asset | foo --id D_dds%TaskIndex%_%CollectionIndex% --shm-monitor false --log-color false --batch --color false --channel-config "name=from_B_to_D,type=pull,method=connect,address=ipc://@localhostworkflow-id_22002,transport=shmem,rateLogging=0,rcvBufSize=1,sndBufSize=1" --channel-config "name=from_C_to_D,type=pull,method=connect,address=ipc://@localhostworkflow-id_22003,transport=shmem,rateLogging=0,rcvBufSize=1,sndBufSize=1" --bad-alloc-attempt-interval 50 --bad-alloc-max-attempts 1 --early-forward-policy never --io-threads 1 --jobs 4 --severity info --shm-allocation rbtree_best_fit --shm-mlock-segment false --shm-mlock-segment-on-creation false --shm-no-cleanup false --shm-segment-id 0 --shm-throw-bad-alloc true --shm-zero-segment false --stacktrace-on-signal simple --timeframes-rate-limit 0 --a-param 1 --b-param "" --c-param "foo;bar" --session dpl_workflow-id --plugin odc - + odc_expendable_task From 43b29ee7dd6d684bfd82f50368b610ad9f01dea8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 27 Feb 2024 23:27:48 +0100 Subject: [PATCH 382/726] DPL: avoid TMessage usage TMessage does not allow for non owned buffers, so we end up having an extra buffer in private memory for (de)serializing. Using TBufferFile directly allows to avoid that, so this moves the whole ROOT serialization support in DPL to use it. 
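Purely as an illustration of the idea (not part of the patch): below is a minimal, standalone sketch of the same round trip, with plain malloc/realloc standing in for the fair::mq::Message that the real code grows through Rebuild(). The growBuffer helper and kInitialSize constant are invented for the example; only the TBufferFile calls mirror what the patch does.

#include <TBufferFile.h>
#include <TClass.h>
#include <TObjString.h>
#include <cassert>
#include <cstdlib>

// Realloc callback invoked by ROOT when the write buffer runs out of space.
// In the patch this is the spot where the FairMQ message gets Rebuild()-ed instead.
static char* growBuffer(char* oldData, size_t newSize, size_t /*oldSize*/)
{
  return static_cast<char*>(std::realloc(oldData, newSize));
}

int main()
{
  constexpr int kInitialSize = 64; // small on purpose, growth goes through growBuffer
  char* data = static_cast<char*>(std::malloc(kInitialSize));

  // Write straight into the caller-owned buffer: no private TMessage copy.
  TBufferFile out(TBuffer::kWrite, kInitialSize, data, /*adopt=*/false, growBuffer);
  TObjString src("test");
  out.WriteObjectAny(&src, TClass::GetClass("TObjString"));

  // Read back from the very same bytes, again without adopting them.
  TBufferFile reader(TBuffer::kRead, out.Length(), out.Buffer(), /*adopt=*/false, nullptr);
  reader.InitMap();
  TClass* stored = reader.ReadClass(); // peek at the stored class ...
  reader.SetBufferOffset(0);           // ... then rewind before the actual read
  reader.ResetMap();
  auto* back = static_cast<TObjString*>(reader.ReadObjectAny(stored));
  assert(back != nullptr && back->GetString() == "test");

  delete back;
  std::free(out.Buffer()); // may differ from `data` if a realloc happened
  return 0;
}

The real FairOutputTBuffer cannot simply call realloc, because FairMQ owns the allocation; that is why the patch stashes the message pointer in the first eight bytes of the payload and grows the message from the realloc callback instead.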
--- .../src/AODJAlienReaderHelpers.h | 2 + .../Core/include/Framework/DataAllocator.h | 1 + .../Core/include/Framework/DataRefUtils.h | 13 +- .../include/Framework/RootMessageContext.h | 3 + .../Framework/RootSerializationSupport.h | 3 +- .../include/Framework/TMessageSerializer.h | 130 ++++++++---------- Framework/Core/src/CommonDataProcessors.cxx | 7 +- Framework/Core/src/TMessageSerializer.cxx | 35 +++++ Framework/Core/test/test_DataRefUtils.cxx | 28 +++- .../Core/test/test_TMessageSerializer.cxx | 51 +++++-- Framework/Utils/test/test_RootTreeWriter.cxx | 1 + Steer/DigitizerWorkflow/src/SimReaderSpec.cxx | 1 - Utilities/Mergers/src/ObjectStore.cxx | 11 +- Utilities/Mergers/test/benchmark_Types.cxx | 11 +- 14 files changed, 195 insertions(+), 102 deletions(-) diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h index 655e4b6c0b439..4b9fd710aca14 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h @@ -16,7 +16,9 @@ #include "Framework/AlgorithmSpec.h" #include "Framework/Logger.h" #include + #include +class TFile; namespace o2::framework::readers { diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 8151d2f83c6c6..029e922aeb90b 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -359,6 +359,7 @@ class DataAllocator } else if constexpr (has_root_dictionary::value == true || is_specialization_v == true) { // Serialize a snapshot of an object with root dictionary payloadMessage = proxy.createOutputMessage(routeIndex); + payloadMessage->Rebuild(4096, {64}); if constexpr (is_specialization_v == true) { // Explicitely ROOT serialize a snapshot of object. // An object wrapped into type `ROOTSerialized` is explicitely marked to be ROOT serialized diff --git a/Framework/Core/include/Framework/DataRefUtils.h b/Framework/Core/include/Framework/DataRefUtils.h index defd10244bca5..e59f986f09250 100644 --- a/Framework/Core/include/Framework/DataRefUtils.h +++ b/Framework/Core/include/Framework/DataRefUtils.h @@ -71,12 +71,15 @@ struct DataRefUtils { throw runtime_error("Attempt to extract a TMessage from non-ROOT serialised message"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); - auto* storedClass = ftm.GetClass(); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); auto* requestedClass = RSS::TClass::GetClass(typeid(T)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); // should always have the class description if has_root_dictionary is true assert(requestedClass != nullptr); + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* object = ftm.ReadObjectAny(storedClass); if (object == nullptr) { throw runtime_error_f("Failed to read object with name %s from message using ROOT serialization.", @@ -146,7 +149,11 @@ struct DataRefUtils { throw runtime_error("ROOT serialization not supported, dictionary not found for data type"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); + ftm.InitMap(); + auto *classInfo = ftm.ReadClass(); + ftm.SetBufferOffset(0); + ftm.ResetMap(); result.reset(static_cast(ftm.ReadObjectAny(cl))); if (result.get() == nullptr) { throw runtime_error_f("Unable to extract class %s", cl == nullptr ? 
"" : cl->GetName()); diff --git a/Framework/Core/include/Framework/RootMessageContext.h b/Framework/Core/include/Framework/RootMessageContext.h index bef60ebbbf9f9..b1124880cf30f 100644 --- a/Framework/Core/include/Framework/RootMessageContext.h +++ b/Framework/Core/include/Framework/RootMessageContext.h @@ -72,6 +72,9 @@ class RootSerializedObject : public MessageContext::ContextObject fair::mq::Parts finalize() final { assert(mParts.Size() == 1); + if (mPayloadMsg->GetSize() < sizeof(char*)) { + mPayloadMsg->Rebuild(4096, {64}); + } TMessageSerializer::Serialize(*mPayloadMsg, mObject.get(), nullptr); mParts.AddPart(std::move(mPayloadMsg)); return ContextObject::finalize(); diff --git a/Framework/Core/include/Framework/RootSerializationSupport.h b/Framework/Core/include/Framework/RootSerializationSupport.h index cbf7408b13c7d..a44093f9c02bf 100644 --- a/Framework/Core/include/Framework/RootSerializationSupport.h +++ b/Framework/Core/include/Framework/RootSerializationSupport.h @@ -21,7 +21,8 @@ namespace o2::framework /// compiler. struct RootSerializationSupport { using TClass = ::TClass; - using FairTMessage = o2::framework::FairTMessage; + using FairInputTBuffer = o2::framework::FairInputTBuffer; + using FairOutputBuffer = o2::framework::FairOutputTBuffer; using TObject = ::TObject; }; diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 1f08b456c0218..ca18eb21abfa1 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -16,9 +16,8 @@ #include "Framework/RuntimeError.h" #include -#include +#include #include -#include #include #include #include @@ -28,67 +27,76 @@ namespace o2::framework { -class FairTMessage; +class FairOutputTBuffer; +class FairInputTBuffer; // utilities to produce a span over a byte buffer held by various message types // this is to avoid littering code with casts and conversions (span has a signed index type(!)) -gsl::span as_span(const FairTMessage& msg); +gsl::span as_span(const FairInputTBuffer& msg); +gsl::span as_span(const FairOutputTBuffer& msg); gsl::span as_span(const fair::mq::Message& msg); -class FairTMessage : public TMessage +// A TBufferFile which we can use to serialise data to a FairMQ message. +class FairOutputTBuffer : public TBufferFile { public: - using TMessage::TMessage; - FairTMessage() : TMessage(kMESS_OBJECT) {} - FairTMessage(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - FairTMessage(gsl::span buf) : TMessage(buf.data(), buf.size()) { ResetBit(kIsOwner); } + // This is to serialise data to FairMQ. We embed the pointer to the message + // in the data itself, so that we can use it to reallocate the message if needed. + // The FairMQ message retains ownership of the data. + // When deserialising the root object, keep in mind one needs to skip the 8 bytes + // for the pointer. + FairOutputTBuffer(fair::mq::Message& msg) + : TBufferFile(TBuffer::kWrite, msg.GetSize() - sizeof(char*), embedInItself(msg), false, fairMQrealloc) + { + } + // Helper function to keep track of the FairMQ message that holds the data + // in the data itself. We can use this to make sure the message can be reallocated + // even if we simply have a pointer to the data. Hopefully ROOT will not play dirty + // with us. + void* embedInItself(fair::mq::Message& msg); // helper function to clean up the object holding the data after it is transported. 
- static void free(void* /*data*/, void* hint); + static char* fairMQrealloc(char* oldData, size_t newSize, size_t oldSize); }; -struct TMessageSerializer { - using StreamerList = std::vector; - using CompressionLevel = int; +class FairInputTBuffer : public TBufferFile +{ + public: + // This is to serialise data to FairMQ. The provided message is expeted to have 8 bytes + // of overhead, where the source embedded the pointer for the reallocation. + // Notice this will break if the sender and receiver are not using the same + // size for a pointer. + FairInputTBuffer(char * data, size_t size) + : TBufferFile(TBuffer::kRead, size-sizeof(char*), data + sizeof(char*), false, nullptr) + { + } +}; - static void Serialize(fair::mq::Message& msg, const TObject* input, - CompressionLevel compressionLevel = -1); +struct TMessageSerializer { + static void Serialize(fair::mq::Message& msg, const TObject* input); template - static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CompressionLevel compressionLevel = -1); + static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); - static void serialize(FairTMessage& msg, const TObject* input, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const TObject* input); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const T* input, const TClass* cl); template - static std::unique_ptr deserialize(gsl::span buffer); - template - static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); + static inline std::unique_ptr deserialize(FairInputTBuffer & buffer); }; -inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const TObject* input) { - return serialize(tm, input, nullptr, compressionLevel); + return serialize(tm, input, nullptr); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const T* input, const TClass* cl) { - if (compressionLevel >= 0) { - // if negative, skip to use ROOT default - tm.SetCompressionLevel(compressionLevel); - } - // TODO: check what WriateObject and WriteObjectAny are doing if (cl == nullptr) { tm.WriteObject(input); @@ -98,7 +106,7 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // } template -inline std::unique_ptr TMessageSerializer::deserialize(gsl::span buffer) +inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer & buffer) { TClass* tgtClass = TClass::GetClass(typeid(T)); if (tgtClass == nullptr) { @@ -107,53 +115,32 @@ inline std::unique_ptr TMessageSerializer::deserialize(gsl::span b // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided - FairTMessage tm(buffer); - TClass* serializedClass = tm.GetClass(); + buffer.InitMap(); + TClass* serializedClass = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); if (serializedClass == nullptr) { throw runtime_error_f("can not read class info from buffer"); } 
if (tgtClass != serializedClass && serializedClass->GetBaseClass(tgtClass) == nullptr) { throw runtime_error_f("can not convert serialized class %s into target class %s", - tm.GetClass()->GetName(), + serializedClass->GetName(), tgtClass->GetName()); } - return std::unique_ptr(reinterpret_cast(tm.ReadObjectAny(serializedClass))); + return std::unique_ptr(reinterpret_cast(buffer.ReadObjectAny(serializedClass))); } -template -inline std::unique_ptr TMessageSerializer::deserialize(std::byte* buffer, size_t size) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input) { - return deserialize(gsl::span(buffer, gsl::narrow::size_type>(size))); -} - -inline void FairTMessage::free(void* /*data*/, void* hint) -{ - std::default_delete deleter; - deleter(static_cast(hint)); -} - -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CompressionLevel compressionLevel) -{ - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, input->Class(), compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, input->Class()); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CompressionLevel compressionLevel) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, const TClass* cl) { - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, cl, compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, cl); } template @@ -161,7 +148,8 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u { // we know the message will not be modified by this, // so const_cast should be OK here(IMHO). 
- output = deserialize(as_span(msg)); + FairInputTBuffer input(static_cast(msg.GetData()), static_cast(msg.GetSize())); + output = deserialize(input); } // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, @@ -171,7 +159,7 @@ inline gsl::span as_span(const fair::mq::Message& msg) return gsl::span{static_cast(msg.GetData()), gsl::narrow::size_type>(msg.GetSize())}; } -inline gsl::span as_span(const FairTMessage& msg) +inline gsl::span as_span(const FairInputTBuffer& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), gsl::narrow::size_type>(msg.BufferSize())}; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..0cf7224c25ac8 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -141,9 +141,12 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector(ref.payload), static_cast(datah->payloadSize)); InputObject obj; - obj.kind = tm.GetClass(); + FairInputTBuffer tm(const_cast(ref.payload), static_cast(datah->payloadSize)); + tm.InitMap(); + obj.kind = tm.ReadClass(); + tm.SetBufferOffset(0); + tm.ResetMap(); if (obj.kind == nullptr) { LOG(error) << "Cannot read class info from buffer."; return; diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 5388a6d716cda..43113b2572191 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -9,7 +9,42 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. #include +#include #include #include using namespace o2::framework; + +void* FairOutputTBuffer::embedInItself(fair::mq::Message& msg) { + // The first bytes of the message are used to store the pointer to the message itself + // so that we can reallocate it if needed. + if (sizeof(char*) > msg.GetSize()) { + throw std::runtime_error("Message size too small to embed pointer"); + } + char* data = reinterpret_cast(msg.GetData()); + char* ptr = reinterpret_cast(&msg); + std::memcpy(data, &ptr, sizeof(char*)); + return data + sizeof(char*); +} + +// Reallocation function. Get the message pointer from the data and call Rebuild. +char *FairOutputTBuffer::fairMQrealloc(char *oldData, size_t newSize, size_t oldSize) { + // Old data is the pointer at the beginning of the message, so the pointer + // to the message is **stored** in the 8 bytes before it. + auto* msg = *(fair::mq::Message**)(oldData - sizeof(char*)); + if (newSize <= msg->GetSize()) { + // no need to reallocate, the message is already big enough + return oldData; + } + // Create a shallow copy of the message + fair::mq::MessagePtr oldMsg = msg->GetTransport()->CreateMessage(); + oldMsg->Copy(*msg); + // Copy the old data while rebuilding. Reference counting should make + // sure the old message is not deleted until the new one is ready. + // We need 8 extra bytes for the pointer to the message itself (realloc does not know about it) + // and we need to copy 8 bytes more than the old size (again, the extra pointer). 
+ msg->Rebuild(newSize+8, fair::mq::Alignment{64}); + memcpy(msg->GetData(), oldMsg->GetData(), oldSize+8); + + return reinterpret_cast(msg->GetData()) + sizeof(char*); +} diff --git a/Framework/Core/test/test_DataRefUtils.cxx b/Framework/Core/test/test_DataRefUtils.cxx index 37da7912bfe8b..081adc81ebf69 100644 --- a/Framework/Core/test/test_DataRefUtils.cxx +++ b/Framework/Core/test/test_DataRefUtils.cxx @@ -21,17 +21,37 @@ using namespace o2::framework; +TEST_CASE("PureRootTest") { + TBufferFile buffer(TBuffer::kWrite); + TObjString s("test"); + buffer.WriteObject(&s); + + TBufferFile buffer2(TBuffer::kRead, buffer.BufferSize(), buffer.Buffer(), false); + buffer2.SetReadMode(); + buffer2.InitMap(); + TClass *storedClass = buffer2.ReadClass(); + // ReadClass advances the buffer, so we need to reset it. + buffer2.SetBufferOffset(0); + buffer2.ResetMap(); + REQUIRE(storedClass != nullptr); + auto *outS = (TObjString*)buffer2.ReadObjectAny(storedClass); + REQUIRE(outS != nullptr); + REQUIRE(outS->GetString() == "test"); +} + // Simple test to do root deserialization. TEST_CASE("TestRootSerialization") { DataRef ref; - TMessage* tm = new TMessage(kMESS_OBJECT); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer tm(*msg); auto sOrig = std::make_unique("test"); - tm->WriteObject(sOrig.get()); + tm << sOrig.get(); o2::header::DataHeader dh; dh.payloadSerializationMethod = o2::header::gSerializationMethodROOT; - ref.payload = tm->Buffer(); - dh.payloadSize = tm->BufferSize(); + ref.payload = (char*)msg->GetData(); + dh.payloadSize = (size_t)msg->GetSize(); ref.header = reinterpret_cast(&dh); // Check by using the same type diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index bc5f817400a44..365d31e95dd30 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -11,6 +11,7 @@ #include "Framework/TMessageSerializer.h" #include "Framework/RuntimeError.h" +#include #include "TestClasses.h" #include #include @@ -49,14 +50,14 @@ TEST_CASE("TestTMessageSerializer") array.SetOwner(); array.Add(new TNamed(testname, testtitle)); - FairTMessage msg; - TMessageSerializer::serialize(msg, &array); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); + TMessageSerializer::serialize(buffer, &array); - auto buf = as_span(msg); - REQUIRE(buf.size() == msg.BufferSize()); - REQUIRE(static_cast(buf.data()) == static_cast(msg.Buffer())); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); // test deserialization with TObject as target class (default) - auto out = TMessageSerializer::deserialize(buf); + auto out = TMessageSerializer::deserialize(msg2); auto* outarr = dynamic_cast(out.get()); REQUIRE(out.get() == outarr); @@ -66,9 +67,9 @@ TEST_CASE("TestTMessageSerializer") REQUIRE(named->GetTitle() == std::string(testtitle)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef, + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, ExceptionMatcher("can 
not convert serialized class TObjArray into target class TNamed")); } @@ -87,23 +88,29 @@ TEST_CASE("TestTMessageSerializer_NonTObject") TClass* cl = TClass::GetClass("std::vector"); REQUIRE(cl != nullptr); - FairTMessage msg; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); char* in = reinterpret_cast(&data); - TMessageSerializer::serialize(msg, in, cl); + TMessageSerializer::serialize(buffer, in, cl); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); - auto out = TMessageSerializer::deserialize>(as_span(msg)); + auto out = TMessageSerializer::deserialize>(msg2); REQUIRE(out); REQUIRE((*out.get()).size() == 2); REQUIRE((*out.get())[0] == o2::test::Polymorphic(0xaffe)); REQUIRE((*out.get())[1] == o2::test::Polymorphic(0xd00f)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(as_span(msg)), RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), RuntimeErrorRef); } TEST_CASE("TestTMessageSerializer_InvalidBuffer") { const char* buffer = "this is for sure not a serialized ROOT object"; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(strlen(buffer) + 8); + memcpy((char*)msg->GetData() + 8, buffer, strlen(buffer)); // test deserialization of invalid buffer and check the exception // FIXME: at the moment, TMessage fails directly with a segfault, which it shouldn't do /* @@ -119,5 +126,23 @@ TEST_CASE("TestTMessageSerializer_InvalidBuffer") struct Dummy { }; auto matcher = ExceptionMatcher("class is not ROOT-serializable: ZL22CATCH2_INTERNAL_TEST_4vE5Dummy"); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize((std::byte*)buffer, strlen(buffer)), o2::framework::RuntimeErrorRef, matcher); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, matcher); +} + +TEST_CASE("TestTMessageSerializer_CheckExpansion") +{ + const char* buffer = "this is for sure not a serialized ROOT object"; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(strlen(buffer) + 8); + FairOutputTBuffer msg2(*msg); + // The buffer starts after 8 bytes. + REQUIRE(msg2.Buffer() == (char*)msg->GetData()+8); + // The first 8 bytes of the buffer store the pointer to the message itself. + REQUIRE(*(fair::mq::Message**)msg->GetData() == msg.get()); + // Notice that TBuffer does the same trick with the reallocation function, + // so in the end the useful buffer size is the message size minus 16. + REQUIRE(msg2.BufferSize() == (msg->GetSize()-16)); + // This will not fit the original buffer size, so the buffer will be expanded. 
+ msg2.Expand(100); } diff --git a/Framework/Utils/test/test_RootTreeWriter.cxx b/Framework/Utils/test/test_RootTreeWriter.cxx index 3194508f3d775..62e1eb62cb4f1 100644 --- a/Framework/Utils/test/test_RootTreeWriter.cxx +++ b/Framework/Utils/test/test_RootTreeWriter.cxx @@ -179,6 +179,7 @@ TEST_CASE("test_RootTreeWriter") auto createSerializedMessage = [&transport, &store](DataHeader&& dh, auto& data) { fair::mq::MessagePtr payload = transport->CreateMessage(); + payload->Rebuild(4096, {64}); auto* cl = TClass::GetClass(typeid(decltype(data))); TMessageSerializer().Serialize(*payload, &data, cl); dh.payloadSize = payload->GetSize(); diff --git a/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx b/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx index 03bfa2eb23ede..6f8502f74a85b 100644 --- a/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx +++ b/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx @@ -25,7 +25,6 @@ #include "DetectorsRaw/HBFUtils.h" #include #include -#include // object serialization #include // std::unique_ptr #include // memcpy #include // std::string diff --git a/Utilities/Mergers/src/ObjectStore.cxx b/Utilities/Mergers/src/ObjectStore.cxx index e88358507c31e..3bb49f1dfc9d8 100644 --- a/Utilities/Mergers/src/ObjectStore.cxx +++ b/Utilities/Mergers/src/ObjectStore.cxx @@ -38,7 +38,7 @@ static std::string concat(Args&&... arguments) return std::move(ss.str()); } -void* readObject(const TClass* type, o2::framework::FairTMessage& ftm) +void* readObject(const TClass* type, o2::framework::FairInputTBuffer& ftm) { using namespace std::string_view_literals; auto* object = ftm.ReadObjectAny(type); @@ -60,7 +60,7 @@ MergeInterface* castToMergeInterface(bool inheritsFromTObject, void* object, TCl return objectAsMergeInterface; } -std::optional extractVector(o2::framework::FairTMessage& ftm, const TClass* storedClass) +std::optional extractVector(o2::framework::FairInputTBuffer& ftm, const TClass* storedClass) { if (!storedClass->InheritsFrom(TClass::GetClass(typeid(VectorOfRawTObjects)))) { return std::nullopt; @@ -88,11 +88,14 @@ ObjectStore extractObjectFrom(const framework::DataRef& ref) throw std::runtime_error(concat(errorPrefix, "It is not ROOT-serialized"sv)); } - o2::framework::FairTMessage ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); - auto* storedClass = ftm.GetClass(); + o2::framework::FairInputTBuffer ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error(concat(errorPrefix, "Unknown stored class"sv)); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); if (const auto extractedVector = extractVector(ftm, storedClass)) { return extractedVector.value(); diff --git a/Utilities/Mergers/test/benchmark_Types.cxx b/Utilities/Mergers/test/benchmark_Types.cxx index 790fd329185ea..736685c5746b8 100644 --- a/Utilities/Mergers/test/benchmark_Types.cxx +++ b/Utilities/Mergers/test/benchmark_Types.cxx @@ -165,11 +165,16 @@ auto measure = [](Measurement m, auto* o, auto* i) -> double { tm->WriteObject(o); start = std::chrono::high_resolution_clock::now(); - o2::framework::FairTMessage ftm(const_cast(tm->Buffer()), tm->BufferSize()); - auto* storedClass = ftm.GetClass(); + // Needed to take into account that FairInputTBuffer expects the first 8 bytes to be the + // allocator pointer, which is not present in the TMessage buffer. 
+ o2::framework::FairInputTBuffer ftm(const_cast(tm->Buffer() - 8), tm->BufferSize() + 8); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error("Unknown stored class"); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* tObjectClass = TClass::GetClass(typeid(TObject)); if (!storedClass->InheritsFrom(tObjectClass)) { @@ -738,4 +743,4 @@ int main(int argc, const char* argv[]) file.close(); return 0; -} \ No newline at end of file +} From 03228df7d530292972830710a1bcc5bbf53845c8 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 28 Feb 2024 14:08:03 +0100 Subject: [PATCH 383/726] DPL: avoid TMessage usage TMessage does not allow for non owned buffers, so we end up having an extra buffer in private memory for (de)serializing. Using TBufferFile directly allows to avoid that, so this moves the whole ROOT serialization support in DPL to use it. --- .../src/AODJAlienReaderHelpers.h | 2 + .../Core/include/Framework/DataAllocator.h | 1 + .../Core/include/Framework/DataRefUtils.h | 13 +- .../include/Framework/RootMessageContext.h | 3 + .../Framework/RootSerializationSupport.h | 3 +- .../include/Framework/TMessageSerializer.h | 130 ++++++++---------- Framework/Core/src/CommonDataProcessors.cxx | 7 +- Framework/Core/src/TMessageSerializer.cxx | 37 +++++ Framework/Core/test/test_DataRefUtils.cxx | 29 +++- .../Core/test/test_TMessageSerializer.cxx | 51 +++++-- Framework/Utils/test/test_RootTreeWriter.cxx | 1 + Steer/DigitizerWorkflow/src/SimReaderSpec.cxx | 1 - Utilities/Mergers/src/ObjectStore.cxx | 11 +- Utilities/Mergers/test/benchmark_Types.cxx | 11 +- 14 files changed, 198 insertions(+), 102 deletions(-) diff --git a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h index 655e4b6c0b439..4b9fd710aca14 100644 --- a/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h +++ b/Framework/AnalysisSupport/src/AODJAlienReaderHelpers.h @@ -16,7 +16,9 @@ #include "Framework/AlgorithmSpec.h" #include "Framework/Logger.h" #include + #include +class TFile; namespace o2::framework::readers { diff --git a/Framework/Core/include/Framework/DataAllocator.h b/Framework/Core/include/Framework/DataAllocator.h index 8151d2f83c6c6..029e922aeb90b 100644 --- a/Framework/Core/include/Framework/DataAllocator.h +++ b/Framework/Core/include/Framework/DataAllocator.h @@ -359,6 +359,7 @@ class DataAllocator } else if constexpr (has_root_dictionary::value == true || is_specialization_v == true) { // Serialize a snapshot of an object with root dictionary payloadMessage = proxy.createOutputMessage(routeIndex); + payloadMessage->Rebuild(4096, {64}); if constexpr (is_specialization_v == true) { // Explicitely ROOT serialize a snapshot of object. 
// An object wrapped into type `ROOTSerialized` is explicitely marked to be ROOT serialized diff --git a/Framework/Core/include/Framework/DataRefUtils.h b/Framework/Core/include/Framework/DataRefUtils.h index defd10244bca5..264533def326d 100644 --- a/Framework/Core/include/Framework/DataRefUtils.h +++ b/Framework/Core/include/Framework/DataRefUtils.h @@ -71,12 +71,15 @@ struct DataRefUtils { throw runtime_error("Attempt to extract a TMessage from non-ROOT serialised message"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); - auto* storedClass = ftm.GetClass(); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); auto* requestedClass = RSS::TClass::GetClass(typeid(T)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); // should always have the class description if has_root_dictionary is true assert(requestedClass != nullptr); + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* object = ftm.ReadObjectAny(storedClass); if (object == nullptr) { throw runtime_error_f("Failed to read object with name %s from message using ROOT serialization.", @@ -146,7 +149,11 @@ struct DataRefUtils { throw runtime_error("ROOT serialization not supported, dictionary not found for data type"); } - typename RSS::FairTMessage ftm(const_cast(ref.payload), payloadSize); + typename RSS::FairInputTBuffer ftm(const_cast(ref.payload), payloadSize); + ftm.InitMap(); + auto* classInfo = ftm.ReadClass(); + ftm.SetBufferOffset(0); + ftm.ResetMap(); result.reset(static_cast(ftm.ReadObjectAny(cl))); if (result.get() == nullptr) { throw runtime_error_f("Unable to extract class %s", cl == nullptr ? "" : cl->GetName()); diff --git a/Framework/Core/include/Framework/RootMessageContext.h b/Framework/Core/include/Framework/RootMessageContext.h index bef60ebbbf9f9..b1124880cf30f 100644 --- a/Framework/Core/include/Framework/RootMessageContext.h +++ b/Framework/Core/include/Framework/RootMessageContext.h @@ -72,6 +72,9 @@ class RootSerializedObject : public MessageContext::ContextObject fair::mq::Parts finalize() final { assert(mParts.Size() == 1); + if (mPayloadMsg->GetSize() < sizeof(char*)) { + mPayloadMsg->Rebuild(4096, {64}); + } TMessageSerializer::Serialize(*mPayloadMsg, mObject.get(), nullptr); mParts.AddPart(std::move(mPayloadMsg)); return ContextObject::finalize(); diff --git a/Framework/Core/include/Framework/RootSerializationSupport.h b/Framework/Core/include/Framework/RootSerializationSupport.h index cbf7408b13c7d..a44093f9c02bf 100644 --- a/Framework/Core/include/Framework/RootSerializationSupport.h +++ b/Framework/Core/include/Framework/RootSerializationSupport.h @@ -21,7 +21,8 @@ namespace o2::framework /// compiler. 
struct RootSerializationSupport { using TClass = ::TClass; - using FairTMessage = o2::framework::FairTMessage; + using FairInputTBuffer = o2::framework::FairInputTBuffer; + using FairOutputBuffer = o2::framework::FairOutputTBuffer; using TObject = ::TObject; }; diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 1f08b456c0218..34a5156074b81 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -16,9 +16,8 @@ #include "Framework/RuntimeError.h" #include -#include +#include #include -#include #include #include #include @@ -28,67 +27,76 @@ namespace o2::framework { -class FairTMessage; +class FairOutputTBuffer; +class FairInputTBuffer; // utilities to produce a span over a byte buffer held by various message types // this is to avoid littering code with casts and conversions (span has a signed index type(!)) -gsl::span as_span(const FairTMessage& msg); +gsl::span as_span(const FairInputTBuffer& msg); +gsl::span as_span(const FairOutputTBuffer& msg); gsl::span as_span(const fair::mq::Message& msg); -class FairTMessage : public TMessage +// A TBufferFile which we can use to serialise data to a FairMQ message. +class FairOutputTBuffer : public TBufferFile { public: - using TMessage::TMessage; - FairTMessage() : TMessage(kMESS_OBJECT) {} - FairTMessage(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - FairTMessage(gsl::span buf) : TMessage(buf.data(), buf.size()) { ResetBit(kIsOwner); } + // This is to serialise data to FairMQ. We embed the pointer to the message + // in the data itself, so that we can use it to reallocate the message if needed. + // The FairMQ message retains ownership of the data. + // When deserialising the root object, keep in mind one needs to skip the 8 bytes + // for the pointer. + FairOutputTBuffer(fair::mq::Message& msg) + : TBufferFile(TBuffer::kWrite, msg.GetSize() - sizeof(char*), embedInItself(msg), false, fairMQrealloc) + { + } + // Helper function to keep track of the FairMQ message that holds the data + // in the data itself. We can use this to make sure the message can be reallocated + // even if we simply have a pointer to the data. Hopefully ROOT will not play dirty + // with us. + void* embedInItself(fair::mq::Message& msg); // helper function to clean up the object holding the data after it is transported. - static void free(void* /*data*/, void* hint); + static char* fairMQrealloc(char* oldData, size_t newSize, size_t oldSize); }; -struct TMessageSerializer { - using StreamerList = std::vector; - using CompressionLevel = int; +class FairInputTBuffer : public TBufferFile +{ + public: + // This is to serialise data to FairMQ. The provided message is expeted to have 8 bytes + // of overhead, where the source embedded the pointer for the reallocation. + // Notice this will break if the sender and receiver are not using the same + // size for a pointer. 
+ FairInputTBuffer(char* data, size_t size) + : TBufferFile(TBuffer::kRead, size - sizeof(char*), data + sizeof(char*), false, nullptr) + { + } +}; - static void Serialize(fair::mq::Message& msg, const TObject* input, - CompressionLevel compressionLevel = -1); +struct TMessageSerializer { + static void Serialize(fair::mq::Message& msg, const TObject* input); template - static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl, // - CompressionLevel compressionLevel = -1); + static void Serialize(fair::mq::Message& msg, const T* input, const TClass* cl); template static void Deserialize(const fair::mq::Message& msg, std::unique_ptr& output); - static void serialize(FairTMessage& msg, const TObject* input, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const TObject* input); template - static void serialize(FairTMessage& msg, const T* input, // - const TClass* cl, - CompressionLevel compressionLevel = -1); + static void serialize(o2::framework::FairOutputTBuffer& msg, const T* input, const TClass* cl); template - static std::unique_ptr deserialize(gsl::span buffer); - template - static inline std::unique_ptr deserialize(std::byte* buffer, size_t size); + static inline std::unique_ptr deserialize(FairInputTBuffer& buffer); }; -inline void TMessageSerializer::serialize(FairTMessage& tm, const TObject* input, - CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const TObject* input) { - return serialize(tm, input, nullptr, compressionLevel); + return serialize(tm, input, nullptr); } template -inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // - const TClass* cl, CompressionLevel compressionLevel) +inline void TMessageSerializer::serialize(FairOutputTBuffer& tm, const T* input, const TClass* cl) { - if (compressionLevel >= 0) { - // if negative, skip to use ROOT default - tm.SetCompressionLevel(compressionLevel); - } - // TODO: check what WriateObject and WriteObjectAny are doing if (cl == nullptr) { tm.WriteObject(input); @@ -98,7 +106,7 @@ inline void TMessageSerializer::serialize(FairTMessage& tm, const T* input, // } template -inline std::unique_ptr TMessageSerializer::deserialize(gsl::span buffer) +inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer& buffer) { TClass* tgtClass = TClass::GetClass(typeid(T)); if (tgtClass == nullptr) { @@ -107,53 +115,32 @@ inline std::unique_ptr TMessageSerializer::deserialize(gsl::span b // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided - FairTMessage tm(buffer); - TClass* serializedClass = tm.GetClass(); + buffer.InitMap(); + TClass* serializedClass = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); if (serializedClass == nullptr) { throw runtime_error_f("can not read class info from buffer"); } if (tgtClass != serializedClass && serializedClass->GetBaseClass(tgtClass) == nullptr) { throw runtime_error_f("can not convert serialized class %s into target class %s", - tm.GetClass()->GetName(), + serializedClass->GetName(), tgtClass->GetName()); } - return std::unique_ptr(reinterpret_cast(tm.ReadObjectAny(serializedClass))); + return std::unique_ptr(reinterpret_cast(buffer.ReadObjectAny(serializedClass))); } -template -inline std::unique_ptr TMessageSerializer::deserialize(std::byte* buffer, size_t size) +inline void 
TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input) { - return deserialize(gsl::span(buffer, gsl::narrow::size_type>(size))); -} - -inline void FairTMessage::free(void* /*data*/, void* hint) -{ - std::default_delete deleter; - deleter(static_cast(hint)); -} - -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const TObject* input, - TMessageSerializer::CompressionLevel compressionLevel) -{ - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, input->Class(), compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, input->Class()); } template -inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, // - const TClass* cl, // - TMessageSerializer::CompressionLevel compressionLevel) +inline void TMessageSerializer::Serialize(fair::mq::Message& msg, const T* input, const TClass* cl) { - std::unique_ptr tm = std::make_unique(kMESS_OBJECT); - - serialize(*tm, input, cl, compressionLevel); - - msg.Rebuild(tm->Buffer(), tm->BufferSize(), FairTMessage::free, tm.get()); - tm.release(); + FairOutputTBuffer output(msg); + serialize(output, input, cl); } template @@ -161,7 +148,8 @@ inline void TMessageSerializer::Deserialize(const fair::mq::Message& msg, std::u { // we know the message will not be modified by this, // so const_cast should be OK here(IMHO). - output = deserialize(as_span(msg)); + FairInputTBuffer input(static_cast(msg.GetData()), static_cast(msg.GetSize())); + output = deserialize(input); } // gsl::narrow is used to do a runtime narrowing check, this might be a bit paranoid, @@ -171,7 +159,7 @@ inline gsl::span as_span(const fair::mq::Message& msg) return gsl::span{static_cast(msg.GetData()), gsl::narrow::size_type>(msg.GetSize())}; } -inline gsl::span as_span(const FairTMessage& msg) +inline gsl::span as_span(const FairInputTBuffer& msg) { return gsl::span{reinterpret_cast(msg.Buffer()), gsl::narrow::size_type>(msg.BufferSize())}; diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 48a3eb1da95b9..0cf7224c25ac8 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -141,9 +141,12 @@ DataProcessorSpec CommonDataProcessors::getOutputObjHistSink(std::vector(ref.payload), static_cast(datah->payloadSize)); InputObject obj; - obj.kind = tm.GetClass(); + FairInputTBuffer tm(const_cast(ref.payload), static_cast(datah->payloadSize)); + tm.InitMap(); + obj.kind = tm.ReadClass(); + tm.SetBufferOffset(0); + tm.ResetMap(); if (obj.kind == nullptr) { LOG(error) << "Cannot read class info from buffer."; return; diff --git a/Framework/Core/src/TMessageSerializer.cxx b/Framework/Core/src/TMessageSerializer.cxx index 5388a6d716cda..c5da4cc576242 100644 --- a/Framework/Core/src/TMessageSerializer.cxx +++ b/Framework/Core/src/TMessageSerializer.cxx @@ -9,7 +9,44 @@ // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. #include +#include #include #include using namespace o2::framework; + +void* FairOutputTBuffer::embedInItself(fair::mq::Message& msg) +{ + // The first bytes of the message are used to store the pointer to the message itself + // so that we can reallocate it if needed. 
+ if (sizeof(char*) > msg.GetSize()) { + throw std::runtime_error("Message size too small to embed pointer"); + } + char* data = reinterpret_cast(msg.GetData()); + char* ptr = reinterpret_cast(&msg); + std::memcpy(data, &ptr, sizeof(char*)); + return data + sizeof(char*); +} + +// Reallocation function. Get the message pointer from the data and call Rebuild. +char* FairOutputTBuffer::fairMQrealloc(char* oldData, size_t newSize, size_t oldSize) +{ + // Old data is the pointer at the beginning of the message, so the pointer + // to the message is **stored** in the 8 bytes before it. + auto* msg = *(fair::mq::Message**)(oldData - sizeof(char*)); + if (newSize <= msg->GetSize()) { + // no need to reallocate, the message is already big enough + return oldData; + } + // Create a shallow copy of the message + fair::mq::MessagePtr oldMsg = msg->GetTransport()->CreateMessage(); + oldMsg->Copy(*msg); + // Copy the old data while rebuilding. Reference counting should make + // sure the old message is not deleted until the new one is ready. + // We need 8 extra bytes for the pointer to the message itself (realloc does not know about it) + // and we need to copy 8 bytes more than the old size (again, the extra pointer). + msg->Rebuild(newSize + 8, fair::mq::Alignment{64}); + memcpy(msg->GetData(), oldMsg->GetData(), oldSize + 8); + + return reinterpret_cast(msg->GetData()) + sizeof(char*); +} diff --git a/Framework/Core/test/test_DataRefUtils.cxx b/Framework/Core/test/test_DataRefUtils.cxx index 37da7912bfe8b..d4accde0fecf0 100644 --- a/Framework/Core/test/test_DataRefUtils.cxx +++ b/Framework/Core/test/test_DataRefUtils.cxx @@ -21,17 +21,38 @@ using namespace o2::framework; +TEST_CASE("PureRootTest") +{ + TBufferFile buffer(TBuffer::kWrite); + TObjString s("test"); + buffer.WriteObject(&s); + + TBufferFile buffer2(TBuffer::kRead, buffer.BufferSize(), buffer.Buffer(), false); + buffer2.SetReadMode(); + buffer2.InitMap(); + TClass* storedClass = buffer2.ReadClass(); + // ReadClass advances the buffer, so we need to reset it. + buffer2.SetBufferOffset(0); + buffer2.ResetMap(); + REQUIRE(storedClass != nullptr); + auto* outS = (TObjString*)buffer2.ReadObjectAny(storedClass); + REQUIRE(outS != nullptr); + REQUIRE(outS->GetString() == "test"); +} + // Simple test to do root deserialization. 
TEST_CASE("TestRootSerialization") { DataRef ref; - TMessage* tm = new TMessage(kMESS_OBJECT); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer tm(*msg); auto sOrig = std::make_unique("test"); - tm->WriteObject(sOrig.get()); + tm << sOrig.get(); o2::header::DataHeader dh; dh.payloadSerializationMethod = o2::header::gSerializationMethodROOT; - ref.payload = tm->Buffer(); - dh.payloadSize = tm->BufferSize(); + ref.payload = (char*)msg->GetData(); + dh.payloadSize = (size_t)msg->GetSize(); ref.header = reinterpret_cast(&dh); // Check by using the same type diff --git a/Framework/Core/test/test_TMessageSerializer.cxx b/Framework/Core/test/test_TMessageSerializer.cxx index bc5f817400a44..2807351058c1d 100644 --- a/Framework/Core/test/test_TMessageSerializer.cxx +++ b/Framework/Core/test/test_TMessageSerializer.cxx @@ -11,6 +11,7 @@ #include "Framework/TMessageSerializer.h" #include "Framework/RuntimeError.h" +#include #include "TestClasses.h" #include #include @@ -49,14 +50,14 @@ TEST_CASE("TestTMessageSerializer") array.SetOwner(); array.Add(new TNamed(testname, testtitle)); - FairTMessage msg; - TMessageSerializer::serialize(msg, &array); + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); + TMessageSerializer::serialize(buffer, &array); - auto buf = as_span(msg); - REQUIRE(buf.size() == msg.BufferSize()); - REQUIRE(static_cast(buf.data()) == static_cast(msg.Buffer())); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); // test deserialization with TObject as target class (default) - auto out = TMessageSerializer::deserialize(buf); + auto out = TMessageSerializer::deserialize(msg2); auto* outarr = dynamic_cast(out.get()); REQUIRE(out.get() == outarr); @@ -66,9 +67,9 @@ TEST_CASE("TestTMessageSerializer") REQUIRE(named->GetTitle() == std::string(testtitle)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(buf), o2::framework::RuntimeErrorRef, + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, ExceptionMatcher("can not convert serialized class TObjArray into target class TNamed")); } @@ -87,23 +88,29 @@ TEST_CASE("TestTMessageSerializer_NonTObject") TClass* cl = TClass::GetClass("std::vector"); REQUIRE(cl != nullptr); - FairTMessage msg; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(4096); + FairOutputTBuffer buffer(*msg); char* in = reinterpret_cast(&data); - TMessageSerializer::serialize(msg, in, cl); + TMessageSerializer::serialize(buffer, in, cl); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); - auto out = TMessageSerializer::deserialize>(as_span(msg)); + auto out = TMessageSerializer::deserialize>(msg2); REQUIRE(out); REQUIRE((*out.get()).size() == 2); REQUIRE((*out.get())[0] == o2::test::Polymorphic(0xaffe)); REQUIRE((*out.get())[1] == o2::test::Polymorphic(0xd00f)); // test deserialization with a wrong target class and check the exception - REQUIRE_THROWS_AS(TMessageSerializer::deserialize(as_span(msg)), RuntimeErrorRef); + REQUIRE_THROWS_AS(TMessageSerializer::deserialize(msg2), 
RuntimeErrorRef); } TEST_CASE("TestTMessageSerializer_InvalidBuffer") { const char* buffer = "this is for sure not a serialized ROOT object"; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(strlen(buffer) + 8); + memcpy((char*)msg->GetData() + 8, buffer, strlen(buffer)); // test deserialization of invalid buffer and check the exception // FIXME: at the moment, TMessage fails directly with a segfault, which it shouldn't do /* @@ -119,5 +126,23 @@ TEST_CASE("TestTMessageSerializer_InvalidBuffer") struct Dummy { }; auto matcher = ExceptionMatcher("class is not ROOT-serializable: ZL22CATCH2_INTERNAL_TEST_4vE5Dummy"); - REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize((std::byte*)buffer, strlen(buffer)), o2::framework::RuntimeErrorRef, matcher); + FairInputTBuffer msg2((char*)msg->GetData(), msg->GetSize()); + REQUIRE_THROWS_MATCHES(TMessageSerializer::deserialize(msg2), o2::framework::RuntimeErrorRef, matcher); +} + +TEST_CASE("TestTMessageSerializer_CheckExpansion") +{ + const char* buffer = "this is for sure not a serialized ROOT object"; + auto transport = fair::mq::TransportFactory::CreateTransportFactory("zeromq"); + auto msg = transport->CreateMessage(strlen(buffer) + 8); + FairOutputTBuffer msg2(*msg); + // The buffer starts after 8 bytes. + REQUIRE(msg2.Buffer() == (char*)msg->GetData() + 8); + // The first 8 bytes of the buffer store the pointer to the message itself. + REQUIRE(*(fair::mq::Message**)msg->GetData() == msg.get()); + // Notice that TBuffer does the same trick with the reallocation function, + // so in the end the useful buffer size is the message size minus 16. + REQUIRE(msg2.BufferSize() == (msg->GetSize() - 16)); + // This will not fit the original buffer size, so the buffer will be expanded. + msg2.Expand(100); } diff --git a/Framework/Utils/test/test_RootTreeWriter.cxx b/Framework/Utils/test/test_RootTreeWriter.cxx index 3194508f3d775..62e1eb62cb4f1 100644 --- a/Framework/Utils/test/test_RootTreeWriter.cxx +++ b/Framework/Utils/test/test_RootTreeWriter.cxx @@ -179,6 +179,7 @@ TEST_CASE("test_RootTreeWriter") auto createSerializedMessage = [&transport, &store](DataHeader&& dh, auto& data) { fair::mq::MessagePtr payload = transport->CreateMessage(); + payload->Rebuild(4096, {64}); auto* cl = TClass::GetClass(typeid(decltype(data))); TMessageSerializer().Serialize(*payload, &data, cl); dh.payloadSize = payload->GetSize(); diff --git a/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx b/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx index 03bfa2eb23ede..6f8502f74a85b 100644 --- a/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx +++ b/Steer/DigitizerWorkflow/src/SimReaderSpec.cxx @@ -25,7 +25,6 @@ #include "DetectorsRaw/HBFUtils.h" #include #include -#include // object serialization #include // std::unique_ptr #include // memcpy #include // std::string diff --git a/Utilities/Mergers/src/ObjectStore.cxx b/Utilities/Mergers/src/ObjectStore.cxx index e88358507c31e..3bb49f1dfc9d8 100644 --- a/Utilities/Mergers/src/ObjectStore.cxx +++ b/Utilities/Mergers/src/ObjectStore.cxx @@ -38,7 +38,7 @@ static std::string concat(Args&&... 
arguments) return std::move(ss.str()); } -void* readObject(const TClass* type, o2::framework::FairTMessage& ftm) +void* readObject(const TClass* type, o2::framework::FairInputTBuffer& ftm) { using namespace std::string_view_literals; auto* object = ftm.ReadObjectAny(type); @@ -60,7 +60,7 @@ MergeInterface* castToMergeInterface(bool inheritsFromTObject, void* object, TCl return objectAsMergeInterface; } -std::optional extractVector(o2::framework::FairTMessage& ftm, const TClass* storedClass) +std::optional extractVector(o2::framework::FairInputTBuffer& ftm, const TClass* storedClass) { if (!storedClass->InheritsFrom(TClass::GetClass(typeid(VectorOfRawTObjects)))) { return std::nullopt; @@ -88,11 +88,14 @@ ObjectStore extractObjectFrom(const framework::DataRef& ref) throw std::runtime_error(concat(errorPrefix, "It is not ROOT-serialized"sv)); } - o2::framework::FairTMessage ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); - auto* storedClass = ftm.GetClass(); + o2::framework::FairInputTBuffer ftm(const_cast(ref.payload), o2::framework::DataRefUtils::getPayloadSize(ref)); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error(concat(errorPrefix, "Unknown stored class"sv)); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); if (const auto extractedVector = extractVector(ftm, storedClass)) { return extractedVector.value(); diff --git a/Utilities/Mergers/test/benchmark_Types.cxx b/Utilities/Mergers/test/benchmark_Types.cxx index 790fd329185ea..736685c5746b8 100644 --- a/Utilities/Mergers/test/benchmark_Types.cxx +++ b/Utilities/Mergers/test/benchmark_Types.cxx @@ -165,11 +165,16 @@ auto measure = [](Measurement m, auto* o, auto* i) -> double { tm->WriteObject(o); start = std::chrono::high_resolution_clock::now(); - o2::framework::FairTMessage ftm(const_cast(tm->Buffer()), tm->BufferSize()); - auto* storedClass = ftm.GetClass(); + // Needed to take into account that FairInputTBuffer expects the first 8 bytes to be the + // allocator pointer, which is not present in the TMessage buffer. 
+ o2::framework::FairInputTBuffer ftm(const_cast(tm->Buffer() - 8), tm->BufferSize() + 8); + ftm.InitMap(); + auto* storedClass = ftm.ReadClass(); if (storedClass == nullptr) { throw std::runtime_error("Unknown stored class"); } + ftm.SetBufferOffset(0); + ftm.ResetMap(); auto* tObjectClass = TClass::GetClass(typeid(TObject)); if (!storedClass->InheritsFrom(tObjectClass)) { @@ -738,4 +743,4 @@ int main(int argc, const char* argv[]) file.close(); return 0; -} \ No newline at end of file +} From f7073e5acb3f9c5334d6b8018e7f88f5aef5f472 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 28 Feb 2024 14:55:55 +0100 Subject: [PATCH 384/726] Require C++20 for the whole AliceO2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bf1d83fbb6644..b754fc9ce9551 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -37,7 +37,7 @@ set_property(GLOBAL PROPERTY JOB_POOLS analysis=${ANALYSIS_COMPILE_POOL}) include(O2BuildSanityChecks) o2_build_sanity_checks() -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) find_package(ONNXRuntime::ONNXRuntime CONFIG) From b9937bce006aefd1b41bc02878f3483156171600 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 28 Feb 2024 16:10:54 +0100 Subject: [PATCH 385/726] [O2-4578] DPL: attempt at removing a race condition in the rate limiting --- Framework/Core/src/CommonDataProcessors.cxx | 48 +++++++++++++++------ 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 0cf7224c25ac8..7f29b9bf42554 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -568,29 +568,49 @@ DataProcessorSpec CommonDataProcessors::getGlobalFairMQSink(std::vectordata; + auto& timesliceIndex = services->get(); + auto* device = services->get().device(); + auto channel = device->GetChannels().find("metric-feedback"); + auto oldestPossingTimeslice = timesliceIndex.getOldestPossibleOutput().timeslice.value; + if (channel == device->GetChannels().end()) { + return; + } + fair::mq::MessagePtr payload(device->NewMessage()); + payload->Rebuild(&oldestPossingTimeslice, sizeof(int64_t), nullptr, nullptr); + auto* consumed = (size_t*)malloc(sizeof(size_t)); + *consumed = oldestPossingTimeslice; + + int64_t result = channel->second[0].Send(payload, 100); + // If the sending worked, we do not retry. + if (result != 0) { + // If the sending did not work, we keep trying until it actually works. + // This will schedule other tasks in the queue, so the processing of the + // data will still happen. 
+ uv_async_send(async); + } else { + lastTimeslice = *consumed; + } +} + DataProcessorSpec CommonDataProcessors::getDummySink(std::vector const& danglingOutputInputs, std::string rateLimitingChannelConfig) { return DataProcessorSpec{ .name = "internal-dpl-injected-dummy-sink", .inputs = danglingOutputInputs, - .algorithm = AlgorithmSpec{adaptStateful([](CallbackService& callbacks) { + .algorithm = AlgorithmSpec{adaptStateful([](CallbackService& callbacks, DeviceState& deviceState, ServiceRegistry& services) { + static uv_async_t async; + // The callback will only have access to the + async.data = new ServiceRegistryRef{services}; + uv_async_init(deviceState.loop, &async, retryMetricCallback); auto domainInfoUpdated = [](ServiceRegistryRef services, size_t timeslice, ChannelIndex channelIndex) { LOGP(debug, "Domain info updated with timeslice {}", timeslice); - static size_t lastTimeslice = -1; + retryMetricCallback(&async); auto& timesliceIndex = services.get(); - auto device = services.get().device(); - auto channel = device->GetChannels().find("metric-feedback"); auto oldestPossingTimeslice = timesliceIndex.getOldestPossibleOutput().timeslice.value; - if (channel != device->GetChannels().end()) { - fair::mq::MessagePtr payload(device->NewMessage()); - size_t* consumed = (size_t*)malloc(sizeof(size_t)); - *consumed = oldestPossingTimeslice; - if (*consumed != lastTimeslice) { - payload->Rebuild(consumed, sizeof(int64_t), nullptr, nullptr); - channel->second[0].Send(payload); - lastTimeslice = *consumed; - } - } auto& stats = services.get(); stats.updateStats({(int)ProcessingStatsId::CONSUMED_TIMEFRAMES, DataProcessingStats::Op::Set, (int64_t)oldestPossingTimeslice}); }; From f829c9c14961639d1d65b4df755c6d8533feb105 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 29 Feb 2024 08:56:51 +0100 Subject: [PATCH 386/726] DPL: allow a given buffer to be deserialised multiple times This is only exercised in one of the tests, AFAICT, but it should not harm. --- Framework/Core/include/Framework/TMessageSerializer.h | 1 + 1 file changed, 1 insertion(+) diff --git a/Framework/Core/include/Framework/TMessageSerializer.h b/Framework/Core/include/Framework/TMessageSerializer.h index 34a5156074b81..17f3f65ce2030 100644 --- a/Framework/Core/include/Framework/TMessageSerializer.h +++ b/Framework/Core/include/Framework/TMessageSerializer.h @@ -115,6 +115,7 @@ inline std::unique_ptr TMessageSerializer::deserialize(FairInputTBuffer& buff // FIXME: we need to add consistency check for buffer data to be serialized // at the moment, TMessage might simply crash if an invalid or inconsistent // buffer is provided + buffer.SetBufferOffset(0); buffer.InitMap(); TClass* serializedClass = buffer.ReadClass(); buffer.SetBufferOffset(0); From 0d941464737b8673f1f3ca7452b95501183e1aa9 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 1 Mar 2024 09:29:49 +0100 Subject: [PATCH 387/726] DPL: attempt to have some dropping logic which does not break test I suspect what is happening in the test is the consumers start too late (and the lossy policy drops messages on the sender side when that happens). This should avoid the problem by increasing the delay up to 1 second, before switching to lossy, giving enough time to the consumers to start. 
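[Editor's note] A minimal stand-alone sketch of the escalation described above, with hypothetical names rather than the actual SendingPolicy.cxx code: the per-send timeout grows with every consecutive failed send, reaching roughly one second, and only after more than ten failures in a row does the policy fall back to fully non-blocking, lossy sends.

#include <cstdint>
#include <cstdio>

struct ChannelStateSketch {
  int64_t droppedMessages = 0; // consecutive sends that timed out
};

// Timeout (ms) for the next send attempt: 100 ms scaled by the number of
// consecutive drops (100, 200, ... ~1.1 s), then 0 ms (non-blocking) once
// more than ten sends in a row have failed.
int64_t nextSendTimeout(ChannelStateSketch const& state)
{
  if (state.droppedMessages > 10) {
    return 0;
  }
  return 100 * (state.droppedMessages + 1);
}

int main()
{
  ChannelStateSketch state;
  for (state.droppedMessages = 0; state.droppedMessages <= 12; ++state.droppedMessages) {
    std::printf("%lld dropped -> %lld ms\n", (long long)state.droppedMessages, (long long)nextSendTimeout(state));
  }
}
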
--- Framework/Core/src/SendingPolicy.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index bf97d56b4a5b8..c575301064d8a 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -54,17 +54,17 @@ std::vector SendingPolicy::createDefaultPolicies() // We count the number of consecutively dropped messages. // If we have more than 10, we switch to a completely // non-blocking approach. - int64_t timeout = 50; + int64_t timeout = 100; if (state.droppedMessages == 10 + 1) { LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; } if (state.droppedMessages == 0) { - timeout = 50; + timeout = 100; } if (state.droppedMessages > 10) { timeout = 0; } - int64_t result = info.channel.Send(parts, timeout); + int64_t result = info.channel.Send(parts, timeout*(state.droppedMessages+1)); if (result >= 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { From d8b3ce7b76c7a02abedda16751bec5590d62a659 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 1 Mar 2024 09:32:07 +0100 Subject: [PATCH 388/726] DPL: attempt to have some dropping logic which does not break test I suspect what is happening in the test is the consumers start too late (and the lossy policy drops messages on the sender side when that happens). This should avoid the problem by increasing the delay up to 1 second, before switching to lossy, giving enough time to the consumers to start. --- Framework/Core/src/SendingPolicy.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index bf97d56b4a5b8..fb0a590920b76 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -54,17 +54,17 @@ std::vector SendingPolicy::createDefaultPolicies() // We count the number of consecutively dropped messages. // If we have more than 10, we switch to a completely // non-blocking approach. 
- int64_t timeout = 50; + int64_t timeout = 100; if (state.droppedMessages == 10 + 1) { LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; } if (state.droppedMessages == 0) { - timeout = 50; + timeout = 100; } if (state.droppedMessages > 10) { timeout = 0; } - int64_t result = info.channel.Send(parts, timeout); + int64_t result = info.channel.Send(parts, timeout * (state.droppedMessages + 1)); if (result >= 0) { state.droppedMessages = 0; } else if (state.droppedMessages < std::numeric_limits::max()) { From a147ca6aebb5932e875e999fad94168c5f921911 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 1 Mar 2024 10:08:40 +0100 Subject: [PATCH 389/726] DPL: drop special policy for Dispatcher --- Framework/Core/src/SendingPolicy.cxx | 43 ---------------------------- 1 file changed, 43 deletions(-) diff --git a/Framework/Core/src/SendingPolicy.cxx b/Framework/Core/src/SendingPolicy.cxx index fb0a590920b76..e2c00104a8951 100644 --- a/Framework/Core/src/SendingPolicy.cxx +++ b/Framework/Core/src/SendingPolicy.cxx @@ -28,49 +28,6 @@ namespace o2::framework std::vector SendingPolicy::createDefaultPolicies() { return {SendingPolicy{ - .name = "dispatcher", - .matcher = [](DataProcessorSpec const& source, DataProcessorSpec const& dest, ConfigContext const&) { - if (source.name == "Dispatcher") { - return true; - } - // Check if any of the labels has "Dispatcher" as prefix - for (auto const& label : source.labels) { - if (label.value.find("Dispatcher") == 0) { - return true; - } - } - // Check if any of the destination's labels is "expendable" - for (auto const& label : dest.labels) { - if (label.value == "expendable") { - return true; - } - } - return false; }, - .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { - auto &proxy = registry.get(); - OutputChannelInfo const& info = proxy.getOutputChannelInfo(channelIndex); - OutputChannelState& state = proxy.getOutputChannelState(channelIndex); - // Default timeout is 50ms. - // We count the number of consecutively dropped messages. - // If we have more than 10, we switch to a completely - // non-blocking approach. - int64_t timeout = 100; - if (state.droppedMessages == 10 + 1) { - LOG(warning) << "Failed to send 10 messages with 10ms timeout in a row, switching to completely non-blocking mode."; - } - if (state.droppedMessages == 0) { - timeout = 100; - } - if (state.droppedMessages > 10) { - timeout = 0; - } - int64_t result = info.channel.Send(parts, timeout * (state.droppedMessages + 1)); - if (result >= 0) { - state.droppedMessages = 0; - } else if (state.droppedMessages < std::numeric_limits::max()) { - state.droppedMessages++; - } }}, - SendingPolicy{ .name = "profiling", .matcher = [](DataProcessorSpec const&, DataProcessorSpec const&, ConfigContext const&) { return getenv("DPL_DEBUG_MESSAGE_SIZE"); }, .send = [](fair::mq::Parts& parts, ChannelIndex channelIndex, ServiceRegistryRef registry) { From 1f1428e507090c2c9d11aa1237531e82ca481d29 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Mar 2024 10:41:23 +0100 Subject: [PATCH 390/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. 
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..fb7a369bc1741 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -33,6 +33,7 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -217,7 +218,9 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) || (validUntil <= timestamp); } else { checkValidity = true; // never skip check if the cache is empty } @@ -240,6 +243,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -251,6 +255,7 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); From 18c9baae745cfc0af5ef78665e1817949dd2ec8b Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:32:56 +0100 Subject: [PATCH 391/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. 
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..c72c6b4d0ff7f 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -33,6 +33,7 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -176,6 +177,11 @@ auto getOrbitResetTime(o2::pmr::vector const& v) -> Long64_t return (*ctp)[0]; }; +bool isOnlineRun(DataTakingContext const& dtc) +{ + return dtc.deploymentMode == DeploymentMode::OnlineAUX || dtc.deploymentMode == DeploymentMode::OnlineDDS || dtc.deploymentMode == DeploymentMode::OnlineECS; +} + auto populateCacheWith(std::shared_ptr const& helper, int64_t timestamp, TimingInfo& timingInfo, @@ -186,6 +192,8 @@ auto populateCacheWith(std::shared_ptr const& helper, int objCnt = -1; // We use the timeslice, so that we hook into the same interval as the rest of the // callback. + static bool isOnline = isOnlineRun(dtc); + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { @@ -217,7 +225,9 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) && (isOnline || (validUntil <= timestamp)); } else { checkValidity = true; // never skip check if the cache is empty } @@ -240,6 +250,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -251,6 +262,7 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 
0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); From d7ed1a3bc07b57f22bb16fa167a60070da97e09f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Mar 2024 15:44:42 +0100 Subject: [PATCH 392/726] DPL: Move DefaultsHelpers to Framework/Foundation This allows non-DPL code to use the same helpers --- CCDB/src/CcdbApi.cxx | 1 + Framework/CCDBSupport/src/CCDBHelpers.cxx | 1 + Framework/Core/CMakeLists.txt | 1 - .../include/Framework/DataTakingContext.h | 13 +++-------- Framework/Core/src/CallbacksPolicy.cxx | 1 + Framework/Core/src/CommonServices.cxx | 1 + Framework/Core/src/RateLimiter.cxx | 1 + Framework/Core/src/WorkflowHelpers.cxx | 1 + Framework/Core/src/runDataProcessing.cxx | 1 + Framework/Foundation/CMakeLists.txt | 1 + .../include/Framework/DefaultsHelpers.h | 0 .../include/Framework/DeploymentMode.h | 22 +++++++++++++++++++ .../src/DefaultsHelpers.cxx | 2 +- 13 files changed, 34 insertions(+), 12 deletions(-) rename Framework/{Core => Foundation}/include/Framework/DefaultsHelpers.h (100%) create mode 100644 Framework/Foundation/include/Framework/DeploymentMode.h rename Framework/{Core => Foundation}/src/DefaultsHelpers.cxx (98%) diff --git a/CCDB/src/CcdbApi.cxx b/CCDB/src/CcdbApi.cxx index 0661eff071e46..fd73305542c26 100644 --- a/CCDB/src/CcdbApi.cxx +++ b/CCDB/src/CcdbApi.cxx @@ -22,6 +22,7 @@ #include "CommonUtils/MemFileHelper.h" #include "Framework/DefaultsHelpers.h" #include "Framework/DataTakingContext.h" +#include "Framework/DeploymentMode.h" #include #include #include diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..415f7b4218261 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -17,6 +17,7 @@ #include "Framework/DataTakingContext.h" #include "Framework/RawDeviceService.h" #include "Framework/DataSpecUtils.h" +#include "Framework/DeploymentMode.h" #include "CCDB/CcdbApi.h" #include "CommonConstants/LHCConstants.h" #include "Framework/Signpost.h" diff --git a/Framework/Core/CMakeLists.txt b/Framework/Core/CMakeLists.txt index 05bcb1bd53083..9953f13cf5e10 100644 --- a/Framework/Core/CMakeLists.txt +++ b/Framework/Core/CMakeLists.txt @@ -43,7 +43,6 @@ o2_add_library(Framework src/DataSender.cxx src/DataProcessingStats.cxx src/DataProcessingStates.cxx - src/DefaultsHelpers.cxx src/DomainInfoHeader.cxx src/ProcessingPoliciesHelpers.cxx src/ConfigParamStore.cxx diff --git a/Framework/Core/include/Framework/DataTakingContext.h b/Framework/Core/include/Framework/DataTakingContext.h index 05bf8f34ad74d..2ae298de9f5a3 100644 --- a/Framework/Core/include/Framework/DataTakingContext.h +++ b/Framework/Core/include/Framework/DataTakingContext.h @@ -12,20 +12,13 @@ #define O2_FRAMEWORK_DATATAKINGCONTEXT_H_ #include "Framework/ServiceHandle.h" + #include #include namespace o2::framework { - -enum struct DeploymentMode { - Local, // if nothing special is specified - OnlineECS, // Running online (sync processing) a P2 on FLP steered by ECS - OnlineDDS, // Running online (sync processing) a P2 on EPN steered by DDS/ODC - OnlineAUX, // Running online (sync processing) a P2 as auxiliary process - Grid, // Running as GRID job with Alien job id - FST // Running 8 GPU FST on EPNs (ALICE_O2_FST=1 set) -}; +enum struct 
DeploymentMode; struct DataTakingContext { constexpr static ServiceKind service_kind = ServiceKind::Stream; @@ -46,7 +39,7 @@ struct DataTakingContext { bool forcedRaw{false}; /// Where we thing this is running - DeploymentMode deploymentMode{DeploymentMode::Local}; + DeploymentMode deploymentMode{}; }; } // namespace o2::framework diff --git a/Framework/Core/src/CallbacksPolicy.cxx b/Framework/Core/src/CallbacksPolicy.cxx index 9be04cdef290d..98211f3c2d297 100644 --- a/Framework/Core/src/CallbacksPolicy.cxx +++ b/Framework/Core/src/CallbacksPolicy.cxx @@ -17,6 +17,7 @@ #include "Framework/CommonServices.h" #include "Framework/DataTakingContext.h" #include "Framework/DefaultsHelpers.h" +#include "Framework/DeploymentMode.h" #include #include diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index a0f7fcdc88201..803c467457b9a 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -29,6 +29,7 @@ #include "Framework/CommonMessageBackends.h" #include "Framework/DanglingContext.h" #include "Framework/DataProcessingHelpers.h" +#include "Framework/DeploymentMode.h" #include "InputRouteHelpers.h" #include "Framework/EndOfStreamContext.h" #include "Framework/RawDeviceService.h" diff --git a/Framework/Core/src/RateLimiter.cxx b/Framework/Core/src/RateLimiter.cxx index 5f6ff24adfd7c..c242880b7e523 100644 --- a/Framework/Core/src/RateLimiter.cxx +++ b/Framework/Core/src/RateLimiter.cxx @@ -16,6 +16,7 @@ #include "Framework/DataTakingContext.h" #include "Framework/DeviceState.h" #include "Framework/DeviceContext.h" +#include "Framework/DeploymentMode.h" #include #include #include diff --git a/Framework/Core/src/WorkflowHelpers.cxx b/Framework/Core/src/WorkflowHelpers.cxx index 6b4ad35f826be..52a98f5d9f810 100644 --- a/Framework/Core/src/WorkflowHelpers.cxx +++ b/Framework/Core/src/WorkflowHelpers.cxx @@ -24,6 +24,7 @@ #include "Framework/Plugins.h" #include "Framework/DataTakingContext.h" #include "Framework/DefaultsHelpers.h" +#include "Framework/DeploymentMode.h" #include "Headers/DataHeader.h" #include diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 64c034ebb8533..59ae3a2a167f9 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -10,6 +10,7 @@ // or submit itself to any jurisdiction. 
#define BOOST_BIND_GLOBAL_PLACEHOLDERS #include +#include "Framework/DeploymentMode.h" #include "Framework/BoostOptionsRetriever.h" #include "Framework/CallbacksPolicy.h" #include "Framework/ChannelConfigurationPolicy.h" diff --git a/Framework/Foundation/CMakeLists.txt b/Framework/Foundation/CMakeLists.txt index bcde0f7a69f34..77e2fac4d2c10 100644 --- a/Framework/Foundation/CMakeLists.txt +++ b/Framework/Foundation/CMakeLists.txt @@ -16,6 +16,7 @@ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/include/Framework o2_add_library(FrameworkFoundation SOURCES src/RuntimeError.cxx src/Signpost.cxx + src/DefaultsHelpers.cxx TARGETVARNAME targetName PUBLIC_LINK_LIBRARIES O2::FrameworkFoundation3rdparty ) diff --git a/Framework/Core/include/Framework/DefaultsHelpers.h b/Framework/Foundation/include/Framework/DefaultsHelpers.h similarity index 100% rename from Framework/Core/include/Framework/DefaultsHelpers.h rename to Framework/Foundation/include/Framework/DefaultsHelpers.h diff --git a/Framework/Foundation/include/Framework/DeploymentMode.h b/Framework/Foundation/include/Framework/DeploymentMode.h new file mode 100644 index 0000000000000..194e0fa5a807b --- /dev/null +++ b/Framework/Foundation/include/Framework/DeploymentMode.h @@ -0,0 +1,22 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +namespace o2::framework +{ +enum struct DeploymentMode { + Local, // if nothing special is specified + OnlineECS, // Running online (sync processing) a P2 on FLP steered by ECS + OnlineDDS, // Running online (sync processing) a P2 on EPN steered by DDS/ODC + OnlineAUX, // Running online (sync processing) a P2 as auxiliary process + Grid, // Running as GRID job with Alien job id + FST // Running 8 GPU FST on EPNs (ALICE_O2_FST=1 set) +}; +} diff --git a/Framework/Core/src/DefaultsHelpers.cxx b/Framework/Foundation/src/DefaultsHelpers.cxx similarity index 98% rename from Framework/Core/src/DefaultsHelpers.cxx rename to Framework/Foundation/src/DefaultsHelpers.cxx index 88956b93a855d..658e7a2500d25 100644 --- a/Framework/Core/src/DefaultsHelpers.cxx +++ b/Framework/Foundation/src/DefaultsHelpers.cxx @@ -10,7 +10,7 @@ // or submit itself to any jurisdiction. #include "Framework/DefaultsHelpers.h" -#include "Framework/DataTakingContext.h" +#include "Framework/DeploymentMode.h" #include #include #include From f355f45f64aac4121d0a32f3971d652e8e0565ce Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:32:56 +0100 Subject: [PATCH 393/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. 
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 415f7b4218261..515e503bd77c8 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -34,6 +34,7 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -177,6 +178,11 @@ auto getOrbitResetTime(o2::pmr::vector const& v) -> Long64_t return (*ctp)[0]; }; +bool isOnlineRun(DataTakingContext const& dtc) +{ + return dtc.deploymentMode == DeploymentMode::OnlineAUX || dtc.deploymentMode == DeploymentMode::OnlineDDS || dtc.deploymentMode == DeploymentMode::OnlineECS; +} + auto populateCacheWith(std::shared_ptr const& helper, int64_t timestamp, TimingInfo& timingInfo, @@ -187,6 +193,8 @@ auto populateCacheWith(std::shared_ptr const& helper, int objCnt = -1; // We use the timeslice, so that we hook into the same interval as the rest of the // callback. + static bool isOnline = isOnlineRun(dtc); + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { @@ -218,7 +226,9 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) && (isOnline || (validUntil <= timestamp)); } else { checkValidity = true; // never skip check if the cache is empty } @@ -241,6 +251,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -252,6 +263,7 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 
0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); From 956b26710723080d559b30835eb15cadaba0d503 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:32:56 +0100 Subject: [PATCH 394/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..c72c6b4d0ff7f 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -33,6 +33,7 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -176,6 +177,11 @@ auto getOrbitResetTime(o2::pmr::vector const& v) -> Long64_t return (*ctp)[0]; }; +bool isOnlineRun(DataTakingContext const& dtc) +{ + return dtc.deploymentMode == DeploymentMode::OnlineAUX || dtc.deploymentMode == DeploymentMode::OnlineDDS || dtc.deploymentMode == DeploymentMode::OnlineECS; +} + auto populateCacheWith(std::shared_ptr const& helper, int64_t timestamp, TimingInfo& timingInfo, @@ -186,6 +192,8 @@ auto populateCacheWith(std::shared_ptr const& helper, int objCnt = -1; // We use the timeslice, so that we hook into the same interval as the rest of the // callback. + static bool isOnline = isOnlineRun(dtc); + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { @@ -217,7 +225,9 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) && (isOnline || (validUntil <= timestamp)); } else { checkValidity = true; // never skip check if the cache is empty } @@ -240,6 +250,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 
0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -251,6 +262,7 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); From db18c53c2659aa4d755bc41a7aeb392f27fedf35 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 09:27:26 +0100 Subject: [PATCH 395/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..f1d948e184e13 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -33,6 +33,8 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; + size_t cachePopulatedAt = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -176,6 +178,11 @@ auto getOrbitResetTime(o2::pmr::vector const& v) -> Long64_t return (*ctp)[0]; }; +bool isOnlineRun(DataTakingContext const& dtc) +{ + return dtc.deploymentMode == DeploymentMode::OnlineAUX || dtc.deploymentMode == DeploymentMode::OnlineDDS || dtc.deploymentMode == DeploymentMode::OnlineECS; +} + auto populateCacheWith(std::shared_ptr const& helper, int64_t timestamp, TimingInfo& timingInfo, @@ -186,6 +193,8 @@ auto populateCacheWith(std::shared_ptr const& helper, int objCnt = -1; // We use the timeslice, so that we hook into the same interval as the rest of the // callback. + static bool isOnline = isOnlineRun(dtc); + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { @@ -217,7 +226,14 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + // When the cache was populated. If the cache was populated after the timestamp, we need to check validity. + uint64_t cachePopulatedAt = url2uuid->second.cachePopulatedAt; + // If timestamp is before the time the element was cached or after the claimed validity, we need to check validity, again + // when online. 
+ bool cacheExipred = (validUntil <= timestamp) && (timestamp <= cachePopulatedAt); + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) && (isOnline || cacheExipred); } else { checkValidity = true; // never skip check if the cache is empty } @@ -240,6 +256,8 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); + helper->mapURL2UUID[path].cachePopulatedAt = timestamp; helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -251,6 +269,8 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); + helper->mapURL2UUID[path].cachePopulatedAt = timestamp; helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); From db34e4303c9d6c28a2875a3472a19e4e2c57cf4c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 09:35:26 +0100 Subject: [PATCH 396/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. --- Framework/CCDBSupport/src/CCDBHelpers.cxx | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..04dd0b13a7434 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -33,6 +33,8 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; + size_t cachePopulatedAt = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -176,6 +178,11 @@ auto getOrbitResetTime(o2::pmr::vector const& v) -> Long64_t return (*ctp)[0]; }; +bool isOnlineRun(DataTakingContext const& dtc) +{ + return dtc.deploymentMode == DeploymentMode::OnlineAUX || dtc.deploymentMode == DeploymentMode::OnlineDDS || dtc.deploymentMode == DeploymentMode::OnlineECS; +} + auto populateCacheWith(std::shared_ptr const& helper, int64_t timestamp, TimingInfo& timingInfo, @@ -186,6 +193,8 @@ auto populateCacheWith(std::shared_ptr const& helper, int objCnt = -1; // We use the timeslice, so that we hook into the same interval as the rest of the // callback. 
+ static bool isOnline = isOnlineRun(dtc); + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { @@ -217,7 +226,14 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + // When the cache was populated. If the cache was populated after the timestamp, we need to check validity. + uint64_t cachePopulatedAt = url2uuid->second.cachePopulatedAt; + // If timestamp is before the time the element was cached or after the claimed validity, we need to check validity, again + // when online. + bool cacheExipred = (validUntil <= timestamp) && (timestamp <= cachePopulatedAt); + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) && (isOnline || cacheExipred); } else { checkValidity = true; // never skip check if the cache is empty } @@ -240,6 +256,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cachePopulatedAt = timestamp; helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -251,6 +268,7 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cachePopulatedAt = timestamp; helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -260,6 +278,9 @@ auto populateCacheWith(std::shared_ptr const& helper, // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); continue; + } else { + // Only once the etag is actually used, we get the information on how long the object is valid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); } } // cached object is fine From 8ca9c1783c09c3cf2424475f26fd1d6d7284be2f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 09:35:26 +0100 Subject: [PATCH 397/726] DPL CCDB: handle the Cache-Valid-Until header This will not recheck the etag and consider the cache valid until the timestamp in milliseconds in the Cache-Valid-Until is not passed by the data timestamp. 
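[Editor's note] Put differently — and this is a reading of the comment in the hunk below, not the literal expression used there — the cached object is assumed to cover data timestamps between the moment it was cached and the Cache-Valid-Until value; outside that window the etag has to be checked again, while online runs always re-check at the chRate cadence.

#include <cstdint>

// Hypothetical helper: "covered" means no etag re-validation is needed for this
// data timestamp. cachePopulatedAt is the data timestamp at which the object was
// fetched, cacheValidUntil the millisecond timestamp taken from the header.
bool coveredByCache(uint64_t dataTimestampMs, uint64_t cachePopulatedAt, uint64_t cacheValidUntil)
{
  return dataTimestampMs >= cachePopulatedAt && dataTimestampMs < cacheValidUntil;
}
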
--- Framework/CCDBSupport/src/CCDBHelpers.cxx | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/Framework/CCDBSupport/src/CCDBHelpers.cxx b/Framework/CCDBSupport/src/CCDBHelpers.cxx index 837b20cf4262b..37c4affe2877e 100644 --- a/Framework/CCDBSupport/src/CCDBHelpers.cxx +++ b/Framework/CCDBSupport/src/CCDBHelpers.cxx @@ -33,6 +33,8 @@ namespace o2::framework struct CCDBFetcherHelper { struct CCDBCacheInfo { std::string etag; + size_t cacheValidUntil = 0; + size_t cachePopulatedAt = 0; size_t cacheMiss = 0; size_t cacheHit = 0; size_t minSize = -1ULL; @@ -176,6 +178,11 @@ auto getOrbitResetTime(o2::pmr::vector const& v) -> Long64_t return (*ctp)[0]; }; +bool isOnlineRun(DataTakingContext const& dtc) +{ + return dtc.deploymentMode == DeploymentMode::OnlineAUX || dtc.deploymentMode == DeploymentMode::OnlineDDS || dtc.deploymentMode == DeploymentMode::OnlineECS; +} + auto populateCacheWith(std::shared_ptr const& helper, int64_t timestamp, TimingInfo& timingInfo, @@ -186,6 +193,8 @@ auto populateCacheWith(std::shared_ptr const& helper, int objCnt = -1; // We use the timeslice, so that we hook into the same interval as the rest of the // callback. + static bool isOnline = isOnlineRun(dtc); + auto sid = _o2_signpost_id_t{(int64_t)timingInfo.timeslice}; O2_SIGNPOST_START(ccdb, sid, "populateCacheWith", "Starting to populate cache with CCDB objects"); for (auto& route : helper->routes) { @@ -217,7 +226,14 @@ auto populateCacheWith(std::shared_ptr const& helper, const auto url2uuid = helper->mapURL2UUID.find(path); if (url2uuid != helper->mapURL2UUID.end()) { etag = url2uuid->second.etag; - checkValidity = std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate; + // We check validity every chRate timeslices or if the cache is expired + uint64_t validUntil = url2uuid->second.cacheValidUntil; + // When the cache was populated. If the cache was populated after the timestamp, we need to check validity. + uint64_t cachePopulatedAt = url2uuid->second.cachePopulatedAt; + // If timestamp is before the time the element was cached or after the claimed validity, we need to check validity, again + // when online. 
+ bool cacheExpired = (validUntil <= timestamp) && (timestamp <= cachePopulatedAt); + checkValidity = (std::abs(int(timingInfo.tfCounter - url2uuid->second.lastCheckedTF)) >= chRate) && (isOnline || cacheExpired); } else { checkValidity = true; // never skip check if the cache is empty } @@ -240,6 +256,7 @@ auto populateCacheWith(std::shared_ptr const& helper, helper->mapURL2UUID[path].lastCheckedTF = timingInfo.tfCounter; if (etag.empty()) { helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cachePopulatedAt = timestamp; helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -251,6 +268,7 @@ auto populateCacheWith(std::shared_ptr const& helper, if (v.size()) { // but should be overridden by fresh object // somewhere here pruneFromCache should be called helper->mapURL2UUID[path].etag = headers["ETag"]; // update uuid + helper->mapURL2UUID[path].cachePopulatedAt = timestamp; helper->mapURL2UUID[path].cacheMiss++; helper->mapURL2UUID[path].minSize = std::min(v.size(), helper->mapURL2UUID[path].minSize); helper->mapURL2UUID[path].maxSize = std::max(v.size(), helper->mapURL2UUID[path].maxSize); @@ -260,6 +278,9 @@ auto populateCacheWith(std::shared_ptr const& helper, // one could modify the adoptContainer to take optional old cacheID to clean: // mapURL2DPLCache[URL] = ctx.outputs().adoptContainer(output, std::move(outputBuffer), DataAllocator::CacheStrategy::Always, mapURL2DPLCache[URL]); continue; + } else { + // Only once the etag is actually used, we get the information on how long the object is valid + helper->mapURL2UUID[path].cacheValidUntil = headers["Cache-Valid-Until"].empty() ? 0 : std::stoul(headers["Cache-Valid-Until"]); } } // cached object is fine From b12b5bbfca6f4f1cf126b940415b69b9de096f5a Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:48:10 +0100 Subject: [PATCH 398/726] DPL: add helper to get InitContext when using adaptStateful Needed to have callbacks which can be executed in the global context. 
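[Editor's note] For context, a self-contained sketch (not the DPL code itself; names are illustrative) of the dispatch pattern this specialization extends: a traits class decides how each lambda parameter is extracted from the context, and specializing it for the context type itself lets a callback simply take the whole InitContext. With the specialization added below, an adaptStateful lambda can ask for InitContext& alongside other services, which the dummy-sink change later in this series relies on.

#include <iostream>

struct InitContextLike {
  int value = 42;
};

template <typename T>
struct ElementTraits;

// Specializing the traits for the context type itself hands the context
// straight through to the callback parameter.
template <>
struct ElementTraits<InitContextLike> {
  static InitContextLike& get(InitContextLike& ctx) { return ctx; }
};

// Generic adapter: each parameter of f is produced by the matching traits.
template <typename... Args>
void invokeWith(InitContextLike& ctx, void (*f)(Args&...))
{
  f(ElementTraits<Args>::get(ctx)...);
}

void myInit(InitContextLike& ctx)
{
  std::cout << "got context with value " << ctx.value << "\n";
}

int main()
{
  InitContextLike ctx;
  invokeWith(ctx, &myInit);
}
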
--- Framework/Core/include/Framework/AlgorithmSpec.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Framework/Core/include/Framework/AlgorithmSpec.h b/Framework/Core/include/Framework/AlgorithmSpec.h index e98a3abefbce1..39d0582a0f1ee 100644 --- a/Framework/Core/include/Framework/AlgorithmSpec.h +++ b/Framework/Core/include/Framework/AlgorithmSpec.h @@ -164,6 +164,14 @@ struct ContextElementTraits { } }; +template <> +struct ContextElementTraits { + static InitContext& get(InitContext& ctx) + { + return ctx; + } +}; + template struct ContextElementTraits const> { static Input get(ProcessingContext& ctx) From b00c9d0e19028bfb92625a2c0a88fefcbced5b3c Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:46:27 +0100 Subject: [PATCH 399/726] [O2-4578] DPL: attempt at removing a race condition in the rate limiting --- Framework/Core/src/CommonDataProcessors.cxx | 48 +++++++++++++++------ 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/Framework/Core/src/CommonDataProcessors.cxx b/Framework/Core/src/CommonDataProcessors.cxx index 0cf7224c25ac8..423627ec4f7a7 100644 --- a/Framework/Core/src/CommonDataProcessors.cxx +++ b/Framework/Core/src/CommonDataProcessors.cxx @@ -568,29 +568,49 @@ DataProcessorSpec CommonDataProcessors::getGlobalFairMQSink(std::vectordata; + auto& timesliceIndex = services->get(); + auto* device = services->get().device(); + auto channel = device->GetChannels().find("metric-feedback"); + auto oldestPossingTimeslice = timesliceIndex.getOldestPossibleOutput().timeslice.value; + if (channel == device->GetChannels().end()) { + return; + } + fair::mq::MessagePtr payload(device->NewMessage()); + payload->Rebuild(&oldestPossingTimeslice, sizeof(int64_t), nullptr, nullptr); + auto* consumed = (size_t*)malloc(sizeof(size_t)); + *consumed = oldestPossingTimeslice; + + int64_t result = channel->second[0].Send(payload, 100); + // If the sending worked, we do not retry. + if (result != 0) { + // If the sending did not work, we keep trying until it actually works. + // This will schedule other tasks in the queue, so the processing of the + // data will still happen. 
+      uv_async_send(async); +    } else { +      lastTimeslice = *consumed; +    } +} + DataProcessorSpec CommonDataProcessors::getDummySink(std::vector const& danglingOutputInputs, std::string rateLimitingChannelConfig) { return DataProcessorSpec{ .name = "internal-dpl-injected-dummy-sink", .inputs = danglingOutputInputs, - .algorithm = AlgorithmSpec{adaptStateful([](CallbackService& callbacks) { + .algorithm = AlgorithmSpec{adaptStateful([](CallbackService& callbacks, DeviceState& deviceState, InitContext& ic) { + static uv_async_t async; + // The callback will only have access to the services we stash in async.data just below. + async.data = new ServiceRegistryRef{ic.services()}; + uv_async_init(deviceState.loop, &async, retryMetricCallback); auto domainInfoUpdated = [](ServiceRegistryRef services, size_t timeslice, ChannelIndex channelIndex) { LOGP(debug, "Domain info updated with timeslice {}", timeslice); - static size_t lastTimeslice = -1; + retryMetricCallback(&async); auto& timesliceIndex = services.get(); - auto device = services.get().device(); - auto channel = device->GetChannels().find("metric-feedback"); auto oldestPossingTimeslice = timesliceIndex.getOldestPossibleOutput().timeslice.value; - if (channel != device->GetChannels().end()) { - fair::mq::MessagePtr payload(device->NewMessage()); - size_t* consumed = (size_t*)malloc(sizeof(size_t)); - *consumed = oldestPossingTimeslice; - if (*consumed != lastTimeslice) { - payload->Rebuild(consumed, sizeof(int64_t), nullptr, nullptr); - channel->second[0].Send(payload); - lastTimeslice = *consumed; - } - } auto& stats = services.get(); stats.updateStats({(int)ProcessingStatsId::CONSUMED_TIMEFRAMES, DataProcessingStats::Op::Set, (int64_t)oldestPossingTimeslice}); }; From a622a4836fa9378c3d6d90a43c72874cc76d6dbb Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:01:11 +0100 Subject: [PATCH 400/726] DPL: do not spin when stdout / stderr from child is not connected --- Framework/Core/src/runDataProcessing.cxx | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 64c034ebb8533..ce1b2647b30af 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -422,11 +422,20 @@ void log_callback(uv_poll_t* handle, int status, int events) if (status < 0) { info.active = false; } + // We are in writeable mode. This means that something connected and + // therefore we can read from the child. Switch to readable mode. + if (events & UV_WRITABLE) { + uv_poll_start(handle, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, log_callback); + return; + } if (events & UV_READABLE) { getChildData(logContext->fd, info); } if (events & UV_DISCONNECT) { info.active = false; + // This makes sure we do not get any more events until it reconnects (which + // does not happen in the current implementation). + uv_poll_start(handle, UV_WRITABLE, log_callback); } } @@ -606,7 +615,10 @@ void handleChildrenStdio(uv_loop_t* loop, auto handle = handles.back(); handle->data = context; uv_poll_init(loop, handle, fd); - uv_poll_start(handle, UV_READABLE, log_callback); + // We need to start with the poller in writeable mode so that we + // are not needlessly woken up while the child is not yet + // connected. 
+      uv_poll_start(handle, UV_WRITABLE, log_callback); }; addPoller(i, childstdout[0]); From 771983da98e48ca2c3173a26526c41fa37ff5a23 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:01:11 +0100 Subject: [PATCH 401/726] DPL: improve signposts when processing messages --- Framework/Core/src/runDataProcessing.cxx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index ce1b2647b30af..8409809bd87bb 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -838,7 +838,7 @@ void processChildrenOutput(DriverInfo& driverInfo, std::match_results match; ParsedMetricMatch metricMatch; ParsedConfigMatch configMatch; - const std::string delimiter("\n"); + constexpr auto delimiter = "\n"; for (size_t di = 0, de = infos.size(); di < de; ++di) { DeviceInfo& info = infos[di]; @@ -851,7 +851,7 @@ void processChildrenOutput(DriverInfo& driverInfo, } O2_SIGNPOST_ID_FROM_POINTER(sid, driver, &info); - O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by %{xcode:pid}d", info.pid); + O2_SIGNPOST_START(driver, sid, "bytes_processed", "Starting processing of logs from pid %{public}s (%{xcode:pid}d)", spec.name.data(), info.pid); std::string_view s = info.unprinted; size_t pos = 0; @@ -894,12 +894,12 @@ void processChildrenOutput(DriverInfo& driverInfo, info.firstSevereError = token; } } - s.remove_prefix(pos + delimiter.length()); + s.remove_prefix(pos + strlen(delimiter)); } size_t oldSize = info.unprinted.size(); info.unprinted = std::string(s); int64_t bytesProcessed = oldSize - info.unprinted.size(); - O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by %{xcode:network-size-in-bytes}" PRIi64, bytesProcessed); + O2_SIGNPOST_END(driver, sid, "bytes_processed", "Done processing %{xcode:network-size-in-bytes}" PRIi64 " bytes by %{public}s", bytesProcessed, spec.name.data()); } } From 02d80f8bff296e54964fd499911b4a97b5679a76 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 6 Mar 2024 08:52:44 +0100 Subject: [PATCH 402/726] DPL: improve processing of logs from devices - Log processing now happens in an asynchronous action which is performed only when some logs are actually read. All logs are read in any case, because libuv will coalesce those callbacks into one. - Improved signposts to print out what the driver event loop is doing. - Improved signposts to debug the GUI with a separate stream. 
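For readers not familiar with libuv, the pattern relied upon here is: the pollers only flag that something happened and wake an async handle with uv_async_send(), while the actual parsing runs once per loop iteration in the async callback, because libuv coalesces multiple sends issued before the loop turns. A stand-alone sketch of that pattern, outside of DPL and polling stdin instead of a child pipe purely for illustration:

    #include <uv.h>
    #include <unistd.h>

    struct LogState {
      bool hasData = false;
    };

    // Heavy work happens here, once per wakeup, even if the poller fired many times.
    void process_logs(uv_async_t* async)
    {
      auto* state = static_cast<LogState*>(async->data);
      if (!state->hasData) {
        return;
      }
      state->hasData = false;
      // ...parse whatever was accumulated from the children here...
    }

    // Cheap callback: only record that something happened and wake the async handle.
    void on_readable(uv_poll_t* poller, int /*status*/, int /*events*/)
    {
      auto* async = static_cast<uv_async_t*>(poller->data);
      static_cast<LogState*>(async->data)->hasData = true;
      uv_async_send(async); // several sends before the loop runs again are coalesced
    }

    int main()
    {
      uv_loop_t* loop = uv_default_loop();
      static LogState state;
      static uv_async_t async;
      async.data = &state;
      uv_async_init(loop, &async, process_logs);
      static uv_poll_t poller;
      poller.data = &async;
      uv_poll_init(loop, &poller, STDIN_FILENO); // illustrative fd, the driver uses the child pipes
      uv_poll_start(&poller, UV_READABLE, on_readable);
      return uv_run(loop, UV_RUN_DEFAULT);
    }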
--- .../include/Framework/GuiCallbackContext.h | 1 + Framework/Core/src/DriverServerContext.h | 1 + Framework/Core/src/runDataProcessing.cxx | 92 +++++++++++++++---- 3 files changed, 74 insertions(+), 20 deletions(-) diff --git a/Framework/Core/include/Framework/GuiCallbackContext.h b/Framework/Core/include/Framework/GuiCallbackContext.h index 9c0f21f3aec9c..1dbb6ec30e849 100644 --- a/Framework/Core/include/Framework/GuiCallbackContext.h +++ b/Framework/Core/include/Framework/GuiCallbackContext.h @@ -44,6 +44,7 @@ struct GuiCallbackContext { void* window = nullptr; bool* guiQuitRequested = nullptr; bool* allChildrenGone = nullptr; + bool* guiTimerExpired = nullptr; std::function callback; std::set renderers; }; diff --git a/Framework/Core/src/DriverServerContext.h b/Framework/Core/src/DriverServerContext.h index c49ab460e4c6a..4d25c47bd172b 100644 --- a/Framework/Core/src/DriverServerContext.h +++ b/Framework/Core/src/DriverServerContext.h @@ -52,6 +52,7 @@ struct DriverServerContext { /// The handle to the server component of the /// driver. uv_tcp_t serverHandle; + uv_async_t* asyncLogProcessing = nullptr; }; } // namespace o2::framework diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 64c034ebb8533..6b424ee6a4261 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -176,6 +176,7 @@ std::vector gDeviceMetricsInfos; bpo::options_description gHiddenDeviceOptions("Hidden child options"); O2_DECLARE_DYNAMIC_LOG(driver); +O2_DECLARE_DYNAMIC_LOG(gui); void doBoostException(boost::exception& e, const char*); void doDPLException(o2::framework::RuntimeErrorRef& ref, char const*); @@ -409,14 +410,14 @@ void spawnRemoteDevice(uv_loop_t* loop, struct DeviceLogContext { int fd; int index; - uv_loop_t* loop; - std::vector* infos; + DriverServerContext* serverContext; }; void log_callback(uv_poll_t* handle, int status, int events) { + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, handle->loop); auto* logContext = reinterpret_cast(handle->data); - std::vector* infos = logContext->infos; + std::vector* infos = logContext->serverContext->infos; DeviceInfo& info = infos->at(logContext->index); if (status < 0) { @@ -428,16 +429,26 @@ void log_callback(uv_poll_t* handle, int status, int events) if (events & UV_DISCONNECT) { info.active = false; } + O2_SIGNPOST_EVENT_EMIT(driver, sid, "loop", "log_callback invoked by poller for device %{xcode:pid}d which is %{public}s%{public}s", + info.pid, info.active ? "active" : "inactive", + info.active ? " and still has data to read." 
: "."); + if (info.active == false) { + uv_poll_stop(handle); + } + uv_async_send(logContext->serverContext->asyncLogProcessing); } void close_websocket(uv_handle_t* handle) { - LOG(debug) << "Handle is being closed"; + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, handle->loop); + O2_SIGNPOST_EVENT_EMIT(driver, sid, "mainloop", "close_websocket"); delete (WSDPLHandler*)handle->data; } void websocket_callback(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) { + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, stream->loop); + O2_SIGNPOST_EVENT_EMIT(driver, sid, "mainloop", "websocket_callback"); auto* handler = (WSDPLHandler*)stream->data; if (nread == 0) { return; @@ -481,6 +492,8 @@ static void my_alloc_cb(uv_handle_t*, size_t suggested_size, uv_buf_t* buf) /// A callback for the rest engine void ws_connect_callback(uv_stream_t* server, int status) { + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, server->loop); + O2_SIGNPOST_EVENT_EMIT(driver, sid, "mainloop", "websocket_callback"); auto* serverContext = reinterpret_cast(server->data); if (status < 0) { LOGF(error, "New connection error %s\n", uv_strerror(status)); @@ -574,9 +587,8 @@ void handleSignals() } } -void handleChildrenStdio(uv_loop_t* loop, +void handleChildrenStdio(DriverServerContext* serverContext, std::string const& forwardedStdin, - std::vector& deviceInfos, std::vector& childFds, std::vector& handles) { @@ -586,7 +598,7 @@ void handleChildrenStdio(uv_loop_t* loop, auto* req = (uv_work_t*)malloc(sizeof(uv_work_t)); req->data = new StreamConfigContext{forwardedStdin, childstdin[1]}; - uv_queue_work(loop, req, stream_config, nullptr); + uv_queue_work(serverContext->loop, req, stream_config, nullptr); // Setting them to non-blocking to avoid haing the driver hang when // reading from child. @@ -596,16 +608,15 @@ void handleChildrenStdio(uv_loop_t* loop, } /// Add pollers for stdout and stderr - auto addPoller = [&handles, &deviceInfos, &loop](int index, int fd) { + auto addPoller = [&handles, &serverContext](int index, int fd) { auto* context = new DeviceLogContext{}; context->index = index; context->fd = fd; - context->loop = loop; - context->infos = &deviceInfos; + context->serverContext = serverContext; handles.push_back((uv_poll_t*)malloc(sizeof(uv_poll_t))); auto handle = handles.back(); handle->data = context; - uv_poll_init(loop, handle, fd); + uv_poll_init(serverContext->loop, handle, fd); uv_poll_start(handle, UV_READABLE, log_callback); }; @@ -810,7 +821,8 @@ void spawnDevice(uv_loop_t* loop, gDeviceMetricsInfos.emplace_back(DeviceMetricsInfo{}); } -void processChildrenOutput(DriverInfo& driverInfo, +void processChildrenOutput(uv_loop_t* loop, + DriverInfo& driverInfo, DeviceInfos& infos, DeviceSpecs const& specs, DeviceControls& controls) @@ -826,8 +838,8 @@ void processChildrenOutput(DriverInfo& driverInfo, std::match_results match; ParsedMetricMatch metricMatch; ParsedConfigMatch configMatch; - const std::string delimiter("\n"); + int processed = 0; for (size_t di = 0, de = infos.size(); di < de; ++di) { DeviceInfo& info = infos[di]; DeviceControl& control = controls[di]; @@ -837,6 +849,7 @@ void processChildrenOutput(DriverInfo& driverInfo, if (info.unprinted.empty()) { continue; } + processed++; O2_SIGNPOST_ID_FROM_POINTER(sid, driver, &info); O2_SIGNPOST_START(driver, sid, "bytes_processed", "bytes processed by %{xcode:pid}d", info.pid); @@ -846,7 +859,7 @@ void processChildrenOutput(DriverInfo& driverInfo, info.history.resize(info.historySize); info.historyLevel.resize(info.historySize); - while ((pos = 
s.find(delimiter)) != std::string::npos) { + while ((pos = s.find("\n")) != std::string::npos) { std::string_view token{s.substr(0, pos)}; auto logLevel = LogParsingHelpers::parseTokenLevel(token); @@ -882,13 +895,18 @@ void processChildrenOutput(DriverInfo& driverInfo, info.firstSevereError = token; } } - s.remove_prefix(pos + delimiter.length()); + // +1 is to skip the \n + s.remove_prefix(pos + 1); } size_t oldSize = info.unprinted.size(); info.unprinted = std::string(s); int64_t bytesProcessed = oldSize - info.unprinted.size(); O2_SIGNPOST_END(driver, sid, "bytes_processed", "bytes processed by %{xcode:network-size-in-bytes}" PRIi64, bytesProcessed); } + if (processed == 0) { + O2_SIGNPOST_ID_FROM_POINTER(lid, driver, loop); + O2_SIGNPOST_EVENT_EMIT(driver, lid, "mainloop", "processChildrenOutput invoked for nothing!"); + } } // Process all the sigchld which are pending @@ -1106,14 +1124,29 @@ void gui_callback(uv_timer_s* ctx) { auto* gui = reinterpret_cast(ctx->data); if (gui->plugin == nullptr) { + // The gui is not there. Why are we here? + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, ctx->loop); + O2_SIGNPOST_EVENT_EMIT_ERROR(driver, sid, "gui", "GUI timer callback invoked without a GUI plugin."); + uv_timer_stop(ctx); return; } + *gui->guiTimerExpired = true; + static int counter = 0; + if ((counter++ % 6000) == 0) { + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, ctx->loop); + O2_SIGNPOST_EVENT_EMIT(driver, sid, "gui", "The GUI callback got called %d times.", counter); + *gui->guiTimerExpired = false; + } + // One interval per GUI invocation, using the loop as anchor. + O2_SIGNPOST_ID_FROM_POINTER(sid, gui, ctx->loop); + O2_SIGNPOST_START(gui, sid, "gui", "gui_callback"); // New version which allows deferred closure of windows if (gui->plugin->supportsDeferredClose()) { // For now, there is nothing for which we want to defer the close // so if the flag is set, we simply exit if (*(gui->guiQuitRequested)) { + O2_SIGNPOST_END(gui, sid, "gui", "Quit requested by the GUI."); return; } void* draw_data = nullptr; @@ -1123,6 +1156,7 @@ void gui_callback(uv_timer_s* ctx) // if less than 15ms have passed reuse old frame if (frameLatency / 1000000 <= 15) { draw_data = gui->lastFrame; + O2_SIGNPOST_END(gui, sid, "gui", "Reusing old frame."); return; } // The result of the pollGUIPreRender is used to determine if we @@ -1148,6 +1182,7 @@ void gui_callback(uv_timer_s* ctx) if (frameLatency / 1000000 > 15) { if (!gui->plugin->pollGUIPreRender(gui->window, (float)frameLatency / 1000000000.0f)) { *(gui->guiQuitRequested) = true; + O2_SIGNPOST_END(gui, sid, "gui", "Reusing old frame."); return; } draw_data = gui->plugin->pollGUIRender(gui->callback); @@ -1163,6 +1198,7 @@ void gui_callback(uv_timer_s* ctx) gui->frameLast = frameStart; } } + O2_SIGNPOST_END(gui, sid, "gui", "Gui redrawn."); } /// Force single stepping of the children @@ -1429,12 +1465,14 @@ int runStateMachine(DataProcessorSpecs const& workflow, ServiceRegistryRef ref{serviceRegistry}; ref.registerService(ServiceRegistryHelpers::handleForService(devicesManager)); + bool guiTimerExpired = false; GuiCallbackContext guiContext; guiContext.plugin = debugGUI; guiContext.frameLast = uv_hrtime(); guiContext.frameLatency = &driverInfo.frameLatency; guiContext.frameCost = &driverInfo.frameCost; guiContext.guiQuitRequested = &guiQuitRequested; + guiContext.guiTimerExpired = &guiTimerExpired; // This is to make sure we can process metrics, commands, configuration // changes coming from websocket (or even via any standard uv_stream_t, I guess). 
@@ -1467,6 +1505,16 @@ int runStateMachine(DataProcessorSpecs const& workflow, metricDumpTimer.data = &serverContext; bool allChildrenGone = false; guiContext.allChildrenGone = &allChildrenGone; + O2_SIGNPOST_ID_FROM_POINTER(sid, driver, loop); + O2_SIGNPOST_START(driver, sid, "driver", "Starting driver loop"); + + // Async callback to process the output of the children, if needed. + serverContext.asyncLogProcessing = (uv_async_t*)malloc(sizeof(uv_async_t)); + serverContext.asyncLogProcessing->data = &serverContext; + uv_async_init(loop, serverContext.asyncLogProcessing, [](uv_async_t* handle) { + auto* context = (DriverServerContext*)handle->data; + processChildrenOutput(context->loop, *context->driver, *context->infos, *context->specs, *context->controls); + }); while (true) { // If control forced some transition on us, we push it to the queue. @@ -2046,7 +2094,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, } } handleSignals(); - handleChildrenStdio(loop, forwardedStdin.str(), infos, childFds, pollHandles); + handleChildrenStdio(&serverContext, forwardedStdin.str(), childFds, pollHandles); for (auto& callback : postScheduleCallbacks) { callback(serviceRegistry, {varmap}); } @@ -2067,6 +2115,12 @@ int runStateMachine(DataProcessorSpecs const& workflow, // any, so that we do not consume CPU time when the driver is // idle. devicesManager->flush(); + // We print the event loop for the gui only once every + // 6000 iterations (i.e. ~2 minutes). To avoid spamming, while still + // being able to see the event loop in case of a deadlock / systematic failure. + if (guiTimerExpired == false) { + O2_SIGNPOST_EVENT_EMIT(driver, sid, "mainloop", "Entering event loop with %{public}s", once ? "UV_RUN_ONCE" : "UV_RUN_NOWAIT"); + } uv_run(loop, once ? UV_RUN_ONCE : UV_RUN_NOWAIT); once = true; // Calculate what we should do next and eventually @@ -2099,9 +2153,6 @@ int runStateMachine(DataProcessorSpecs const& workflow, } else { driverInfo.states.push_back(DriverState::RUNNING); } - { - processChildrenOutput(driverInfo, infos, runningWorkflow.devices, controls); - } break; case DriverState::QUIT_REQUESTED: LOG(info) << "QUIT_REQUESTED"; @@ -2135,7 +2186,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, } sigchld_requested = false; driverInfo.sigchldRequested = false; - processChildrenOutput(driverInfo, infos, runningWorkflow.devices, controls); + processChildrenOutput(loop, driverInfo, infos, runningWorkflow.devices, controls); hasError = processSigChild(infos, runningWorkflow.devices); allChildrenGone = areAllChildrenGone(infos); bool canExit = checkIfCanExit(infos); @@ -2213,6 +2264,7 @@ int runStateMachine(DataProcessorSpecs const& workflow, driverInfo.states.push_back(DriverState::QUIT_REQUESTED); } } + O2_SIGNPOST_END(driver, sid, "driver", "End driver loop"); } // Print help From 37e4427c92e9dbc4bd3f6b6acefb9cd0570821e1 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 6 Mar 2024 09:22:12 +0100 Subject: [PATCH 403/726] DPL TestWorkflows: add example for two synchronized timers This reproduces the issue reported in https://its.cern.ch/jira/browse/O2-4328. 
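The crux of the reproducer added below is that both data processors subscribe to the very same timer matcher, so DPL has to decide how that timer is propagated between the two devices (this is what the "correctly propagate timers" commits further down address). As a sketch, if one actually wanted two independent timers rather than two synchronized ones, giving each device its own matcher avoids the question entirely; the "B1" description is just an illustrative choice:

    // Shared matcher, as in the reproducer below: both devices see the same timer.
    InputSpec shared{"x", "TIM", "A1", Lifetime::Timer};

    // Independent timers: distinct descriptions, so each device gets its own timer
    // and no forwarding between them is involved.
    InputSpec timerA{"x", "TIM", "A1", Lifetime::Timer};
    InputSpec timerB{"x", "TIM", "B1", Lifetime::Timer};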
--- Framework/TestWorkflows/CMakeLists.txt | 4 ++ Framework/TestWorkflows/src/o2TwoTimers.cxx | 61 +++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 Framework/TestWorkflows/src/o2TwoTimers.cxx diff --git a/Framework/TestWorkflows/CMakeLists.txt b/Framework/TestWorkflows/CMakeLists.txt index 848337200a4d2..8aaf01ff55dcf 100644 --- a/Framework/TestWorkflows/CMakeLists.txt +++ b/Framework/TestWorkflows/CMakeLists.txt @@ -33,6 +33,10 @@ o2_add_dpl_workflow(diamond-workflow SOURCES src/o2DiamondWorkflow.cxx COMPONENT_NAME TestWorkflows) +o2_add_dpl_workflow(two-timers + SOURCES src/o2TwoTimers.cxx + COMPONENT_NAME TestWorkflows) + o2_add_dpl_workflow(dummy-calibration-workflow SOURCES src/o2DummyCalibrationWorkflow.cxx COMPONENT_NAME TestWorkflows) diff --git a/Framework/TestWorkflows/src/o2TwoTimers.cxx b/Framework/TestWorkflows/src/o2TwoTimers.cxx new file mode 100644 index 0000000000000..b0bdc87378a83 --- /dev/null +++ b/Framework/TestWorkflows/src/o2TwoTimers.cxx @@ -0,0 +1,61 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +#include "Framework/ConfigParamSpec.h" +#include "Framework/DataTakingContext.h" +#include "Framework/CompletionPolicyHelpers.h" +#include "Framework/DeviceSpec.h" +#include "Framework/RawDeviceService.h" +#include "Framework/ControlService.h" +#include "Framework/Configurable.h" +#include "Framework/RunningWorkflowInfo.h" +#include "Framework/CallbackService.h" +#include "Framework/RateLimiter.h" +#include "Framework/ConfigContext.h" +#include + +#include +#include +#include +#include + +using namespace o2::framework; +#include "Framework/runDataProcessing.h" + +// This is how you can define your processing in a declarative way +WorkflowSpec defineDataProcessing(ConfigContext const& specs) +{ + DataProcessorSpec timer1{ + .name = "timer1", + .inputs = {InputSpec{"x", "TIM", "A1", Lifetime::Timer}}, + .outputs = {OutputSpec{{"output"}, "TST", "A1"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + auto& aData = outputs.make(OutputRef{"output"}, 1); + LOG(info) << "timer1: " << aData[0]; + })}, + .options = { + ConfigParamSpec{"some-device-param", VariantType::Int, 1, {"Some device parameter"}}, + }}; + DataProcessorSpec timer2{ + .name = "timer2", + .inputs = {InputSpec{"x", "TIM", "A1", Lifetime::Timer}}, + .outputs = {OutputSpec{{"output"}, "TST", "A2"}}, + .algorithm = AlgorithmSpec{adaptStateless( + [](DataAllocator& outputs, RawDeviceService& device, DataTakingContext& context, ProcessingContext& pcx) { + auto& aData = outputs.make(OutputRef{"output"}, 1); + LOG(info) << "timer2: " << aData[0]; + })}, + .options = { + ConfigParamSpec{"some-device-param", VariantType::Int, 1, {"Some device parameter"}}, + }}; + + return workflow::concat(WorkflowSpec{timer1, timer2}); +} From ccbf4ca6acbe77db50d281c1c50b1730ded4ec82 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:04:31 +0100 
Subject: [PATCH 404/726] DPL: correctly propagate timers with the same description --- Framework/Core/src/DeviceSpecHelpers.cxx | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 43ba0f2d250e5..514e6c91da54c 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -705,6 +705,12 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext .channel = channel.name, .policy = forwardPolicyPtr, }; + // In case we have a timer, the data it creates should be + // forwarded as a timeframe to the next device, so that + // we have synchronization. + if (route.matcher.lifetime == Lifetime::Timer) { + route.matcher.lifetime = Lifetime::Timeframe; + } device.forwards.emplace_back(route); } }; @@ -949,6 +955,13 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, } } + // In case we add a new route to the device, we remap any + // Lifetime::Timer to Lifetime::Timeframe, so that we can + // synchronize the devices without creating a new timer. + if (edge.isForward && route.matcher.lifetime == Lifetime::Timer) { + route.matcher.lifetime = Lifetime::Timeframe; + } + consumerDevice.inputs.push_back(route); }; From e213a5c1330ccfb304b7996ed692e7fe07fc3815 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:21:33 +0100 Subject: [PATCH 405/726] DPL: correctly propagate timers with the same description --- Framework/Core/src/DeviceSpecHelpers.cxx | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 43ba0f2d250e5..eb89b51193b54 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -705,6 +705,12 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext .channel = channel.name, .policy = forwardPolicyPtr, }; + // In case we have a timer, the data it creates should be + // forwarded as a timeframe to the next device, so that + // we have synchronization. + if (route.matcher.lifetime == Lifetime::Timer) { + route.matcher.lifetime = Lifetime::Timeframe; + } device.forwards.emplace_back(route); } }; @@ -923,6 +929,7 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto appendInputRouteToDestDeviceChannel = [&devices, &logicalEdges, &workflow](size_t ei, size_t di, size_t ci) { auto const& edge = logicalEdges[ei]; auto const& consumer = workflow[edge.consumer]; + auto const& producer = workflow[edge.producer]; auto& consumerDevice = devices[di]; auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; @@ -949,6 +956,19 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, } } + // In case we add a new route to the device, we remap any + // Lifetime::Timer to Lifetime::Timeframe, so that we can + // synchronize the devices without creating a new timer. + if (edge.isForward && route.matcher.lifetime == Lifetime::Timer) { + LOGP(warn, "Warning: Forwarding timer {} from {} to a {} as both requested it." 
+ " If this is undesired, please make sure to use two different data matchers for their InputSpec.", + DataSpecUtils::describe(route.matcher).c_str(), + producer.name.c_str(), + consumer.name.c_str() + ); + route.matcher.lifetime = Lifetime::Timeframe; + } + consumerDevice.inputs.push_back(route); }; From 492e5c2262967f32d09ec731f3397314a8fbb9fe Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Thu, 7 Mar 2024 06:58:14 +0100 Subject: [PATCH 406/726] DPL: correctly propagate timers with the same description --- Framework/Core/src/DeviceSpecHelpers.cxx | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/Framework/Core/src/DeviceSpecHelpers.cxx b/Framework/Core/src/DeviceSpecHelpers.cxx index 43ba0f2d250e5..565f85e895f31 100644 --- a/Framework/Core/src/DeviceSpecHelpers.cxx +++ b/Framework/Core/src/DeviceSpecHelpers.cxx @@ -705,6 +705,12 @@ void DeviceSpecHelpers::processOutEdgeActions(ConfigContext const& configContext .channel = channel.name, .policy = forwardPolicyPtr, }; + // In case we have a timer, the data it creates should be + // forwarded as a timeframe to the next device, so that + // we have synchronization. + if (route.matcher.lifetime == Lifetime::Timer) { + route.matcher.lifetime = Lifetime::Timeframe; + } device.forwards.emplace_back(route); } }; @@ -923,6 +929,7 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, auto appendInputRouteToDestDeviceChannel = [&devices, &logicalEdges, &workflow](size_t ei, size_t di, size_t ci) { auto const& edge = logicalEdges[ei]; auto const& consumer = workflow[edge.consumer]; + auto const& producer = workflow[edge.producer]; auto& consumerDevice = devices[di]; auto const& inputSpec = consumer.inputs[edge.consumerInputIndex]; @@ -949,6 +956,19 @@ void DeviceSpecHelpers::processInEdgeActions(std::vector& devices, } } + // In case we add a new route to the device, we remap any + // Lifetime::Timer to Lifetime::Timeframe, so that we can + // synchronize the devices without creating a new timer. + if (edge.isForward && route.matcher.lifetime == Lifetime::Timer) { + LOGP(warn, + "Warning: Forwarding timer {} from {} to a {} as both requested it." 
+ " If this is undesired, please make sure to use two different data matchers for their InputSpec.", + DataSpecUtils::describe(route.matcher).c_str(), + producer.name.c_str(), + consumer.name.c_str()); + route.matcher.lifetime = Lifetime::Timeframe; + } + consumerDevice.inputs.push_back(route); }; From ac86486ac80fe3e066cc6791192508d8ae700c17 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:30:37 +0100 Subject: [PATCH 407/726] O2Sim move to new ROOT serialization --- .../Base/include/DetectorsBase/Detector.h | 17 +++------ Detectors/Base/src/Detector.cxx | 35 ++++++++++--------- 2 files changed, 22 insertions(+), 30 deletions(-) diff --git a/Detectors/Base/include/DetectorsBase/Detector.h b/Detectors/Base/include/DetectorsBase/Detector.h index 6acfa4f5cc46c..1432d93c53821 100644 --- a/Detectors/Base/include/DetectorsBase/Detector.h +++ b/Detectors/Base/include/DetectorsBase/Detector.h @@ -29,7 +29,6 @@ #include #include #include -#include #include "CommonUtils/ShmManager.h" #include "CommonUtils/ShmAllocator.h" #include @@ -42,9 +41,7 @@ #include -namespace o2 -{ -namespace base +namespace o2::base { /// This is the basic class for any AliceO2 detector module, whether it is @@ -260,17 +257,12 @@ T decodeShmMessage(fair::mq::Parts& dataparts, int index, bool*& busy) } // this goes into the source -void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, - void* data, size_t size, void (*func_ptr)(void* data, void* hint), void* hint); +void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, void* data, TClass* cl); template void attachTMessage(Container const& hits, fair::mq::Channel& channel, fair::mq::Parts& parts) { - TMessage* tmsg = new TMessage(); - tmsg->WriteObjectAny((void*)&hits, TClass::GetClass(typeid(hits))); - attachMessageBufferToParts( - parts, channel, tmsg->Buffer(), tmsg->BufferSize(), - [](void* data, void* hint) { delete static_cast(hint); }, tmsg); + attachMessageBufferToParts(parts, channel, (void*)&hits, TClass::GetClass(typeid(hits))); } void* decodeTMessageCore(fair::mq::Parts& dataparts, int index); @@ -746,7 +738,6 @@ class DetImpl : public o2::base::Detector ClassDefOverride(DetImpl, 0); }; -} // namespace base -} // namespace o2 +} // namespace o2::base #endif diff --git a/Detectors/Base/src/Detector.cxx b/Detectors/Base/src/Detector.cxx index 3168e0e84e1f2..f2b790ffccd5b 100644 --- a/Detectors/Base/src/Detector.cxx +++ b/Detectors/Base/src/Detector.cxx @@ -17,6 +17,7 @@ #include "DetectorsBase/MaterialManager.h" #include "DetectorsCommonDataFormats/DetID.h" #include "Field/MagneticField.h" +#include "Framework/TMessageSerializer.h" #include "TString.h" // for TString #include "TGeoManager.h" @@ -196,16 +197,19 @@ int Detector::registerSensitiveVolumeAndGetVolID(std::string const& name) #include #include #include -namespace o2 -{ -namespace base +namespace o2::base { // this goes into the source -void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, void* data, size_t size, - void (*free_func)(void* data, void* hint), void* hint) +void attachMessageBufferToParts(fair::mq::Parts& parts, fair::mq::Channel& channel, void* data, TClass* cl) { - std::unique_ptr message(channel.NewMessage(data, size, free_func, hint)); - parts.AddPart(std::move(message)); + auto msg = channel.Transport()->CreateMessage(4096, fair::mq::Alignment{64}); + // This will serialize the data directly into the message buffer, without any further + // 
buffer or copying. Notice how the message will have 8 bytes of header and then + // the serialized data as TBufferFile. In principle one could construct a serialized TMessage payload + // however I did not manage to get it to work for every case. + o2::framework::FairOutputTBuffer buffer(*msg); + o2::framework::TMessageSerializer::serialize(buffer, data, cl); + parts.AddPart(std::move(msg)); } void attachDetIDHeaderMessage(int id, fair::mq::Channel& channel, fair::mq::Parts& parts) { @@ -246,17 +250,14 @@ void* decodeShmCore(fair::mq::Parts& dataparts, int index, bool*& busy) void* decodeTMessageCore(fair::mq::Parts& dataparts, int index) { - class TMessageWrapper : public TMessage - { - public: - TMessageWrapper(void* buf, Int_t len) : TMessage(buf, len) { ResetBit(kIsOwner); } - ~TMessageWrapper() override = default; - }; auto rawmessage = std::move(dataparts.At(index)); - auto message = std::make_unique(rawmessage->GetData(), rawmessage->GetSize()); - return message.get()->ReadObjectAny(message.get()->GetClass()); + o2::framework::FairInputTBuffer buffer((char*)rawmessage->GetData(), rawmessage->GetSize()); + buffer.InitMap(); + auto* cl = buffer.ReadClass(); + buffer.SetBufferOffset(0); + buffer.ResetMap(); + return buffer.ReadObjectAny(cl); } -} // namespace base -} // namespace o2 +} // namespace o2::base ClassImp(o2::base::Detector); From 61774fd3332b690d086fdcb03377b5b88910d66e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 408/726] DPL: make sure O2_SIGNPOST_ID_FROM_POINTER allows for const pointers --- Framework/Foundation/include/Framework/Signpost.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Framework/Foundation/include/Framework/Signpost.h b/Framework/Foundation/include/Framework/Signpost.h index 7750687088a1d..9ea8aea1192a9 100644 --- a/Framework/Foundation/include/Framework/Signpost.h +++ b/Framework/Foundation/include/Framework/Signpost.h @@ -202,8 +202,6 @@ struct _o2_log_t { bool _o2_lock_free_stack_push(_o2_lock_free_stack& stack, const int& value, bool spin = false); bool _o2_lock_free_stack_pop(_o2_lock_free_stack& stack, int& value, bool spin = false); -//_o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log); -//_o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer); void* _o2_log_create(char const* name, int stacktrace); void _o2_signpost_event_emit(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); void _o2_signpost_interval_begin(_o2_log_t* log, _o2_signpost_id_t id, char const* name, char const* const format, ...); @@ -224,7 +222,7 @@ inline _o2_signpost_id_t _o2_signpost_id_generate_local(_o2_log_t* log) // Generate a unique id for a signpost. Do not use this directly, use O2_SIGNPOST_ID_FROM_POINTER instead. // Notice that this will fail for pointers to bytes as it might overlap with the id above. 
-inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void* pointer) +inline _o2_signpost_id_t _o2_signpost_id_make_with_pointer(_o2_log_t* log, void const* pointer) { assert(((int64_t)pointer & 1) != 1); _o2_signpost_id_t uniqueId{(int64_t)pointer}; From cf3c2402f1e6e85d3128d300262a13d2e4e447fc Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 409/726] DPL: replace Tracy allocation profiler with Signposts --- Framework/Core/src/DataAllocator.cxx | 7 ++++ Framework/Core/src/DataProcessingDevice.cxx | 42 +++++++++++++++------ Framework/Core/src/LifetimeHelpers.cxx | 8 +++- 3 files changed, 44 insertions(+), 13 deletions(-) diff --git a/Framework/Core/src/DataAllocator.cxx b/Framework/Core/src/DataAllocator.cxx index de500cd6d6c4e..fe38283d5e2de 100644 --- a/Framework/Core/src/DataAllocator.cxx +++ b/Framework/Core/src/DataAllocator.cxx @@ -35,6 +35,7 @@ #include O2_DECLARE_DYNAMIC_LOG(stream_context); +O2_DECLARE_DYNAMIC_LOG(parts); namespace o2::framework { @@ -129,6 +130,8 @@ void DataAllocator::addPartToContext(RouteIndex routeIndex, fair::mq::MessagePtr o2::header::SerializationMethod serializationMethod) { auto headerMessage = headerMessageFromOutput(spec, routeIndex, serializationMethod, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerMessage->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "addPartToContext %p", headerMessage->GetData()); // FIXME: this is kind of ugly, we know that we can change the content of the // header message because we have just created it, but the API declares it const @@ -150,6 +153,8 @@ void DataAllocator::adopt(const Output& spec, std::string* ptr) // the correct payload size is set later when sending the // StringContext, see DataProcessor::doSend auto header = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodNone, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, header->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "addPartToContext %p", header->GetData()); mRegistry.get().addString(std::move(header), std::move(payload), routeIndex); assert(payload.get() == nullptr); } @@ -206,6 +211,8 @@ void DataAllocator::adopt(const Output& spec, LifetimeHolder& tb) auto& timingInfo = mRegistry.get(); RouteIndex routeIndex = matchDataHeader(spec, timingInfo.timeslice); auto header = headerMessageFromOutput(spec, routeIndex, o2::header::gSerializationMethodArrow, 0); + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, header->GetData()); + O2_SIGNPOST_START(parts, pid, "parts", "adopt %p", header->GetData()); auto& context = mRegistry.get(); auto creator = [transport = context.proxy().getOutputTransport(routeIndex)](size_t s) -> std::unique_ptr { diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 3e944b30ed11f..ca4ca71f50c2c 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -92,7 +92,10 @@ struct formatter : ostream_format }; } // namespace fmt +// A log to use for general device logging O2_DECLARE_DYNAMIC_LOG(device); +// Special log to keep track of the lifetime of the parts +O2_DECLARE_DYNAMIC_LOG(parts); using namespace o2::framework; using ConfigurationInterface = o2::configuration::ConfigurationInterface; @@ -1804,9 +1807,14 @@ void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& LOGP(error, "DataHeader payloadSize mismatch"); continue; } - TracyPlot("payload size", 
(int64_t)dh->payloadSize); auto dph = o2::header::get(headerData); - TracyAlloc(parts.At(pi + 1)->GetData(), parts.At(pi + 1)->GetSize()); + // We only deal with the tracking of parts if the log is enabled. + // This is because in principle we should track the size of each of + // the parts and sum it up. Not for now. + if (O2_LOG_ENABLED(parts) == true) { + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerData); + O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader with splitPayloadParts %d and splitPayloadIndex %d", dh->splitPayloadParts, dh->splitPayloadIndex); + } if (!dph) { insertInputInfo(pi, 2, InputType::Invalid); LOGP(error, "Header stack does not contain DataProcessingHeader"); @@ -2180,10 +2188,12 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // Function to cleanup record. For the moment we // simply use it to keep track of input messages // which are not needed, to display them in the GUI. -#ifdef TRACY_ENABLE auto cleanupRecord = [](InputRecord& record) { - for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) { - DataRef input = record.getByPos(ii); + if (O2_LOG_ENABLED(parts) == false) { + return; + } + for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) { + DataRef input = record.getByPos(pi); if (input.header == nullptr) { continue; } @@ -2196,10 +2206,21 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v if (!dh) { continue; } - TracyFree(input.payload); + // We use the address of the first header of a split payload + // to identify the interval. + O2_SIGNPOST_ID_FROM_POINTER(pid, parts, dh); + O2_SIGNPOST_END(parts, pid, "parts", "Cleaning up parts associated to %p", dh); + + // No split parts, we simply skip the payload + if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) { + // this is indicating a sequence of payloads following the header + // FIXME: we will probably also set the DataHeader version + pi += dh->splitPayloadParts - 1; + } else { + size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2; + } } }; -#endif auto switchState = [ref](StreamingState newState) { auto& control = ref.get(); @@ -2432,6 +2453,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v // We forward inputs only when we consume them. If we simply Process them, // we keep them for next message arriving. 
if (action.op == CompletionPolicy::CompletionOp::Consume) { + cleanupRecord(record); context.postDispatchingCallbacks(processContext); ref.get().call(o2::framework::ServiceRegistryRef{ref}); } @@ -2441,11 +2463,7 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false, action.op == CompletionPolicy::CompletionOp::Consume); } context.postForwardingCallbacks(processContext); - if (action.op == CompletionPolicy::CompletionOp::Consume) { -#ifdef TRACY_ENABLE - cleanupRecord(record); -#endif - } else if (action.op == CompletionPolicy::CompletionOp::Process) { + if (action.op == CompletionPolicy::CompletionOp::Process) { cleanTimers(action.slot, record); } } diff --git a/Framework/Core/src/LifetimeHelpers.cxx b/Framework/Core/src/LifetimeHelpers.cxx index 1aa53fa0493ca..8c8605d41849c 100644 --- a/Framework/Core/src/LifetimeHelpers.cxx +++ b/Framework/Core/src/LifetimeHelpers.cxx @@ -25,6 +25,7 @@ #include "Framework/FairMQDeviceProxy.h" #include "Framework/Formatters.h" #include "Framework/DeviceState.h" +#include "Framework/Signpost.h" #include "Headers/DataHeader.h" #include "Headers/DataHeaderHelpers.h" @@ -44,6 +45,8 @@ using namespace o2::header; using namespace fair; +O2_DECLARE_DYNAMIC_LOG(parts); + namespace o2::framework { @@ -411,7 +414,6 @@ ExpirationHandler::Handler LifetimeHelpers::enumerate(ConcreteDataMatcher const& assert(!ref.payload); auto timestamp = VariableContextHelpers::getTimeslice(variables).value; - LOGP(debug, "Enumerating record"); DataHeader dh; dh.dataOrigin = matcher.origin; dh.dataDescription = matcher.description; @@ -432,6 +434,8 @@ ExpirationHandler::Handler LifetimeHelpers::enumerate(ConcreteDataMatcher const& auto&& transport = deviceProxy.getInputChannel(channelIndex)->Transport(); auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph}); + O2_SIGNPOST_ID_FROM_POINTER(hid, parts, header->GetData()); + O2_SIGNPOST_START(parts, hid, "parts", "Enumerating part %p with timestamp %zu", header->GetData(), timestamp); ref.header = std::move(header); auto payload = transport->CreateMessage(sizeof(counter_t)); @@ -486,6 +490,8 @@ ExpirationHandler::Handler LifetimeHelpers::dummy(ConcreteDataMatcher const& mat auto channelAlloc = o2::pmr::getTransportAllocator(transport); auto header = o2::pmr::getMessage(o2::header::Stack{channelAlloc, dh, dph}); ref.header = std::move(header); + O2_SIGNPOST_ID_FROM_POINTER(hid, parts, header->GetData()); + O2_SIGNPOST_START(parts, hid, "parts", "Enumerating part %p with timestamp %zu", header->GetData(), timestamp); auto payload = transport->CreateMessage(0); ref.payload = std::move(payload); }; From afb25c5bf77078b0e7900692455033aee4f23d27 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 410/726] DPL: use signposts for OOB callback --- Framework/Core/src/DataProcessingDevice.cxx | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index ca4ca71f50c2c..ae7c19022e556 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -318,7 +318,7 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) O2_SIGNPOST_END(device, sid, "socket_state", "Socket disconnected 
in context %{public}s", context->name); } break; case UV_PRIORITIZED: { - O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for context %{public}s", context->name); + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Socket prioritized for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() @@ -326,6 +326,7 @@ void on_socket_polled(uv_poll_t* poller, int status, int events) void on_out_of_band_polled(uv_poll_t* poller, int status, int events) { + O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller); auto* context = (PollerContext*)poller->data; context->state->loopReason |= DeviceState::OOB_ACTIVITY; if (status < 0) { @@ -334,32 +335,27 @@ void on_out_of_band_polled(uv_poll_t* poller, int status, int events) } switch (events) { case UV_READABLE: { - ZoneScopedN("socket readable event"); + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_INCOMING; assert(context->channelInfo); - LOGP(debug, "oob socket {} polled UV_READABLE.", - context->name, - context->channelInfo->hasPendingEvents); context->channelInfo->readPolled = true; } break; case UV_WRITABLE: { - ZoneScopedN("socket writeable"); + O2_SIGNPOST_END(device, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name); if (context->read) { - LOG(debug) << "socket polled UV_CONNECT" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name); uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_out_of_band_polled); } else { - LOG(debug) << "socket polled UV_WRITABLE" << context->name; + O2_SIGNPOST_START(device, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name); context->state->loopReason |= DeviceState::DATA_OUTGOING; } } break; case UV_DISCONNECT: { - ZoneScopedN("socket disconnect"); - LOG(debug) << "socket polled UV_DISCONNECT"; + O2_SIGNPOST_END(device, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name); uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled); } break; case UV_PRIORITIZED: { - ZoneScopedN("socket prioritized"); - LOG(debug) << "socket polled UV_PRIORITIZED"; + O2_SIGNPOST_EVENT_EMIT(device, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name); } break; } // We do nothing, all the logic for now stays in DataProcessingDevice::doRun() From 7175a2746927054ef2904398967e602111b9ffd0 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 411/726] DPL: rename TracyLockable to O2_LOCKABLE --- .../Core/include/Framework/DataRelayer.h | 2 +- Framework/Core/include/Framework/DataSender.h | 2 +- Framework/Core/src/DataRelayer.cxx | 32 +++++++++---------- Framework/Core/src/DataSender.cxx | 2 +- Framework/Core/src/ServiceRegistry.cxx | 6 ++-- .../Foundation/include/Framework/Tracing.h | 4 +-- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Framework/Core/include/Framework/DataRelayer.h b/Framework/Core/include/Framework/DataRelayer.h index 8fdedbdd1119e..1ebcf549d6a29 100644 --- a/Framework/Core/include/Framework/DataRelayer.h +++ b/Framework/Core/include/Framework/DataRelayer.h @@ -194,7 +194,7 @@ class DataRelayer std::vector mPruneOps; size_t mMaxLanes; - TracyLockableN(std::recursive_mutex, 
mMutex, "data relayer mutex"); + O2_LOCKABLE_NAMED(std::recursive_mutex, mMutex, "data relayer mutex"); }; } // namespace o2::framework diff --git a/Framework/Core/include/Framework/DataSender.h b/Framework/Core/include/Framework/DataSender.h index 7cdc0ea537bea..2937bd80f78f8 100644 --- a/Framework/Core/include/Framework/DataSender.h +++ b/Framework/Core/include/Framework/DataSender.h @@ -61,7 +61,7 @@ class DataSender std::vector mPresent; std::vector mPresentDefaults; - TracyLockableN(std::recursive_mutex, mMutex, "data relayer mutex"); + O2_LOCKABLE_NAMED(std::recursive_mutex, mMutex, "data relayer mutex"); }; } // namespace o2::framework diff --git a/Framework/Core/src/DataRelayer.cxx b/Framework/Core/src/DataRelayer.cxx index 0b459dd155a21..1daf4bbd2a20b 100644 --- a/Framework/Core/src/DataRelayer.cxx +++ b/Framework/Core/src/DataRelayer.cxx @@ -70,7 +70,7 @@ DataRelayer::DataRelayer(const CompletionPolicy& policy, mInputMatchers{DataRelayerHelpers::createInputMatchers(routes)}, mMaxLanes{InputRouteHelpers::maxLanes(routes)} { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); if (policy.configureRelayer == nullptr) { static int pipelineLength = DefaultsHelpers::pipelineLength(); @@ -100,7 +100,7 @@ DataRelayer::DataRelayer(const CompletionPolicy& policy, TimesliceId DataRelayer::getTimesliceForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto& variables = mTimesliceIndex.getVariablesForSlot(slot); return VariableContextHelpers::getTimeslice(variables); } @@ -109,7 +109,7 @@ DataRelayer::ActivityStats DataRelayer::processDanglingInputs(std::vector lock(mMutex); + std::scoped_lock lock(mMutex); auto& deviceProxy = services.get(); ActivityStats activity; @@ -394,7 +394,7 @@ DataRelayer::RelayChoice size_t nPayloads, std::function&, TimesliceIndex::OldestOutputInfo)> onDrop) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); DataProcessingHeader const* dph = o2::header::get(rawHeader); // IMPLEMENTATION DETAILS // @@ -616,7 +616,7 @@ DataRelayer::RelayChoice void DataRelayer::getReadyToProcess(std::vector& completed) { LOGP(debug, "DataRelayer::getReadyToProcess"); - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); // THE STATE const auto& cache = mCache; @@ -749,7 +749,7 @@ void DataRelayer::getReadyToProcess(std::vector& comp void DataRelayer::updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStatus, CacheEntryStatus newStatus) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); auto markInputDone = [&cachedStateMetrics = mCachedStateMetrics, @@ -767,7 +767,7 @@ void DataRelayer::updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStat std::vector DataRelayer::consumeAllInputsForTimeslice(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); // State of the computation @@ -821,7 +821,7 @@ std::vector DataRelayer::consumeAllInputsForTimeslice std::vector DataRelayer::consumeExistingInputsForTimeslice(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); const auto numInputTypes = mDistinctRoutesIndex.size(); // State of the computation @@ -864,7 +864,7 @@ std::vector DataRelayer::consumeExistingInputsForTime void DataRelayer::clear() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); for (auto& cache : mCache) { cache.clear(); @@ -886,7 +886,7 @@ size_t /// the time 
pipelining. void DataRelayer::setPipelineLength(size_t s) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); mTimesliceIndex.resize(s); mVariableContextes.resize(s); @@ -895,7 +895,7 @@ void DataRelayer::setPipelineLength(size_t s) void DataRelayer::publishMetrics() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto numInputTypes = mDistinctRoutesIndex.size(); // FIXME: many of the DataRelayer function rely on allocated cache, so its @@ -932,31 +932,31 @@ void DataRelayer::publishMetrics() uint32_t DataRelayer::getFirstTFOrbitForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getFirstTFOrbit(mTimesliceIndex.getVariablesForSlot(slot)); } uint32_t DataRelayer::getFirstTFCounterForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getFirstTFCounter(mTimesliceIndex.getVariablesForSlot(slot)); } uint32_t DataRelayer::getRunNumberForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getRunNumber(mTimesliceIndex.getVariablesForSlot(slot)); } uint64_t DataRelayer::getCreationTimeForSlot(TimesliceSlot slot) { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); return VariableContextHelpers::getCreationTime(mTimesliceIndex.getVariablesForSlot(slot)); } void DataRelayer::sendContextState() { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto& states = mContext.get(); for (size_t ci = 0; ci < mTimesliceIndex.size(); ++ci) { auto slot = TimesliceSlot{ci}; diff --git a/Framework/Core/src/DataSender.cxx b/Framework/Core/src/DataSender.cxx index f0753102e2d40..3acce1c4b9704 100644 --- a/Framework/Core/src/DataSender.cxx +++ b/Framework/Core/src/DataSender.cxx @@ -51,7 +51,7 @@ DataSender::DataSender(ServiceRegistryRef registry) mSpec{registry.get()}, mDistinctRoutesIndex{createDistinctOutputRouteIndex(mSpec.outputs)} { - std::scoped_lock lock(mMutex); + std::scoped_lock lock(mMutex); auto numInputTypes = mDistinctRoutesIndex.size(); auto& routes = mSpec.outputs; diff --git a/Framework/Core/src/ServiceRegistry.cxx b/Framework/Core/src/ServiceRegistry.cxx index 7b77c521fe6a7..09921f6b4a7d4 100644 --- a/Framework/Core/src/ServiceRegistry.cxx +++ b/Framework/Core/src/ServiceRegistry.cxx @@ -271,7 +271,7 @@ void ServiceRegistry::postRenderGUICallbacks(ServiceRegistryRef ref) void ServiceRegistry::bindService(ServiceRegistry::Salt salt, ServiceSpec const& spec, void* service) const { - static TracyLockableN(std::mutex, bindMutex, "bind mutex"); + static O2_LOCKABLE_NAMED(std::mutex, bindMutex, "bind mutex"); // Stream services need to store their callbacks in the stream context. // This is to make sure we invoke the correct callback only once per // stream, since they could bind multiple times. 
@@ -280,12 +280,12 @@ void ServiceRegistry::bindService(ServiceRegistry::Salt salt, ServiceSpec const& if (spec.kind == ServiceKind::Stream) { ServiceRegistryRef ref{const_cast(*this), salt}; auto& streamContext = ref.get(); - std::scoped_lock lock(bindMutex); + std::scoped_lock lock(bindMutex); auto& dataProcessorContext = ref.get(); ContextHelpers::bindStreamService(dataProcessorContext, streamContext, spec, service); } else { ServiceRegistryRef ref{const_cast(*this), salt}; - std::scoped_lock lock(bindMutex); + std::scoped_lock lock(bindMutex); if (ref.active()) { auto& dataProcessorContext = ref.get(); ContextHelpers::bindProcessorService(dataProcessorContext, spec, service); diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index f5bcecd3889d7..6c9c65f4423e5 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -39,8 +39,8 @@ #define TracyAppInfo(...) \ while (false) { \ } -#define TracyLockableN(T, V, N) T V -#define LockableBase(T) T +#define O2_LOCKABLE_NAMED(T, V, N) T V +#define O2_LOCKABLE(T) T #endif #endif // O2_FRAMEWORK_TRACING_H_ From 178e632ca967462a43420e470d4631f65b4c538f Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 412/726] DPL: do not use Tracy anymore Tracy usage never took off, so I am removing all the explicit calls to it and I will replace them with Signpost usage, where it still makes sense. --- .../Core/src/CommonMessageBackendsHelpers.h | 1 - Framework/Core/src/CommonServices.cxx | 1 - Framework/Core/src/DataProcessingDevice.cxx | 29 ------------------- 3 files changed, 31 deletions(-) diff --git a/Framework/Core/src/CommonMessageBackendsHelpers.h b/Framework/Core/src/CommonMessageBackendsHelpers.h index 10d8a9c5d347a..a3684906ff838 100644 --- a/Framework/Core/src/CommonMessageBackendsHelpers.h +++ b/Framework/Core/src/CommonMessageBackendsHelpers.h @@ -38,7 +38,6 @@ struct CommonMessageBackendsHelpers { static ServiceProcessingCallback sendCallback() { return [](ProcessingContext& ctx, void* service) { - ZoneScopedN("send message callback"); T* context = reinterpret_cast(service); DataProcessor::doSend(ctx.services().get(), *context, ctx.services()); }; diff --git a/Framework/Core/src/CommonServices.cxx b/Framework/Core/src/CommonServices.cxx index a0f7fcdc88201..0d0828729ef42 100644 --- a/Framework/Core/src/CommonServices.cxx +++ b/Framework/Core/src/CommonServices.cxx @@ -741,7 +741,6 @@ auto sendRelayerMetrics(ServiceRegistryRef registry, DataProcessingStats& stats) stats.updateStats({static_cast(static_cast(ProcessingStatsId::AVAILABLE_MANAGED_SHM_BASE) + (runningWorkflow.shmSegmentId % 512)), DataProcessingStats::Op::SetIfPositive, freeMemory}); } - ZoneScopedN("send metrics"); auto device = registry.get().device(); int64_t totalBytesIn = 0; diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index ae7c19022e556..649aba2d6b502 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -115,14 +115,12 @@ struct ServiceKindExtractor { /// Watching stdin for commands probably a better approach. 
void on_idle_timer(uv_timer_t* handle) { - ZoneScopedN("Idle timer"); auto* state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; } void on_transition_requested_expired(uv_timer_t* handle) { - ZoneScopedN("Transition expired"); auto* state = (DeviceState*)handle->data; state->loopReason |= DeviceState::TIMER_EXPIRED; LOGP(info, "Timer expired. Forcing transition to READY"); @@ -222,12 +220,10 @@ DataProcessingDevice::DataProcessingDevice(RunningDeviceRef running, ServiceRegi // one with the thread id. For the moment we simply use the first one. void run_callback(uv_work_t* handle) { - ZoneScopedN("run_callback"); auto* task = (TaskStreamInfo*)handle->data; auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)}; DataProcessingDevice::doPrepare(ref); DataProcessingDevice::doRun(ref); - // FrameMark; } // Once the processing in a thread is done, this is executed on the main thread. @@ -268,7 +264,6 @@ void run_completion(uv_work_t* handle, int status) quotaEvaluator.handleExpired(reportExpiredOffer); quotaEvaluator.dispose(task->id.index); task->running = false; - ZoneScopedN("run_completion"); } // Context for polling @@ -377,8 +372,6 @@ void DataProcessingDevice::Init() context.statefulProcess = nullptr; context.error = spec.algorithm.onError; context.initError = spec.algorithm.onInitError; - TracyAppInfo(spec.name.data(), spec.name.size()); - ZoneScopedN("DataProcessingDevice::Init"); auto configStore = DeviceConfigurationHelpers::getConfiguration(mServiceRegistry, spec.name.c_str(), spec.options); if (configStore == nullptr) { @@ -412,7 +405,6 @@ void DataProcessingDevice::Init() if (context.initError) { context.initErrorHandling = [&errorCallback = context.initError, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e) { - ZoneScopedN("Error handling"); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. ServiceRegistryRef ref{serviceRegistry, ServiceRegistry::globalDeviceSalt()}; @@ -426,7 +418,6 @@ void DataProcessingDevice::Init() }; } else { context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) { - ZoneScopedN("Error handling"); auto& err = error_from_ref(e); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. @@ -449,7 +440,6 @@ void DataProcessingDevice::Init() try { context.statefulProcess = context.init(initContext); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); if (context.initErrorHandling) { (context.initErrorHandling)(e); } @@ -458,14 +448,12 @@ void DataProcessingDevice::Init() try { context.statefulProcess = context.init(initContext); } catch (std::exception& ex) { - ZoneScopedN("error handling"); /// Convert a standard exception to a RuntimeErrorRef /// Notice how this will lose the backtrace information /// and report the exception coming from here. 
auto e = runtime_error(ex.what()); (context.initErrorHandling)(e); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.initErrorHandling)(e); } } @@ -621,7 +609,6 @@ static auto toBeforwardedMessageSet = [](std::vector& cachedForwar // FIXME: do it in a smarter way than O(N^2) static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector& currentSetOfInputs, TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) { - ZoneScopedN("forward inputs"); auto& proxy = registry.get(); // we collect all messages per forward in a map and send them together std::vector forwardedParts; @@ -1078,7 +1065,6 @@ void DataProcessingDevice::fillContext(DataProcessorContext& context, DeviceCont if (context.error != nullptr) { context.errorHandling = [&errorCallback = context.error, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) { - ZoneScopedN("Error handling"); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. ServiceRegistryRef ref{serviceRegistry, ServiceRegistry::globalDeviceSalt()}; @@ -1093,7 +1079,6 @@ void DataProcessingDevice::fillContext(DataProcessorContext& context, DeviceCont } else { context.errorHandling = [&errorPolicy = mProcessingPolicies.error, &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) { - ZoneScopedN("Error handling"); auto& err = error_from_ref(e); /// FIXME: we should pass the salt in, so that the message /// can access information which were stored in the stream. @@ -1242,8 +1227,6 @@ void DataProcessingDevice::Run() // so that devices which do not have a timer can still start an // enumeration. { - ZoneScopedN("uv idle"); - TracyPlot("past activity", (int64_t)mWasActive); ServiceRegistryRef ref{mServiceRegistry}; ref.get().flushPending(mServiceRegistry); auto shouldNotWait = (mWasActive && @@ -1303,7 +1286,6 @@ void DataProcessingDevice::Run() if (state.transitionHandling == TransitionHandlingState::Requested && state.streaming == StreamingState::Idle) { state.transitionHandling = TransitionHandlingState::Expired; } - TracyPlot("shouldNotWait", (int)shouldNotWait); if (state.severityStack.empty() == false) { fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); @@ -1349,7 +1331,6 @@ void DataProcessingDevice::Run() fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back()); state.severityStack.pop_back(); } - TracyPlot("loopReason", (int64_t)(uint64_t)state.loopReason); LOGP(debug, "Loop reason mask {:b} & {:b} = {:b}", state.loopReason, state.tracingFlags, state.loopReason & state.tracingFlags); @@ -1433,7 +1414,6 @@ void DataProcessingDevice::Run() } else { mWasActive = false; } - FrameMark; } auto& spec = ref.get(); /// Cleanup messages which are still pending on exit. @@ -1448,12 +1428,10 @@ void DataProcessingDevice::Run() /// non-data triggers like those which are time based. void DataProcessingDevice::doPrepare(ServiceRegistryRef ref) { - ZoneScopedN("DataProcessingDevice::doPrepare"); auto& context = ref.get(); *context.wasActive = false; { - ZoneScopedN("CallbackService::Id::ClockTick"); ref.get().call(); } // Whether or not we had something to do. 
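The ZoneScopedN markers stripped out above are not simply lost observability: as the changelog entry later in this series explains, the plan is to cover the same spots with Signposts, which can be enabled at runtime instead of requiring a dedicated build. As a rough illustration of that direction, one of the removed zones could be re-expressed as a signpost interval; the O2_SIGNPOST_* macro names and argument shapes below are assumptions about the Signpost API, not something introduced by this patch:

  #include "Framework/Signpost.h"          // assumed location of the signpost macros

  O2_DECLARE_DYNAMIC_LOG(device);          // assumed: a log stream that can be switched on at runtime

  void doPrepareTraced()
  {
    O2_SIGNPOST_ID_GENERATE(sid, device);                   // assumed: fresh id for this interval
    O2_SIGNPOST_START(device, sid, "doPrepare", "begin");   // stands in for ZoneScopedN("DataProcessingDevice::doPrepare")
    // ... the actual work previously wrapped by the Tracy zone ...
    O2_SIGNPOST_END(device, sid, "doPrepare", "end");
  }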
@@ -1734,7 +1712,6 @@ struct WaitBackpressurePolicy { void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& info) { auto& context = ref.get(); - ZoneScopedN("DataProcessingDevice::handleData"); enum struct InputType : int { Invalid = 0, @@ -1763,7 +1740,6 @@ void DataProcessingDevice::handleData(ServiceRegistryRef ref, InputChannelInfo& auto& parts = info.parts; stats.updateStats({(int)ProcessingStatsId::TOTAL_INPUTS, DataProcessingStats::Op::Set, (int64_t)parts.Size()}); - TracyPlot("messages received", (int64_t)parts.Size()); std::vector results; // we can reserve the upper limit results.reserve(parts.Size() / 2); @@ -2060,7 +2036,6 @@ void update_maximum(std::atomic& maximum_value, T const& value) noexcept bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::vector& completed) { auto& context = ref.get(); - ZoneScopedN("DataProcessingDevice::tryDispatchComputation"); LOGP(debug, "DataProcessingDevice::tryDispatchComputation"); // This is the actual hidden state for the outer loop. In case we decide we // want to support multithreaded dispatching of operations, I can simply @@ -2135,7 +2110,6 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void { auto& relayer = ref.get(); auto& timingInfo = ref.get(); - ZoneScopedN("DataProcessingDevice::prepareForCurrentTimeslice"); auto timeslice = relayer.getTimesliceForSlot(i); timingInfo.timeslice = timeslice.value; @@ -2422,21 +2396,18 @@ bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::v try { runNoCatch(action); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.errorHandling)(e, record); } } else { try { runNoCatch(action); } catch (std::exception& ex) { - ZoneScopedN("error handling"); /// Convert a standard exception to a RuntimeErrorRef /// Notice how this will lose the backtrace information /// and report the exception coming from here. auto e = runtime_error(ex.what()); (context.errorHandling)(e, record); } catch (o2::framework::RuntimeErrorRef e) { - ZoneScopedN("error handling"); (context.errorHandling)(e, record); } } From 7c3408d03132c468097d0357cafafa11d984e390 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 413/726] DPL: drop unused Tracy APIs ZoneScoped*, TracyAppInfo, FrameMark, TracyAlloc, TracyPlot were all removed and will be replaced by Signposts as needed. --- .../Foundation/include/Framework/Tracing.h | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index 6c9c65f4423e5..91c3b99eb8dd0 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -15,30 +15,6 @@ #define DPL_HAS_TRACING #include #else -#define ZoneScoped \ - while (false) { \ - } -#define FrameMark \ - while (false) { \ - } -#define TracyPlot(...) \ - while (false) { \ - } -#define ZoneScopedN(...) \ - while (false) { \ - } -#define ZoneScopedNS(...) \ - while (false) { \ - } -#define TracyAlloc(...) \ - while (false) { \ - } -#define TracyFree(...) \ - while (false) { \ - } -#define TracyAppInfo(...) 
\ - while (false) { \ - } #define O2_LOCKABLE_NAMED(T, V, N) T V #define O2_LOCKABLE(T) T #endif From 2a57f1a6a3e0c680225f2bf07f44f9c4b1beae1e Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 414/726] DPL: drop tracy support from the GUI No one is using it and Signposts are probably a better alternative. --- .../GUISupport/src/FrameworkGUIDeviceInspector.cxx | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx index eb62fcc09d338..3ef8b99ed26bb 100644 --- a/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx +++ b/Framework/GUISupport/src/FrameworkGUIDeviceInspector.cxx @@ -261,9 +261,6 @@ void displayDeviceInspector(DeviceSpec const& spec, ImGui::Text("Pid: %d (exit status: %d)", info.pid, info.exitStatus); } ImGui::Text("Device state: %s", info.deviceState.data()); -#ifdef DPL_ENABLE_TRACING - ImGui::Text("Tracy Port: %d", info.tracyPort); -#endif ImGui::Text("Rank: %zu/%zu%%%zu/%zu", spec.rank, spec.nSlots, spec.inputTimesliceId, spec.maxInputTimeslices); if (ImGui::Button(ICON_FA_BUG "Attach debugger")) { @@ -324,16 +321,6 @@ void displayDeviceInspector(DeviceSpec const& spec, } #endif -#if DPL_ENABLE_TRACING - ImGui::SameLine(); - if (ImGui::Button("Tracy")) { - std::string tracyPort = std::to_string(info.tracyPort); - auto cmd = fmt::format("tracy-profiler -p {} -a 127.0.0.1 &", info.tracyPort); - LOG(debug) << cmd; - int retVal = system(cmd.c_str()); - (void)retVal; - } -#endif if (control.controller) { if (ImGui::Button("Offer SHM")) { control.controller->write("/shm-offer 1000", strlen("/shm-offer 1000")); From 101fc92ec1d9d6227b7744dd80939052cbfe5e92 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 415/726] DPL: drop tracy support for good --- Framework/Core/src/DataProcessingDevice.cxx | 4 ---- Framework/Foundation/include/Framework/Tracing.h | 7 ++++--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Framework/Core/src/DataProcessingDevice.cxx b/Framework/Core/src/DataProcessingDevice.cxx index 649aba2d6b502..58ea6524f0b7d 100644 --- a/Framework/Core/src/DataProcessingDevice.cxx +++ b/Framework/Core/src/DataProcessingDevice.cxx @@ -8,10 +8,6 @@ // In applying this license CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-#ifdef DPL_ENABLE_TRACING -#define TRACY_ENABLE -#include -#endif #include "Framework/AsyncQueue.h" #include "Framework/DataProcessingDevice.h" #include "Framework/ChannelMatching.h" diff --git a/Framework/Foundation/include/Framework/Tracing.h b/Framework/Foundation/include/Framework/Tracing.h index 91c3b99eb8dd0..72c052ca6c161 100644 --- a/Framework/Foundation/include/Framework/Tracing.h +++ b/Framework/Foundation/include/Framework/Tracing.h @@ -11,9 +11,10 @@ #ifndef O2_FRAMEWORK_TRACING_H_ #define O2_FRAMEWORK_TRACING_H_ -#if DPL_ENABLE_TRACING && __has_include() -#define DPL_HAS_TRACING -#include +#if DPL_ENABLE_TRACING +// FIXME: not implemented yet in terms of Signposts +#define O2_LOCKABLE_NAMED(T, V, N) T V +#define O2_LOCKABLE(T) T #else #define O2_LOCKABLE_NAMED(T, V, N) T V #define O2_LOCKABLE(T) T From 2fbedffecc63d304c86d56e8b6d64f1fb82194ef Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 09:35:09 +0100 Subject: [PATCH 416/726] [FEAT] Drop support for Tracy --- Framework/CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Framework/CHANGELOG.md b/Framework/CHANGELOG.md index e7278733de246..044274d9515c4 100644 --- a/Framework/CHANGELOG.md +++ b/Framework/CHANGELOG.md @@ -1,3 +1,12 @@ +# 2024-02-22: Drop Tracy support + +Tracy support never took off, so I am dropping it. This was mostly because people do not know about it and having a per process profile GUI was way unpractical. Moreover, needing an extra compile time flag meant one most likely did not have the support compiled in when needed. + +I have therefore decided to replace it with signposts, which hopefully will see better adoption thanks +to the integration with Instruments on mac and the easy way they can be enabled dynamically. + +We could then reintroduce Tracy support as a hook on top of signposts, if really needed. + # 2024-02-16: Improved Signposts. 
In particular: From 473b9587320736412b3e576f6bbe7141d5c0069d Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:21:15 +0100 Subject: [PATCH 417/726] DPL: drop some more code from the main header --- Framework/Core/include/Framework/runDataProcessing.h | 10 ++-------- Framework/Core/src/runDataProcessing.cxx | 9 +++++++++ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/Framework/Core/include/Framework/runDataProcessing.h b/Framework/Core/include/Framework/runDataProcessing.h index 186d76499c1d6..75e2a964ccba1 100644 --- a/Framework/Core/include/Framework/runDataProcessing.h +++ b/Framework/Core/include/Framework/runDataProcessing.h @@ -31,7 +31,6 @@ #include "Framework/StructToTuple.h" #include -#include namespace o2::framework { @@ -211,6 +210,7 @@ int mainNoCatch(int argc, char** argv) } int callMain(int argc, char** argv, int (*)(int, char**)); +char *getIdString(int argc, char** argv); int main(int argc, char** argv) { @@ -219,13 +219,7 @@ int main(int argc, char** argv) int result = callMain(argc, argv, mainNoCatch); - char* idstring = nullptr; - for (int argi = 0; argi < argc; argi++) { - if (strcmp(argv[argi], "--id") == 0 && argi + 1 < argc) { - idstring = argv[argi + 1]; - break; - } - } + char* idstring = getIdString(argc, argv); o2::framework::OnWorkflowTerminationHook onWorkflowTerminationHook; UserCustomizationsHelper::userDefinedCustomization(onWorkflowTerminationHook, 0); onWorkflowTerminationHook(idstring); diff --git a/Framework/Core/src/runDataProcessing.cxx b/Framework/Core/src/runDataProcessing.cxx index 6b424ee6a4261..f233eb5b72667 100644 --- a/Framework/Core/src/runDataProcessing.cxx +++ b/Framework/Core/src/runDataProcessing.cxx @@ -182,6 +182,15 @@ void doBoostException(boost::exception& e, const char*); void doDPLException(o2::framework::RuntimeErrorRef& ref, char const*); void doUnknownException(std::string const& s, char const*); +char *getIdString(int argc, char** argv) { + for (int argi = 0; argi < argc; argi++) { + if (strcmp(argv[argi], "--id") == 0 && argi + 1 < argc) { + return argv[argi + 1]; + } + } + return nullptr; +} + int callMain(int argc, char** argv, int (*mainNoCatch)(int, char**)) { static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0"); From b98dafccee418dd10a249e9646c0a6d592fddc88 Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:21:15 +0100 Subject: [PATCH 418/726] DPL: forward declaration of fill functions --- Framework/Core/include/Framework/HistogramRegistry.h | 4 ++++ Framework/Core/src/HistogramRegistry.cxx | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/Framework/Core/include/Framework/HistogramRegistry.h b/Framework/Core/include/Framework/HistogramRegistry.h index 4b96ce75bbc98..e93972b27dcf9 100644 --- a/Framework/Core/include/Framework/HistogramRegistry.h +++ b/Framework/Core/include/Framework/HistogramRegistry.h @@ -417,6 +417,10 @@ void HistogramRegistry::fill(const HistName& histName, Ts&&... 
positionAndWeight std::visit([&positionAndWeight...](auto&& hist) { HistFiller::fillHistAny(hist, std::forward(positionAndWeight)...); }, mRegistryValue[getHistIndex(histName)]); } +extern template void HistogramRegistry::fill(const HistName& histName, double); +extern template void HistogramRegistry::fill(const HistName& histName, float); +extern template void HistogramRegistry::fill(const HistName& histName, int); + template void HistogramRegistry::fill(const HistName& histName, const T& table, const o2::framework::expressions::Filter& filter) { diff --git a/Framework/Core/src/HistogramRegistry.cxx b/Framework/Core/src/HistogramRegistry.cxx index 86b2e128ebb2c..4b4bb2cf9be09 100644 --- a/Framework/Core/src/HistogramRegistry.cxx +++ b/Framework/Core/src/HistogramRegistry.cxx @@ -17,6 +17,10 @@ namespace o2::framework { +template void HistogramRegistry::fill(const HistName& histName, const double&); +template void HistogramRegistry::fill(const HistName& histName, const float&); +template void HistogramRegistry::fill(const HistName& histName, const int&); + constexpr HistogramRegistry::HistName::HistName(char const* const name) : str(name), hash(runtime_hash(name)), From 6eb36efa49d024c28349e20e1eee267071408afe Mon Sep 17 00:00:00 2001 From: Giulio Eulisse <10544+ktf@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:33:48 +0100 Subject: [PATCH 419/726] DPL Analysis: use voids to select packs --- Framework/Foundation/include/Framework/Pack.h | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/Framework/Foundation/include/Framework/Pack.h b/Framework/Foundation/include/Framework/Pack.h index f03476f4fdc0e..2d54ecb8ea5c8 100644 --- a/Framework/Foundation/include/Framework/Pack.h +++ b/Framework/Foundation/include/Framework/Pack.h @@ -99,6 +99,33 @@ constexpr auto interleave_pack(pack, pack) template using interleaved_pack_t = decltype(interleave_pack(P1{}, P2{})); +/// Marks as void the types that do not satisfy the condition +template