From 2d34cf678ab9342c25d85e155f032f0fda63f007 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 9 Jun 2021 22:02:23 -0700
Subject: [PATCH] HiPACE (legacy) pipeline: no chunking

The parallel, independent I/O pattern here is a corner case for what
HDF5 can support, due to non-collective declarations of data sets.
Testing shows that it does not work with chunking.
---
 test/ParallelIOTest.cpp | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp
index ae32677958..a378b62055 100644
--- a/test/ParallelIOTest.cpp
+++ b/test/ParallelIOTest.cpp
@@ -527,7 +527,9 @@ TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" )
         return;
     }
 }
+#endif

+#if openPMD_HAVE_MPI
 void
 close_iteration_test( std::string file_ending )
 {
@@ -727,6 +729,19 @@ hipace_like_write( std::string file_ending )
     // the iterations we want to write
     std::vector< int > iterations = { 10, 30, 50, 70 };

+    // Parallel HDF5 + chunking does not work with independent IO pattern
+    bool const isHDF5 = file_ending == "h5";
+    std::string options = "{}";
+    if( isHDF5 )
+        options = R"(
+        {
+          "hdf5": {
+            "dataset": {
+              "chunks": "none"
+            }
+          }
+        })";
+
     // MPI communicator meta-data and file name
     int i_mpi_rank{ -1 }, i_mpi_size{ -1 };
     MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank );
@@ -748,7 +763,7 @@ hipace_like_write( std::string file_ending )
         [](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 20. ); });

     // open a parallel series
-    Series series( name, Access::CREATE, MPI_COMM_WORLD );
+    Series series( name, Access::CREATE, MPI_COMM_WORLD, options );
     series.setIterationEncoding( IterationEncoding::groupBased );
     series.flush();
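
Note (not part of the patch): below is a minimal, standalone sketch of how the same JSON option
string can be handed to a parallel Series to turn off HDF5 chunking, assuming an openPMD-api
build with MPI and HDF5 support. The Series( name, access, comm, options ) overload and the
"hdf5.dataset.chunks": "none" key are the ones used in the diff above; the file name
"hipace_like_sketch.h5" and the "E/x" record are illustrative only and do not reproduce the
full independent-iteration write pattern of the test.

    #include <openPMD/openPMD.hpp>

    #include <mpi.h>

    #include <cstdint>
    #include <string>
    #include <vector>

    int main( int argc, char ** argv )
    {
        MPI_Init( &argc, &argv );
        int mpi_rank{ -1 }, mpi_size{ -1 };
        MPI_Comm_rank( MPI_COMM_WORLD, &mpi_rank );
        MPI_Comm_size( MPI_COMM_WORLD, &mpi_size );

        // same JSON configuration as in the test: disable HDF5 chunking
        std::string const options = R"(
        {
          "hdf5": {
            "dataset": {
              "chunks": "none"
            }
          }
        })";

        // open a parallel series, passing backend options as the 4th argument
        openPMD::Series series(
            "hipace_like_sketch.h5", openPMD::Access::CREATE, MPI_COMM_WORLD, options );

        // each rank writes one row of a [mpi_size x 10] dataset
        std::vector< double > local( 10, double( mpi_rank ) );
        auto E_x = series.iterations[ 10 ].meshes[ "E" ][ "x" ];
        E_x.resetDataset( openPMD::Dataset(
            openPMD::Datatype::DOUBLE, { std::uint64_t( mpi_size ), 10 } ) );
        E_x.storeChunk( local, { std::uint64_t( mpi_rank ), 0 }, { 1, 10 } );
        series.flush();

        MPI_Finalize();
        return 0;
    }

With "chunks": "none", HDF5 datasets are created with contiguous layout, which is what the
test above relies on for its non-collective dataset declarations.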