diff --git a/CMakeLists.txt b/CMakeLists.txt index f2dcc98f82..004a20de1e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -400,6 +400,7 @@ set(CORE_SOURCE src/Format.cpp src/Iteration.cpp src/IterationEncoding.cpp + src/LoadStoreChunk.cpp src/Mesh.cpp src/ParticlePatches.cpp src/ParticleSpecies.cpp @@ -411,6 +412,7 @@ set(CORE_SOURCE src/version.cpp src/auxiliary/Date.cpp src/auxiliary/Filesystem.cpp + src/auxiliary/Future.cpp src/auxiliary/JSON.cpp src/auxiliary/JSONMatcher.cpp src/auxiliary/Memory.cpp diff --git a/include/openPMD/Dataset.hpp b/include/openPMD/Dataset.hpp index e1f0058885..b8af286e14 100644 --- a/include/openPMD/Dataset.hpp +++ b/include/openPMD/Dataset.hpp @@ -34,6 +34,18 @@ namespace openPMD using Extent = std::vector; using Offset = std::vector; +/** Selection of a region of memory for storing chunks. + * + * Used to specify a non-contiguous memory region when storing + * data chunks. This allows writing data that is not contiguous + * in memory. + */ +struct MemorySelection +{ + Offset offset; + Extent extent; +}; + class Dataset { friend class RecordComponent; diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp index 17cf6b67f4..3f667ae47f 100644 --- a/include/openPMD/Datatype.hpp +++ b/include/openPMD/Datatype.hpp @@ -420,6 +420,11 @@ inline size_t toBits(Datatype d) return toBytes(d) * CHAR_BIT; } +/** Check if a Datatype is a signed type + * + * @param d Datatype to test + * @return true if signed type (integer, floating point, complex), else false + */ constexpr bool isSigned(Datatype d); /** Compare if a Datatype is a vector type @@ -602,6 +607,13 @@ inline bool isSameFloatingPoint(Datatype d) return isSameFloatingPoint(d, determineDatatype()); } +/** Compare if two Datatypes are equivalent floating point types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are floating point and have same bitness, else + * false + */ inline bool 
isSameFloatingPoint(Datatype d1, Datatype d2) { // template @@ -629,6 +641,13 @@ inline bool isSameComplexFloatingPoint(Datatype d) return isSameComplexFloatingPoint(d, determineDatatype()); } +/** Compare if two Datatypes are equivalent complex floating point types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are complex floating point and have same bitness, + * else false + */ inline bool isSameComplexFloatingPoint(Datatype d1, Datatype d2) { // template @@ -656,6 +675,13 @@ inline bool isSameInteger(Datatype d) return isSameInteger(d, determineDatatype()); } +/** Compare if two Datatypes are equivalent integer types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are integers, same signedness and same bitness, + * else false + */ inline bool isSameInteger(Datatype d1, Datatype d2) { // template @@ -708,6 +734,13 @@ constexpr bool isChar(Datatype d) template constexpr bool isSameChar(Datatype d); +/** Compare if two Datatypes are equivalent char types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are chars with same signedness and size, else + * false + */ constexpr bool isSameChar(Datatype d1, Datatype d2); /** Comparison for two Datatypes @@ -715,6 +748,10 @@ constexpr bool isSameChar(Datatype d1, Datatype d2); * Besides returning true for the same types, identical implementations on * some platforms, e.g. if long and long long are the same or double and * long double will also return true. 
+ * + * @param d First Datatype to compare + * @param e Second Datatype to compare + * @return true if the datatypes are equivalent */ constexpr bool isSame(openPMD::Datatype d, openPMD::Datatype e); @@ -726,15 +763,34 @@ constexpr bool isSame(openPMD::Datatype d, openPMD::Datatype e); */ Datatype basicDatatype(Datatype dt); +/** Convert a scalar Datatype to its vector variant + * + * @param dt Scalar Datatype to convert + * @return Vector Datatype (e.g., INT becomes VEC_INT) + */ Datatype toVectorType(Datatype dt); +/** Convert a Datatype to its string representation + * + * @param dt Datatype to convert + * @return String representation of the Datatype + */ std::string datatypeToString(Datatype dt); +/** Convert a string to a Datatype + * + * @param s String representation of a Datatype + * @return The corresponding Datatype + */ Datatype stringToDatatype(const std::string &s); -void warnWrongDtype(std::string const &key, Datatype store, Datatype request); - -std::ostream &operator<<(std::ostream &, openPMD::Datatype const &); +/** Stream operator for Datatype + * + * @param os Output stream + * @param dt Datatype to output + * @return Reference to the stream + */ +std::ostream &operator<<(std::ostream &os, openPMD::Datatype const &dt); template constexpr auto datatypeIndex() -> size_t diff --git a/include/openPMD/Datatype.tpp b/include/openPMD/Datatype.tpp index e35f2e26b6..b09351fd77 100644 --- a/include/openPMD/Datatype.tpp +++ b/include/openPMD/Datatype.tpp @@ -223,36 +223,52 @@ namespace detail template constexpr bool is_char_v = is_char::value; - template - inline bool isSameChar() + struct IsChar { - return - // both must be char types - is_char_v && is_char_v && - // both must have equivalent sign - std::is_signed_v == std::is_signed_v && - // both must have equivalent size - sizeof(T_Char1) == sizeof(T_Char2); + template + static constexpr bool call() + { + return is_char_v; + } + template + static constexpr bool call() + { + return false; + } + }; + + 
constexpr inline bool isChar(Datatype dtype) + { + return switchType(dtype); } - template - struct IsSameChar + struct DtypeSize { - template - static bool call() + template + static constexpr size_t call() { - return isSameChar(); + return sizeof(T); } - - static constexpr char const *errorMsg = "IsSameChar"; + static constexpr char const *errorMsg = "DtypeSize"; }; + constexpr inline size_t dtypeSize(Datatype dtype) + { + return switchType(dtype); + } } // namespace detail template constexpr inline bool isSameChar(Datatype d) { - return switchType>(d); + return isSameChar(d, determineDatatype()); +} + +constexpr bool isSameChar(Datatype d1, Datatype d2) +{ + return detail::isChar(d1) && detail::isChar(d2) && + isSigned(d1) == isSigned(d2) && + detail::dtypeSize(d1) == detail::dtypeSize(d2); } namespace detail @@ -285,11 +301,6 @@ constexpr inline bool isSigned(Datatype d) return switchType(d); } -constexpr inline bool isSameChar(Datatype d, Datatype e) -{ - return isChar(d) && isChar(e) && isSigned(d) == isSigned(e); -} - constexpr bool isSame(openPMD::Datatype const d, openPMD::Datatype const e) { return diff --git a/include/openPMD/IO/ADIOS/ADIOS2File.hpp b/include/openPMD/IO/ADIOS/ADIOS2File.hpp index d34cc8ebe5..66aa47e702 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2File.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2File.hpp @@ -20,6 +20,7 @@ */ #pragma once +#include "openPMD/Dataset.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp" #include "openPMD/IO/ADIOS/ADIOS2PreloadVariables.hpp" @@ -107,11 +108,14 @@ struct WriteDataset static void call(Params &&...); }; +/** Buffered put operation with unique pointer */ struct BufferedUniquePtrPut { std::string name; Offset offset; Extent extent; + /** Optional memory selection for non-contiguous memory regions */ + std::optional memorySelection; UniquePtrWithLambda data; Datatype dtype = Datatype::UNDEFINED; diff --git 
a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 4316f5181f..656f249b6b 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -21,6 +21,7 @@ */ #pragma once +#include "openPMD/Dataset.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2FilePosition.hpp" @@ -509,6 +510,7 @@ class ADIOS2IOHandlerImpl adios2::Variable verifyDataset( Offset const &offset, Extent const &extent, + std::optional const &memorySelection, adios2::IO &IO, adios2::Engine &engine, std::string const &varName, @@ -622,6 +624,18 @@ class ADIOS2IOHandlerImpl var.SetSelection( {adios2::Dims(offset.begin(), offset.end()), adios2::Dims(extent.begin(), extent.end())}); + + if (memorySelection.has_value()) + { + var.SetMemorySelection( + {adios2::Dims( + memorySelection->offset.begin(), + memorySelection->offset.end()), + adios2::Dims( + memorySelection->extent.begin(), + memorySelection->extent.end())}); + } + return var; } @@ -629,6 +643,7 @@ class ADIOS2IOHandlerImpl { bool noGroupBased = false; bool blosc2bp5 = false; + bool memorySelection = false; } printedWarningsAlready; }; // ADIOS2IOHandlerImpl @@ -942,7 +957,7 @@ class ADIOS2IOHandler : public AbstractIOHandler try { auto params = internal::defaultParsedFlushParams; - this->flush(params); + this->flush_impl(params); } catch (std::exception const &ex) { @@ -990,6 +1005,6 @@ class ADIOS2IOHandler : public AbstractIOHandler return true; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; }; // ADIOS2IOHandler } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/macros.hpp b/include/openPMD/IO/ADIOS/macros.hpp index 8e57d9191d..5249486daa 100644 --- a/include/openPMD/IO/ADIOS/macros.hpp +++ b/include/openPMD/IO/ADIOS/macros.hpp @@ -41,6 +41,34 @@ #define openPMD_HAVE_ADIOS2_BP5 0 #endif 
+namespace openPMD +{ +namespace detail +{ + /** Trait to check if a variable supports SetMemorySelection + * + * @tparam Variable ADIOS2 variable type + */ + template + struct CanTheMemorySelectionBeReset + { + static constexpr bool value = false; + }; + + template + struct CanTheMemorySelectionBeReset< + Variable, + decltype(std::declval().SetMemorySelection())> + { + static constexpr bool value = true; + }; +} // namespace detail + +/** Whether ADIOS2 Variable supports SetMemorySelection */ +constexpr bool CanTheMemorySelectionBeReset = + detail::CanTheMemorySelectionBeReset>::value; +} // namespace openPMD + #else #define openPMD_HAS_ADIOS_2_8 0 diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index 9b7735b5ba..eccb73d742 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -265,13 +265,21 @@ class AbstractIOHandler * backends that decide to implement this operation asynchronously. */ std::future flush(internal::FlushParams const &); + /** Counter tracking the number of flush operations. This is later used to + * avoid repeated flushing in the DeferredComputation objects returned by + * the loadStoreChunk() API. (The counter is copied as a weak reference to + * the shared pointer, and the value is compared to the value upon enqueuing + * the operation. If the flush counter has proceeded past the old value, our + * operation has already been run.) */ + std::shared_ptr m_flushCounter = + std::make_shared(0); /** Process operations in queue according to FIFO. * * @return Future indicating the completion state of the operation for * backends that decide to implement this operation asynchronously. 
*/ - virtual std::future flush(internal::ParsedFlushParams &) = 0; + std::future flush(internal::ParsedFlushParams &); /** The currently used backend */ virtual std::string backendName() const = 0; @@ -315,6 +323,17 @@ class AbstractIOHandler IterationEncoding m_encoding = IterationEncoding::groupBased; OpenpmdStandard m_standard = auxiliary::parseStandard(getStandardDefault()); bool m_verify_homogeneous_extents = true; + +protected: + /** Implementation of flush operation for subclasses + * + * Do not call directly, use flush() wrapper instead. + * + * @param params Parsed flush parameters + * @return Future indicating completion state + */ + virtual std::future + flush_impl(internal::ParsedFlushParams ¶ms) = 0; }; // AbstractIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/DummyIOHandler.hpp b/include/openPMD/IO/DummyIOHandler.hpp index 8abcf20990..100711c944 100644 --- a/include/openPMD/IO/DummyIOHandler.hpp +++ b/include/openPMD/IO/DummyIOHandler.hpp @@ -44,7 +44,7 @@ class DummyIOHandler : public AbstractIOHandler /** No-op consistent with the IOHandler interface to enable library use * without IO. 
*/ - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; std::string backendName() const override; }; // DummyIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp index 07b3978b87..5e2ddfd526 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp @@ -46,7 +46,7 @@ class HDF5IOHandler : public AbstractIOHandler return "HDF5"; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; private: std::unique_ptr m_impl; diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp index abeb196b11..7b0c43129d 100644 --- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp @@ -56,7 +56,7 @@ class ParallelHDF5IOHandler : public AbstractIOHandler return "MPI_HDF5"; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; private: std::unique_ptr m_impl; diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index 25e0d6ad54..662a34f9e3 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -495,6 +495,8 @@ struct OPENPMDAPI_EXPORT Extent extent = {}; Offset offset = {}; + /** Optional memory selection for non-contiguous memory regions */ + std::optional memorySelection = std::nullopt; Datatype dtype = Datatype::UNDEFINED; auxiliary::WriteBuffer data; }; @@ -558,7 +560,9 @@ struct OPENPMDAPI_EXPORT } // in parameters - bool queryOnly = false; // query if the backend supports this + /** If true, only query if the backend supports buffer views without + * performing operation */ + bool queryOnly = false; Offset offset; Extent extent; Datatype dtype = Datatype::UNDEFINED; diff --git 
a/include/openPMD/IO/InvalidatableFile.hpp b/include/openPMD/IO/InvalidatableFile.hpp index 6ac0051cbe..aa0f8c2c6b 100644 --- a/include/openPMD/IO/InvalidatableFile.hpp +++ b/include/openPMD/IO/InvalidatableFile.hpp @@ -83,12 +83,19 @@ struct hash result_type operator()(argument_type const &s) const noexcept; }; +/** Specialization of std::less for InvalidatableFile + * + * Enables using InvalidatableFile in ordered containers like std::set + * for consistent ordering across parallel processes. + */ template <> struct less { using first_argument_type = openPMD::InvalidatableFile; using second_argument_type = first_argument_type; - using result_type = typename std::less::result_type; + using result_type = decltype(std::less<>()( + *std::declval(), + *std::declval())); result_type operator()(first_argument_type const &, second_argument_type const &) const; }; diff --git a/include/openPMD/IO/JSON/JSONIOHandler.hpp b/include/openPMD/IO/JSON/JSONIOHandler.hpp index 07e797d4b3..db8b687ed9 100644 --- a/include/openPMD/IO/JSON/JSONIOHandler.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandler.hpp @@ -59,7 +59,7 @@ class JSONIOHandler : public AbstractIOHandler return "JSON"; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; private: JSONIOHandlerImpl m_impl; diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index abce61eef7..38ee56f7ad 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -454,6 +454,12 @@ class Iteration : public Attributable namespace traits { + /** Generation policy for Iteration objects. + * + * This policy populates the cached iteration index when an Iteration + * is created or inserted into a Series, enabling constant-time lookup + * of the owning map entry. 
+ */ template <> struct GenerationPolicy { diff --git a/include/openPMD/LoadStoreChunk.hpp b/include/openPMD/LoadStoreChunk.hpp new file mode 100644 index 0000000000..89983ab0fe --- /dev/null +++ b/include/openPMD/LoadStoreChunk.hpp @@ -0,0 +1,402 @@ +#pragma once + +#include "openPMD/Dataset.hpp" +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" + +// comment to prevent this include from being moved by clang-format +#include "openPMD/DatatypeMacros.hpp" + +#include +#include + +namespace openPMD +{ +class RecordComponent; +class ConfigureStoreChunkFromBuffer; +class ConfigureLoadStoreFromBuffer; +template +class DynamicMemoryView; +class Attributable; + +namespace internal +{ + /** Internal configuration for load/store operations without buffer. Default + * values for optionally specified parameters (offset, extent) must be + * computed to create this configuration struct. */ + struct LoadStoreConfig + { + Offset offset; + Extent extent; + }; + /** Internal configuration for load/store operations with buffer. Default + * values for optionally specified parameters (offset, extent) must be + * computed to create this configuration struct. MemorySelection remains + * optional even then. */ + struct LoadStoreConfigWithBuffer + { + Offset offset; + Extent extent; + std::optional memorySelection; + }; + +} // namespace internal + +namespace auxiliary::detail +{ +#define OPENPMD_ENUMERATE_TYPES(type) , std::shared_ptr + using shared_ptr_dataset_types = auxiliary::detail::variant_tail_t< + auxiliary::detail::bottom OPENPMD_FOREACH_DATASET_DATATYPE( + OPENPMD_ENUMERATE_TYPES)>; +#undef OPENPMD_ENUMERATE_TYPES +} // namespace auxiliary::detail + +/** Base class for configuring load/store chunk operations. + * + * Actual data members of `ConfigureLoadStore<>` and methods that don't + * depend on the ChildClass template parameter. 
By extracting the members to + * this struct, we can pass them around between different instances of the + * class template. Numbers of method instantiations can be reduced. + */ +class ConfigureLoadStore +{ + friend class openPMD::RecordComponent; + +protected: + ConfigureLoadStore(RecordComponent &); + RecordComponent &m_rc; + + std::optional m_offset; + std::optional m_extent; + + bool m_unsafeNoAutomaticFlush = false; + + [[nodiscard]] auto dim() const -> uint8_t; + auto storeChunkConfig() -> internal::LoadStoreConfig; + + auto deferFlush(Attributable &); + + auto getOffset() -> Offset const &; + auto getExtent() -> Extent const &; + + // The below methods return void. + // For chaining calls, they should return *this, but this class right + // here is going to be somewhere in the inheritance chain, and the final + // class should be returned. Could be solved more elegantly with CRTP, + // but that blows up compile-time, so we make internal void functions + // and then repeat them in the final classes. + // (e.g. ConfigureLoadStoreFromBuffer::offset()) + + void offset_impl(Offset); + void extent_impl(Extent); + void unsafeNoAutomaticFlush_impl(); + +private: + auto withSharedPtr_impl_mut(std::shared_ptr data, Datatype) + -> openPMD::ConfigureLoadStoreFromBuffer; + auto withSharedPtr_impl_const(std::shared_ptr data, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + auto withUniquePtr_impl_mut(UniquePtrWithLambda, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + auto withUniquePtr_impl_const(UniquePtrWithLambda, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + auto withRawPtr_impl_mut(void *data, Datatype) + -> openPMD::ConfigureLoadStoreFromBuffer; + auto withRawPtr_impl_const(void const *data, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + +public: + using this_t = ConfigureLoadStore; + + // Configuration methods (always available) + + /** Set the offset within the dataset + * + * Optional. 
The operation will apply without offset by default (i.e. offset + * = (0, 0, ...)). + * + * @param offset Offset within the dataset + * @return Reference to this object for chaining + */ + auto offset(Offset offset) -> this_t & + { + offset_impl(std::move(offset)); + return *this; + } + /** Set the extent within the dataset + * + * Optional. The operation will apply to the entire dataset by default (i.e. + * operation extent = global dataset extent - operation offset). + * + * @param extent Extent within the dataset, counted from the offset + * @return Reference to this object for chaining + */ + auto extent(Extent extent) -> this_t & + { + extent_impl(std::move(extent)); + return *this; + } + /** Disable automatic flush after store operation + * + * The returned objects of type DeferredComputation will still return a + * buffer upon get() / operator()(), but these buffers are not guaranteed to + * be filled until explicitly flushing. + * + * @return Reference to this object for chaining + */ + auto unsafeNoAutomaticFlush() -> this_t & + { + unsafeNoAutomaticFlush_impl(); + return *this; + } + + /* + * If the type is non-const, then the return type should be + * ConfigureLoadStoreFromBuffer, but if it is a const type, Load operations + * make no sense, so the return type should be + * ConfigureStoreChunkFromBuffer<>. + */ + template + using shared_ptr_return_type = std::conditional_t< + std::is_const_v, + ConfigureStoreChunkFromBuffer, + ConfigureLoadStoreFromBuffer>; + + /* + * As loading into unique pointer types makes no sense, the case is + * simpler for unique pointers. Just remove the array extents here. + * (Our interface wrappers still support const-type unique pointers, + * but the internal logic does not handle them separately.) 
+ */ + template + using unique_ptr_return_type = openPMD::ConfigureStoreChunkFromBuffer; + + // Buffer specification methods (return specialized configurations) + template + auto withSharedPtr(std::shared_ptr) -> shared_ptr_return_type; + template + auto withUniquePtr(UniquePtrWithLambda) -> unique_ptr_return_type; + template + auto withUniquePtr(std::unique_ptr) -> unique_ptr_return_type; + template + auto withRawPtr(T *data) -> shared_ptr_return_type; + template + auto withContiguousContainer(T_ContiguousContainer &data) + -> std::enable_if_t< + auxiliary::IsContiguousContainer_v, + shared_ptr_return_type>; + + // Enqueue methods (deferred execution) + template + [[nodiscard]] auto storeSpan() -> DynamicMemoryView; + // definition for this one is in RecordComponent.tpp since it needs the + // definition of class RecordComponent. + template + [[nodiscard]] auto storeSpan(F &&createBuffer) -> DynamicMemoryView; + + template + [[nodiscard]] auto load() + -> auxiliary::DeferredComputation>; + + [[nodiscard]] auto loadVariant() -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>; +}; + +/** Configuration for storing chunks from a buffer. + * + * This class is used to configure a store chunk operation, where data is + * stored from a provided buffer into a dataset. + * This class is distinct from ConfigureLoadStoreFromBuffer, since reading + * data does not make sense on const / unique pointer types. This way, the type + * system will only allow read operations where they can actually run. + */ +class ConfigureStoreChunkFromBuffer : public ConfigureLoadStore +{ + friend class ConfigureLoadStore; + +protected: + auxiliary::WriteBuffer m_buffer; + Datatype m_datatype; + std::optional m_mem_select; + + ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer buffer, Datatype, ConfigureLoadStore &&); + + // The below methods return void. 
+ // For chaining calls, they should return *this, but this class right + // here is going to be somewhere in the inheritance chain, and the final + // class should be returned. Could be solved more elegantly with CRTP, + // but that blows up compile-time, so we make internal void functions + // and then repeat them in the final classes. + + /** Set memory selection for non-contiguous memory regions */ + void memorySelection_impl(MemorySelection); + + auto storeChunkConfig() -> internal::LoadStoreConfigWithBuffer; + +public: + using this_t = ConfigureStoreChunkFromBuffer; + + // Configuration methods (always available) + + /** Set the offset within the dataset + * + * Optional. The operation will apply without offset by default (i.e. offset + * = (0, 0, ...)). + * + * @param offset Offset within the dataset + * @return Reference to this object for chaining + */ + auto offset(Offset offset) -> this_t & + { + offset_impl(std::move(offset)); + return *this; + } + + /** Set the extent within the dataset + * + * Optional. The operation will apply to the entire dataset by default (i.e. + * operation extent = global dataset extent - operation offset). + * + * @param extent Extent within the dataset, counted from the offset + * @return Reference to this object for chaining + */ + auto extent(Extent extent) -> this_t & + { + extent_impl(std::move(extent)); + return *this; + } + + /** Disable automatic flush after store operation + * + * The returned objects of type DeferredComputation will still return a + * buffer upon get() / operator()(), but these buffers are not guaranteed to + * be filled until explicitly flushing. 
+ * + * @return Reference to this object for chaining + */ + auto unsafeNoAutomaticFlush() -> this_t & + { + unsafeNoAutomaticFlush_impl(); + return *this; + } + + /** Set memory selection for non-contiguous memory regions + * + * @param memorySelection Selection of memory region + * @return Reference to this object for chaining + */ + auto memorySelection(MemorySelection memorySelection) -> this_t & + { + memorySelection_impl(std::move(memorySelection)); + return *this; + } + + // Enqueue method (deferred execution) + + /** Store the chunk data + * + * @return Deferred computation that performs the store when invoked + */ + auto store() -> auxiliary::DeferredComputation; + + /** This intentionally shadows the parent class's load() methods in + * order to show a compile error when using load() on an object + * of this class. The parent method can still be accessed through + * typecasting if needed. + */ + template + auto load() + { + static_assert( + auxiliary::dependent_false_v, + "Cannot load chunk data into a buffer that is const or a " + "unique_ptr."); + } +}; + +/** Configuration for loading/storing chunks from/to a buffer. + * + * This class supports both loading and storing operations, allowing + * reading data into or writing data from a provided buffer. + */ +class ConfigureLoadStoreFromBuffer : public ConfigureStoreChunkFromBuffer +{ + friend class ConfigureLoadStore; + friend class RecordComponent; + + using ConfigureStoreChunkFromBuffer::ConfigureStoreChunkFromBuffer; + +public: + using this_t = ConfigureLoadStoreFromBuffer; + + // Configuration methods (always available) + + /** Set the offset within the dataset + * + * Optional. The operation will apply without offset by default (i.e. offset + * = (0, 0, ...)). 
+ * + * @param offset Offset within the dataset + * @return Reference to this object for chaining + */ + auto offset(Offset offset) -> this_t & + { + offset_impl(std::move(offset)); + return *this; + } + + /** Set the extent within the dataset + * + * Optional. The operation will apply to the entire dataset by default (i.e. + * operation extent = global dataset extent - operation offset). + * + * @param extent Extent within the dataset, counted from the offset + * @return Reference to this object for chaining + */ + auto extent(Extent extent) -> this_t & + { + extent_impl(std::move(extent)); + return *this; + } + + /** Disable automatic flush after operation + * + * The returned objects of type DeferredComputation will still return a + * buffer upon get() / operator()(), but these buffers are not guaranteed to + * be filled until explicitly flushing. + * + * @return Reference to this object for chaining + */ + auto unsafeNoAutomaticFlush() -> this_t & + { + unsafeNoAutomaticFlush_impl(); + return *this; + } + + /** Set memory selection for non-contiguous memory regions + * + * @param memorySelection Selection of memory region + * @return Reference to this object for chaining + */ + auto memorySelection(MemorySelection memorySelection) -> this_t & + { + memorySelection_impl(std::move(memorySelection)); + return *this; + } + + // Enqueue method (deferred execution) + + /** Load the chunk data into the buffer + * + * @return Deferred computation that performs the load when invoked + */ + auto load() -> auxiliary::DeferredComputation; +}; + +} // namespace openPMD + +#include "openPMD/UndefDatatypeMacros.hpp" +// comment to prevent these includes from being moved by clang-format +#include "openPMD/LoadStoreChunk.tpp" diff --git a/include/openPMD/LoadStoreChunk.tpp b/include/openPMD/LoadStoreChunk.tpp new file mode 100644 index 0000000000..f6b0fedf15 --- /dev/null +++ b/include/openPMD/LoadStoreChunk.tpp @@ -0,0 +1,76 @@ +#pragma once + +#include 
"openPMD/LoadStoreChunk.hpp" + +namespace openPMD +{ +template +auto ConfigureLoadStore::withSharedPtr(std::shared_ptr data) + -> shared_ptr_return_type +{ + using T_decayed = std::remove_cv_t>; + constexpr auto dtype = determineDatatype(); + if constexpr (std::is_const_v) + { + return withSharedPtr_impl_const(data, dtype); + } + else + { + return withSharedPtr_impl_mut(data, dtype); + } +} + +template +auto ConfigureLoadStore::withUniquePtr(UniquePtrWithLambda data) + -> unique_ptr_return_type + +{ + using T_decayed = std::remove_cv_t>; + constexpr auto dtype = determineDatatype(); + if constexpr (std::is_const_v) + { + return withUniquePtr_impl_const( + std::move(data).template static_cast_(), dtype); + } + else + { + return withUniquePtr_impl_mut( + std::move(data).template static_cast_(), dtype); + } +} + +template +auto ConfigureLoadStore::withUniquePtr(std::unique_ptr data) + -> unique_ptr_return_type +{ + return withUniquePtr(UniquePtrWithLambda(std::move(data))); +} + +template +auto ConfigureLoadStore::withRawPtr(T *data) -> shared_ptr_return_type +{ + using T_decayed = std::remove_cv_t>; + constexpr auto dtype = determineDatatype(); + if constexpr (std::is_const_v) + { + return withRawPtr_impl_const(data, dtype); + } + else + { + return withRawPtr_impl_mut(data, dtype); + } +} + +template +auto ConfigureLoadStore::withContiguousContainer(T_ContiguousContainer &data) + -> std::enable_if_t< + auxiliary::IsContiguousContainer_v, + shared_ptr_return_type> +{ + if (!m_extent.has_value() && dim() == 1) + { + m_extent = Extent{data.size()}; + } + return withRawPtr(data.data()); +} +} // namespace openPMD diff --git a/include/openPMD/ParticleSpecies.hpp b/include/openPMD/ParticleSpecies.hpp index 7309afddef..2c38a0cfe1 100644 --- a/include/openPMD/ParticleSpecies.hpp +++ b/include/openPMD/ParticleSpecies.hpp @@ -57,6 +57,11 @@ class ParticleSpecies : public Container namespace traits { + /** Generation policy for ParticleSpecies objects. 
+ * + * Links particle patches to their parent hierarchy when a species is + * created. + */ template <> struct GenerationPolicy { diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp index d06b4213f4..58cfee1478 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -22,15 +22,13 @@ #include "openPMD/Dataset.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" #include "openPMD/auxiliary/TypeTraits.hpp" #include "openPMD/auxiliary/UniquePtr.hpp" #include "openPMD/backend/Attributable.hpp" #include "openPMD/backend/BaseRecordComponent.hpp" -// comment to prevent this include from being moved by clang-format -#include "openPMD/DatatypeMacros.hpp" - #include #include #include @@ -128,6 +126,12 @@ class RecordComponent : public BaseRecordComponent friend class MeshRecordComponent; template friend T &internal::makeOwning(T &self, Series); + friend class ConfigureLoadStore; + friend class ConfigureLoadStoreFromBuffer; + friend class ConfigureStoreChunkFromBuffer; + friend struct VisitorEnqueueLoadVariantWithoutFlush; + friend struct VisitorEnqueueLoadVariantWithFlush; + friend struct VisitorLoadVariant; public: enum class Allocation @@ -214,6 +218,16 @@ class RecordComponent : public BaseRecordComponent */ bool empty() const; + /** Prepare a load/store chunk configuration object + * + * This is the entry point for the experimental new API for loading and + * storing chunks. It returns a ConfigureLoadStore object that can be used + * to specify offset, extent, and buffer for the operation. + * + * @return ConfigureLoadStore object for configuring the operation + */ + ConfigureLoadStore prepareLoadStore(); + /** Load and allocate a chunk of data * * Set offset to {0u} and extent to {-1u} for full selection. 
@@ -224,11 +238,8 @@ class RecordComponent : public BaseRecordComponent template std::shared_ptr loadChunk(Offset = {0u}, Extent = {-1u}); -#define OPENPMD_ENUMERATE_TYPES(type) , std::shared_ptr - using shared_ptr_dataset_types = auxiliary::detail::variant_tail_t< - auxiliary::detail::bottom OPENPMD_FOREACH_DATASET_DATATYPE( - OPENPMD_ENUMERATE_TYPES)>; -#undef OPENPMD_ENUMERATE_TYPES + using shared_ptr_dataset_types = + auxiliary::detail::shared_ptr_dataset_types; /** std::variant-based version of allocating loadChunk(Offset, Extent) * @@ -266,25 +277,6 @@ class RecordComponent : public BaseRecordComponent template void loadChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Load a chunk of data into pre-allocated memory, array version. - * - * @param data Preallocated, contiguous buffer, large enough to load the - * the requested data into it. - * The shared pointer must own and manage the buffer. - * Optimizations might be implemented based on this - * assumption (e.g. skipping the operation if the backend - * is the unique owner). - * The array-based overload helps avoid having to manually - * specify the delete[] destructor (C++17 feature). - * @param offset Offset within the dataset. Set to {0u} for full selection. - * @param extent Extent within the dataset, counted from the offset. - * Set to {-1u} for full selection. - * If offset is non-zero and extent is {-1u} the leftover - * extent in the record component will be selected. - */ - template - void loadChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Load a chunk of data into pre-allocated memory, raw pointer version. * * @param data Preallocated, contiguous buffer, large enough to load the @@ -324,18 +316,6 @@ class RecordComponent : public BaseRecordComponent template void storeChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Store a chunk of data from a chunk of memory, array version. 
- * - * @param data Preallocated, contiguous buffer, large enough to read the - * the specified data from it. - * The array-based overload helps avoid having to manually - * specify the delete[] destructor (C++17 feature). - * @param offset Offset within the dataset. - * @param extent Extent within the dataset, counted from the offset. - */ - template - void storeChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Store a chunk of data from a chunk of memory, unique pointer version. * * @param data Preallocated, contiguous buffer, large enough to read the @@ -497,8 +477,28 @@ class RecordComponent : public BaseRecordComponent */ RecordComponent &makeEmpty(Dataset d); - void storeChunk( - auxiliary::WriteBuffer buffer, Datatype datatype, Offset o, Extent e); + void storeChunk_impl( + auxiliary::WriteBuffer buffer, + Datatype datatype, + internal::LoadStoreConfigWithBuffer); + + template + DynamicMemoryView storeChunkSpan_impl(internal::LoadStoreConfig); + template + DynamicMemoryView storeChunkSpanCreateBuffer_impl( + internal::LoadStoreConfig, F &&createBuffer); + + template + void loadChunk_impl( + std::shared_ptr const &, internal::LoadStoreConfigWithBuffer); + void loadChunk_impl( + std::shared_ptr const &, + Datatype, + internal::LoadStoreConfigWithBuffer); + template + std::shared_ptr loadChunkAllocate_impl(internal::LoadStoreConfig); + std::shared_ptr loadChunkAllocate_impl( + Datatype, size_t dtype_size, internal::LoadStoreConfig); // clang-format off OPENPMD_protected @@ -564,6 +564,4 @@ namespace internal } // namespace openPMD -#include "openPMD/UndefDatatypeMacros.hpp" -// comment to prevent these includes from being moved by clang-format #include "RecordComponent.tpp" diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index b796ab1a93..f5d31f1faa 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -23,6 +23,7 @@ #include "openPMD/Datatype.hpp" #include 
"openPMD/Error.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Span.hpp" #include "openPMD/auxiliary/Memory.hpp" @@ -32,6 +33,7 @@ #include "openPMD/backend/Attributable.hpp" #include +#include #include namespace openPMD @@ -41,8 +43,12 @@ template inline void RecordComponent::storeChunk(std::unique_ptr data, Offset o, Extent e) { - storeChunk( - UniquePtrWithLambda(std::move(data)), std::move(o), std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withUniquePtr(std::move(data)) + .unsafeNoAutomaticFlush() + .store(); } template @@ -50,39 +56,39 @@ inline typename std::enable_if_t< auxiliary::IsContiguousContainer_v> RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e) { - uint8_t dim = getDimensionality(); + auto storeChunkConfig = prepareLoadStore(); - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u) + auto joined_dim = joinedDimension(); + if (!joined_dim.has_value() && (o.size() != 1 || o.at(0) != 0u)) { - if (joinedDimension().has_value()) - { - offset.clear(); - } - else if (dim > 1u) - { - offset = Offset(dim, 0u); - } + storeChunkConfig.offset(std::move(o)); + } + if (e.size() != 1 || e.at(0) != -1u) + { + storeChunkConfig.extent(std::move(e)); } - // extent = {-1u}: take full size - Extent extent(dim, 1u); - // avoid outsmarting the user: - // - stdlib data container implement 1D -> 1D chunk to write - if (e.size() == 1u && e.at(0) == -1u && dim == 1u) - extent.at(0) = data.size(); - else - extent = e; - - storeChunk(auxiliary::shareRaw(data.data()), offset, extent); + std::move(storeChunkConfig) + .withContiguousContainer(data) + .unsafeNoAutomaticFlush() + .store(); } template inline DynamicMemoryView RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) { + return prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + 
.storeSpan(std::forward(createBuffer)); +} + +template +inline DynamicMemoryView RecordComponent::storeChunkSpanCreateBuffer_impl( + internal::LoadStoreConfig cfg, F &&createBuffer) +{ + auto [o, e] = std::move(cfg); verifyChunk(o, e); size_t size = 1; @@ -189,4 +195,12 @@ inline auto RecordComponent::visit(Args &&...args) return switchDatasetType>( getDatatype(), *this, std::forward(args)...); } + +// definitions for LoadStoreChunk.hpp +template +auto ConfigureLoadStore::storeSpan(F &&createBuffer) -> DynamicMemoryView +{ + return m_rc.storeChunkSpanCreateBuffer_impl( + storeChunkConfig(), std::forward(createBuffer)); +} } // namespace openPMD diff --git a/include/openPMD/auxiliary/Defer.hpp b/include/openPMD/auxiliary/Defer.hpp index c6bc4e0533..804e775c70 100644 --- a/include/openPMD/auxiliary/Defer.hpp +++ b/include/openPMD/auxiliary/Defer.hpp @@ -6,6 +6,17 @@ namespace openPMD::auxiliary { +/** Defer wrapper + * + * Executes a functor when destroyed unless explicitly cancelled. + * Similar to Go's defer or C++'s experimental::scope_exit. + * + * Similar also to DeferredComputation under Future.hpp, but has another + * application scope (this: internal resource cleanup, that: public Future-like + * API) and is hence kept separate. + * + * @tparam F The functor type + */ template struct defer_type { @@ -48,8 +59,17 @@ struct defer_type auto operator=(defer_type const &) -> defer_type & = delete; }; +/** Type-erased defer wrapper for void functors */ using opaque_defer_type = defer_type>; +/** Create a defer wrapper + * + * Creates a defer wrapper that will execute the given functor when + * destroyed. 
+ * + * @param functor The functor to execute on destruction + * @return A defer wrapper + */ template auto defer(F &&functor) -> defer_type> { diff --git a/include/openPMD/auxiliary/Future.hpp b/include/openPMD/auxiliary/Future.hpp new file mode 100644 index 0000000000..e4e51dc7eb --- /dev/null +++ b/include/openPMD/auxiliary/Future.hpp @@ -0,0 +1,131 @@ +#pragma once + +#include +#include +#include + +namespace openPMD::auxiliary::detail +{ +/** Internal helper for deferred computation - executes task once */ +template +struct OneTimeTask +{ + using task_type = std::function; + // Helper struct so we get auto-generated move constructor / assignment + // operator, but can still override constructors outside + struct Members + { + task_type m_task; + bool m_task_valid = true; + }; + Members members; + + static constexpr bool noexcept_move = + std::is_move_constructible_v && + std::is_move_assignable_v; + + explicit OneTimeTask(); + OneTimeTask(task_type); + + OneTimeTask(OneTimeTask &&) noexcept(noexcept_move); + OneTimeTask(OneTimeTask const &) = delete; + + auto operator=(OneTimeTask &&) noexcept(noexcept_move) -> OneTimeTask &; + auto operator=(OneTimeTask const &) -> OneTimeTask & = delete; + + auto operator()() -> T; +}; + +/** Internal helper for cached value storage. Used when the API requires + * creation of a DeferredComputation object, but there is not actually a + * computation to run. */ +template +struct CachedValue +{ + T val; +}; +template <> +struct CachedValue +{ + // this is silly +}; +} // namespace openPMD::auxiliary::detail + +namespace openPMD::auxiliary +{ +/** A computation that is deferred until explicitly invoked. + * + * This class wraps a callable, allowing lazy evaluation. + * The computation is performed once on first invocation, repeated invocation is + * an error. Check if the computation is still valid by calling valid(). 
+ * + * Note: Some API operations may construct a DeferredComputation without any + * actual computation, instead emplacing a cached value. This is treated + * transparently to the user. In this case however, the object will not turn + * invalid upon invocation. + * + * @tparam T The return type of the computation + */ +template +class DeferredComputation +{ + using task_type = std::function; + using cached_type = std::conditional_t< + std::is_void_v, + // just something that is not void + detail::CachedValue, + T>; + std::variant, detail::CachedValue> m_task; + +public: + static constexpr bool noexcept_move = + std::is_move_constructible_v> && + std::is_move_assignable_v> && + std::is_move_constructible_v> && + std::is_move_assignable_v>; + /** Construct from a callable + * + * @param task The callable to execute + */ + DeferredComputation(task_type task); + /** Construct from a cached value + * + * @param val The pre-computed value + */ + DeferredComputation(cached_type val); + + explicit DeferredComputation(); + + DeferredComputation(DeferredComputation &&) noexcept(noexcept_move); + DeferredComputation(DeferredComputation const &) = delete; + + auto operator=(DeferredComputation &&) noexcept(noexcept_move) + -> DeferredComputation &; + auto operator=(DeferredComputation const &) + -> DeferredComputation & = delete; + + ~DeferredComputation(); + + /** Get the result of the computation + * + * @return The result of the computation + */ + auto get() -> T; + /** Invoke the computation + * + * Alias for get() + * @return The result of the computation + */ + auto operator()() -> T; + + /** Discard the computation without executing it + */ + void invalidate() &&; + + /** Check if the computation is valid + * + * @return true if the computation has not been invalidated + */ + [[nodiscard]] auto valid() const noexcept -> bool; +}; +} // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/Memory.hpp b/include/openPMD/auxiliary/Memory.hpp index 
6f8807b354..99b6b53d22 100644 --- a/include/openPMD/auxiliary/Memory.hpp +++ b/include/openPMD/auxiliary/Memory.hpp @@ -65,6 +65,7 @@ namespace auxiliary [[nodiscard]] auto release() -> UniquePtrWithLambda; }; using SharedPtr = std::shared_ptr; + using ReadSharedPtr = std::shared_ptr; /* * Use std::any publically since some compilers have trouble with * certain uses of std::variant, so hide it from them. @@ -73,17 +74,21 @@ namespace auxiliary */ std::any m_buffer; - WriteBuffer(); - WriteBuffer(std::shared_ptr ptr); - WriteBuffer(UniquePtrWithLambda ptr); + explicit WriteBuffer(); + // @todo implementation must distinguish const types + template + explicit WriteBuffer(std::shared_ptr ptr); + explicit WriteBuffer(UniquePtrWithLambda ptr); WriteBuffer(WriteBuffer &&) noexcept; WriteBuffer(WriteBuffer const &) = delete; WriteBuffer &operator=(WriteBuffer &&) noexcept; WriteBuffer &operator=(WriteBuffer const &) = delete; - WriteBuffer const &operator=(std::shared_ptr ptr); - WriteBuffer const &operator=(UniquePtrWithLambda ptr); + // @todo implementation must distinguish const types + template + WriteBuffer &operator=(std::shared_ptr const &ptr); + WriteBuffer &operator=(UniquePtrWithLambda ptr); void const *get() const; diff --git a/include/openPMD/auxiliary/Memory_internal.hpp b/include/openPMD/auxiliary/Memory_internal.hpp index bf3c8ccb4b..ee7134d9dc 100644 --- a/include/openPMD/auxiliary/Memory_internal.hpp +++ b/include/openPMD/auxiliary/Memory_internal.hpp @@ -25,6 +25,8 @@ namespace openPMD::auxiliary { // cannot use a unique_ptr inside a std::variant, so we represent it with this -using WriteBufferTypes = - std::variant; +using WriteBufferTypes = std::variant< + WriteBuffer::CopyableUniquePtr, + WriteBuffer::SharedPtr, + WriteBuffer::ReadSharedPtr>; } // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/UniquePtr.hpp b/include/openPMD/auxiliary/UniquePtr.hpp index 87f3261b45..ee17794d3e 100644 --- a/include/openPMD/auxiliary/UniquePtr.hpp 
+++ b/include/openPMD/auxiliary/UniquePtr.hpp @@ -176,10 +176,11 @@ template UniquePtrWithLambda UniquePtrWithLambda::static_cast_() && { using other_type = std::remove_extent_t; + auto original_ptr = this->release(); return UniquePtrWithLambda{ - static_cast(this->release()), - [deleter = std::move(this->get_deleter())](other_type *ptr) { - deleter(static_cast(ptr)); + static_cast(original_ptr), + [deleter = std::move(this->get_deleter()), original_ptr](other_type *) { + deleter(original_ptr); }}; } } // namespace openPMD diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index 705080be84..8767e92cb0 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -244,6 +244,7 @@ class Attributable friend class internal::AttributableData; friend class Snapshots; friend struct internal::HomogenizeExtents; + friend class ConfigureLoadStore; protected: // tag for internal constructor diff --git a/src/Datatype.cpp b/src/Datatype.cpp index 479286066c..ead2859c81 100644 --- a/src/Datatype.cpp +++ b/src/Datatype.cpp @@ -29,13 +29,6 @@ namespace openPMD { -void warnWrongDtype(std::string const &key, Datatype store, Datatype request) -{ - std::cerr << "Warning: Attribute '" << key << "' stored as " << store - << ", requested as " << request - << ". 
Casting unconditionally with possible loss of precision.\n"; -} - std::ostream &operator<<(std::ostream &os, openPMD::Datatype const &d) { using DT = openPMD::Datatype; diff --git a/src/IO/ADIOS/ADIOS2File.cpp b/src/IO/ADIOS/ADIOS2File.cpp index af2d42640c..6bddeb6bd4 100644 --- a/src/IO/ADIOS/ADIOS2File.cpp +++ b/src/IO/ADIOS/ADIOS2File.cpp @@ -23,6 +23,7 @@ #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2IOHandler.hpp" +#include "openPMD/IO/ADIOS/macros.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/auxiliary/Environment.hpp" @@ -70,6 +71,7 @@ void DatasetReader::call( adios2::Variable var = impl->verifyDataset( bp.param.offset, bp.param.extent, + std::nullopt, IO, engine, bp.name, @@ -88,6 +90,12 @@ void DatasetReader::call( template inline constexpr bool always_false_v = false; +static constexpr char const *warningMemorySelection = + "[Warning] Using a version of ADIOS2 that cannot reset memory selections " + "on a variable, once specified. When using memory selections, then please " + "specify it explicitly on all storeChunk() calls. 
Further info: " + "https://github.com/ornladios/ADIOS2/pull/4169."; + template void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) { @@ -98,7 +106,9 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) std::visit( [&](auto &&arg) { using ptr_type = std::decay_t; - if constexpr (std::is_same_v>) + if constexpr ( + std::is_same_v> || + std::is_same_v>) { auto ptr = static_cast(arg.get()); auto &engine = ba.getEngine(); @@ -106,6 +116,7 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) adios2::Variable var = ba.m_impl->verifyDataset( bp.param.offset, bp.param.extent, + bp.param.memorySelection, ba.m_IO, engine, bp.name, @@ -113,6 +124,19 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) ba.variables()); engine.Put(var, ptr); + if (bp.param.memorySelection.has_value()) + { + if constexpr (openPMD::CanTheMemorySelectionBeReset) + { + var.SetMemorySelection(); + } + else if (!ba.m_impl->printedWarningsAlready.memorySelection) + { + std::cerr << warningMemorySelection << std::endl; + ba.m_impl->printedWarningsAlready.memorySelection = + true; + } + } } else if constexpr ( std::is_same_v< @@ -123,6 +147,14 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) bput.name = std::move(bp.name); bput.offset = std::move(bp.param.offset); bput.extent = std::move(bp.param.extent); + bput.memorySelection = std::move(bp.param.memorySelection); + /* + * Note: Moving is required here since it's a unique_ptr. + * std::forward<>() would theoretically work, but it + * requires the type parameter and we don't have that + * inside the lambda. + * (ptr_type does not work for this case). 
+ */ bput.data = arg.release(); bput.dtype = bp.param.dtype; ba.m_uniquePtrPuts.push_back(std::move(bput)); @@ -170,12 +202,25 @@ struct RunUniquePtrPut adios2::Variable var = ba.m_impl->verifyDataset( bufferedPut.offset, bufferedPut.extent, + bufferedPut.memorySelection, ba.m_IO, engine, bufferedPut.name, std::nullopt, ba.variables()); engine.Put(var, ptr); + if (bufferedPut.memorySelection.has_value()) + { + if constexpr (openPMD::CanTheMemorySelectionBeReset) + { + var.SetMemorySelection(); + } + else if (!ba.m_impl->printedWarningsAlready.memorySelection) + { + std::cerr << warningMemorySelection << std::endl; + ba.m_impl->printedWarningsAlready.memorySelection = true; + } + } } static constexpr char const *errorMsg = "RunUniquePtrPut"; diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 3f16bcf155..d3b19fde0f 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -1239,6 +1240,7 @@ namespace detail adios2::Variable variable = impl->verifyDataset( params.offset, params.extent, + std::nullopt, IO, engine, varName, @@ -2626,7 +2628,7 @@ ADIOS2IOHandler::ADIOS2IOHandler( {} std::future -ADIOS2IOHandler::flush(internal::ParsedFlushParams &flushParams) +ADIOS2IOHandler::flush_impl(internal::ParsedFlushParams &flushParams) { return m_impl.flush(flushParams); } @@ -2667,7 +2669,7 @@ ADIOS2IOHandler::ADIOS2IOHandler( std::move(initialize_from), std::move(path), at, std::move(config)) {} -std::future ADIOS2IOHandler::flush(internal::ParsedFlushParams &) +std::future ADIOS2IOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/AbstractIOHandler.cpp b/src/IO/AbstractIOHandler.cpp index 5f2bdeb2f5..e54336a6c6 100644 --- a/src/IO/AbstractIOHandler.cpp +++ b/src/IO/AbstractIOHandler.cpp @@ -122,6 +122,26 @@ std::future AbstractIOHandler::flush(internal::FlushParams const ¶ms) return 
future; } +std::future AbstractIOHandler::flush(internal::ParsedFlushParams ¶ms) +{ + // The flush counter indicates the number of times that m_work has been + // emptied. Only increment it if m_work was full before operation and is + // empty after operation. + // Enqueuers can use this counter to check if the enqueued operation has + // been flushed already. + bool increase_flush_counter = !m_work.empty(); + auto res = this->flush_impl(params); + if (!m_work.empty()) + { + throw error::Internal("flush() did not clear all work!"); + } + if (increase_flush_counter) + { + ++*m_flushCounter; + } + return res; +} + bool AbstractIOHandler::fullSupportForVariableBasedEncoding() const { return false; diff --git a/src/IO/AbstractIOHandlerImpl.cpp b/src/IO/AbstractIOHandlerImpl.cpp index 4f93ff1a5b..fe5efc2d66 100644 --- a/src/IO/AbstractIOHandlerImpl.cpp +++ b/src/IO/AbstractIOHandlerImpl.cpp @@ -275,10 +275,26 @@ std::future AbstractIOHandlerImpl::flush() i.writable->parent, "->", i.writable, - "] WRITE_DATASET, offset=", - [¶meter]() { return vec_as_string(parameter.offset); }, - ", extent=", - [¶meter]() { return vec_as_string(parameter.extent); }); + "] WRITE_DATASET: ", + [&]() { + std::stringstream stream; + stream << "offset: " << vec_as_string(parameter.offset) + << " extent: " << vec_as_string(parameter.extent) + << " mem-selection: "; + if (parameter.memorySelection.has_value()) + { + stream << vec_as_string( + parameter.memorySelection->offset) + << "--" + << vec_as_string( + parameter.memorySelection->extent); + } + else + { + stream << "NONE"; + } + return stream.str(); + }); writeDataset(i.writable, parameter); break; } diff --git a/src/IO/DummyIOHandler.cpp b/src/IO/DummyIOHandler.cpp index f3b4e155d2..52c83bb624 100644 --- a/src/IO/DummyIOHandler.cpp +++ b/src/IO/DummyIOHandler.cpp @@ -39,7 +39,7 @@ DummyIOHandler::DummyIOHandler(std::string path, Access at) void DummyIOHandler::enqueue(IOTask const &) {} -std::future 
DummyIOHandler::flush(internal::ParsedFlushParams &) +std::future DummyIOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index d859ccc122..a6a97fa6ff 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -1896,6 +1896,12 @@ void HDF5IOHandlerImpl::writeDataset( "[HDF5] Writing into a dataset in a file opened as read only is " "not possible."); + if (parameters.memorySelection.has_value()) + { + throw error::OperationUnsupportedInBackend( + "HDF5", + "Non-contiguous memory selections not supported in HDF5 backend."); + } File file = requireFile("writeDataset", writable, /* checkParent = */ true); herr_t status; @@ -3586,7 +3592,7 @@ HDF5IOHandler::HDF5IOHandler( HDF5IOHandler::~HDF5IOHandler() = default; -std::future HDF5IOHandler::flush(internal::ParsedFlushParams ¶ms) +std::future HDF5IOHandler::flush_impl(internal::ParsedFlushParams ¶ms) { return m_impl->flush(params); } @@ -3605,7 +3611,7 @@ HDF5IOHandler::HDF5IOHandler( HDF5IOHandler::~HDF5IOHandler() = default; -std::future HDF5IOHandler::flush(internal::ParsedFlushParams &) +std::future HDF5IOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/HDF5/ParallelHDF5IOHandler.cpp b/src/IO/HDF5/ParallelHDF5IOHandler.cpp index 7de4960feb..0cf8b396a3 100644 --- a/src/IO/HDF5/ParallelHDF5IOHandler.cpp +++ b/src/IO/HDF5/ParallelHDF5IOHandler.cpp @@ -76,7 +76,7 @@ ParallelHDF5IOHandler::ParallelHDF5IOHandler( ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; std::future -ParallelHDF5IOHandler::flush(internal::ParsedFlushParams ¶ms) +ParallelHDF5IOHandler::flush_impl(internal::ParsedFlushParams ¶ms) { if (auto hdf5_config_it = params.backendConfig.json().find("hdf5"); hdf5_config_it != params.backendConfig.json().end()) @@ -462,7 +462,8 @@ ParallelHDF5IOHandler::ParallelHDF5IOHandler( ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = 
default; -std::future ParallelHDF5IOHandler::flush(internal::ParsedFlushParams &) +std::future +ParallelHDF5IOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/JSON/JSONIOHandler.cpp b/src/IO/JSON/JSONIOHandler.cpp index c531aabb00..0e167b801b 100644 --- a/src/IO/JSON/JSONIOHandler.cpp +++ b/src/IO/JSON/JSONIOHandler.cpp @@ -53,7 +53,7 @@ JSONIOHandler::JSONIOHandler( {} #endif -std::future JSONIOHandler::flush(internal::ParsedFlushParams &) +std::future JSONIOHandler::flush_impl(internal::ParsedFlushParams &) { return m_impl.flush(); } diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index 551b6c4358..cc36a2edb3 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -1146,6 +1146,13 @@ void JSONIOHandlerImpl::writeDataset( access::write(m_handler->m_backendAccess), "[JSON] Cannot write data in read-only mode."); + if (parameters.memorySelection.has_value()) + { + throw error::OperationUnsupportedInBackend( + "JSON", + "Non-contiguous memory selections not supported in JSON backend."); + } + auto pos = setAndGetFilePosition(writable); auto file = refreshFileFromParent(writable); auto &j = obtainJsonContents(writable); diff --git a/src/LoadStoreChunk.cpp b/src/LoadStoreChunk.cpp new file mode 100644 index 0000000000..6a8c526be6 --- /dev/null +++ b/src/LoadStoreChunk.cpp @@ -0,0 +1,398 @@ + + +#include "openPMD/LoadStoreChunk.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/Error.hpp" +#include "openPMD/RecordComponent.hpp" +#include "openPMD/Span.hpp" +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/Memory_internal.hpp" +#include "openPMD/auxiliary/ShareRawInternal.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" + +// comment to keep clang-format from reordering +#include "openPMD/DatatypeMacros.hpp" +#include "openPMD/backend/Attributable.hpp" + +#include +#include +#include + 
+namespace openPMD +{ +namespace +{ + template + auto asWriteBuffer(std::shared_ptr &&ptr) -> auxiliary::WriteBuffer + { + /* std::static_pointer_cast correctly reference-counts the pointer */ + return auxiliary::WriteBuffer( + std::static_pointer_cast(std::move(ptr))); + } + template + auto asWriteBuffer(UniquePtrWithLambda &&ptr) -> auxiliary::WriteBuffer + { + return auxiliary::WriteBuffer( + std::move(ptr).template static_cast_()); + } + + /* + * There is no backend support currently for const unique pointers. + * We support these mostly for providing a clean API to users that have such + * pointers and want to store from them, but there will be no + * backend-specific optimizations for such buffers as there are for + * non-const unique pointers. + */ + template + auto asWriteBuffer(UniquePtrWithLambda &&ptr) + -> auxiliary::WriteBuffer + { + auto raw_ptr = ptr.release(); + return asWriteBuffer( + std::shared_ptr{ + raw_ptr, + [deleter = std::move(ptr.get_deleter())]( + auto const *delete_me) { deleter(delete_me); }}); + } +} // namespace + +ConfigureLoadStore::ConfigureLoadStore(RecordComponent &rc) : m_rc(rc) +{} + +auto ConfigureLoadStore::dim() const -> uint8_t +{ + return m_rc.getDimensionality(); +} + +auto ConfigureLoadStore::storeChunkConfig() -> internal::LoadStoreConfig +{ + return internal::LoadStoreConfig{getOffset(), getExtent()}; +} + +auto ConfigureLoadStore::deferFlush(Attributable &attr) +{ + if (m_unsafeNoAutomaticFlush) + { + throw error::Internal( + "Configuring an automatic flush operating after configuring that " + "those should be switched off."); + } + auto index = attr.IOHandler()->m_flushCounter; + return [attr, + old_index = *index, + current_index = std::weak_ptr(index)]() mutable { + auto lock_current_index = current_index.lock(); + if (!lock_current_index || *lock_current_index >= old_index) + { + return; + } + attr.seriesFlush(); + }; +} + +auto ConfigureLoadStore::getOffset() -> Offset const & +{ + if (!m_offset.has_value()) + { 
+ if (m_rc.joinedDimension().has_value()) + { + m_offset = std::make_optional(); + } + else + { + m_offset = std::make_optional(dim(), 0); + } + } + return *m_offset; +} + +auto ConfigureLoadStore::getExtent() -> Extent const & +{ + if (!m_extent.has_value()) + { + m_extent = std::make_optional(m_rc.getExtent()); + if (m_offset.has_value()) + { + auto it_o = m_offset->begin(); + auto end_o = m_offset->end(); + auto it_e = m_extent->begin(); + auto end_e = m_extent->end(); + for (; it_o != end_o && it_e != end_e; ++it_e, ++it_o) + { + *it_e -= *it_o; + } + } + } + return *m_extent; +} + +auto ConfigureLoadStore::withSharedPtr_impl_mut( + std::shared_ptr data, Datatype datatype) + -> openPMD::ConfigureLoadStoreFromBuffer +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureLoadStoreFromBuffer( + auxiliary::WriteBuffer(std::move(data)), datatype, {std::move(*this)}); +} +auto ConfigureLoadStore::withSharedPtr_impl_const( + std::shared_ptr data, Datatype datatype) + -> openPMD::ConfigureStoreChunkFromBuffer +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer(std::move(data)), datatype, {std::move(*this)}); +} + +auto ConfigureLoadStore::withUniquePtr_impl_mut( + UniquePtrWithLambda data, Datatype dtype) + -> openPMD::ConfigureStoreChunkFromBuffer + +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer(std::move(data)), dtype, {std::move(*this)}); +} +auto ConfigureLoadStore::withUniquePtr_impl_const( + UniquePtrWithLambda data, Datatype dtype) + -> openPMD::ConfigureStoreChunkFromBuffer + +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + + void const *raw_ptr = data.release(); + auto 
&deleter = data.get_deleter(); + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer( + std::shared_ptr( + raw_ptr, + [deleter_lambda = std::move(deleter)](auto const *p) { + deleter_lambda(p); + })), + dtype, + {std::move(*this)}); +} + +auto ConfigureLoadStore::withRawPtr_impl_mut(void *data, Datatype dtype) + -> openPMD::ConfigureLoadStoreFromBuffer +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureLoadStoreFromBuffer( + auxiliary::WriteBuffer(auxiliary::shareRaw(data)), + dtype, + {std::move(*this)}); +} + +auto ConfigureLoadStore::withRawPtr_impl_const(void const *data, Datatype dtype) + -> openPMD::ConfigureStoreChunkFromBuffer +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer(auxiliary::shareRaw(data)), + dtype, + {std::move(*this)}); +} + +template +auto ConfigureLoadStore::storeSpan() -> DynamicMemoryView +{ + return m_rc.storeChunkSpan_impl(storeChunkConfig()); +} + +template +auto ConfigureLoadStore::load() + -> auxiliary::DeferredComputation> +{ + auto res = m_rc.loadChunkAllocate_impl(storeChunkConfig()); + if (m_unsafeNoAutomaticFlush) + { + return auxiliary::DeferredComputation>( + std::move(res)); + } + return auxiliary::DeferredComputation>( + [res_lambda = std::move(res), dflush = deferFlush(m_rc)]() mutable { + dflush(); + return res_lambda; + }); +} + +struct VisitorEnqueueLoadVariantWithFlush +{ + template + static auto + call(RecordComponent &rc, internal::LoadStoreConfig cfg, F &&dflush) + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> + { + auto res = rc.loadChunkAllocate_impl(std::move(cfg)); + return auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>( + [res_lambda = std::move(res), + dflush_lambda = std::forward(dflush)]() mutable + -> 
auxiliary::detail::shared_ptr_dataset_types { + dflush_lambda(); + return res_lambda; + }); + } +}; +struct VisitorEnqueueLoadVariantWithoutFlush +{ + template + static auto call(RecordComponent &rc, internal::LoadStoreConfig cfg) + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> + { + auto res = rc.loadChunkAllocate_impl(std::move(cfg)); + return auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>(std::move(res)); + } +}; + +auto ConfigureLoadStore::loadVariant() -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> +{ + if (m_unsafeNoAutomaticFlush) + { + return m_rc.visit( + this->storeChunkConfig()); + } + else + { + return m_rc.visit( + this->storeChunkConfig(), deferFlush(m_rc)); + } +} + +struct VisitorLoadVariant +{ + template + static auto call(RecordComponent &rc, internal::LoadStoreConfig cfg) + -> auxiliary::detail::shared_ptr_dataset_types + { + return rc.loadChunkAllocate_impl(std::move(cfg)); + } +}; + +ConfigureStoreChunkFromBuffer::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer buffer, Datatype dt, ConfigureLoadStore &&core) + : ConfigureLoadStore(std::move(core)) + , m_buffer(std::move(buffer)) + , m_datatype(dt) +{} + +auto ConfigureStoreChunkFromBuffer::storeChunkConfig() + -> internal::LoadStoreConfigWithBuffer +{ + return internal::LoadStoreConfigWithBuffer{ + this->getOffset(), this->getExtent(), m_mem_select}; +} + +auto ConfigureStoreChunkFromBuffer::store() + -> auxiliary::DeferredComputation +{ + this->m_rc.storeChunk_impl( + std::move(m_buffer), m_datatype, storeChunkConfig()); + if (m_unsafeNoAutomaticFlush) + { + return auxiliary::DeferredComputation( + auxiliary::detail::CachedValue()); + } + return auxiliary::DeferredComputation( + [dflush = deferFlush(m_rc)]() mutable -> void { dflush(); }); +} + +auto ConfigureLoadStoreFromBuffer::load() + -> auxiliary::DeferredComputation +{ + auto *shared_ptr = std::get_if( + 
&this->m_buffer.as_variant()); + if (!shared_ptr) + { + throw std::runtime_error( + "ConfigureLoadStoreFromBuffer must be instantiated with a " + "non-const shared_ptr type."); + } + this->m_rc.loadChunk_impl( + *shared_ptr, m_datatype, this->storeChunkConfig()); + if (m_unsafeNoAutomaticFlush) + { + return auxiliary::DeferredComputation( + auxiliary::detail::CachedValue()); + } + return auxiliary::DeferredComputation( + [dflush = this->deferFlush(this->m_rc)]() mutable -> void { + dflush(); + }); +} + +void ConfigureLoadStore::extent_impl(Extent extent) +{ + m_extent = std::make_optional(std::move(extent)); +} + +void ConfigureLoadStore::offset_impl(Offset offset) +{ + m_offset = std::make_optional(std::move(offset)); +} + +void ConfigureLoadStore::unsafeNoAutomaticFlush_impl() +{ + m_unsafeNoAutomaticFlush = true; +} + +void ConfigureStoreChunkFromBuffer::memorySelection_impl(MemorySelection sel) +{ + m_mem_select = std::make_optional(std::move(sel)); +} +// namespace core + +// need this for clang-tidy +#define OPENPMD_ARRAY(type) type[] +#define OPENPMD_POINTER(type) type * +#define OPENPMD_APPLY_TEMPLATE(template_, type) template_ + +#define INSTANTIATE_METHOD_TEMPLATES(dtype) \ + template auto ConfigureLoadStore::load() \ + -> auxiliary::DeferredComputation; +#define INSTANTIATE_METHOD_TEMPLATES_WITH_AND_WITHOUT_EXTENT(type) \ + INSTANTIATE_METHOD_TEMPLATES(type) \ + INSTANTIATE_METHOD_TEMPLATES(OPENPMD_ARRAY(type)) \ + template auto ConfigureLoadStore::storeSpan() -> DynamicMemoryView; + +OPENPMD_FOREACH_DATASET_DATATYPE( + INSTANTIATE_METHOD_TEMPLATES_WITH_AND_WITHOUT_EXTENT) + +#undef INSTANTIATE_METHOD_TEMPLATES +#undef INSTANTIATE_METHOD_TEMPLATES_WITH_AND_WITHOUT_EXTENT + +#undef INSTANTIATE_METHOD_TEMPLATES +#undef OPENPMD_ARRAY +#undef OPENPMD_POINTER +#undef OPENPMD_APPLY_TEMPLATE +} // namespace openPMD diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index fbf2ad0d88..a1a4680154 100644 --- a/src/RecordComponent.cpp +++ 
b/src/RecordComponent.cpp @@ -23,6 +23,7 @@ #include "openPMD/DatatypeHelpers.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/Format.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/Series.hpp" #include "openPMD/auxiliary/Environment.hpp" #include "openPMD/auxiliary/Memory.hpp" @@ -34,6 +35,9 @@ // comment so clang-format does not move this #include "openPMD/DatatypeMacros.hpp" +// comment +#include "openPMD/DatatypeMacros.hpp" + #include #include #include @@ -189,6 +193,72 @@ auto resource(T &t) -> attribute_types & return t.template resource(); } +ConfigureLoadStore RecordComponent::prepareLoadStore() +{ + return ConfigureLoadStore{*this}; +} + +namespace +{ +#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ + (defined(__apple_build_version__) && __clang_major__ < 14) + template + auto createSpanBufferFallback(size_t size) -> UniquePtrWithLambda + { + return UniquePtrWithLambda{ + new T[size], [](auto *ptr) { delete[] ptr; }}; + } +#else + template + auto createSpanBufferFallback(size_t size) -> std::unique_ptr + { + return std::unique_ptr{new T[size]}; + } +#endif +} // namespace + +template +DynamicMemoryView +RecordComponent::storeChunkSpan_impl(internal::LoadStoreConfig cfg) +{ + return storeChunkSpanCreateBuffer_impl( + std::move(cfg), &createSpanBufferFallback); +} + +template +std::shared_ptr +RecordComponent::loadChunkAllocate_impl(internal::LoadStoreConfig cfg) +{ + using T = std::remove_cv_t>; + auto res = loadChunkAllocate_impl( + determineDatatype(), sizeof(T), std::move(cfg)); + return std::static_pointer_cast(res); +} + +std::shared_ptr RecordComponent::loadChunkAllocate_impl( + Datatype dtype, size_t dtype_size, internal::LoadStoreConfig cfg) +{ + auto [o, e] = std::move(cfg); + + size_t numPoints = 1; + for (auto val : e) + { + numPoints *= val; + } + + auto newData = + std::shared_ptr(new char[numPoints * dtype_size], [](void *p) { + delete[] (static_cast(p)); + }); + prepareLoadStore() + .offset(std::move(o)) + 
.extent(std::move(e)) + .withSharedPtr_impl_mut(newData, dtype) + .unsafeNoAutomaticFlush() + .load(); + return newData; +} + RecordComponent::RecordComponent() : BaseRecordComponent(NoInit()) { setData(std::make_shared()); @@ -633,14 +703,18 @@ void RecordComponent::readBase(bool require_unit_si) } } -void RecordComponent::storeChunk( - auxiliary::WriteBuffer buffer, Datatype dtype, Offset o, Extent e) +void RecordComponent::storeChunk_impl( + auxiliary::WriteBuffer buffer, + Datatype dtype, + internal::LoadStoreConfigWithBuffer cfg) { + auto [o, e, memorySelection] = std::move(cfg); verifyChunk(dtype, o, e); Parameter dWrite; dWrite.offset = std::move(o); dWrite.extent = std::move(e); + dWrite.memorySelection = memorySelection; dWrite.dtype = dtype; /* std::static_pointer_cast correctly reference-counts the pointer */ dWrite.data = std::move(buffer); @@ -764,68 +838,80 @@ template std::shared_ptr RecordComponent::loadChunk(Offset o, Extent e) { uint8_t dim = getDimensionality(); + auto operation = prepareLoadStore(); // default arguments // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u && dim > 1u) - offset = Offset(dim, 0u); + if (o.size() != 1u || o.at(0) != 0u || dim <= 1u) + { + operation.offset(std::move(o)); + } // extent = {-1u}: take full size - Extent extent(dim, 1u); - if (e.size() == 1u && e.at(0) == -1u) + if (e.size() != 1u || e.at(0) != -1u) { - extent = getExtent(); - for (uint8_t i = 0u; i < dim; ++i) - extent[i] -= offset[i]; + operation.extent(std::move(e)); } - else - extent = e; - - uint64_t numPoints = 1u; - for (auto const &dimensionSize : extent) - numPoints *= dimensionSize; -#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ - (defined(__apple_build_version__) && __clang_major__ < 14) - auto newData = - std::shared_ptr(new T[numPoints], [](T *p) { delete[] p; }); - loadChunk(newData, offset, extent); - return newData; -#else - auto newData = std::shared_ptr[]>( - 
new std::remove_extent_t[numPoints]); - loadChunk(newData, offset, extent); - return std::static_pointer_cast(std::move(newData)); -#endif + return operation.unsafeNoAutomaticFlush().load().get(); } namespace detail { - template - struct do_convert + struct FillBuffer { - template - static std::optional call(Attribute &attr) + template + static void call( + void *target, + size_t numPoints, + RecordComponent const &component, + internal::RecordComponentData const &rc) { - if constexpr (std::is_convertible_v) + std::optional val = rc.m_constantValue.getOptional(); + + if (val.has_value()) { - return std::make_optional(attr.get()); + auto raw_ptr = static_cast(target); + std::fill(raw_ptr, raw_ptr + numPoints, *val); } else { - return std::nullopt; + std::string const data_type_str = + datatypeToString(component.getDatatype()); + std::string const requ_type_str = + datatypeToString(determineDatatype()); + std::string err_msg = + "Type conversion during chunk loading not possible! "; + err_msg += + "Data: " + data_type_str + "; Load as: " + requ_type_str; + throw error::WrongAPIUsage(err_msg); } } - static constexpr char const *errorMsg = "is_conversible"; + static constexpr char const *errorMsg = "FillBuffer"; }; } // namespace detail template -void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) +void RecordComponent::loadChunk_impl( + std::shared_ptr const &data, internal::LoadStoreConfigWithBuffer cfg) +{ + loadChunk_impl( + std::static_pointer_cast(data), + determineDatatype>>(), + std::move(cfg)); +} + +void RecordComponent::loadChunk_impl( + std::shared_ptr const &data, + Datatype dtype_requested, + internal::LoadStoreConfigWithBuffer cfg) { - Datatype dtype = determineDatatype(data); + if (cfg.memorySelection.has_value()) + { + throw error::WrongAPIUsage( + "Unsupported: Memory selections in chunk loading."); + } /* * For constant components, we implement type conversion, so there is * a separate check further below. 
@@ -836,35 +922,18 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) * * Attention: Do NOT use operator==(), doesnt work properly on Windows! */ - if (!isSame(dtype, getDatatype()) && !constant()) + if (!isSame(dtype_requested, getDatatype()) && !constant()) { std::string const data_type_str = datatypeToString(getDatatype()); - std::string const requ_type_str = - datatypeToString(determineDatatype()); + std::string const requ_type_str = datatypeToString(dtype_requested); std::string err_msg = "Type conversion during chunk loading not yet implemented! "; err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; throw std::runtime_error(err_msg); } - uint8_t dim = getDimensionality(); - - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u && dim > 1u) - offset = Offset(dim, 0u); - - // extent = {-1u}: take full size - Extent extent(dim, 1u); - if (e.size() == 1u && e.at(0) == -1u) - { - extent = getExtent(); - for (uint8_t i = 0u; i < dim; ++i) - extent[i] -= offset[i]; - } - else - extent = e; + auto dim = getDimensionality(); + auto [offset, extent, memorySelection] = std::move(cfg); if (extent.size() != dim || offset.size() != dim) { @@ -883,9 +952,6 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) "Chunk does not reside inside dataset (Dimension on index " + std::to_string(i) + ". 
DS: " + std::to_string(dse[i]) + " - Chunk: " + std::to_string(offset[i] + extent[i]) + ")"); - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk loading."); auto &rc = get(); if (constant()) @@ -894,25 +960,8 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) for (auto const &dimensionSize : extent) numPoints *= dimensionSize; - std::optional val = - switchNonVectorType>( - /* dt = */ getDatatype(), rc.m_constantValue); - - if (val.has_value()) - { - T *raw_ptr = data.get(); - std::fill(raw_ptr, raw_ptr + numPoints, *val); - } - else - { - std::string const data_type_str = datatypeToString(getDatatype()); - std::string const requ_type_str = - datatypeToString(determineDatatype()); - std::string err_msg = - "Type conversion during chunk loading not possible! "; - err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; - throw error::WrongAPIUsage(err_msg); - } + switchDatasetType( + dtype_requested, data.get(), numPoints, *this, rc); } else { @@ -926,80 +975,80 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) } template -void RecordComponent::loadChunk( - std::shared_ptr ptr, Offset offset, Extent extent) +void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) { - loadChunk( - std::static_pointer_cast(std::move(ptr)), - std::move(offset), - std::move(extent)); + // static_assert(!std::is_same_v, "EVIL"); + uint8_t dim = getDimensionality(); + auto operation = prepareLoadStore(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + if (o.size() != 1u || o.at(0) != 0u || dim <= 1u) + { + operation.offset(std::move(o)); + } + + // extent = {-1u}: take full size + if (e.size() != 1u || e.at(0) != -1u) + { + operation.extent(std::move(e)); + } + + operation.withSharedPtr(std::move(data)).unsafeNoAutomaticFlush().load(); } template void RecordComponent::loadChunkRaw(T *ptr, Offset offset, Extent extent) { - 
loadChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); + prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .withRawPtr(ptr) + .unsafeNoAutomaticFlush() + .load(); } template void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) { - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk store."); - Datatype dtype = determineDatatype(data); - - /* std::static_pointer_cast correctly reference-counts the pointer */ - storeChunk( - auxiliary::WriteBuffer(std::static_pointer_cast(data)), - dtype, - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withSharedPtr(std::move(data)) + .unsafeNoAutomaticFlush() + .store(); } template void RecordComponent::storeChunk( UniquePtrWithLambda data, Offset o, Extent e) { - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk store."); - Datatype dtype = determineDatatype<>(data); - - storeChunk( - auxiliary::WriteBuffer{std::move(data).template static_cast_()}, - dtype, - std::move(o), - std::move(e)); -} - -template -void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) -{ - storeChunk( - std::static_pointer_cast(std::move(data)), - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withUniquePtr(std::move(data)) + .unsafeNoAutomaticFlush() + .store(); } template void RecordComponent::storeChunkRaw(T const *ptr, Offset offset, Extent extent) { - storeChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); + prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .withRawPtr(ptr) + .unsafeNoAutomaticFlush() + .store(); } template DynamicMemoryView RecordComponent::storeChunk(Offset offset, Extent extent) { - return storeChunk(std::move(offset), std::move(extent), [](size_t size) { -#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ - 
(defined(__apple_build_version__) && __clang_major__ < 14) - return UniquePtrWithLambda{ - new T[size], [](auto *ptr) { delete[] ptr; }}; -#else - return std::unique_ptr{new T[size]}; -#endif - }); + return prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .storeSpan(); } template @@ -1013,10 +1062,6 @@ void RecordComponent::verifyChunk(Offset const &o, Extent const &e) const #define OPENPMD_ARRAY(type) type[] #define OPENPMD_INSTANTIATE_BASIC(type) \ - template void RecordComponent::loadChunk( \ - std::shared_ptr data, Offset o, Extent e); \ - template void RecordComponent::loadChunk( \ - std::shared_ptr data, Offset o, Extent e); \ template void RecordComponent::loadChunkRaw( \ OPENPMD_PTR(type) ptr, Offset offset, Extent extent); \ template void RecordComponent::verifyChunk( \ @@ -1024,21 +1069,28 @@ void RecordComponent::verifyChunk(Offset const &o, Extent const &e) const template DynamicMemoryView RecordComponent::storeChunk( \ Offset offset, Extent extent); \ template void RecordComponent::storeChunkRaw( \ - OPENPMD_PTR(type const) ptr, Offset offset, Extent extent); + OPENPMD_PTR(type const) ptr, Offset offset, Extent extent); \ + template DynamicMemoryView RecordComponent::storeChunkSpan_impl( \ + internal::LoadStoreConfig cfg); -#define OPENPMD_INSTANTIATE_CONST_AND_NONCONST(type) \ - template void RecordComponent::storeChunk( \ - std::shared_ptr data, Offset o, Extent e); \ - template void RecordComponent::storeChunk( \ - std::shared_ptr data, Offset o, Extent e); +#define OPENPMD_INSTANTIATE_CONST_AND_NONCONST(type) #define OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT(type) \ + template void RecordComponent::loadChunk( \ + std::shared_ptr data, Offset o, Extent e); \ template std::shared_ptr RecordComponent::loadChunk( \ Offset o, Extent e); \ template void RecordComponent::storeChunk( \ - UniquePtrWithLambda data, Offset o, Extent e); + UniquePtrWithLambda data, Offset o, Extent e); \ + template void 
RecordComponent::loadChunk_impl( \ + std::shared_ptr const &data, \ + internal::LoadStoreConfigWithBuffer cfg); \ + template std::shared_ptr RecordComponent::loadChunkAllocate_impl( \ + internal::LoadStoreConfig cfg); #define OPENPMD_INSTANTIATE_FULLMATRIX(type) \ + template void RecordComponent::storeChunk( \ + std::shared_ptr data, Offset o, Extent e); \ template RecordComponent &RecordComponent::makeConstant(type); \ template RecordComponent &RecordComponent::makeEmpty( \ uint8_t dimensions); diff --git a/src/Series.cpp b/src/Series.cpp index 038bffc7cc..14f813559d 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -523,12 +523,13 @@ void Series::flushRankTable() }; auto writeDataset = [&rank, &maxSize, this, &rankTable]( - std::shared_ptr put, size_t num_lines = 1) { + std::shared_ptr const &put, + size_t num_lines = 1) { Parameter chunk; chunk.dtype = Datatype::CHAR; chunk.offset = {uint64_t(rank), 0}; chunk.extent = {num_lines, maxSize}; - chunk.data = std::move(put); + chunk.data = put; IOHandler()->enqueue( IOTask(&rankTable.m_attributable, std::move(chunk))); }; @@ -569,7 +570,7 @@ void Series::flushRankTable() * > } */ [asRawPtr](char *) { delete asRawPtr; }}; - writeDataset(std::move(put), /* num_lines = */ size); + writeDataset(put, /* num_lines = */ size); } // Must ensure that the Writable is consistently set to written on all @@ -588,7 +589,7 @@ void Series::flushRankTable() new char[maxSize]{}, [](char const *ptr) { delete[] ptr; }}; std::copy_n(myRankInfo.c_str(), mySize, put.get()); - writeDataset(std::move(put)); + writeDataset(put); } std::string Series::particlesPath() const diff --git a/src/auxiliary/Future.cpp b/src/auxiliary/Future.cpp new file mode 100644 index 0000000000..39af555f9e --- /dev/null +++ b/src/auxiliary/Future.cpp @@ -0,0 +1,190 @@ +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/Error.hpp" +#include "openPMD/RecordComponent.hpp" + +#include +#include +#include + +// comment + +#include "openPMD/DatatypeMacros.hpp" 
+ +namespace openPMD::auxiliary::detail +{ +template +OneTimeTask::OneTimeTask() = default; + +template +OneTimeTask::OneTimeTask(task_type task) : members{std::move(task)} +{} + +template +OneTimeTask::OneTimeTask(OneTimeTask &&other) noexcept(noexcept_move) + : members(std::move(other.members)) +{ + other.members.m_task_valid = false; +} + +template +auto OneTimeTask::operator=(OneTimeTask &&other) noexcept(noexcept_move) + -> OneTimeTask & +{ + this->members = std::move(other.members); + other.members.m_task_valid = false; + return *this; +} + +template +auto OneTimeTask::operator()() -> T +{ + if (!members.m_task_valid) + { + throw error::WrongAPIUsage( + "[DeferredComputation] No valid state. Probably already " + "computed."); + } + if (!members.m_task) + { + throw error::WrongAPIUsage( + "[DeferredComputation] No valid task was specified."); + } + members.m_task_valid = false; + if constexpr (std::is_void_v) + { + std::move(members.m_task)(); + members.m_task = {}; + } + else + { + auto res = std::move(members.m_task)(); + members.m_task = {}; // reset + return res; + } +} +} // namespace openPMD::auxiliary::detail + +namespace openPMD::auxiliary +{ + +template +DeferredComputation::DeferredComputation(task_type task) + : m_task(detail::OneTimeTask{std::move(task)}) +{} + +template +DeferredComputation::DeferredComputation(cached_type cached_val) + : m_task(detail::CachedValue{std::move(cached_val)}) +{} + +template +DeferredComputation::DeferredComputation() = default; + +template +DeferredComputation::DeferredComputation(DeferredComputation &&) noexcept( + noexcept_move) = default; + +template +auto DeferredComputation::operator=(DeferredComputation &&) noexcept( + noexcept_move) -> DeferredComputation & = default; + +template +DeferredComputation::~DeferredComputation() +{ + try + { + std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) { + if (task.members.m_task_valid) + { + std::move(task)(); + } + }, + [](detail::CachedValue &) {}}, + 
this->m_task); + } + catch (std::exception const &e) + { + std::cerr << "[DeferredComputation] Error in destructor: '" << e.what() + << "'." << std::endl; + } + catch (...) + { + std::cerr << "[DeferredComputation] Unknown error in destructor." + << std::endl; + } +} + +template +auto DeferredComputation::get() -> T +{ + return std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) -> T { return std::move(task)(); }, + [](detail::CachedValue &cached) -> T { return cached.val; }}, + this->m_task); +} + +template <> +auto DeferredComputation::get() -> void +{ + std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) { std::move(task)(); }, + [](detail::CachedValue &) { return; }}, + this->m_task); +} + +template +auto DeferredComputation::operator()() -> T +{ + return get(); +} + +template +void DeferredComputation::invalidate() && +{ + std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) { + task.members.m_task = {}; + task.members.m_task_valid = false; + }, + [](detail::CachedValue const &) {}}, + this->m_task); +} + +template +auto DeferredComputation::valid() const noexcept -> bool +{ + return std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask const &task) { + return task.members.m_task_valid; + }, + [](detail::CachedValue const &) { return true; }}, + this->m_task); +} + +template class DeferredComputation; +template class DeferredComputation; +template class DeferredComputation; // used in tests + +// need this for clang-tidy +#define OPENPMD_ARRAY(type) type[] +#define OPENPMD_APPLY_TEMPLATE(template_, type) template_ + +#define INSTANTIATE_FUTURE(dtype) \ + template class DeferredComputation; +#define INSTANTIATE_FUTURE_WITH_AND_WITHOUT_EXTENT(type) \ + INSTANTIATE_FUTURE(type) INSTANTIATE_FUTURE(OPENPMD_ARRAY(type)) +OPENPMD_FOREACH_NONVECTOR_DATATYPE(INSTANTIATE_FUTURE_WITH_AND_WITHOUT_EXTENT) +#undef INSTANTIATE_FUTURE +#undef INSTANTIATE_FUTURE_WITH_AND_WITHOUT_EXTENT +#undef OPENPMD_ARRAY +#undef 
OPENPMD_APPLY_TEMPLATE +} // namespace openPMD::auxiliary + +#include "openPMD/UndefDatatypeMacros.hpp" diff --git a/src/auxiliary/Memory.cpp b/src/auxiliary/Memory.cpp index c2a0f2aa0d..b002288a88 100644 --- a/src/auxiliary/Memory.cpp +++ b/src/auxiliary/Memory.cpp @@ -21,8 +21,10 @@ #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/ChunkInfo.hpp" +#include "openPMD/Datatype.tpp" #include "openPMD/auxiliary/Memory_internal.hpp" #include "openPMD/auxiliary/UniquePtr.hpp" +#include "openPMD/backend/Variant_internal.hpp" #include #include @@ -193,8 +195,21 @@ auto WriteBuffer::CopyableUniquePtr::release() -> UniquePtrWithLambda WriteBuffer::WriteBuffer() : m_buffer(std::make_any()) {} -WriteBuffer::WriteBuffer(std::shared_ptr ptr) - : m_buffer(std::make_any(std::move(ptr))) +template +WriteBuffer::WriteBuffer(std::shared_ptr ptr) + : m_buffer //(std::make_any(std::move(ptr))) + ([&]() { + if constexpr (std::is_const_v) + { + return std::make_any( + std::static_pointer_cast(ptr)); + } + else + { + return std::make_any( + std::static_pointer_cast(ptr)); + } + }()) {} WriteBuffer::WriteBuffer(UniquePtrWithLambda ptr) : m_buffer( @@ -204,12 +219,22 @@ WriteBuffer::WriteBuffer(UniquePtrWithLambda ptr) WriteBuffer::WriteBuffer(WriteBuffer &&) noexcept = default; WriteBuffer &WriteBuffer::operator=(WriteBuffer &&) noexcept = default; -WriteBuffer const &WriteBuffer::operator=(std::shared_ptr ptr) +template +WriteBuffer &WriteBuffer::operator=(std::shared_ptr const &ptr) { - m_buffer = std::make_any(std::move(ptr)); + if constexpr (std::is_const_v) + { + m_buffer = std::make_any( + std::static_pointer_cast(ptr)); + } + else + { + m_buffer = std::make_any( + std::static_pointer_cast(ptr)); + } return *this; } -WriteBuffer const &WriteBuffer::operator=(UniquePtrWithLambda ptr) +WriteBuffer &WriteBuffer::operator=(UniquePtrWithLambda ptr) { m_buffer = std::make_any(CopyableUniquePtr(std::move(ptr))); @@ -226,4 +251,22 @@ void const *WriteBuffer::get() const }, 
as_variant()); } + +#define OPENPMD_INSTANTIATE(dtype) \ + template WriteBuffer::WriteBuffer(std::shared_ptr); \ + template WriteBuffer &WriteBuffer::operator=( \ + std::shared_ptr const &); + +#ifndef DOXYGEN_SHOULD_SKIP_THIS + +OPENPMD_FOREACH_DATASET_DATATYPE(OPENPMD_INSTANTIATE) +template WriteBuffer::WriteBuffer(std::shared_ptr); +template WriteBuffer &WriteBuffer::operator=(std::shared_ptr const &); +template WriteBuffer::WriteBuffer(std::shared_ptr); +template WriteBuffer & +WriteBuffer::operator=(std::shared_ptr const &); + +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +#undef OPENPMD_INSTANTIATE } // namespace openPMD::auxiliary diff --git a/src/auxiliary/UniquePtr.cpp b/src/auxiliary/UniquePtr.cpp index 6625ca3a47..828c33a785 100644 --- a/src/auxiliary/UniquePtr.cpp +++ b/src/auxiliary/UniquePtr.cpp @@ -58,7 +58,7 @@ namespace auxiliary OPENPMD_FOREACH_DATASET_DATATYPE( OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT) - OPENPMD_INSTANTIATE(void) + OPENPMD_INSTANTIATE(void) OPENPMD_INSTANTIATE(void const) #undef OPENPMD_INSTANTIATE #undef OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT @@ -99,12 +99,16 @@ UniquePtrWithLambda::UniquePtrWithLambda( std::unique_ptr); #define OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT(type) \ - OPENPMD_INSTANTIATE(type) OPENPMD_INSTANTIATE(OPENPMD_ARRAY(type)) + OPENPMD_INSTANTIATE(type) \ + OPENPMD_INSTANTIATE(OPENPMD_ARRAY(type)) \ + OPENPMD_INSTANTIATE(type const) \ + OPENPMD_INSTANTIATE(OPENPMD_ARRAY(type const)) -OPENPMD_FOREACH_DATASET_DATATYPE(OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT) +OPENPMD_FOREACH_NONVECTOR_DATATYPE(OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT) // Instantiate this directly, do not instantiate the // `std::unique_ptr`-based constructor. 
template class UniquePtrWithLambda; +template class UniquePtrWithLambda; #undef OPENPMD_INSTANTIATE #undef OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT #undef OPENPMD_ARRAY diff --git a/src/binding/python/PatchRecordComponent.cpp b/src/binding/python/PatchRecordComponent.cpp index 5887ba7d03..c68cdbc16f 100644 --- a/src/binding/python/PatchRecordComponent.cpp +++ b/src/binding/python/PatchRecordComponent.cpp @@ -130,7 +130,9 @@ void init_PatchRecordComponent(py::module &m) switch (dtype) { case DT::BOOL: - return prc.store(idx, *static_cast(buf.ptr)); + throw std::runtime_error( + "make_constant: " + "Boolean type not supported!"); break; case DT::SHORT: return prc.store(idx, *static_cast(buf.ptr)); diff --git a/test/AuxiliaryTest.cpp b/test/AuxiliaryTest.cpp index ee0b029473..3612b13adb 100644 --- a/test/AuxiliaryTest.cpp +++ b/test/AuxiliaryTest.cpp @@ -19,6 +19,8 @@ * If not, see . */ // expose private and protected members for invasive testing +#include "openPMD/Error.hpp" +#include "openPMD/auxiliary/Future.hpp" #if openPMD_USE_INVASIVE_TESTS #define OPENPMD_private public: #define OPENPMD_protected public: @@ -538,3 +540,31 @@ TEST_CASE("filesystem_test", "[auxiliary]") REQUIRE(!remove_file("./nonexistent_file_in_cmake_bin_directory")); #endif } + +TEST_CASE("future_test", "[auxiliary]") +{ + using task_type = auxiliary::DeferredComputation; + size_t counter = 0; + + auto make_task = [&counter]() { + counter = 0; + return task_type{[&counter]() { + ++counter; + return "success"; + }}; + }; + + auto move_construct = make_task(); + task_type move_constructed(std::move(move_construct)); + REQUIRE(counter == 0); + REQUIRE(move_constructed() == "success"); + REQUIRE(counter == 1); + REQUIRE_THROWS_AS(move_constructed(), error::WrongAPIUsage); + + auto move_assign = make_task(); + task_type move_assigned = std::move(move_assign); + REQUIRE(counter == 0); + REQUIRE(move_assigned() == "success"); + REQUIRE(counter == 1); + REQUIRE_THROWS_AS(move_assigned(), 
error::WrongAPIUsage); +} diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 8a2be5f27c..903363538d 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1270,7 +1270,7 @@ TEST_CASE("use_count_test", "[core]") pprc.resetDataset(Dataset(determineDatatype(), {4})); pprc.store(0, static_cast(1)); REQUIRE( - std::get>( + std::get>( static_cast *>( pprc.get().m_chunks.front().parameter.get()) ->data.as_variant()) diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 9c28f52945..c72b9b1574 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -454,18 +454,51 @@ void available_chunks_test(std::string const &file_ending) } )END"; - std::vector data{2, 4, 6, 8}; + std::vector xdata{2, 4, 6, 8}; + std::vector ydata{0, 0, 0, 0, 0, // + 0, 1, 2, 3, 0, // + 0, 4, 5, 6, 0, // + 0, 7, 8, 9, 0, // + 0, 0, 0, 0, 0}; + std::vector ydata_firstandlastrow{-1, -1, -1}; { Series write(name, Access::CREATE, MPI_COMM_WORLD, parameters.str()); Iteration it0 = write.iterations[0]; auto E_x = it0.meshes["E"]["x"]; E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); - E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); + E_x.storeChunk(xdata, {mpi_rank, 0}, {1, 4}); + auto E_y = it0.meshes["E"]["y"]; + E_y.resetDataset({Datatype::INT, {5, 3ul * mpi_size}}); + E_y.prepareLoadStore() + .withContiguousContainer(ydata_firstandlastrow) + .offset({0, 3ul * mpi_rank}) + .extent({1, 3}) + .store(); + E_y.prepareLoadStore() + .offset({1, 3ul * mpi_rank}) + .extent({3, 3}) + .withContiguousContainer(ydata) + .memorySelection({{1, 1}, {5, 5}}) + .store(); + // if condition checks if this PR is available in ADIOS2: + // https://github.com/ornladios/ADIOS2/pull/4169 + if constexpr (CanTheMemorySelectionBeReset) + { + E_y.prepareLoadStore() + .withContiguousContainer(ydata_firstandlastrow) + .offset({4, 3ul * mpi_rank}) + .extent({1, 3}) + .store(); + } it0.close(); } { - Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); + Series read( + name, + Access::READ_ONLY, + 
MPI_COMM_WORLD, + R"({"verify_homogeneous_extents": false})"); Iteration it0 = read.iterations[0]; auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); @@ -492,6 +525,41 @@ void available_chunks_test(std::string const &file_ending) { REQUIRE(ranks[i] == i); } + + auto E_y = it0.meshes["E"]["y"]; + auto width = E_y.getExtent()[1]; + auto first_row = + E_y.prepareLoadStore().extent({1, width}).load().get(); + auto middle_rows = E_y.prepareLoadStore() + .offset({1, 0}) + .extent({3, width}) + .load() + .get(); + auto last_row = E_y.prepareLoadStore().offset({4, 0}).load().get(); + read.flush(); + + for (auto row : [&]() -> std::vector *> { + if constexpr (CanTheMemorySelectionBeReset) + { + return {&first_row, &last_row}; + } + else + { + return {&first_row}; + } + }()) + { + for (size_t i = 0; i < width; ++i) + { + REQUIRE(row->get()[i] == -1); + } + } + for (size_t i = 0; i < width * 3; ++i) + { + size_t row = i / width; + int required_value = row * 3 + (i % 3) + 1; + REQUIRE(middle_rows.get()[i] == required_value); + } } } diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 49aab1db18..ae6297ad5f 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -942,7 +942,11 @@ inline void constant_scalar(std::string const &file_ending) new unsigned int[6], [](unsigned int const *p) { delete[] p; }); unsigned int e{0}; std::generate(E.get(), E.get() + 6, [&e] { return e++; }); - E_y.storeChunk(std::move(E), {0, 0, 0}, {1, 2, 3}); + // check that const-type unique pointers work in the builder pattern + E_y.prepareLoadStore() + .extent({1, 2, 3}) + .withUniquePtr(std::move(E).static_cast_()) + .store(); // store a number of predefined attributes in E Mesh &E_mesh = s.snapshots()[1].meshes["E"]; @@ -1753,13 +1757,17 @@ inline void write_test( auto opaqueTypeDataset = rc.visit(); auto variantTypeDataset = rc.loadChunkVariant(); + auto variantTypeDataset2 = rc.prepareLoadStore().loadVariant().get(); rc.seriesFlush(); - std::visit( - 
[](auto &&shared_ptr) { - std::cout << "First value in loaded chunk: '" << shared_ptr.get()[0] - << '\'' << std::endl; - }, - variantTypeDataset); + for (auto ptr : {&variantTypeDataset, &variantTypeDataset2}) + { + std::visit( + [](auto &&shared_ptr) { + std::cout << "First value in loaded chunk: '" + << shared_ptr.get()[0] << '\'' << std::endl; + }, + *ptr); + } #ifndef _WIN32 if (test_rank_table)