diff --git a/src/ansys/dpf/core/operators/result/migrate_to_h5dpf.py b/src/ansys/dpf/core/operators/result/migrate_to_h5dpf.py
index b5374a7965d..cbfd5ca7e9f 100644
--- a/src/ansys/dpf/core/operators/result/migrate_to_h5dpf.py
+++ b/src/ansys/dpf/core/operators/result/migrate_to_h5dpf.py
@@ -25,6 +25,8 @@ class migrate_to_h5dpf(Operator):
Parameters
----------
+ h5_chunk_size: int or GenericDataContainer, optional
+ Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size in each dimension.
dataset_size_compression_threshold: int or GenericDataContainer, optional
Integer value that defines the minimum dataset size (in bytes) to use h5 native compression. Applicable for arrays of floats, doubles, and integers.
h5_native_compression: int or DataTree or GenericDataContainer, optional
@@ -58,6 +60,8 @@ class migrate_to_h5dpf(Operator):
>>> op = dpf.operators.result.migrate_to_h5dpf()
>>> # Make input connections
+ >>> my_h5_chunk_size = int()
+ >>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_dataset_size_compression_threshold = int()
>>> op.inputs.dataset_size_compression_threshold.connect(my_dataset_size_compression_threshold)
>>> my_h5_native_compression = int()
@@ -81,6 +85,7 @@ class migrate_to_h5dpf(Operator):
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.result.migrate_to_h5dpf(
+ ... h5_chunk_size=my_h5_chunk_size,
... dataset_size_compression_threshold=my_dataset_size_compression_threshold,
... h5_native_compression=my_h5_native_compression,
... export_floats=my_export_floats,
@@ -99,6 +104,7 @@ class migrate_to_h5dpf(Operator):
def __init__(
self,
+ h5_chunk_size=None,
dataset_size_compression_threshold=None,
h5_native_compression=None,
export_floats=None,
@@ -115,6 +121,8 @@ def __init__(
super().__init__(name="hdf5::h5dpf::migrate_file", config=config, server=server)
self._inputs = InputsMigrateToH5Dpf(self)
self._outputs = OutputsMigrateToH5Dpf(self)
+ if h5_chunk_size is not None:
+ self.inputs.h5_chunk_size.connect(h5_chunk_size)
if dataset_size_compression_threshold is not None:
self.inputs.dataset_size_compression_threshold.connect(
dataset_size_compression_threshold
@@ -151,6 +159,12 @@ def _spec() -> Specification:
spec = Specification(
description=description,
map_input_pin_spec={
+ -7: PinSpecification(
+ name="h5_chunk_size",
+ type_names=["int32", "generic_data_container"],
+ optional=True,
+ document=r"""Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size in each dimension.""",
+ ),
-5: PinSpecification(
name="dataset_size_compression_threshold",
type_names=["int32", "generic_data_container"],
@@ -279,6 +293,8 @@ class InputsMigrateToH5Dpf(_Inputs):
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.migrate_to_h5dpf()
+ >>> my_h5_chunk_size = int()
+ >>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_dataset_size_compression_threshold = int()
>>> op.inputs.dataset_size_compression_threshold.connect(my_dataset_size_compression_threshold)
>>> my_h5_native_compression = int()
@@ -303,6 +319,8 @@ class InputsMigrateToH5Dpf(_Inputs):
def __init__(self, op: Operator):
super().__init__(migrate_to_h5dpf._spec().inputs, op)
+ self._h5_chunk_size = Input(migrate_to_h5dpf._spec().input_pin(-7), -7, op, -1)
+ self._inputs.append(self._h5_chunk_size)
self._dataset_size_compression_threshold = Input(
migrate_to_h5dpf._spec().input_pin(-5), -5, op, -1
)
@@ -336,6 +354,27 @@ def __init__(self, op: Operator):
)
self._inputs.append(self._filtering_workflow)

+ @property
+ def h5_chunk_size(self) -> Input:
+ r"""Allows connecting the h5_chunk_size input to the operator.
+
+ Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size in each dimension.
+
+ Returns
+ -------
+ input:
+ An Input instance for this pin.
+
+ Examples
+ --------
+ >>> from ansys.dpf import core as dpf
+ >>> op = dpf.operators.result.migrate_to_h5dpf()
+ >>> my_h5_chunk_size = int()
+ >>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
+ >>> # or
+ >>> op.inputs.h5_chunk_size(my_h5_chunk_size)
+ """
+ return self._h5_chunk_size
+
@property
def dataset_size_compression_threshold(self) -> Input:
r"""Allows connecting the dataset_size_compression_threshold input to the operator.
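
For orientation, a minimal usage sketch of the new pin on the migration operator. Only h5_chunk_size, dataset_size_compression_threshold, h5_native_compression, and the operator itself come from this diff; the data_sources and filename pins, the migrated_file output name, and the file paths are assumptions for illustration.

from ansys.dpf import core as dpf

# Pin -7: chunk size in KB; 2048 KB = 2 MB chunks.
op = dpf.operators.result.migrate_to_h5dpf(
    h5_chunk_size=2048,
    dataset_size_compression_threshold=4096,  # compress datasets of 4 KB or more
    h5_native_compression=5,                  # existing pin, accepts an int level
)
# Assumed pins, not shown in the hunks above:
op.inputs.data_sources.connect(dpf.DataSources(r"input.rst"))
op.inputs.filename.connect(r"migrated.h5")
migrated = op.outputs.migrated_file()  # assumed output name
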
diff --git a/src/ansys/dpf/core/operators/serialization/hdf5dpf_generate_result_file.py b/src/ansys/dpf/core/operators/serialization/hdf5dpf_generate_result_file.py
index 145de14efd3..79df86b80f6 100644
--- a/src/ansys/dpf/core/operators/serialization/hdf5dpf_generate_result_file.py
+++ b/src/ansys/dpf/core/operators/serialization/hdf5dpf_generate_result_file.py
@@ -21,6 +21,8 @@ class hdf5dpf_generate_result_file(Operator):
Parameters
----------
+ h5_chunk_size: int, optional
+ Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size in each dimension.
append_mode: bool, optional
Experimental: Allow appending chunked data to the file. This disables fields container content deduplication.
dataset_size_compression_threshold: int, optional
@@ -55,6 +57,8 @@ class hdf5dpf_generate_result_file(Operator):
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file()
>>> # Make input connections
+ >>> my_h5_chunk_size = int()
+ >>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_append_mode = bool()
>>> op.inputs.append_mode.connect(my_append_mode)
>>> my_dataset_size_compression_threshold = int()
@@ -78,6 +82,7 @@ class hdf5dpf_generate_result_file(Operator):
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file(
+ ... h5_chunk_size=my_h5_chunk_size,
... append_mode=my_append_mode,
... dataset_size_compression_threshold=my_dataset_size_compression_threshold,
... h5_native_compression=my_h5_native_compression,
@@ -96,6 +101,7 @@ class hdf5dpf_generate_result_file(Operator):
def __init__(
self,
+ h5_chunk_size=None,
append_mode=None,
dataset_size_compression_threshold=None,
h5_native_compression=None,
@@ -114,6 +120,8 @@ def __init__(
)
self._inputs = InputsHdf5DpfGenerateResultFile(self)
self._outputs = OutputsHdf5DpfGenerateResultFile(self)
+ if h5_chunk_size is not None:
+ self.inputs.h5_chunk_size.connect(h5_chunk_size)
if append_mode is not None:
self.inputs.append_mode.connect(append_mode)
if dataset_size_compression_threshold is not None:
@@ -144,6 +152,12 @@ def _spec() -> Specification:
spec = Specification(
description=description,
map_input_pin_spec={
+ -7: PinSpecification(
+ name="h5_chunk_size",
+ type_names=["int32"],
+ optional=True,
+ document=r"""Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size in each dimension.""",
+ ),
-6: PinSpecification(
name="append_mode",
type_names=["bool"],
@@ -270,6 +284,8 @@ class InputsHdf5DpfGenerateResultFile(_Inputs):
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file()
+ >>> my_h5_chunk_size = int()
+ >>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_append_mode = bool()
>>> op.inputs.append_mode.connect(my_append_mode)
>>> my_dataset_size_compression_threshold = int()
@@ -294,6 +310,10 @@ class InputsHdf5DpfGenerateResultFile(_Inputs):
def __init__(self, op: Operator):
super().__init__(hdf5dpf_generate_result_file._spec().inputs, op)
+ self._h5_chunk_size = Input(
+ hdf5dpf_generate_result_file._spec().input_pin(-7), -7, op, -1
+ )
+ self._inputs.append(self._h5_chunk_size)
self._append_mode = Input(
hdf5dpf_generate_result_file._spec().input_pin(-6), -6, op, -1
)
@@ -335,6 +355,27 @@ def __init__(self, op: Operator):
)
self._inputs.append(self._input_name2)

+ @property
+ def h5_chunk_size(self) -> Input:
+ r"""Allows connecting the h5_chunk_size input to the operator.
+
+ Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size in each dimension.
+
+ Returns
+ -------
+ input:
+ An Input instance for this pin.
+
+ Examples
+ --------
+ >>> from ansys.dpf import core as dpf
+ >>> op = dpf.operators.serialization.hdf5dpf_generate_result_file()
+ >>> my_h5_chunk_size = int()
+ >>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
+ >>> # or
+ >>> op.inputs.h5_chunk_size(my_h5_chunk_size)
+ """
+ return self._h5_chunk_size
+
@property
def append_mode(self) -> Input:
r"""Allows connecting the append_mode input to the operator.
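
A matching sketch for the serialization operator. Here h5_chunk_size and append_mode come from this diff, while the filename pin and the placeholder path are assumptions; Operator.run() is the generic PyDPF call for evaluating an operator when no output needs to be fetched.

from ansys.dpf import core as dpf

op = dpf.operators.serialization.hdf5dpf_generate_result_file(
    h5_chunk_size=1024,  # 1 MB chunks: the compressed-mode default, made explicit
    append_mode=True,    # experimental chunked appending (existing pin -6)
)
op.inputs.filename.connect(r"results.h5")  # assumed pin for the output file
op.run()
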
diff --git a/src/ansys/dpf/gatebin/Ans.Dpf.GrpcClient.dll b/src/ansys/dpf/gatebin/Ans.Dpf.GrpcClient.dll
index 0a992af2d3d..3a47b480637 100644
Binary files a/src/ansys/dpf/gatebin/Ans.Dpf.GrpcClient.dll and b/src/ansys/dpf/gatebin/Ans.Dpf.GrpcClient.dll differ
diff --git a/src/ansys/dpf/gatebin/DPFClientAPI.dll b/src/ansys/dpf/gatebin/DPFClientAPI.dll
index 1f403dfe224..7a2636f0a09 100644
Binary files a/src/ansys/dpf/gatebin/DPFClientAPI.dll and b/src/ansys/dpf/gatebin/DPFClientAPI.dll differ
diff --git a/src/ansys/dpf/gatebin/libAns.Dpf.GrpcClient.so b/src/ansys/dpf/gatebin/libAns.Dpf.GrpcClient.so
index 2e60e2e0402..c074c09c534 100644
Binary files a/src/ansys/dpf/gatebin/libAns.Dpf.GrpcClient.so and b/src/ansys/dpf/gatebin/libAns.Dpf.GrpcClient.so differ
diff --git a/src/ansys/dpf/gatebin/libDPFClientAPI.so b/src/ansys/dpf/gatebin/libDPFClientAPI.so
index 56db8ac6e43..f577e907481 100644
Binary files a/src/ansys/dpf/gatebin/libDPFClientAPI.so and b/src/ansys/dpf/gatebin/libDPFClientAPI.so differ
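
Since both operators register the chunk size on pin -7 (see the map_input_pin_spec entries above), the value can also be connected through the generic pin-number API when an operator is created from its registry name; a sketch, with the name taken from this diff:

from ansys.dpf import core as dpf

op = dpf.Operator("hdf5::h5dpf::migrate_file")
op.connect(-7, 2048)  # pin -7 = h5_chunk_size, value in KB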