2 changes: 1 addition & 1 deletion .github/workflows/test_docker.yml
@@ -136,7 +136,7 @@ jobs:
- name: "Test API test_server"
uses: nick-fields/retry@v3
with:
timeout_minutes: 5
timeout_minutes: 8
max_attempts: 2
shell: bash
command: |
6 changes: 4 additions & 2 deletions doc/source/_static/dpf_operators.html
@@ -14992,7 +14992,8 @@ <h2 class="h2-main">Configurating operators</h2>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="4" ellipsis = "false"></pin-number-optional></td><td><pin-name name="rel_stat_covar_matrix"></pin-name></td><td><req-type typeName="(fields_container)"></req-type></td><td><div class = "pin-des-text"><p>Fields container containing covariance matrices from a psd file: covariance matrix terms for displacement/velocity/acceleration mode-static shapes</p>
</div></td></tr></tbody></table></div><h2 class="op-des-h2">Outputs</h2><div><table class="pin-box"><tbody><tr><td><pin-number n="0" ellipsis = "false"></pin-number></td><td><pin-name name="psd"></pin-name></td><td><req-type typeName="(fields_container)"></req-type></td><td><div class = "pin-des-text"><p>PSD solution per label</p>
</div></td></tr></tbody></table></div><h2 class="op-des-h2">Configurations</h2><config-spec name="mutex" default="false" doc="If this option is set to true, the shared memory is prevented from being simultaneously accessed by multiple threads." types="bool" ></config-spec><config-spec name="num_threads" default="0" doc="Number of threads to use to run in parallel" types="int32" ></config-spec><config-spec name="run_in_parallel" default="true" doc="Loops are allowed to run in parallel if the value of this config is set to true." types="bool" ></config-spec><h2 class="op-des-h2">Scripting</h2><scripting-part scripting_name="expansion_psd" license="any_dpf_supported_increments" cat="math" plugin="core" cpp-name="expansion::psd"></scripting-part><h2 class="op-des-h2">Changelog</h2><op-changelog content='{"0.0.0":"New","0.0.1":"Fix handling of empty fields in mode shapes."}'></op-changelog></div><div class="operator" id="hdf5dpf generate result file" scripting_name="hdf5dpf_generate_result_file"plugin="core"cat="serialization"><h1 class="op-des-h1">serialization: hdf5dpf generate result file</h1><figure class="figure-op-des"> <figcaption > Description </figcaption><div class = "figure-op-des-text"><p>Generate a dpf result file from provided information.</p>
</div></figure><div class="op-version">Version 0.0.0</div><h2 class="op-des-h2">Inputs</h2><div><table class="pin-box"><tbody><tr><td><pin-number-optional n="-6" ellipsis = "false"></pin-number-optional></td><td><pin-name name="append_mode"></pin-name></td><td><req-type typeName="(bool)"></req-type></td><td><div class = "pin-des-text"><p>Experimental: Allow appending chunked data to the file. This disables fields container content deduplication.</p>
</div></figure><div class="op-version">Version 0.0.0</div><h2 class="op-des-h2">Inputs</h2><div><table class="pin-box"><tbody><tr><td><pin-number-optional n="-7" ellipsis = "false"></pin-number-optional></td><td><pin-name name="h5_chunk_size"></pin-name></td><td><req-type typeName="(int32)"></req-type></td><td><div class = "pin-des-text"><p>Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-6" ellipsis = "false"></pin-number-optional></td><td><pin-name name="append_mode"></pin-name></td><td><req-type typeName="(bool)"></req-type></td><td><div class = "pin-des-text"><p>Experimental: Allow appending chunked data to the file. This disables fields container content deduplication.</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-5" ellipsis = "false"></pin-number-optional></td><td><pin-name name="dataset_size_compression_threshold"></pin-name></td><td><req-type typeName="(int32)"></req-type></td><td><div class = "pin-des-text"><p>Integer value that defines the minimum dataset size (in bytes) to use h5 native compression Applicable for arrays of floats, doubles and integers.</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-2" ellipsis = "false"></pin-number-optional></td><td><pin-name name="h5_native_compression"></pin-name></td><td><req-type typeName="(int32 | abstract_data_tree)"></req-type></td><td><div class = "pin-des-text"><p>Integer value / DataTree that defines the h5 native compression used For Integer Input {0: No Compression (default); 1-9: GZIP Compression : 9 provides maximum compression but at the slowest speed.}For DataTree Input {type: None / GZIP / ZSTD; level: GZIP (1-9) / ZSTD (1-20); num_threads: ZSTD (&gt;0)}</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-1" ellipsis = "false"></pin-number-optional></td><td><pin-name name="export_floats"></pin-name></td><td><req-type typeName="(bool)"></req-type></td><td><div class = "pin-des-text"><p>converts double to float to reduce file size (default is true)</p>
@@ -15003,7 +15004,8 @@ <h2 class="h2-main">Configurating operators</h2>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="4" ellipsis = "true"></pin-number-optional></td><td><pin-name name="input_name"></pin-name></td><td><req-type typeName="(string | any)"></req-type></td><td><div class = "pin-des-text"><p>Set of even and odd pins to serialize results. Odd pins (4, 6, 8...) are strings, and they represent the names of the results to be serialized. Even pins (5, 7, 9...) are DPF types, and they represent the results to be serialized. They should go in pairs (for each result name, there should be a result) and connected sequentially.</p>
</div></td></tr></tbody></table></div><h2 class="op-des-h2">Outputs</h2><div><table class="pin-box"><tbody><tr><td><pin-number n="0" ellipsis = "false"></pin-number></td><td><pin-name name="data_sources"></pin-name></td><td><req-type typeName="(data_sources)"></req-type></td><td><div class = "pin-des-text"><p>data_sources filled with the H5 generated file path.</p>
</div></td></tr></tbody></table></div><h2 class="op-des-h2">Configurations</h2><config-spec name="evaluate_inputs_before_run" default="false" doc="If this option is set to true, all input pins of the operator will be evaluated before entering the run method to maintain a correct Operator status." types="bool" ></config-spec><config-spec name="mutex" default="false" doc="If this option is set to true, the shared memory is prevented from being simultaneously accessed by multiple threads." types="bool" ></config-spec><h2 class="op-des-h2">Scripting</h2><scripting-part scripting_name="hdf5dpf_generate_result_file" license="none" cat="serialization" plugin="core" cpp-name="hdf5::h5dpf::make_result_file"></scripting-part><h2 class="op-des-h2">Changelog</h2><op-changelog content='{"0.0.0":"New"}'></op-changelog></div><div class="operator" id="migrate to h5dpf" scripting_name="migrate_to_h5dpf"plugin="core"cat="result"><h1 class="op-des-h1">result: migrate to h5dpf</h1><figure class="figure-op-des"> <figcaption > Description </figcaption><div class = "figure-op-des-text"><p>Read mesh properties from the results files contained in the streams or data sources and make those properties available through a mesh selection manager in output.User can input a GenericDataContainer that will map an item to a result name. Example of Map: {{ default: wf1}, {EUL: wf2}, {ENG_SE: wf3}}.</p>
</div></figure><div class="op-version">Version 0.0.0</div><h2 class="op-des-h2">Inputs</h2><div><table class="pin-box"><tbody><tr><td><pin-number-optional n="-5" ellipsis = "false"></pin-number-optional></td><td><pin-name name="dataset_size_compression_threshold"></pin-name></td><td><req-type typeName="(int32 | generic_data_container)"></req-type></td><td><div class = "pin-des-text"><p>Integer value that defines the minimum dataset size (in bytes) to use h5 native compression Applicable for arrays of floats, doubles and integers.</p>
</div></figure><div class="op-version">Version 0.0.0</div><h2 class="op-des-h2">Inputs</h2><div><table class="pin-box"><tbody><tr><td><pin-number-optional n="-7" ellipsis = "false"></pin-number-optional></td><td><pin-name name="h5_chunk_size"></pin-name></td><td><req-type typeName="(int32 | generic_data_container)"></req-type></td><td><div class = "pin-des-text"><p>Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-5" ellipsis = "false"></pin-number-optional></td><td><pin-name name="dataset_size_compression_threshold"></pin-name></td><td><req-type typeName="(int32 | generic_data_container)"></req-type></td><td><div class = "pin-des-text"><p>Integer value that defines the minimum dataset size (in bytes) to use h5 native compression Applicable for arrays of floats, doubles and integers.</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-2" ellipsis = "false"></pin-number-optional></td><td><pin-name name="h5_native_compression"></pin-name></td><td><req-type typeName="(int32 | abstract_data_tree | generic_data_container)"></req-type></td><td><div class = "pin-des-text"><p>Integer value / DataTree that defines the h5 native compression used For Integer Input {0: No Compression (default); 1-9: GZIP Compression : 9 provides maximum compression but at the slowest speed.}For DataTree Input {type: None / GZIP / ZSTD; level: GZIP (1-9) / ZSTD (1-20); num_threads: ZSTD (&gt;0)}</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number-optional n="-1" ellipsis = "false"></pin-number-optional></td><td><pin-name name="export_floats"></pin-name></td><td><req-type typeName="(bool | generic_data_container)"></req-type></td><td><div class = "pin-des-text"><p>Converts double to float to reduce file size (default is true).If False, nodal results are exported as double precision and elemental results as single precision.</p>
</div></td></tr></tbody></table><table class="pin-box"><tbody><tr><td><pin-number n="0" ellipsis = "false"></pin-number></td><td><pin-name name="filename"></pin-name></td><td><req-type typeName="(string)"></req-type></td><td><div class = "pin-des-text"><p>filename of the migrated file</p>
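The reference entries above document the new h5_chunk_size pin (-7) next to the existing compression pins: dataset_size_compression_threshold (pin -5, in bytes) and h5_native_compression (pin -2, an integer GZIP level or a DataTree with type/level/num_threads keys). A minimal sketch of the integer forms, using only the named inputs that appear in this diff; the values are illustrative, not recommendations:

from ansys.dpf import core as dpf

op = dpf.operators.serialization.hdf5dpf_generate_result_file()

# New pin -7: chunk size in KB; 1024 KB mirrors the documented 1 MB default
# that applies when compression is enabled.
op.inputs.h5_chunk_size.connect(1024)

# Pin -5: datasets smaller than this many bytes skip native H5 compression.
op.inputs.dataset_size_compression_threshold.connect(32 * 1024)

# Pin -2, integer form: 0 = no compression (default); 1-9 = GZIP level,
# where 9 gives maximum compression at the slowest speed.
op.inputs.h5_native_compression.connect(9)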
39 changes: 39 additions & 0 deletions src/ansys/dpf/core/operators/result/migrate_to_h5dpf.py
@@ -25,6 +25,8 @@ class migrate_to_h5dpf(Operator):

Parameters
----------
h5_chunk_size: int or GenericDataContainer, optional
Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.
dataset_size_compression_threshold: int or GenericDataContainer, optional
Integer value that defines the minimum dataset size (in bytes) to use h5 native compression Applicable for arrays of floats, doubles and integers.
h5_native_compression: int or DataTree or GenericDataContainer, optional
@@ -58,6 +60,8 @@ class migrate_to_h5dpf(Operator):
>>> op = dpf.operators.result.migrate_to_h5dpf()

>>> # Make input connections
>>> my_h5_chunk_size = int()
>>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_dataset_size_compression_threshold = int()
>>> op.inputs.dataset_size_compression_threshold.connect(my_dataset_size_compression_threshold)
>>> my_h5_native_compression = int()
@@ -81,6 +85,7 @@ class migrate_to_h5dpf(Operator):

>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.result.migrate_to_h5dpf(
... h5_chunk_size=my_h5_chunk_size,
... dataset_size_compression_threshold=my_dataset_size_compression_threshold,
... h5_native_compression=my_h5_native_compression,
... export_floats=my_export_floats,
@@ -99,6 +104,7 @@ class migrate_to_h5dpf(Operator):

def __init__(
self,
h5_chunk_size=None,
dataset_size_compression_threshold=None,
h5_native_compression=None,
export_floats=None,
@@ -115,6 +121,8 @@ def __init__(
super().__init__(name="hdf5::h5dpf::migrate_file", config=config, server=server)
self._inputs = InputsMigrateToH5Dpf(self)
self._outputs = OutputsMigrateToH5Dpf(self)
if h5_chunk_size is not None:
self.inputs.h5_chunk_size.connect(h5_chunk_size)
if dataset_size_compression_threshold is not None:
self.inputs.dataset_size_compression_threshold.connect(
dataset_size_compression_threshold
@@ -151,6 +159,12 @@ def _spec() -> Specification:
spec = Specification(
description=description,
map_input_pin_spec={
-7: PinSpecification(
name="h5_chunk_size",
type_names=["int32", "generic_data_container"],
optional=True,
document=r"""Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.""",
),
-5: PinSpecification(
name="dataset_size_compression_threshold",
type_names=["int32", "generic_data_container"],
@@ -279,6 +293,8 @@ class InputsMigrateToH5Dpf(_Inputs):
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.migrate_to_h5dpf()
>>> my_h5_chunk_size = int()
>>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_dataset_size_compression_threshold = int()
>>> op.inputs.dataset_size_compression_threshold.connect(my_dataset_size_compression_threshold)
>>> my_h5_native_compression = int()
@@ -303,6 +319,8 @@ class InputsMigrateToH5Dpf(_Inputs):

def __init__(self, op: Operator):
super().__init__(migrate_to_h5dpf._spec().inputs, op)
self._h5_chunk_size = Input(migrate_to_h5dpf._spec().input_pin(-7), -7, op, -1)
self._inputs.append(self._h5_chunk_size)
self._dataset_size_compression_threshold = Input(
migrate_to_h5dpf._spec().input_pin(-5), -5, op, -1
)
@@ -336,6 +354,27 @@ def __init__(self, op: Operator):
)
self._inputs.append(self._filtering_workflow)

@property
def h5_chunk_size(self) -> Input:
r"""Allows to connect h5_chunk_size input to the operator.

Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.

Returns
-------
input:
An Input instance for this pin.

Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.migrate_to_h5dpf()
>>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> # or
>>> op.inputs.h5_chunk_size(my_h5_chunk_size)
"""
return self._h5_chunk_size

@property
def dataset_size_compression_threshold(self) -> Input:
r"""Allows to connect dataset_size_compression_threshold input to the operator.
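With the pin spec, constructor keyword, and Input wiring above, the chunk size can be passed straight through the generated constructor. A minimal sketch, assuming a 1 MB (1024 KB) chunk; the keyword names come from the docstring example earlier in this file:

from ansys.dpf import core as dpf

my_h5_chunk_size = 1024  # KB, i.e. the documented 1 MB default
op = dpf.operators.result.migrate_to_h5dpf(
    h5_chunk_size=my_h5_chunk_size,
    export_floats=True,  # documented default: store doubles as floats
)

The same pin is added to the generated module for the serialization operator, hdf5dpf_generate_result_file, shown next.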
@@ -21,6 +21,8 @@ class hdf5dpf_generate_result_file(Operator):

Parameters
----------
h5_chunk_size: int, optional
Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.
append_mode: bool, optional
Experimental: Allow appending chunked data to the file. This disables fields container content deduplication.
dataset_size_compression_threshold: int, optional
@@ -55,6 +57,8 @@ class hdf5dpf_generate_result_file(Operator):
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file()

>>> # Make input connections
>>> my_h5_chunk_size = int()
>>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_append_mode = bool()
>>> op.inputs.append_mode.connect(my_append_mode)
>>> my_dataset_size_compression_threshold = int()
@@ -78,6 +82,7 @@ class hdf5dpf_generate_result_file(Operator):

>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file(
... h5_chunk_size=my_h5_chunk_size,
... append_mode=my_append_mode,
... dataset_size_compression_threshold=my_dataset_size_compression_threshold,
... h5_native_compression=my_h5_native_compression,
@@ -96,6 +101,7 @@ class hdf5dpf_generate_result_file(Operator):

def __init__(
self,
h5_chunk_size=None,
append_mode=None,
dataset_size_compression_threshold=None,
h5_native_compression=None,
@@ -114,6 +120,8 @@ def __init__(
)
self._inputs = InputsHdf5DpfGenerateResultFile(self)
self._outputs = OutputsHdf5DpfGenerateResultFile(self)
if h5_chunk_size is not None:
self.inputs.h5_chunk_size.connect(h5_chunk_size)
if append_mode is not None:
self.inputs.append_mode.connect(append_mode)
if dataset_size_compression_threshold is not None:
@@ -144,6 +152,12 @@ def _spec() -> Specification:
spec = Specification(
description=description,
map_input_pin_spec={
-7: PinSpecification(
name="h5_chunk_size",
type_names=["int32"],
optional=True,
document=r"""Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.""",
),
-6: PinSpecification(
name="append_mode",
type_names=["bool"],
@@ -270,6 +284,8 @@ class InputsHdf5DpfGenerateResultFile(_Inputs):
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file()
>>> my_h5_chunk_size = int()
>>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> my_append_mode = bool()
>>> op.inputs.append_mode.connect(my_append_mode)
>>> my_dataset_size_compression_threshold = int()
@@ -294,6 +310,10 @@ class InputsHdf5DpfGenerateResultFile(_Inputs):

def __init__(self, op: Operator):
super().__init__(hdf5dpf_generate_result_file._spec().inputs, op)
self._h5_chunk_size = Input(
hdf5dpf_generate_result_file._spec().input_pin(-7), -7, op, -1
)
self._inputs.append(self._h5_chunk_size)
self._append_mode = Input(
hdf5dpf_generate_result_file._spec().input_pin(-6), -6, op, -1
)
@@ -335,6 +355,27 @@ def __init__(self, op: Operator):
)
self._inputs.append(self._input_name2)

@property
def h5_chunk_size(self) -> Input:
r"""Allows to connect h5_chunk_size input to the operator.

Size of each HDF5 chunk in kilobytes (KB). Default: 1 MB when compression is enabled; for uncompressed datasets, the default is the full dataset size x dimension.

Returns
-------
input:
An Input instance for this pin.

Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.hdf5dpf_generate_result_file()
>>> op.inputs.h5_chunk_size.connect(my_h5_chunk_size)
>>> # or
>>> op.inputs.h5_chunk_size(my_h5_chunk_size)
"""
return self._h5_chunk_size

@property
def append_mode(self) -> Input:
r"""Allows to connect append_mode input to the operator.
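As with the migration operator, the property exposes both connection styles. A short sketch of the two equivalent forms from the docstring above; 2048 is an arbitrary illustrative value:

from ansys.dpf import core as dpf

op = dpf.operators.serialization.hdf5dpf_generate_result_file()
my_h5_chunk_size = 2048  # KB

# Explicit form used throughout the generated examples:
op.inputs.h5_chunk_size.connect(my_h5_chunk_size)

# Callable shorthand documented on the property:
op.inputs.h5_chunk_size(my_h5_chunk_size)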
Binary file modified src/ansys/dpf/gatebin/Ans.Dpf.GrpcClient.dll
Binary file modified src/ansys/dpf/gatebin/DPFClientAPI.dll
Binary file modified src/ansys/dpf/gatebin/libAns.Dpf.GrpcClient.so
Binary file modified src/ansys/dpf/gatebin/libDPFClientAPI.so