Skip to content

Commit ef17290

Browse files
authored
Update tensor pointer cast (#952)
* Update tensor pointer cast — Signed-off-by: Raasz, Pawel <[email protected]>
* Experiment with action cache — Signed-off-by: Raasz, Pawel <[email protected]>
* Update actions cache for mac and windows — Signed-off-by: Raasz, Pawel <[email protected]>
* Update java test — Signed-off-by: Raasz, Pawel <[email protected]>
* Update java tests — Signed-off-by: Raasz, Pawel <[email protected]>
* Update code style — Signed-off-by: Raasz, Pawel <[email protected]>
* Update code_style flow — Signed-off-by: Raasz, Pawel <[email protected]>
* Restore code style flow — Signed-off-by: Raasz, Pawel <[email protected]>
* Add hashes to GH actions — Signed-off-by: Raasz, Pawel <[email protected]>
---------
Signed-off-by: Raasz, Pawel <[email protected]>
1 parent b34ff38 commit ef17290

File tree

11 files changed

+28
-27
lines changed

11 files changed

+28
-27
lines changed

.github/workflows/linux.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ jobs:
9999
run: python3 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt
100100

101101
- name: Setup ccache
102-
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
102+
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
103103
with:
104104
# Should save cache only if run in the master branch of the base repo
105105
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push
@@ -156,7 +156,7 @@ jobs:
156156
run: |
157157
source ${INSTALL_DIR}/setupvars.sh
158158
gradle clean build --info
159-
159+
160160
for d in CPU HETERO:CPU; do
161161
gradle test -Prun_tests -DMODELS_PATH=${TEST_DATA} -Ddevice=$d --info;
162162
done
@@ -177,7 +177,7 @@ jobs:
177177
pushd ${INSTALL_DIR}
178178
tar -czvf ${BUILD_DIR}/openvino_package.tar.gz *
179179
popd
180-
180+
181181
pushd ${DEVELOPER_PACKAGE_DIR}
182182
tar -czvf ${BUILD_DIR}/openvino_developer_package.tar.gz *
183183
popd
@@ -265,7 +265,7 @@ jobs:
265265
pushd ${INSTALL_DIR}
266266
tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
267267
popd
268-
268+
269269
pushd ${INSTALL_DIR}
270270
tar -xzf openvino_developer_package.tar.gz -C ${INSTALL_DIR}
271271
popd
@@ -285,7 +285,7 @@ jobs:
285285
apt -y --no-install-recommends install software-properties-common curl
286286
287287
- name: Setup ccache
288-
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
288+
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
289289
with:
290290
# Should save cache only if run in the master branch of the base repo
291291
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push

.github/workflows/mac.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ jobs:
8484
#
8585

8686
- name: Setup ccache
87-
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
87+
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
8888
with:
8989
# Should save cache only if run in the master branch of the base repo
9090
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push

.github/workflows/windows.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ jobs:
110110
Add-Content -Path $env:GITHUB_PATH -Value "C:\ccache"
111111
112112
- name: Setup ccache
113-
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
113+
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
114114
with:
115115
# Should save cache only if run in the master branch of the base repo
116116
# github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push

modules/custom_operations/user_ie_extensions/calculate_grid.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ std::shared_ptr<ov::Node> CalculateGrid::clone_with_new_inputs(const ov::OutputV
2121
}
2222

2323
bool CalculateGrid::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
24-
const float* inpPos = reinterpret_cast<float*>(inputs[0].data());
24+
const float *inpPos = reinterpret_cast<const float *>(inputs[0].data());
2525
float* out = reinterpret_cast<float*>(outputs[0].data());
2626

2727
std::set<std::tuple<int, int, int> > outPos;

modules/custom_operations/user_ie_extensions/complex_mul.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ std::shared_ptr<ov::Node> ComplexMultiplication::clone_with_new_inputs(const ov:
2222
}
2323

2424
bool ComplexMultiplication::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
25-
const float* inp0 = reinterpret_cast<float*>(inputs[0].data());
26-
const float* inp1 = reinterpret_cast<float*>(inputs[1].data());
25+
const float *inp0 = reinterpret_cast<const float *>(inputs[0].data());
26+
const float *inp1 = reinterpret_cast<const float *>(inputs[1].data());
2727
float* out = reinterpret_cast<float*>(outputs[0].data());
2828

2929
size_t channels0 = inputs[0].get_shape()[1];

modules/custom_operations/user_ie_extensions/fft.cpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -127,12 +127,13 @@ bool FFT::visit_attributes(ov::AttributeVisitor& visitor) {
127127
}
128128

129129
bool FFT::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
130-
float* inpData = reinterpret_cast<float*>(inputs[0].data());
130+
//const_cast because the cvSetData use user pointer as non-const, should be ok as it looks like input data
131+
float *inpData = reinterpret_cast<float *>(const_cast<void*>(inputs[0].data()));
131132

132133
if (inputs[1].get_element_type() != ov::element::i32)
133134
OPENVINO_THROW("Unexpected dims type: " + inputs[1].get_element_type().to_string());
134135

135-
int32_t* signalDimsData = reinterpret_cast<int32_t*>(inputs[1].data());
136+
const int32_t *signalDimsData = reinterpret_cast<const int32_t *>(inputs[1].data());
136137
float* outData = reinterpret_cast<float*>(outputs[0].data());
137138
std::vector<size_t> dims = inputs[0].get_shape();
138139
const size_t numSignalDims = inputs[1].get_shape()[0];

modules/custom_operations/user_ie_extensions/grid_sample.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,8 @@ std::shared_ptr<ov::Node> GridSample::clone_with_new_inputs(const ov::OutputVect
2626
}
2727

2828
bool GridSample::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
29-
const float* inpData = reinterpret_cast<float*>(inputs[0].data());
30-
const float* gridData = reinterpret_cast<float*>(inputs[1].data());
29+
const float *inpData = reinterpret_cast<const float *>(inputs[0].data());
30+
const float *gridData = reinterpret_cast<const float *>(inputs[1].data());
3131
float* outData = reinterpret_cast<float*>(outputs[0].data());
3232

3333
std::vector<size_t> inpDims = inputs[0].get_shape();

modules/custom_operations/user_ie_extensions/sparse_conv.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,11 @@ std::shared_ptr<ov::Node> SparseConv::clone_with_new_inputs(const ov::OutputVect
2323
}
2424

2525
bool SparseConv::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
26-
const float* features = reinterpret_cast<float*>(inputs[0].data());
27-
const float* inpPos = reinterpret_cast<float*>(inputs[1].data());
28-
const float* outPos = reinterpret_cast<float*>(inputs[2].data());
29-
const float* kernel = reinterpret_cast<float*>(inputs[3].data());
30-
const float* offset = reinterpret_cast<float*>(inputs[4].data());
26+
const float *features = reinterpret_cast<const float *>(inputs[0].data());
27+
const float *inpPos = reinterpret_cast<const float *>(inputs[1].data());
28+
const float *outPos = reinterpret_cast<const float *>(inputs[2].data());
29+
const float *kernel = reinterpret_cast<const float *>(inputs[3].data());
30+
const float *offset = reinterpret_cast<const float *>(inputs[4].data());
3131
float* out = reinterpret_cast<float*>(outputs[0].data());
3232
memset(out, 0, outputs[0].get_byte_size());
3333

modules/custom_operations/user_ie_extensions/sparse_conv_transpose.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,11 @@ std::shared_ptr<ov::Node> SparseConvTranspose::clone_with_new_inputs(const ov::O
2323
}
2424

2525
bool SparseConvTranspose::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
26-
const float* features = reinterpret_cast<float*>(inputs[0].data());
27-
const float* inpPos = reinterpret_cast<float*>(inputs[1].data());
28-
const float* outPos = reinterpret_cast<float*>(inputs[2].data());
29-
const float* kernel = reinterpret_cast<float*>(inputs[3].data());
30-
const float* offset = reinterpret_cast<float*>(inputs[4].data());
26+
const float *features = reinterpret_cast<const float *>(inputs[0].data());
27+
const float *inpPos = reinterpret_cast<const float *>(inputs[1].data());
28+
const float *outPos = reinterpret_cast<const float *>(inputs[2].data());
29+
const float *kernel = reinterpret_cast<const float *>(inputs[3].data());
30+
const float *offset = reinterpret_cast<const float *>(inputs[4].data());
3131
float* out = reinterpret_cast<float*>(outputs[0].data());
3232
memset(out, 0, outputs[0].get_byte_size());
3333

modules/java_api/src/test/java/org/intel/openvino/CompiledModelTests.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ public void testInputs() {
2323
List<Output> inputs = model.inputs();
2424

2525
assertEquals("data", inputs.get(0).get_any_name());
26-
assertEquals(ElementType.f32, inputs.get(0).get_element_type());
26+
assertEquals(ElementType.f16, inputs.get(0).get_element_type());
2727

2828
int[] shape = new int[] {1, 3, 32, 32};
2929
assertArrayEquals("Shape", shape, inputs.get(0).get_shape());
@@ -34,7 +34,7 @@ public void testOutputs() {
3434
List<Output> outputs = model.outputs();
3535

3636
assertEquals("fc_out", outputs.get(0).get_any_name());
37-
assertEquals(ElementType.f32, outputs.get(0).get_element_type());
37+
assertEquals(ElementType.f16, outputs.get(0).get_element_type());
3838

3939
int[] shape = new int[] {1, 10};
4040
assertArrayEquals("Shape", shape, outputs.get(0).get_shape());

0 commit comments

Comments (0)