Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 23 additions & 1 deletion src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
#include <string>
#include <utility>
#include <stdexcept>

#include "openvino/core/except.hpp"

namespace cldnn {
/// @addtogroup cpp_api C++ API
Expand Down Expand Up @@ -302,6 +302,28 @@ struct format {
fmt == bfvuwzyx);
}

// Builds a permutation table mapping each position of this format's output
// order to its position in the format's internal (canonical) order.
//
// @param fmt       format whose order() / internal_order() are inspected
// @param axes_map  out: axes_map[i] = index within internal_order() of the
//                  i-th character of order(); caller-allocated
// @param map_size  in: capacity of axes_map; out: number of entries written
// @throws ov::Exception if the format has more dimensions than axes_map can
//         hold, or an order character is missing from the internal order
static void get_axes_map(const format& fmt, int64_t* axes_map, size_t& map_size) {
    const auto& o_order = fmt.order();
    const auto& i_order = fmt.internal_order();

    // Guard the caller-provided buffer: order() has more elements than allocated in axes_map
    if (o_order.size() > map_size) {
        OPENVINO_THROW("Layout dimension higher than expected: " + std::to_string(o_order.size()));
    }

    map_size = o_order.size();

    for (size_t i = 0; i < map_size; i++) {
        auto c = o_order[i];
        auto pos = i_order.find(c);

        // std::string(1, c) prints the dimension character itself; std::to_string(c)
        // would promote the char to int and print its numeric code instead.
        if (pos == std::string::npos)
            OPENVINO_THROW("Unknown coord type: " + std::string(1, c));

        axes_map[i] = pos;
    }
}

static format get_default_format(size_t rank, bool is_weights = false, bool is_grouped = false);
static bool is_default_format(const format& fmt);

Expand Down
3 changes: 3 additions & 0 deletions src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -347,6 +347,9 @@ struct layout {
}
}

// Read-only accessor for the layout's format member (defined in layout.cpp).
cldnn::format get_format() const;
// Read-only accessor for the layout's data_padding member (defined in layout.cpp).
padding get_padding() const;

size_t get_rank() const;

size_t get_spatial_rank() const;
Expand Down
60 changes: 51 additions & 9 deletions src/plugins/intel_gpu/src/plugin/common_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,37 @@

namespace {

using namespace cldnn;

// Maximum number of axes handled here: b, f and up to four spatials (x, y, z, w).
constexpr size_t MAX_NUM_AXES = 6;

// Computes the parameters needed to map a (b, f, x, y, z, w) coordinate to a
// linear element offset inside a possibly padded buffer described by `layout`.
//
// @param layout        layout to inspect
// @param start_points  out: first valid coordinate on each axis (the lower padding)
// @param end_points    out: one-past-last valid coordinate on each axis
// @param padded_sizes  out: per-axis size including lower + upper padding, ordered
//                      by the layout's format; must hold at least map_size entries
// @param axes_map      out: see format::get_axes_map(); must hold at least map_size entries
// @param map_size      in: capacity of axes_map / padded_sizes; out: valid entry count
// @throws ov::Exception on rank/capacity mismatch
void get_linear_offset_params(layout& layout, tensor& start_points, tensor& end_points, int64_t* padded_sizes, int64_t* axes_map, size_t& map_size) {
    auto fmt = layout.get_format();
    auto data_padding = layout.get_padding();
    auto default_fmt = format::get_default_format(fmt.dimension(), format::is_weights_format(fmt), format::is_grouped(fmt));

    // Split the stored padding into lower/upper halves truncated to the format's
    // rank, and re-wrap them as tensors in the default dimension order.
    std::vector<tensor::value_type> lower_sizes, upper_sizes;
    lower_sizes.assign(data_padding._lower_size.begin(), data_padding._lower_size.begin() + fmt.dimension());
    upper_sizes.assign(data_padding._upper_size.begin(), data_padding._upper_size.begin() + fmt.dimension());
    start_points = tensor(default_fmt, lower_sizes, 0);
    const auto& u_padd = tensor(default_fmt, upper_sizes, 0);

    auto t = layout.get_tensor();
    // Iteration range per axis is [lower_pad, lower_pad + size).
    end_points = t + start_points;

    // Zero-sized axes must contribute a factor of 1 (not 0) to the padded-size
    // products used by the offset computation in convert_and_copy_padded_source.
    std::replace(t.raw.begin(), t.raw.end(), 0, 1);

    format::get_axes_map(fmt, axes_map, map_size);
    const auto& p_sizes = (t + start_points + u_padd).sizes(fmt);

    // The caller sized padded_sizes for exactly map_size axes. The original check
    // only rejected '<', so a larger rank would have overflowed the buffer below.
    if (p_sizes.size() != map_size) {
        OPENVINO_THROW("Unsupported padded layout dimension: " + std::to_string(p_sizes.size()));
    }

    for (size_t i = 0; i < p_sizes.size(); i++) {
        padded_sizes[i] = p_sizes[i];
    }
}

template <typename src_t, typename dst_t>
void convert_and_copy_no_pad(const src_t* src, dst_t* dst, size_t size) {
OPENVINO_ASSERT(src && dst, "[GPU] Src or Dst ptr is null");
Expand All @@ -26,15 +57,26 @@ void convert_and_copy_no_pad(const src_t* src, dst_t* dst, size_t size) {
}

template <typename src_t, typename dst_t>
void convert_and_copy_padded_source(const src_t* src, dst_t* dst, cldnn::layout layout) {
cldnn::tensor size = layout.get_tensor();
for (int64_t b = 0; b < size.batch[0]; b++) {
for (int64_t f = 0; f < size.feature[0]; f++) {
for (int64_t w = 0; w < size.spatial[3]; w++) {
for (int64_t z = 0; z < size.spatial[2]; z++) {
for (int64_t y = 0; y < size.spatial[1]; y++) {
for (int64_t x = 0; x < size.spatial[0]; x++) {
*dst++ = static_cast<dst_t>(src[layout.get_linear_offset(cldnn::tensor(b, f, x, y, z, w))]);
void convert_and_copy_padded_source(const src_t* src, dst_t* dst, layout& layout) {
tensor axes_start_point, axes_end_point;
int64_t padded_sizes[MAX_NUM_AXES], axes_map[MAX_NUM_AXES];
size_t map_len = MAX_NUM_AXES;

get_linear_offset_params(layout, axes_start_point, axes_end_point, padded_sizes, axes_map, map_len);

for (int64_t b = axes_start_point.batch[0]; b < axes_end_point.batch[0]; b++) {
for (int64_t f = axes_start_point.feature[0]; f < axes_end_point.feature[0]; f++) {
for (int64_t w = axes_start_point.spatial[3]; w < axes_end_point.spatial[3]; w++) {
for (int64_t z = axes_start_point.spatial[2]; z < axes_end_point.spatial[2]; z++) {
for (int64_t y = axes_start_point.spatial[1]; y < axes_end_point.spatial[1]; y++) {
for (int64_t x = axes_start_point.spatial[0]; x < axes_end_point.spatial[0]; x++) {
int64_t element_sizes[MAX_NUM_AXES] = {b, f, x, y, z, w};
size_t offset = element_sizes[axes_map[0]];

for (size_t i = 1; i < map_len; i++)
offset = offset * padded_sizes[i] + element_sizes[axes_map[i]];

*dst++ = static_cast<dst_t>(src[offset]);
}
}
}
Expand Down
8 changes: 8 additions & 0 deletions src/plugins/intel_gpu/src/runtime/layout.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,14 @@ std::vector<int32_t> convert_dimensions(const std::vector<int32_t>& sizes, const

} // namespace

// Returns the memory format descriptor stored on this layout.
// `this->` disambiguates the data member from the cldnn::format type name.
format layout::get_format() const {
    return this->format;
}

// Returns a copy of the padding descriptor stored on this layout.
padding layout::get_padding() const {
    return this->data_padding;
}

size_t layout::get_rank() const {
return format.dimension();
}
Expand Down
Loading