Commit 1734ec5

More fixes

Signed-off-by: Yuanyuan Chen <[email protected]>

1 parent 2adc08c

25 files changed: +268 −222 lines

backends/cadence/hifi/operators/op_mean.cpp

Lines changed: 1 addition & 1 deletion
@@ -17,8 +17,8 @@ using executorch::aten::RuntimeContext;
 using executorch::aten::ScalarType;
 using executorch::aten::Tensor;
 using executorch::runtime::ArrayRef;
-using torch::executor::Error;
 using std::optional;
+using torch::executor::Error;

 namespace impl {
 namespace HiFi {

backends/cortex_m/ops/cmsis_scratch_buffer_context.h

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ class CMSISScratchBufferContext final {
       Tensor& scratch_buffer,
       const Tensor& weights,
       const Tensor& weight_zero_point,
-      const torch::executor::optional<Tensor>& bias)
+      const ::std::optional<Tensor>& bias)
       : scratch_ptr_(scratch_buffer.mutable_data_ptr<int8_t>()),
         total_size_(scratch_buffer.size(0)),
         base_ptr_(reinterpret_cast<uint8_t*>(scratch_ptr_)),

backends/cortex_m/ops/op_quantized_linear.cpp

Lines changed: 2 additions & 2 deletions
@@ -27,7 +27,7 @@ Tensor& quantized_linear_out(
     const Tensor& weight_zero_point,
     const Tensor& weight_multiplier,
     const Tensor& weight_shift,
-    const torch::executor::optional<Tensor>& bias,
+    const ::std::optional<Tensor>& bias,
     const Tensor& bias_multiplier,
     const Tensor& bias_shift,
     const Tensor& scratch_buffer,
@@ -155,7 +155,7 @@ Tensor quantized_linear(
     const Tensor& weight_zero_point,
     const Tensor& weight_multiplier,
     const Tensor& weight_shift,
-    const torch::executor::optional<Tensor>& bias,
+    const ::std::optional<Tensor>& bias,
     const Tensor& bias_multiplier,
     const Tensor& bias_shift,
     const Tensor& scratch_buffer,
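Note on the signature change in both hunks above: the bias parameter moves from torch::executor::optional<Tensor> to ::std::optional<Tensor>, which exposes the same has_value()/operator* interface. A minimal sketch of consuming an optional argument in this style, with a plain std::vector<float> standing in for the real Tensor type (BiasVec and bias_at are illustrative, not part of the kernel):

#include <cstddef>
#include <optional>
#include <vector>

using BiasVec = std::vector<float>;  // stand-in for the operator's Tensor type

float bias_at(const ::std::optional<BiasVec>& bias, size_t i) {
  // std::optional keeps the same presence check the old wrapper offered,
  // so call sites only need the type swap.
  return bias.has_value() ? (*bias)[i] : 0.0f;
}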

extension/aten_util/make_aten_functor_from_et_functor.h

Lines changed: 7 additions & 4 deletions
@@ -140,10 +140,13 @@ struct type_convert<
     final {
   explicit type_convert(ETensor value)
       : value_(value),
-        converted_(at::from_blob(
-            value_.mutable_data_ptr(),
-            std::vector<int64_t>{value_.sizes().begin(), value_.sizes().end()},
-            c10::ScalarType(value_.scalar_type()))) {}
+        converted_(
+            at::from_blob(
+                value_.mutable_data_ptr(),
+                std::vector<int64_t>{
+                    value_.sizes().begin(),
+                    value_.sizes().end()},
+                c10::ScalarType(value_.scalar_type()))) {}

   ATensor call() {
     return converted_;
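For context on the initializer reflowed above: at::from_blob wraps an existing buffer in an ATen tensor without copying, which is how type_convert lets an ExecuTorch tensor's storage be viewed as an at::Tensor. A standalone sketch of that call (wrap_buffer is illustrative, not part of this header):

#include <ATen/ATen.h>
#include <cstdint>
#include <vector>

// from_blob does not take ownership: `data` must outlive the returned tensor,
// just as value_ must outlive converted_ in the struct above.
at::Tensor wrap_buffer(float* data, const std::vector<int64_t>& sizes) {
  return at::from_blob(data, sizes, at::kFloat);
}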

extension/aten_util/test/make_aten_functor_from_et_functor_test.cpp

Lines changed: 25 additions & 35 deletions
@@ -78,8 +78,7 @@ Tensor& sum_arrayref_tensor_out(
 }

 Tensor& sum_arrayref_optional_tensor_out(
-    torch::executor::ArrayRef<
-        std::optional<torch::executor::Tensor>> a,
+    torch::executor::ArrayRef<std::optional<torch::executor::Tensor>> a,
     Tensor& out) {
   for (int i = 0; i < a.size(); i++) {
     if (a[i].has_value()) {
@@ -140,15 +139,14 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestTypeMap_Optionals) {
           type_map<std::optional<int64_t>>::type,
           std::optional<int64_t>>::value));
   // Tensor.
+  EXPECT_TRUE((std::is_same<
+      type_map<std::optional<torch::executor::Tensor>>::type,
+      std::optional<at::Tensor>>::value));
+  // ArrayRef.
   EXPECT_TRUE(
       (std::is_same<
-          type_map<std::optional<torch::executor::Tensor>>::type,
-          std::optional<at::Tensor>>::value));
-  // ArrayRef.
-  EXPECT_TRUE((std::is_same<
-      type_map<std::optional<
-          torch::executor::ArrayRef<int64_t>>>::type,
-      std::optional<c10::ArrayRef<int64_t>>>::value));
+          type_map<std::optional<torch::executor::ArrayRef<int64_t>>>::type,
+          std::optional<c10::ArrayRef<int64_t>>>::value));
   EXPECT_TRUE((std::is_same<
       type_map<std::optional<
           torch::executor::ArrayRef<torch::executor::Tensor>>>::type,
@@ -166,10 +164,10 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestTypeMap_ArrayRef) {
       type_map<torch::executor::ArrayRef<torch::executor::Tensor>>::type,
       c10::ArrayRef<at::Tensor>>::value));
   // Optionals.
-  EXPECT_TRUE((std::is_same<
-      type_map<torch::executor::ArrayRef<
-          std::optional<int64_t>>>::type,
-      c10::ArrayRef<std::optional<int64_t>>>::value));
+  EXPECT_TRUE(
+      (std::is_same<
+          type_map<torch::executor::ArrayRef<std::optional<int64_t>>>::type,
+          c10::ArrayRef<std::optional<int64_t>>>::value));
   EXPECT_TRUE((std::is_same<
       type_map<torch::executor::ArrayRef<
           std::optional<torch::executor::Tensor>>>::type,
@@ -197,8 +195,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_OptionalScalar) {
           optional_at_in)
           .call();
   EXPECT_TRUE(
-      (std::is_same<decltype(optional_et), std::optional<int64_t>>::
-           value));
+      (std::is_same<decltype(optional_et), std::optional<int64_t>>::value));

   // Convert optional et to at.
   auto optional_et_in = std::optional<int64_t>();
@@ -213,11 +210,10 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_OptionalScalar) {
 TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_OptionalTensor) {
   // Convert optional at to et.
   auto optional_at_in = std::optional<at::Tensor>();
-  auto optional_et =
-      type_convert<
-          std::optional<at::Tensor>,
-          std::optional<torch::executor::Tensor>>(optional_at_in)
-          .call();
+  auto optional_et = type_convert<
+                         std::optional<at::Tensor>,
+                         std::optional<torch::executor::Tensor>>(optional_at_in)
+                         .call();
   EXPECT_TRUE((std::is_same<
       decltype(optional_et),
       std::optional<torch::executor::Tensor>>::value));
@@ -427,9 +423,8 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) {
   const std::optional<int64_t> const_optional_at_in =
       std::optional<int64_t>(42);
   auto const_optional_et =
-      type_convert<
-          const std::optional<int64_t>,
-          std::optional<int64_t>>(const_optional_at_in)
+      type_convert<const std::optional<int64_t>, std::optional<int64_t>>(
+          const_optional_at_in)
           .call();
   EXPECT_TRUE(const_optional_et.has_value());
   EXPECT_EQ(const_optional_et.value(), 42);
@@ -447,9 +442,8 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) {
   const std::optional<int64_t> const_optional_at_ref_in =
       std::optional<int64_t>(84);
   auto const_optional_et_from_ref =
-      type_convert<
-          const std::optional<int64_t>&,
-          std::optional<int64_t>>(const_optional_at_ref_in)
+      type_convert<const std::optional<int64_t>&, std::optional<int64_t>>(
+          const_optional_at_ref_in)
           .call();
   EXPECT_TRUE(const_optional_et_from_ref.has_value());
   EXPECT_EQ(const_optional_et_from_ref.value(), 84);
@@ -459,8 +453,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) {
       std::optional<at::Tensor>(torch::tensor({5}));
   auto const_optional_tensor_converter = type_convert<
       const std::optional<at::Tensor>,
-      std::optional<torch::executor::Tensor>>(
-      const_optional_tensor_at_in);
+      std::optional<torch::executor::Tensor>>(const_optional_tensor_at_in);
   auto const_optional_tensor_et = const_optional_tensor_converter.call();
   EXPECT_TRUE(const_optional_tensor_et.has_value());
   EXPECT_EQ(const_optional_tensor_et.value().const_data_ptr<int64_t>()[0], 5);
@@ -470,8 +463,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) {
       std::optional<at::Tensor>(torch::tensor({7}));
   auto optional_tensor_converter_from_ref = type_convert<
       std::optional<at::Tensor>&,
-      std::optional<torch::executor::Tensor>>(
-      optional_tensor_at_ref_in);
+      std::optional<torch::executor::Tensor>>(optional_tensor_at_ref_in);
   auto optional_tensor_et_from_ref = optional_tensor_converter_from_ref.call();
   EXPECT_TRUE(optional_tensor_et_from_ref.has_value());
   EXPECT_EQ(
@@ -482,8 +474,7 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) {
       std::optional<at::Tensor>(torch::tensor({9}));
   auto const_optional_tensor_converter_from_ref = type_convert<
       const std::optional<at::Tensor>&,
-      std::optional<torch::executor::Tensor>>(
-      const_optional_tensor_at_ref_in);
+      std::optional<torch::executor::Tensor>>(const_optional_tensor_at_ref_in);
   auto const_optional_tensor_et_from_ref =
       const_optional_tensor_converter_from_ref.call();
   EXPECT_TRUE(const_optional_tensor_et_from_ref.has_value());
@@ -494,9 +485,8 @@ TEST_F(MakeATenFunctorFromETFunctorTest, TestConvert_ConstRefOptionals) {
   // Test empty const optional conversions
   const std::optional<int64_t> empty_const_optional_at_in = std::nullopt;
   auto empty_const_optional_et =
-      type_convert<
-          const std::optional<int64_t>,
-          std::optional<int64_t>>(empty_const_optional_at_in)
+      type_convert<const std::optional<int64_t>, std::optional<int64_t>>(
+          empty_const_optional_at_in)
           .call();
   EXPECT_FALSE(empty_const_optional_et.has_value());

extension/flat_tensor/serialize/serialize.cpp

Lines changed: 12 additions & 10 deletions
@@ -90,16 +90,18 @@ runtime::Error save_ptd(
         builder.CreateVector(
             tensor.dim_order().data(), tensor.dim_order().size()));

-    named_data.push_back(::flat_tensor_flatbuffer::CreateNamedData(
-        /*_fbb=*/builder,
-        /*key=*/key,
-        /*segment_index=*/i,
-        /*tensor_layout=*/tensor_layout));
-
-    segments.push_back(::flat_tensor_flatbuffer::CreateDataSegment(
-        /*_fbb=*/builder,
-        /*offset=*/total_segment_size,
-        /*size=*/tensor.nbytes()));
+    named_data.push_back(
+        ::flat_tensor_flatbuffer::CreateNamedData(
+            /*_fbb=*/builder,
+            /*key=*/key,
+            /*segment_index=*/i,
+            /*tensor_layout=*/tensor_layout));
+
+    segments.push_back(
+        ::flat_tensor_flatbuffer::CreateDataSegment(
+            /*_fbb=*/builder,
+            /*offset=*/total_segment_size,
+            /*size=*/tensor.nbytes()));

     // Do not pad the last tensor.
     total_segment_size += (i == tensor_count - 1)

extension/llm/runner/pybindings.cpp

Lines changed: 6 additions & 5 deletions
@@ -587,11 +587,12 @@ PYBIND11_MODULE(_llm_runner, m) {
         if (audio_tensor.scalar_type() == torch::kUInt8) {
           uint8_t* data = audio_tensor.data_ptr<uint8_t>();
           std::vector<uint8_t> audio_data(data, data + audio_tensor.numel());
-          return MultimodalInput(RawAudio{
-              std::move(audio_data),
-              static_cast<int32_t>(batch_size),
-              static_cast<int32_t>(n_channels),
-              static_cast<int32_t>(n_samples)});
+          return MultimodalInput(
+              RawAudio{
+                  std::move(audio_data),
+                  static_cast<int32_t>(batch_size),
+                  static_cast<int32_t>(n_channels),
+                  static_cast<int32_t>(n_samples)});
         } else {
           throw std::runtime_error(
               "Unsupported raw audio tensor dtype. Only uint8 is supported for raw audio.");

extension/module/module.cpp

Lines changed: 6 additions & 4 deletions
@@ -195,8 +195,9 @@ runtime::Error Module::load(const Program::Verification verification) {
     for (const auto& data_map : named_data_maps_) {
       raw_data_maps.push_back(data_map.get());
     }
-    auto res_merged = MergedDataMap::load(runtime::Span<const NamedDataMap*>(
-        raw_data_maps.data(), raw_data_maps.size()));
+    auto res_merged = MergedDataMap::load(
+        runtime::Span<const NamedDataMap*>(
+            raw_data_maps.data(), raw_data_maps.size()));
     if (!res_merged.ok()) {
       return res_merged.error();
     }
@@ -329,8 +330,9 @@ runtime::Error Module::set_inputs(
     const std::vector<runtime::EValue>& input_values) {
   ET_CHECK_OK_OR_RETURN_ERROR(load_method(method_name));
   auto& method = methods_.at(method_name).method;
-  return method->set_inputs(executorch::aten::ArrayRef<runtime::EValue>(
-      input_values.data(), input_values.size()));
+  return method->set_inputs(
+      executorch::aten::ArrayRef<runtime::EValue>(
+          input_values.data(), input_values.size()));
 }

 runtime::Error Module::set_output(
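Both hunks above re-wrap the same idiom: a std::vector is handed to the runtime through a non-owning (pointer, length) view built inline. A generic sketch of that pattern (SpanView and make_view are illustrative, not the runtime::Span or ArrayRef types themselves):

#include <cstddef>
#include <vector>

template <typename T>
struct SpanView {  // stand-in for runtime::Span / executorch::aten::ArrayRef
  const T* data;
  size_t size;
};

template <typename T>
SpanView<T> make_view(const std::vector<T>& v) {
  // Valid only while `v` is alive, which is why Module builds the view
  // inline from a vector that outlives the call it is passed to.
  return SpanView<T>{v.data(), v.size()};
}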

extension/named_data_map/merged_data_map.h

Lines changed: 4 additions & 3 deletions
@@ -34,9 +34,10 @@ class MergedDataMap final
    * @param[in] data_maps vector of NamedDataMap pointers to merge.
    * Note: the data maps must outlive the MergedDataMap instance.
    */
-  static executorch::runtime::Result<MergedDataMap>
-  load(executorch::runtime::Span<
-       const executorch::ET_RUNTIME_NAMESPACE::NamedDataMap*> named_data_maps);
+  static executorch::runtime::Result<MergedDataMap> load(
+      executorch::runtime::Span<
+          const executorch::ET_RUNTIME_NAMESPACE::NamedDataMap*>
+          named_data_maps);

   /**
    * Retrieve the tensor_layout for the specified key.

extension/pybindings/pybindings.cpp

Lines changed: 8 additions & 6 deletions
@@ -294,15 +294,17 @@ struct PyBundledModule : public BundledModule {
       uint32_t bundled_input_pool_size)
       : BundledModule(buffer.cast<std::string_view>().data()),
         bundled_program_ptr_(buffer),
-        program_ptr_(static_cast<const void*>(
+        program_ptr_(
+            static_cast<const void*>(
+                bundled_program_flatbuffer::GetBundledProgram(
+                    get_bundled_program_ptr())
+                    ->program()
+                    ->data())),
+        program_len_(
             bundled_program_flatbuffer::GetBundledProgram(
                 get_bundled_program_ptr())
                 ->program()
-                ->data())),
-        program_len_(bundled_program_flatbuffer::GetBundledProgram(
-                         get_bundled_program_ptr())
-                         ->program()
-                         ->size()) {}
+                ->size()) {}

   static std::unique_ptr<PyBundledModule> load_from_buffer(
       const py::bytes& buffer,
