Skip to content

Commit 0267b77

Browse files
pytorchbot and dbort authored
Move examples/mediatek out from under the torch namespace (#5556)
Move examples/mediatek out from under the torch namespace (#5478) Summary: The code under examples/... is a proxy for user code, and users should never declare code under the `torch::` or `executorch::` namespaces. Move this code under the `example::` namespace to make it more clear that users should use their own namespaces when writing code like this. Pull Request resolved: #5478 Test Plan: - Built using the instructions at https://github.com/pytorch/executorch/blob/main/examples/mediatek/README.md Reviewed By: JacobSzwejbka, cccclai Differential Revision: D62992974 Pulled By: dbort fbshipit-source-id: b01f1b33d2853a0555ae19d79769a5bb6d0ba853 (cherry picked from commit 182f138) Co-authored-by: Dave Bort <[email protected]>
1 parent e6f7bb1 commit 0267b77

21 files changed

+106
-62
lines changed

examples/mediatek/executor_runner/llama_runner/FileMemMapper.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
#include <sys/stat.h>
1616
#include <unistd.h>
1717

18-
namespace torch::executor {
18+
namespace example {
1919

2020
class FileMemMapper { // Read-only mmap
2121
public:
@@ -97,4 +97,4 @@ class FileMemMapper { // Read-only mmap
9797
size_t mSize = 0;
9898
};
9999

100-
} // namespace torch::executor
100+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaConfig.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
#include "llm_helper/include/llm_types.h"
1515

16-
namespace torch::executor {
16+
namespace example {
1717

1818
using llm_helper::LLMType;
1919

@@ -42,4 +42,4 @@ struct LlamaModelPaths {
4242
std::vector<std::string> gen_model_paths;
4343
};
4444

45-
} // namespace torch::executor
45+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaModelChunk.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
#include "llm_helper/include/mask_builder.h"
2727
#include "llm_helper/include/rotary_embedding.h"
2828

29-
namespace torch::executor {
29+
namespace example {
3030

3131
inline std::vector<size_t> getIndexRange(
3232
const size_t startIndex,
@@ -343,4 +343,4 @@ void LlamaModelChunk::InitCache() {
343343
}
344344
}
345345

346-
} // namespace torch::executor
346+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaModelChunk.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,12 @@
2727
#include "llm_helper/include/mask_builder.h"
2828
#include "llm_helper/include/rotary_embedding.h"
2929

30-
namespace torch::executor {
30+
namespace example {
3131

3232
using llm_helper::MaskBuilder;
3333
using llm_helper::RotaryEmbeddingMasterLut;
3434

35-
using TensorShape = Span<const int32_t>;
35+
using TensorShape = executorch::runtime::Span<const int32_t>;
3636
using ModelIndexMap = std::unordered_map<size_t, size_t>;
3737

3838
// Llama decoder chunk
@@ -135,4 +135,4 @@ class LlamaModelChunk : public ModelChunk {
135135
size_t mCurrentTokenIndex = 0;
136136
};
137137

138-
} // namespace torch::executor
138+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaRuntime.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
#include "llm_helper/include/rotary_embedding.h"
1919
#include "llm_helper/include/token_embedding.h"
2020

21-
namespace torch::executor {
21+
namespace example {
2222

2323
void LlamaRuntime::Initialize(
2424
const LlamaModelOptions& modelOptions,
@@ -201,4 +201,4 @@ const LlamaModelOptions& LlamaRuntime::GetModelOptions() const {
201201
return mModelOptions;
202202
}
203203

204-
} // namespace torch::executor
204+
} // namespace example

examples/mediatek/executor_runner/llama_runner/LlamaRuntime.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
#include "llm_helper/include/rotary_embedding.h"
2121
#include "llm_helper/include/token_embedding.h"
2222

23-
namespace torch::executor {
23+
namespace example {
2424

2525
class LlamaRuntime {
2626
public:
@@ -56,4 +56,4 @@ class LlamaRuntime {
5656
size_t mTokenIndex = 0;
5757
};
5858

59-
} // namespace torch::executor
59+
} // namespace example

examples/mediatek/executor_runner/llama_runner/ModelChunk.cpp

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,21 @@
2323
#define ENSURE_INIT \
2424
ET_CHECK_MSG(Initialized(), "Error: Model chunk not initialized.");
2525

26-
namespace torch::executor {
27-
28-
using util::FileDataLoader;
26+
namespace example {
27+
28+
using executorch::aten::Tensor;
29+
using executorch::aten::TensorImpl;
30+
using executorch::extension::FileDataLoader;
31+
using executorch::runtime::Error;
32+
using executorch::runtime::HierarchicalAllocator;
33+
using executorch::runtime::MemoryAllocator;
34+
using executorch::runtime::MemoryManager;
35+
using executorch::runtime::Method;
36+
using executorch::runtime::MethodMeta;
37+
using executorch::runtime::Program;
38+
using executorch::runtime::Result;
39+
using executorch::runtime::Span;
40+
using executorch::runtime::Tag;
2941

3042
static constexpr size_t kMethodAllocatorPoolSize = 4 * 1024U * 1024U; // 4MB
3143

@@ -572,4 +584,4 @@ void ModelChunk::ReleaseModelInstance(void* modelInstance) {
572584
}
573585
}
574586

575-
} // namespace torch::executor
587+
} // namespace example

examples/mediatek/executor_runner/llama_runner/ModelChunk.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
#include "MultiModelLoader.h"
1818

19-
namespace torch::executor {
19+
namespace example {
2020

2121
struct BufferInfo {
2222
void* data = nullptr;
@@ -91,7 +91,7 @@ class ModelChunk : protected MultiTokenSizeModelLoader {
9191
// Release allocated buffers for model IOs
9292
void ReleaseIoBuffers();
9393

94-
Method& GetModelMethod();
94+
executorch::runtime::Method& GetModelMethod();
9595

9696
private:
9797
// Override the virtual functions
@@ -119,4 +119,4 @@ class ModelChunk : protected MultiTokenSizeModelLoader {
119119
std::unordered_map<size_t, size_t> mModelOutToInIndexLinks;
120120
};
121121

122-
} // namespace torch::executor
122+
} // namespace example

examples/mediatek/executor_runner/llama_runner/MultiModelLoader.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
#include <unordered_map>
1717
#include <vector>
1818

19-
namespace torch::executor {
19+
namespace example {
2020

2121
template <typename IdType>
2222
void MultiModelLoader<IdType>::LoadModels() {
@@ -174,4 +174,4 @@ std::string MultiModelLoader<IdType>::GetIdString(const IdType& id) {
174174
template class MultiModelLoader<int>;
175175
template class MultiModelLoader<size_t>;
176176

177-
} // namespace torch::executor
177+
} // namespace example

examples/mediatek/executor_runner/llama_runner/MultiModelLoader.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
#include <unordered_map>
1313
#include <vector>
1414

15-
namespace torch::executor {
15+
namespace example {
1616

1717
template <typename IdType = size_t>
1818
class MultiModelLoader {
@@ -92,4 +92,4 @@ class MultiModelLoader {
9292
IdType mCurrentModelId = 0;
9393
};
9494

95-
} // namespace torch::executor
95+
} // namespace example

0 commit comments

Comments (0)