Skip to content

Commit b84bf31

Browse files
committed
debug
1 parent 84de45c commit b84bf31

File tree

3 files changed

+15
-4
lines changed

3 files changed

+15
-4
lines changed

extension/llm/runner/llm_runner_helper.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -111,9 +111,9 @@ get_llm_metadata(tokenizers::Tokenizer* tokenizer, Module* module) {
111111
if (!method_names.count(llm::kMaxSeqLen)) {
112112
ET_LOG(
113113
Error,
114-
"Required metadata method %s not found in model",
114+
"Required metadata method %s not found in model. Bypass",
115115
llm::kMaxSeqLen);
116-
return ::executorch::runtime::Error::InvalidArgument;
116+
// return ::executorch::runtime::Error::InvalidArgument;
117117
}
118118

119119
for (auto& pair : metadata) {

extension/llm/runner/multimodal_prefiller.cpp

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,8 @@ MultimodalPrefiller::MultimodalPrefiller(
3737
Result<uint64_t> MultimodalPrefiller::prefill(
3838
const MultimodalInput& input,
3939
int64_t& start_pos) {
40+
ET_LOG(Error, "Here 000000000000000000000000000000000000000000000000000000000000000000 %d", (int) (start_pos));
41+
ET_LOG(Error, "Here 000000000000000000000000000000000000000000000000000000000000000000 %s", input.get_text().c_str());
4042
// 1. Run encoder model.
4143
::executorch::runtime::EValue encoder_output;
4244
if (input.is_image()) {
@@ -73,12 +75,14 @@ Result<uint64_t> MultimodalPrefiller::prefill(
7375
auto& text = input.get_text();
7476
std::vector<uint64_t> tokens =
7577
ET_UNWRAP_TOKENIZER(tokenizer_->encode(text));
78+
ET_LOG(Error, "Here 666666666666666666666666666666666666666666666666666666666666666666666666666666");
7679

7780
auto text_tensor = executorch::extension::from_blob(
7881
tokens.data(),
7982
{1, static_cast<aten::SizesType>(tokens.size())},
8083
::executorch::aten::ScalarType::Long);
8184

85+
ET_LOG(Error, "Here 555555555555555555555555555555555555555555555555555555555555555555555555555");
8286
// Run text encoder (token embeddings)
8387
auto token_embedding_outputs =
8488
ET_UNWRAP(module_->execute(kTokenEmbeddingMethod, text_tensor));
@@ -89,6 +93,7 @@ Result<uint64_t> MultimodalPrefiller::prefill(
8993
// For any other input types, return error
9094
return ::executorch::runtime::Error::NotSupported;
9195
}
96+
ET_LOG(Error, "Here 000000000000000000000000000000000000000000000000000000000000000000000000000");
9297

9398
// 2. Run decoder model for prefill.
9499
// `cache_position` goes from start_pos to start_pos + encoder_output.size(1).
@@ -107,6 +112,7 @@ Result<uint64_t> MultimodalPrefiller::prefill(
107112
cache_positions.data(),
108113
{static_cast<int>(seq_len)},
109114
executorch::aten::ScalarType::Long);
115+
ET_LOG(Error, "Here 111111111111111111111111111111111111111111111111111111111111111111111111111111111111111");
110116
auto prefill_result = module_->execute(
111117
kTextModelMethod, {cache_position_tensor, encoder_output});
112118
if (prefill_result.error() != ::executorch::runtime::Error::Ok) {
@@ -121,6 +127,7 @@ Result<uint64_t> MultimodalPrefiller::prefill(
121127
return ::executorch::runtime::Error::InvalidState;
122128
}
123129
auto outputs_res = prefill_outputs[0].toTensor();
130+
ET_LOG(Error, "Here 222222222222222222222222222222222222222222222222222222222222222222222222222222");
124131

125132
// Update start_pos, tracking the current cache position.
126133
start_pos += seq_len;

extension/llm/runner/multimodal_runner.cpp

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -57,9 +57,9 @@ Error MultimodalRunner::load() {
5757
// Don't print with the same priority during warmup
5858
#define RUNNER_ET_LOG(warmup, format, ...) \
5959
if (warmup) { \
60-
ET_LOG(Debug, format, __VA_ARGS__); \
60+
ET_LOG(Error, format, __VA_ARGS__); \
6161
} else { \
62-
ET_LOG(Info, format, __VA_ARGS__); \
62+
ET_LOG(Error, format, __VA_ARGS__); \
6363
}
6464

6565
Error MultimodalRunner::generate(
@@ -104,16 +104,20 @@ Error MultimodalRunner::generate(
104104

105105
uint64_t prefill_next_token = 0;
106106
// Process multimodal inputs in order
107+
ET_LOG(Error, "0000000000000000000000000000000000000000000000000000SIZE%d", inputs.size());
107108
for (const MultimodalInput& input : inputs) {
109+
ET_LOG(Error, "00000000000000000000000000000000123321451345143100");
108110
prefill_next_token = ET_UNWRAP(multimodal_prefiller_->prefill(input, pos_));
109111
}
112+
ET_LOG(Error, "1111111111111111111111111111111111111111111111111111");
110113

111114
stats_->first_token_ms = time_in_ms();
112115
stats_->prompt_eval_end_ms = time_in_ms();
113116
stats_->num_prompt_tokens = pos_;
114117

115118
wrapped_callback(ET_UNWRAP_TOKENIZER(
116119
tokenizer_->decode(prefill_next_token, prefill_next_token)));
120+
ET_LOG(Info, "2222222222222222222222222222222222222222222222222222");
117121

118122
RUNNER_ET_LOG(
119123
config.warming,

0 commit comments

Comments (0)