# Usage: python iteration_log_parser.py --filename <filename> --total_rank <total_rank> --csv
# The log file should be produced by redirecting both the server-side and the
# device-side output into the same file. The benchmark scripts live under
# tensorrt_llm/serve/scripts/.
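#
# Illustrative invocation (the file name is a made-up example):
#   python iteration_log_parser.py --filename combined_run.log --total_rank 4 --csv
#
# Based on the split patterns in IterStateManager.update() below, the parser
# expects iteration lines roughly of this (assumed) shape:
#   [I] iter = 128, rank = 0, host_step_time = 12.34 ms, states = {'num_ctx_tokens': 0, 'num_ctx_requests': 0, 'num_generation_tokens': 64}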

import argparse
import json


class IterState:
    """State of a single iteration, aggregated across all ranks."""

    def __init__(self, total_rank, iter_num):
        self.total_rank = total_rank
        # -1 marks a rank that has not reported this iteration yet.
        self.ctx_num = [-1] * total_rank
        self.gen_num = [-1] * total_rank
        self.request_num = [-1] * total_rank
        self.iter_time = -1
        self.iter_num = iter_num

    @property
    def ctx_phase(self):
        # Every rank must have reported before the phase can be classified.
        assert all(i != -1 for i in self.ctx_num), self.ctx_num
        # An iteration counts as context phase if any rank processed context tokens.
        return any(i > 0 for i in self.ctx_num)

    @property
    def gen_phase(self):
        return not self.ctx_phase
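
    # Phase classification example (hypothetical values): ctx_num == [0, 0, 128, 0]
    # marks the iteration as context phase, while ctx_num == [0, 0, 0, 0] marks it
    # as generation phase.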

    def update(self, rank, state_dict, elapsed_time):
        self.ctx_num[rank] = state_dict["num_ctx_tokens"]
        self.request_num[rank] = state_dict["num_ctx_requests"]
        self.gen_num[rank] = state_dict["num_generation_tokens"]
        # Rank 0's host step time is taken as the iteration time.
        if rank == 0:
            self.iter_time = elapsed_time


class IterStateManager:
    """Collects per-iteration states and benchmark summary metrics."""

    def __init__(self, total_rank):
        self.iters: dict[int, IterState] = {}
        self.total_rank = total_rank
        self.benchmark_duration = 0
        self.ttft_median = 0
        self.ttft_p99 = 0

    def update(self, line):
        if "states = " not in line:
            return
        rank = int(line.split("rank =")[1].strip().split(",")[0].strip())
        # The states dict is logged with single quotes; swap them for double
        # quotes so it parses as JSON.
        states = json.loads(line.split("states =")[1].strip().replace("'", '"'))
        elapsed_time = float(
            line.split("host_step_time =")[1].strip().split("ms,")[0].strip())
        iter_num = int(line.split("iter =")[1].strip().split(",")[0].strip())
        if iter_num not in self.iters:
            self.iters[iter_num] = IterState(self.total_rank, iter_num)
        self.iters[iter_num].update(rank, states, elapsed_time)
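
    # Usage sketch; the log line below is a fabricated example in the expected
    # format, not output from a real run:
    #   mgr = IterStateManager(total_rank=1)
    #   mgr.update("[I] iter = 1, rank = 0, host_step_time = 5.00 ms, states = "
    #              "{'num_ctx_tokens': 32, 'num_ctx_requests': 1, "
    #              "'num_generation_tokens': 0}")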

    def clear(self):
        self.iters = {}
        self.benchmark_duration = 0
        self.ttft_median = 0
        self.ttft_p99 = 0

    def print_result(self, prefix="", csv=False):
        ctx_phase = {"iter_num": 0, "time": 0, "request_num": 0}
        gen_phase = {"iter_num": 0, "time": 0}
        for iter_state in self.iters.values():
            if iter_state.ctx_phase:
                ctx_phase["iter_num"] += 1
                ctx_phase["request_num"] += iter_state.request_num[0]
                ctx_phase["time"] += iter_state.iter_time
            else:
                gen_phase["iter_num"] += 1
                gen_phase["time"] += iter_state.iter_time
        total_time = ctx_phase["time"] + gen_phase["time"]
        # Guard against a round with no recorded iterations.
        ctx_ratio = ctx_phase["time"] / total_time * 100.0 if total_time else 0.0
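        # CSV column order: ctx_iter_num, ctx_time_s, gen_iter_num, gen_time_s,
        # total_time_s, ttft_median_ms, ttft_p99_ms.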
        if csv:
            print(
                f"{ctx_phase['iter_num']},{ctx_phase['time']/1000:.2f},{gen_phase['iter_num']},{gen_phase['time']/1000:.2f},{total_time/1000:.2f},{self.ttft_median:.2f},{self.ttft_p99:.2f}"
            )
        else:
            print(
                f"{prefix}ctx_phase: {ctx_phase}, gen_phase: {gen_phase}, ctx_phase_ratio: {ctx_ratio:.2f}%, Total time: {total_time/1000:.2f}s, TTFT_median: {self.ttft_median:.2f}ms, TTFT_p99: {self.ttft_p99:.2f}ms"
            )


def main(filename, total_rank, csv):
    # unicode_escape tolerates stray escape sequences in the combined log.
    with open(filename, "r", encoding="unicode_escape") as f:
        # start is a small state machine: 0 = before the first benchmark run,
        # 1 = inside a run, 2 = a run's result summary has been seen.
        round_num = 0
        start = 0
        iter_state_manager = IterStateManager(total_rank)
        for line in f:
            if "Starting main benchmark run" in line:
                if start == 2:
                    # Print the result of the previous round.
                    iter_state_manager.print_result(f"Round: {round_num}, ", csv)
                round_num += 1
                start = 1
                iter_state_manager.clear()
            elif "[I] iter =" in line and start == 1:
                iter_state_manager.update(line)
            elif "Serving Benchmark Result" in line:
                start = 2
            elif "Benchmark duration" in line:
                iter_state_manager.benchmark_duration = float(
                    line.split("Benchmark duration (s):")[1].strip())
            elif "Median TTFT" in line:
                iter_state_manager.ttft_median = float(
                    line.split("Median TTFT (ms):")[1].strip())
            elif "P99 TTFT" in line:
                iter_state_manager.ttft_p99 = float(
                    line.split("P99 TTFT (ms):")[1].strip())

        if start == 2:
            # Print the result of the final round.
            iter_state_manager.print_result(f"Round: {round_num}, ", csv)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--filename", type=str, required=True,
                        help="combined server/device log file to parse")
    parser.add_argument("--total_rank", type=int, default=1,
                        help="total number of ranks that wrote to the log")
    parser.add_argument("--csv", action="store_true",
                        help="print results as CSV instead of readable text")
    args = parser.parse_args()
    main(args.filename, args.total_rank, args.csv)