@@ -1167,7 +1167,7 @@ def to_excel(
             df.to_excel(
                 writer,
                 sheet_name=name,
-                freeze_panes=(df.columns.nlevels + df.index.nlevels, df.index.nlevels),
+                freeze_panes=(df.columns.nlevels + 1, df.index.nlevels),
             )
             f_highlights[name] = tview.f_highlight
             if tview.plots:
@@ -1222,15 +1222,15 @@ def to_excel(
                 writer,
                 sheet_name=name,
                 freeze_panes=(
-                    sbs_raw.columns.nlevels + sbs_raw.index.nlevels,
+                    sbs_raw.columns.nlevels + 1,
                     sbs_raw.index.nlevels,
                 ),
             )
             sbs_agg.to_excel(
                 writer,
                 sheet_name=f"{name}-AGG",
                 freeze_panes=(
-                    sbs_agg.columns.nlevels + sbs_agg.index.nlevels,
+                    sbs_agg.columns.nlevels + 1,
                     sbs_agg.index.nlevels,
                 ),
             )
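
Context for the change above: in pandas, `DataFrame.to_excel(..., freeze_panes=(n_rows, n_cols))` freezes the top `n_rows` rows and the leftmost `n_cols` columns of the sheet. With MultiIndex columns, the written header occupies `columns.nlevels` rows plus, typically, one extra row carrying the index level names, so `columns.nlevels + 1` pins exactly the header block, whereas the previous `columns.nlevels + index.nlevels` over-froze data rows whenever the index had several levels. A minimal sketch, with an illustrative frame and file name that are not taken from this code:

import pandas as pd

# Illustrative frame: 2 column levels, 2 index levels (hypothetical data).
df = pd.DataFrame(
    [[1, 2], [3, 4]],
    index=pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")], names=["k1", "k2"]),
    columns=pd.MultiIndex.from_tuples([("m", "u"), ("m", "v")], names=["c1", "c2"]),
)
with pd.ExcelWriter("sketch.xlsx") as writer:  # hypothetical output file
    df.to_excel(
        writer,
        sheet_name="demo",
        # rows to freeze: the column-header rows plus the index-names row pandas usually adds;
        # columns to freeze: one per index level
        freeze_panes=(df.columns.nlevels + 1, df.index.nlevels),
    )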
@@ -1456,9 +1456,7 @@ def _mkc(m, s):
 
 
 class CubeLogsPerformance(CubeLogs):
-    """
-    Processes logs coming from experiments.
-    """
+    """Processes logs coming from experiments."""
 
     def __init__(
         self,
@@ -1511,20 +1509,25 @@ def __init__(
15111509 "n_model_faster2x" ,
15121510 "n_model_faster3x" ,
15131511 "n_model_faster4x" ,
1512+ "n_model_faster5x" ,
15141513 "n_node_attention" ,
15151514 "n_node_attention23" ,
1516- "n_node_rotary_embedding" ,
1517- "n_node_rotary_embedding23" ,
1518- "n_node_layer_normalization" ,
1519- "n_node_layer_normalization23" ,
1515+ "n_node_causal_mask" ,
1516+ "n_node_constant" ,
15201517 "n_node_control_flow" ,
1521- "n_node_scatter " ,
1518+ "n_node_expand " ,
15221519 "n_node_function" ,
1520+ "n_node_gqa" ,
15231521 "n_node_initializer" ,
15241522 "n_node_initializer_small" ,
1525- "n_node_constant" ,
1523+ "n_node_layer_normalization" ,
1524+ "n_node_layer_normalization23" ,
1525+ "n_node_reshape" ,
1526+ "n_node_rotary_embedding" ,
1527+ "n_node_rotary_embedding23" ,
1528+ "n_node_scatter" ,
1529+ "n_node_sequence" ,
15261530 "n_node_shape" ,
1527- "n_node_expand" ,
15281531 "onnx_n_nodes_no_cst" ,
15291532 "peak_gpu_torch" ,
15301533 "peak_gpu_nvidia" ,
@@ -1690,6 +1693,11 @@ def first_err(df: pandas.DataFrame) -> pandas.Series:
16901693 "time_latency" ,
16911694 gdf (df , "time_latency_eager" ) > gdf (df , "time_latency" , np .inf ) * 3.98 ,
16921695 ),
1696+ n_model_faster5x = lambda df : gpreserve (
1697+ df ,
1698+ "time_latency" ,
1699+ gdf (df , "time_latency_eager" ) > gdf (df , "time_latency" , np .inf ) * 4.98 ,
1700+ ),
16931701 n_node_attention23 = lambda df : gpreserve (
16941702 df , "time_latency_eager" , gdf (df , "op_onnx__Attention" )
16951703 ),
@@ -1720,6 +1728,11 @@ def first_err(df: pandas.DataFrame) -> pandas.Series:
                 + gdf(df, "op_onnx_com.microsoft_DecoderMaskedMultiHeadAttention", 0)
                 + gdf(df, "op_onnx_com.microsoft_SparseAttention", 0),
             ),
+            n_node_gqa=lambda df: gpreserve(
+                df,
+                "time_latency_eager",
+                gdf(df, "op_onnx_com.microsoft_GroupQueryAttention", 0),
+            ),
             n_node_layer_normalization=lambda df: gpreserve(
                 df,
                 "time_latency_eager",
@@ -1764,9 +1777,22 @@ def first_err(df: pandas.DataFrame) -> pandas.Series:
             n_node_shape=lambda df: gpreserve(
                 df, "time_latency_eager", gdf(df, "op_onnx__Shape")
             ),
+            n_node_reshape=lambda df: gpreserve(
+                df, "time_latency_eager", gdf(df, "op_onnx__Reshape")
+            ),
             n_node_expand=lambda df: gpreserve(
                 df, "time_latency_eager", gdf(df, "op_onnx__Expand")
             ),
+            n_node_causal_mask=lambda df: gpreserve(
+                df,
+                "time_latency_eager",
+                gdf(df, "op_onnx__CausalMask", 0),
+            ),
+            n_node_sequence=lambda df: gpreserve(
+                df,
+                "time_latency_eager",
+                gdf(df, "op_onnx__SequenceAt", 0) + gdf(df, "op_onnx__SplitToSequence", 0),
+            ),
         )
         assert (
             formula in lambdas
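
The added entries (n_model_faster5x, n_node_gqa, n_node_reshape, n_node_causal_mask, n_node_sequence) all follow the same pattern: `gdf` reads a metric column with a fallback value when the column is absent, and `gpreserve` keeps the resulting value only for rows where a reference latency column is present. The helpers below are hypothetical stand-ins written only to illustrate that pattern and are not the module's actual `gdf`/`gpreserve` implementations:

import numpy as np
import pandas as pd

def gdf(df: pd.DataFrame, column: str, default: float = 0.0) -> pd.Series:
    # Hypothetical stand-in: return the column if present, otherwise a constant fallback.
    if column in df.columns:
        return df[column].fillna(default)
    return pd.Series(default, index=df.index)

def gpreserve(df: pd.DataFrame, ref_column: str, values) -> pd.Series:
    # Hypothetical stand-in: keep values only where the reference metric exists, NaN elsewhere.
    mask = df[ref_column].notna() if ref_column in df.columns else pd.Series(False, index=df.index)
    return pd.Series(np.where(mask, values, np.nan), index=df.index)

# Same shape as the added n_model_faster5x entry: flag models roughly 5x faster than eager,
# using a 4.98 threshold to absorb measurement noise.
lambdas = dict(
    n_model_faster5x=lambda df: gpreserve(
        df,
        "time_latency",
        gdf(df, "time_latency_eager") > gdf(df, "time_latency", np.inf) * 4.98,
    ),
)

demo = pd.DataFrame({"time_latency": [0.01, np.nan], "time_latency_eager": [0.06, 0.5]})
print(lambdas["n_model_faster5x"](demo))  # 1.0 for the first row, NaN where time_latency is missing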