Merged
78 changes: 66 additions & 12 deletions lglpy/timeline/data/processed_trace.py
@@ -35,7 +35,12 @@
MetadataASTransfer, GPUStreamID, GPUStageID

LABEL_HEURISTICS = True
LABEL_MAX_LEN = 60
LABEL_MAX_LEN = 42
# Where to add ellipsis if label is too long
# - left = ...X
# - center = X...X
# - right = X...
LABEL_SPLIT = 'left'


class GPUWorkload:
@@ -78,30 +83,30 @@ def __init__(
self.submit = None
self.label_stack = None
self.parsed_label_name: Optional[str] = None
self.parsed_label_name_full: Optional[str] = None

if metadata:
self.submit = metadata.submit
self.label_stack = metadata.label_stack

def get_label_name(self) -> Optional[str]:
def get_label_name_full(self) -> Optional[str]:
'''
Get a cleaned up label name for a workload.

Warning: The heuristics here are not robust.
Get a cleaned up label name for a workload with no shortening.

Returns:
A modified label for use in the UI.
A label for use in the UI.
'''
# Cached label already parsed
if self.parsed_label_name_full is not None:
return self.parsed_label_name_full

# No label to parse
if not self.label_stack:
return None

# Cached label already parsed
if self.parsed_label_name is not None:
return self.parsed_label_name

if not LABEL_HEURISTICS:
return self.label_stack[-1]
self.parsed_label_name_full = self.label_stack[-1]
return self.parsed_label_name_full

# Create a copy we can edit ...
labels = list(self.label_stack)
@@ -141,7 +146,36 @@ def get_label_name(self) -> Optional[str]:
else:
label = '.'.join(labels)

if len(label) > LABEL_MAX_LEN:
self.parsed_label_name_full = label
return self.parsed_label_name_full

def get_label_name(self) -> Optional[str]:
'''
Get a cleaned up label name for a workload.

Warning: The heuristics here are not robust.

Returns:
A modified label for use in the UI.
'''
# Cached label already parsed
if self.parsed_label_name is not None:
return self.parsed_label_name

label = self.get_label_name_full()

# No label to parse
if not label:
return None

# Apply ellipsis shortening
if LABEL_SPLIT == 'left' and len(label) > LABEL_MAX_LEN:
label = f'...{label[-LABEL_MAX_LEN:]}'

elif LABEL_SPLIT == 'right' and len(label) > LABEL_MAX_LEN:
label = f'{label[0:LABEL_MAX_LEN]}...'

elif LABEL_SPLIT == 'center' and len(label) > LABEL_MAX_LEN:
half_max = LABEL_MAX_LEN // 2
prefix = label[0:half_max]
postfix = label[-half_max:]
Expand Down Expand Up @@ -176,6 +210,26 @@ def get_workload_name(self) -> str:

return label

def get_workload_name_full(self) -> str:
'''
Get a name for the workload.

This is based on the application debug label if there is one, but
with some heuristics to try and clean it up ...

Returns:
Returns the label for use in the UI.
'''
label = None
if self.label_stack:
label = self.get_label_name_full()

# Default label if no label or get_label_name_full heuristics stripped it
if not label:
return self.get_workload_type_name()

return label

def get_long_label(self) -> str:
'''
Get the long form label for this workload.
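Note (not part of the patch): a minimal standalone sketch of how the new LABEL_SPLIT and LABEL_MAX_LEN settings interact in get_label_name(). The shorten() helper and the sample label are illustrative only, and the 'center' join is inferred, since that branch is truncated in the hunk above.

```python
# Standalone sketch of the ellipsis shortening added in get_label_name().
# shorten() is a made-up helper; the real method caches its result on the
# workload object and only shortens labels over the limit.
LABEL_MAX_LEN = 42
LABEL_SPLIT = 'left'   # 'left' = ...X, 'center' = X...X, 'right' = X...


def shorten(label: str) -> str:
    # Labels at or under the limit are returned unchanged
    if len(label) <= LABEL_MAX_LEN:
        return label

    # left: keep the tail of the label and prefix an ellipsis
    if LABEL_SPLIT == 'left':
        return f'...{label[-LABEL_MAX_LEN:]}'

    # right: keep the head of the label and append an ellipsis
    if LABEL_SPLIT == 'right':
        return f'{label[0:LABEL_MAX_LEN]}...'

    # center: keep head and tail halves with an ellipsis between them
    # (assumed join; the joining line is not visible in the diff)
    half_max = LABEL_MAX_LEN // 2
    return f'{label[0:half_max]}...{label[-half_max:]}'


# With 'left', this keeps the most specific (right-hand) end of the label
print(shorten('RenderPass.Shadow.Cascade0.DrawOpaqueGeometryBatch'))
```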
26 changes: 23 additions & 3 deletions lglpy/timeline/gui/timeline/info_widget.py
@@ -29,7 +29,7 @@
from collections import defaultdict

from ...data.raw_trace import GPUStreamID, GPUStageID
from ...data.processed_trace import GPUWorkload
from ...data.processed_trace import GPUWorkload, LABEL_MAX_LEN
from ...drawable.text_pane_widget import TextPaneWidget


@@ -39,6 +39,7 @@ class TimelineInfoWidget(TextPaneWidget):
time ranges in the main timeline.
'''

FULL_LABEL = True
MAX_EVENTS = 5
VALIDSORTS = ['flush', 'runtime']

@@ -296,8 +297,27 @@ def compute_active_event_stats_single(self, event):
metrics.append('')
metrics.append('Workload properties:')

label = event.get_workload_name()
metrics.append(f' Name: {label}')
if self.FULL_LABEL:
label = event.get_workload_name_full()
else:
label = event.get_workload_name()

# Chunk a long label into line-width sections
chunk_size = LABEL_MAX_LEN + 3
next_chunk_size = chunk_size + 4
chunks = []
while label:
part = label[0: chunk_size]
chunks.append(part)

label = label[chunk_size:]
chunk_size = next_chunk_size

# Print a label in chunks
metrics.append(f' Name: {chunks[0]}')
for part in chunks[1:]:
metrics.append(f' {part}')

metrics.append(f' Stream: {stream_name}')
metrics.append(f' Stage: {stage_name}')
metrics.append(f' Duration: {event.duration / 1000000.0:0.2f} ms')
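Note (not part of the patch): a sketch of the new chunked name printing in the info pane, with chunk_label() as a stand-in for the inline loop. The +3/+4 offsets appear to account for the ellipsis and for the '  Name: ' prefix being four characters wider than the continuation indent, so the wrapped lines end on roughly the same column; that rationale is an inference, not stated in the diff.

```python
# Standalone sketch of the label chunking added to
# compute_active_event_stats_single(); chunk_label() is a made-up helper.
LABEL_MAX_LEN = 42


def chunk_label(label: str) -> list:
    # First chunk is printed after the '  Name: ' prefix, so it gets the
    # shorter width; later chunks use a 4-space indent and can be 4
    # characters wider.
    chunk_size = LABEL_MAX_LEN + 3
    next_chunk_size = chunk_size + 4

    chunks = []
    while label:
        chunks.append(label[0:chunk_size])
        label = label[chunk_size:]
        chunk_size = next_chunk_size

    return chunks


# Wrap a long (hypothetical) full workload name across several lines
lines = chunk_label('A' * 100)
print(f'  Name: {lines[0]}')
for part in lines[1:]:
    print(f'    {part}')
```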