diff --git a/TODO b/TODO
index 178423c..2b2da24 100644
--- a/TODO
+++ b/TODO
@@ -1,5 +1,7 @@
** Bugs:
+* Don't step on distro's /usr/bin/pybootchartgui -- use /usr/local/
+
* FIXME: are we merging / glupping threads properly ?
+ how can sreadahead apparently be doing no I/O ? :-)
@@ -11,9 +13,6 @@
+ traced %d processes of $$ (!) :-)
+ consumed %d seconds of CPU [ render these ... ]
- + horizontal zoom - should keep view horiz centered:
- but does not - cf. set_xscale
-
+ merge processes for CPU counts:
+ if the ns count is -identical- (and non-zero) it is the same process
+ need to back-merge somehow ?
diff --git a/pybootchartgui/batch.py b/pybootchartgui/batch.py
index 05c714e..48cc503 100644
--- a/pybootchartgui/batch.py
+++ b/pybootchartgui/batch.py
@@ -15,7 +15,7 @@
import cairo
from . import draw
-from .draw import RenderOptions
+from .draw import DrawContext
def render(writer, trace, app_options, filename):
handlers = {
@@ -30,17 +30,19 @@ def render(writer, trace, app_options, filename):
else:
fmt = app_options.format
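+    # always render the chart legends in non-interactive (batch) output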
+ app_options.show_legends = True
+
if not (fmt in handlers):
writer.error ("Unknown format '%s'." % fmt)
return 10
make_surface, write_surface = handlers[fmt]
- options = RenderOptions (app_options)
- (w, h) = draw.extents (options, 1.0, trace)
- w = max (w, draw.MIN_IMG_W)
+ drawctx = DrawContext (app_options, trace)
+ (w, h) = draw.extents (drawctx, 1.0, trace)
+ w = max (w, draw.C.MIN_IMG_W)
surface = make_surface (w, h)
- ctx = cairo.Context (surface)
- draw.render (ctx, options, 1.0, trace)
+ cr = cairo.Context (surface)
+ draw.render (cr, drawctx, 1.0, trace)
write_surface (surface)
writer.status ("bootchart written to '%s'" % filename)
diff --git a/pybootchartgui/draw.py b/pybootchartgui/draw.py
index 3a4da25..1cd685d 100644
--- a/pybootchartgui/draw.py
+++ b/pybootchartgui/draw.py
@@ -13,32 +13,45 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-
+import sys
import cairo
import math
import re
import random
import colorsys
+import collections
+import traceback # debug
-class RenderOptions:
+from samples import IOStat, EventSample, PID_SCALE, LWP_OFFSET, KTHREADD_PID
+from . import writer
- def __init__(self, app_options):
- # should we render a cumulative CPU time chart
- self.cumulative = True
- self.charts = True
- self.kernel_only = False
- self.app_options = app_options
+# Constants: Put the more heavily used, non-derived constants in a named tuple, for immutability.
+# XX The syntax is awkward, but more elegant alternatives have run-time overhead.
+# http://stackoverflow.com/questions/4996815/ways-to-make-a-class-immutable-in-python
+DrawConsts = collections.namedtuple('XXtypename',
+ # |height of a process, in user-space
+ # | |taskstats-specific
+ ['CSEC','bar_h','off_x','off_y','proc_h','leg_s','CUML_HEIGHT','MIN_IMG_W','legend_indent'])
+C = DrawConsts( 100, 55, 10, 10, 16, 11, 2000, 800, 5)
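+# e.g. (added) C.proc_h == 16; attempting "C.proc_h = 20" raises AttributeError,
+# which is the immutability wanted above.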
- def proc_tree (self, trace):
- if self.kernel_only:
- return trace.kernel_tree
- else:
- return trace.proc_tree
+# Derived constants
+# XX create another namedtuple for these
+meminfo_bar_h = 2 * C.bar_h
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
+TRANSPARENT = (0.0, 0.0, 0.0, 0.0)
+
WHITE = (1.0, 1.0, 1.0, 1.0)
+BLACK = (0.0, 0.0, 0.0, 1.0)
+DARK_GREY = (0.1, 0.1, 0.1)
+NOTEPAD_YELLOW = (0.95, 0.95, 0.8, 1.0)
+NOTEPAD_PINK = (1.0, 0.90, 1.0, 1.0)
+PURPLE = (0.6, 0.1, 0.6, 1.0)
+RED = (1.0, 0.0, 0.0)
+MAGENTA = (0.7, 0.0, 0.7, 1.0)
+
# Process tree border color.
BORDER_COLOR = (0.63, 0.63, 0.63, 1.0)
# Second tick line color.
@@ -62,13 +75,18 @@ def proc_tree (self, trace):
LEGEND_FONT_SIZE = 12
# CPU load chart color.
-CPU_COLOR = (0.40, 0.55, 0.70, 1.0)
+CPU_COLOR = (0.60, 0.60, 0.70, 1.0)
+# CPU system-mode load chart color.
+CPU_SYS_COLOR = (0.70, 0.65, 0.40, 1.0)
# IO wait chart color.
-IO_COLOR = (0.76, 0.48, 0.48, 0.5)
-# Disk throughput color.
-DISK_TPUT_COLOR = (0.20, 0.71, 0.20, 1.0)
-# CPU load chart color.
-FILE_OPEN_COLOR = (0.20, 0.71, 0.71, 1.0)
+IO_COLOR = (0.88, 0.74, 0.74, 1.0)
+
+PROCS_RUNNING_COLOR = (0.0, 1.0, 0.0, 1.0)
+PROCS_BLOCKED_COLOR = (0.7, 0.0, 0.0, 1.0)
+
+DISK_READ_COLOR = (0.20, 0.71, 0.20, 1.0)
+DISK_WRITE_COLOR = MAGENTA
+
# Mem cached color
MEM_CACHED_COLOR = CPU_COLOR
# Mem used color
@@ -76,16 +94,18 @@ def proc_tree (self, trace):
# Buffers color
MEM_BUFFERS_COLOR = (0.4, 0.4, 0.4, 0.3)
# Swap color
-MEM_SWAP_COLOR = DISK_TPUT_COLOR
+MEM_SWAP_COLOR = (0.20, 0.71, 0.20, 1.0)
+# Process CPU load of children -- including those waited for by the parent, but not captured by any collector sample
+CPU_CHILD_COLOR = (1.00, 0.70, 0.00, 1.0)
# Process border color.
PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
# Waiting process color.
-PROC_COLOR_D = (0.76, 0.48, 0.48, 0.5)
+PROC_COLOR_D = PROCS_BLOCKED_COLOR # (0.76, 0.45, 0.35, 1.0)
# Running process color.
-PROC_COLOR_R = CPU_COLOR
+PROC_COLOR_R = CPU_COLOR # (0.40, 0.50, 0.80, 1.0) # should look similar to CPU_COLOR
# Sleeping process color.
-PROC_COLOR_S = (0.94, 0.94, 0.94, 1.0)
+PROC_COLOR_S = (0.95, 0.93, 0.93, 1.0)
# Stopped process color.
PROC_COLOR_T = (0.94, 0.50, 0.50, 1.0)
# Zombie process color.
@@ -100,6 +120,10 @@ def proc_tree (self, trace):
# Process label font.
PROC_TEXT_FONT_SIZE = 12
+# Event tick color.
+DIM_EVENT_COLOR = (0.2, 0.2, 0.2)
+HIGHLIGHT_EVENT_COLOR = (0.4, 0.0, 0.6)
+
# Signature color.
SIG_COLOR = (0.0, 0.0, 0.0, 0.3125)
# Signature font.
@@ -108,7 +132,7 @@ def proc_tree (self, trace):
SIGNATURE = "http://github.com/mmeeks/bootchart"
# Process dependency line color.
-DEP_COLOR = (0.75, 0.75, 0.75, 1.0)
+DEP_COLOR = (0.75, 0.6, 0.75, 1.0)
# Process dependency line stroke.
DEP_STROKE = 1.0
@@ -121,15 +145,18 @@ def proc_tree (self, trace):
# Process states
STATE_UNDEFINED = 0
-STATE_RUNNING = 1
+STATE_RUNNING = 1 # useful state info
STATE_SLEEPING = 2
-STATE_WAITING = 3
+STATE_WAITING = 3 # useful state info
STATE_STOPPED = 4
STATE_ZOMBIE = 5
-STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
+STATE_COLORS = [(0, 0, 0, 0), PROCS_RUNNING_COLOR, PROC_COLOR_S, PROC_COLOR_D, \
PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
+JUSTIFY_LEFT = "left"
+JUSTIFY_CENTER = "center"
+
# CumulativeStats Types
STAT_TYPE_CPU = 0
STAT_TYPE_IO = 1
@@ -138,331 +165,762 @@ def proc_tree (self, trace):
def get_proc_state(flag):
return "RSDTZXW".find(flag) + 1
-def draw_text(ctx, text, color, x, y):
- ctx.set_source_rgba(*color)
- ctx.move_to(x, y)
- ctx.show_text(text)
-
-def draw_fill_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.fill()
-
-def draw_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.stroke()
-
-def draw_legend_box(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
-
-def draw_legend_line(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
- ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
- ctx.fill()
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
-
-def draw_label_in_box(ctx, color, label, x, y, w, maxx):
- label_w = ctx.text_extents(label)[2]
- label_x = x + w / 2 - label_w / 2
- if label_w + 10 > w:
- label_x = x + w + 5
- if label_x + label_w > maxx:
- label_x = x - label_w - 5
- draw_text(ctx, label, color, label_x, y)
-
-def draw_sec_labels(ctx, rect, sec_w, nsecs):
- ctx.set_font_size(AXIS_FONT_SIZE)
+def draw_text(cr, text, color, x, y):
+ cr.set_source_rgba(*color)
+ cr.move_to(x, y)
+ cr.show_text(text)
+ return cr.text_extents(text)[2]
+
+def draw_fill_rect(cr, color, rect):
+ cr.set_source_rgba(*color)
+ cr.rectangle(*rect)
+ cr.fill()
+
+def draw_rect(cr, color, rect):
+ cr.set_source_rgba(*color)
+ cr.rectangle(*rect)
+ cr.stroke()
+
+def draw_diamond(cr, x, y, w, h):
+ cr.save()
+ cr.set_line_width(0.0)
+ cr.move_to(x-w/2, y)
+ cr.line_to(x, y+h/2)
+ cr.line_to(x+w/2, y)
+ cr.line_to(x, y-h/2)
+ cr.line_to(x-w/2, y)
+ cr.fill()
+ cr.restore()
+
+def draw_legend_diamond(cr, label, fill_color, x, y, w, h):
+ cr.set_source_rgba(*fill_color)
+ draw_diamond(cr, x, y-h/2, w, h)
+ return draw_text(cr, label, TEXT_COLOR, x + w, y)
+
+def draw_legend_box(cr, label, fill_color, x, y, s):
+ draw_fill_rect(cr, fill_color, (x, y - s, s, s))
+ return s + 5 + draw_text(cr, label, TEXT_COLOR, x + s + 5, y)
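+    # returns the horizontal space consumed (box + gap + label width), so callers
+    # can advance curr_x, e.g. "curr_x += 20 + draw_legend_box(...)" in render_charts()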
+
+def draw_legend_line(cr, label, fill_color, x, y, s):
+ draw_fill_rect(cr, fill_color, (x, y - s/2, s + 1, 3))
+ cr.fill()
+ draw_text(cr, label, TEXT_COLOR, x + s + 5, y)
+
+def draw_label_at_time(cr, color, label, y, label_x):
+ draw_text(cr, label, color, label_x, y)
+ return cr.text_extents(label)[2] # return width
+
+# y is at process bar boundary above
+def draw_label_on_bg(cr, bg_color, color, label, y, label_x):
+ TOP_MARGIN=3 # user-space
+ label = label
+ x_bearing, y_bearing, width, height, x_advance, y_advance = cr.text_extents(label)
+ draw_fill_rect(cr, bg_color, (label_x, y+1, x_advance, C.proc_h-2))
+ draw_text(cr, label, color, label_x, y-y_bearing+TOP_MARGIN)
+ return x_advance
+
+def _time_origin_drawn(ctx, trace):
+ if ctx.app_options.prehistory:
+ # XX Would have to set to proc_tree.starttime for backwards compatibility
+ return 0
+ else:
+ return trace.cpu_stats[0].time - in_chart_X_margin(ctx.proc_tree(trace))
+
+def _sec_w(xscale):
+ return xscale * 50 # width of a second, in user-space
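+    # e.g. (added) at xscale == 1.0 one second spans 50 user-space units, so a 0.2 s
+    # sample interval spans 10 units; _sec_w(2.0) == 100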
+
+# XX Migrating functions into the drawing object seems to be a loss: the
+# replacement of 'ctx' with 'self' is no help to the reader, and
+# additional text columns are lost to the necessary indentation
+class DrawContext:
+    '''All drawing-related state that can be bound when a PyBootchartWindow is instantiated.'''
+ def __init__(self, app_options, trace, cumulative = True, charts = True, kernel_only = False):
+ self.app_options = app_options
+ # should we render a cumulative CPU time chart
+ self.cumulative = cumulative # Bootchart2 collector only
+ self.charts = charts
+ self.kernel_only = kernel_only # set iff collector daemon saved output of `dmesg`
+ self.SWEEP_CSEC = None
+
+ self.ps_event_lists_valid = False
+
+ self.event_dump_list = None
+ self.trace = trace
+
+ self.cr = None # Cairo rendering context
+ self.time_origin_drawn = None # time of leftmost plotted data, as integer csecs
+ self.SEC_W = None
+ self.time_origin_relative = None # currently used only to locate events
+
+ # intra-rendering state
+ self.hide_process_y = None
+ self.unhide_process_y = None
+ self.proc_above_was_hidden = False
+
+ def _validate_event_state(self, ctx, trace):
+ def copy_if_enabled(color_list, re_index):
+ return [ec.color_regex[re_index] for ec in color_list if ec.enable]
+
+ self.event_RE = copy_if_enabled( self.app_options.event_color, 0)
+ self.event_interval_0_RE = copy_if_enabled( self.app_options.event_interval_color, 0)
+ self.event_interval_1_RE = copy_if_enabled( self.app_options.event_interval_color, 1)
+
+ if trace.ps_threads_stats:
+ key_fn = lambda ev: ev.tid * PID_SCALE + LWP_OFFSET
+ else:
+ key_fn = lambda ev: ev.pid * PID_SCALE
+
+ p_map = trace.ps_stats.process_map
+
+ # Copy events selected by currently enabled EventSources to per-process lists
+ for proc in p_map.values():
+ proc.events = []
+ for ep in filter(lambda ep: ep.enable, ctx.app_options.event_source.itervalues()):
+ for ev in ep.parsed:
+ key = key_fn(ev)
+ if key in p_map:
+ p_map[key].events.append(ev)
+ else:
+ writer.info("no samples of /proc/%d/task/%d/stat found -- event lost:\n\t%s" %
+ (ev.pid, ev.tid, ev.raw_log_line))
+
+ # Strip out from per-process lists any events not selected by a regexp.
+ for proc in p_map.values():
+ enabled_events = []
+ for ev in proc.events:
+ # Separate attrs, because they may be drawn differently
+ ev.event_match_any = None
+ ev.event_match_0 = None
+ ev.event_match_1 = None
+ def _set_attr_on_match(RE_list, event_match_attr):
+ # Only LAST matching regexp contributes to the label string --
+ # this allows user to append overriding regexes to the command line.
+ m = None
+ for ev_re in RE_list:
+ m = re.search(ev_re, ev.raw_log_line)
+ if m:
+ setattr(ev, event_match_attr, m)
+ return getattr(ev, event_match_attr) is not None
+ _set_attr_on_match(ctx.event_RE + ctx.event_interval_0_RE + ctx.event_interval_1_RE,
+ "event_match_any")
+ _set_attr_on_match(ctx.event_interval_0_RE, "event_match_0")
+ _set_attr_on_match(ctx.event_interval_1_RE, "event_match_1")
+ enabled_events.append(ev)
+ enabled_events.sort(key = lambda ev: ev.time_usec)
+ proc.events = enabled_events
+ proc.draw |= len(proc.events) > 0
+ self.ps_event_lists_valid = True
+
+ def per_render_init(self, cr, ctx, trace, time_origin_drawn, SEC_W, sweep_csec):
+ self.cr = cr
+ self.time_origin_drawn = time_origin_drawn
+ self.SEC_W = SEC_W
+ self.n_WIDTH = cr.text_extents("n")[2]
+ self.M_HEIGHT = cr.text_extents("M")[3]
+
+ # merge in enabled event sources
+ if not self.ps_event_lists_valid:
+ self._validate_event_state(ctx, trace)
+
+ self.SWEEP_CSEC = sweep_csec
+ if self.SWEEP_CSEC:
+ self.SWEEP_CSEC.sort()
+ self.time_origin_relative = self.SWEEP_CSEC[0]
+ elif self.app_options.absolute_uptime_event_times:
+ self.time_origin_relative = 0
+ else:
+ # align to time of first sample
+ self.time_origin_relative = self.time_origin_drawn + self.trace.proc_tree.sample_period
+
+ def proc_tree (self, trace):
+ return trace.kernel_tree if self.kernel_only else trace.proc_tree
+
+ def draw_process_label_in_box(self, color, bg_color, label,
+ x, y, w, minx, maxx):
+ # hack in a pair of left and right margins
+ extents = self.cr.text_extents("j" + label + "k") # XX "j", "k" were found by tedious trial-and-error
+ label = " " + label
+
+ label_y_bearing = extents[1]
+ label_w = extents[2]
+ label_height = extents[3]
+ label_y_advance = extents[5]
+
+ y += C.proc_h
+ if self.app_options.justify == JUSTIFY_CENTER and label_w + 10 <= w:
+ label_x = x + w / 2 - label_w / 2 # CENTER
+ else:
+ label_x = x - label_w - 2
+ if label_x < minx:
+ label_x = minx
+ # XX ugly magic constants, tuned by trial-and-error
+ draw_fill_rect(self.cr, bg_color, (label_x, y-1, label_w, -(C.proc_h-2)))
+ draw_text(self.cr, label, color, label_x, y-4)
+ return label_x+label_w
+
+# XX Should be "_csec_to_user"
+# Assume off_x translation is already applied, as it will be in drawing functions.
+def _csec_to_xscaled(t_csec, time_origin_drawn, sec_w):
+ return (t_csec-time_origin_drawn) * sec_w / C.CSEC
+
+def csec_to_xscaled(ctx, t_csec):
+ return _csec_to_xscaled(t_csec, ctx.time_origin_drawn, ctx.SEC_W)
+
+# Solve for t_csec:
+# x = (t_csec-ctx.time_origin_drawn) * ctx.SEC_W / C.CSEC + C.off_x
+#
+# x - C.off_x = (t_csec-ctx.time_origin_drawn) * ctx.SEC_W / C.CSEC
+# (x - C.off_x) * C.CSEC / ctx.SEC_W = t_csec-ctx.time_origin_drawn
+#
+def xscaled_to_csec(ctx, x):
+ return _xscaled_to_csec(x, ctx.SEC_W, ctx.time_origin_drawn)
+
+# XX Prefix with single underbar '_' -- double underbar '__' has special meaning to Python
+# that precludes use in exported functions.
+def _xscaled_to_csec(x, sec_w, _time_origin_drawn):
+ return (x - C.off_x) * C.CSEC / sec_w + _time_origin_drawn
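+# Note (added): csec_to_xscaled() assumes the C.off_x translation has already been
+# applied to the Cairo context, while _xscaled_to_csec() subtracts C.off_x itself so it
+# can be fed raw user-space coordinates (e.g. pointer positions); the two are therefore
+# inverses only up to that offset.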
+
+def ctx_save__csec_to_xscaled(ctx):
+ ctx.cr.save()
+ ctx.cr.scale(float(ctx.SEC_W) / C.CSEC, 1.0)
+ ctx.cr.translate(-ctx.time_origin_drawn, 0.0)
+
+def draw_sec_labels(ctx, rect, nsecs):
+ ctx.cr.set_font_size(AXIS_FONT_SIZE)
prev_x = 0
- for i in range(0, rect[2] + 1, sec_w):
- if ((i / sec_w) % nsecs == 0) :
- label = "%ds" % (i / sec_w)
- label_w = ctx.text_extents(label)[2]
+ for i in range(0, rect[2] + 1, ctx.SEC_W):
+ if ((i / ctx.SEC_W) % nsecs == 0) :
+ label = "%ds" % (i / ctx.SEC_W)
+ label_w = ctx.cr.text_extents(label)[2]
x = rect[0] + i - label_w/2
if x >= prev_x:
- draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
+ draw_text(ctx.cr, label, TEXT_COLOR, x, rect[1] - 2)
prev_x = x + label_w
-def draw_box_ticks(ctx, rect, sec_w):
- draw_rect(ctx, BORDER_COLOR, tuple(rect))
-
- ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
-
- for i in range(sec_w, rect[2] + 1, sec_w):
- if ((i / sec_w) % 5 == 0) :
- ctx.set_source_rgba(*TICK_COLOR_BOLD)
- else :
- ctx.set_source_rgba(*TICK_COLOR)
- ctx.move_to(rect[0] + i, rect[1] + 1)
- ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
- ctx.stroke()
-
- ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+def draw_box_1(ctx, rect):
+ """ draws an outline one user-space unit in width around rect.
+ For best appearance with the default transform (or zoom to an odd multiple),
+ the corners of rect should be offset by USER_HALF (0.5).
+ This is a consequence of Cairo's line-drawing model."""
+ # XX But zooming currently is in steps of sqrt(2) -- what solution would give
+    # both pixel-aligned lines, and reasonable zoom steps?
+ # ... 1/8, 1/6, 1/4, 1/3, 1/2, 1/sqrt(2), 1, sqrt(2), 2, 3, 4, 5, 6, 8, ...
+ # XX Drawing within the chart should be similarly aligned.
+ ctx.cr.save()
+ ctx.cr.set_line_width(1.0)
+ draw_rect(ctx.cr, BORDER_COLOR, tuple(rect))
+ ctx.cr.restore()
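+    # e.g. render_charts() passes rects like (0, curr_y + 10 + USER_HALF, w, C.bar_h)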
def draw_annotations(ctx, proc_tree, times, rect):
- ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
- ctx.set_source_rgba(*ANNOTATION_COLOR)
- ctx.set_dash([4, 4])
+ ctx.cr.set_line_cap(cairo.LINE_CAP_SQUARE)
+ ctx.cr.set_source_rgba(*ANNOTATION_COLOR)
+ ctx.cr.set_dash([4, 4])
for time in times:
if time is not None:
- x = ((time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+ x = csec_to_xscaled(ctx, time)
+
+ ctx.cr.move_to(x, rect[1] + 1)
+ ctx.cr.line_to(x, rect[1] + rect[3] - 1)
+ ctx.cr.stroke()
+
+ ctx.cr.set_line_cap(cairo.LINE_CAP_BUTT)
+ ctx.cr.set_dash([])
+
+def plot_line(cr, point, x, y):
+ cr.set_line_width(1.0)
+ cr.line_to(x, y) # rightward, and upward or downward
+
+# backward-looking
+def plot_square(cr, point, x, y):
+ cr.set_line_width(1.0)
+ cr.line_to(cr.get_current_point()[0], y) # upward or downward
+ cr.line_to(x, y) # rightward
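+    # ("backward-looking": the new sample's value is drawn over the interval ending at x)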
+
+# backward-looking
+def plot_segment(cr, point, x, y, segment_tick_height):
+ cr.move_to(cr.get_current_point()[0], y) # upward or downward
+ if point[1] <= 0: # zero-Y samples draw nothing
+ cr.move_to(x, y)
+ return
+ #cr.set_line_width(1.0)
+ cr.line_to(x, y-segment_tick_height/2)
+ cr.rel_line_to(0, segment_tick_height)
+ cr.fill()
+ cr.move_to(x, y)
- ctx.move_to(rect[0] + x, rect[1] + 1)
- ctx.line_to(rect[0] + x, rect[1] + rect[3] - 1)
- ctx.stroke()
+SEGMENT_TICK_HEIGHT = 2
+def plot_segment_thin(cr, point, x, y):
+ plot_segment(cr, point, x, y, SEGMENT_TICK_HEIGHT)
+
+def plot_segment_fat(cr, point, x, y):
+ plot_segment(cr, point, x, y, 1.5*SEGMENT_TICK_HEIGHT)
+
+def _plot_scatter_positive(cr, point, x, y, w, h):
+ if point[1] <= 0:
+ return
+ draw_diamond(cr, x, y, w, h)
- ctx.set_line_cap(cairo.LINE_CAP_BUTT)
- ctx.set_dash([])
+def plot_scatter_positive_big(cr, point, x, y):
+ return _plot_scatter_positive(cr, point, x, y, 5.5, 5.5)
-def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
- ctx.set_line_width(0.5)
- x_shift = proc_tree.start_time
+def plot_scatter_positive_small(cr, point, x, y):
+ return _plot_scatter_positive(cr, point, x, y, 3.6, 3.6)
- def transform_point_coords(point, x_base, y_base, \
- xscale, yscale, x_trans, y_trans):
- x = (point[0] - x_base) * xscale + x_trans
- y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
+# All charts assumed to be full-width
+# XX horizontal coords in chart_bounds are now on-pixel-center
+def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range, plot_point_func):
+ def transform_point_coords(point, y_base, yscale):
+ x = csec_to_xscaled(ctx, point[0])
+ y = (point[1] - y_base) * -yscale + chart_bounds[1] + chart_bounds[3]
return x, y
- max_x = max (x for (x, y) in data)
max_y = max (y for (x, y) in data)
- # avoid divide by zero
- if max_y == 0:
+ if max_y <= 0: # avoid divide by zero
max_y = 1.0
- xscale = float (chart_bounds[2]) / max_x
# If data_range is given, scale the chart so that the value range in
# data_range matches the chart bounds exactly.
# Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
+ if data_range and (data_range[1] - data_range[0]) > 0:
yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
ybase = data_range[0]
else:
yscale = float(chart_bounds[3]) / max_y
ybase = 0
- first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
+ ctx.cr.set_source_rgba(*color)
+
+ # move to the x of the missing first sample point
+ first = transform_point_coords ([ctx.time_origin_drawn + in_chart_X_margin(proc_tree), -9999],
+ ybase, yscale)
+ ctx.cr.move_to(first[0], first[1])
- ctx.set_source_rgba(*color)
- ctx.move_to(*first)
for point in data:
- x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- ctx.line_to(x, y)
- if fill:
- ctx.stroke_preserve()
- ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], first[1])
- ctx.fill()
- else:
- ctx.stroke()
- ctx.set_line_width(1.0)
-
-bar_h = 55
-meminfo_bar_h = 2 * bar_h
-header_h = 110 + 2 * (30 + bar_h) + 1 * (30 + meminfo_bar_h)
-# offsets
-off_x, off_y = 10, 10
-sec_w_base = 50 # the width of a second
-proc_h = 16 # the height of a process
-leg_s = 10
-MIN_IMG_W = 800
-CUML_HEIGHT = 2000 # Increased value to accomodate CPU and I/O Graphs
-OPTIONS = None
-
-def extents(options, xscale, trace):
- proc_tree = options.proc_tree(trace)
- w = int (proc_tree.duration * sec_w_base * xscale / 100) + 2*off_x
- h = proc_h * proc_tree.num_proc + 2 * off_y
- if options.charts:
- h += header_h
- if proc_tree.taskstats and options.cumulative:
- h += CUML_HEIGHT + 4 * off_y
- return (w, h)
-
-def clip_visible(clip, rect):
- xmax = max (clip[0], rect[0])
- ymax = max (clip[1], rect[1])
- xmin = min (clip[0] + clip[2], rect[0] + rect[2])
- ymin = min (clip[1] + clip[3], rect[1] + rect[3])
- return (xmin > xmax and ymin > ymax)
-
-def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
- proc_tree = options.proc_tree(trace)
-
- # render bar legend
- ctx.set_font_size(LEGEND_FONT_SIZE)
-
- draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O wait
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
- proc_tree, None)
- # render CPU load
- draw_chart (ctx, CPU_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
- proc_tree, None)
+ x, y = transform_point_coords (point, ybase, yscale)
+ plot_point_func(ctx.cr, point, x, y)
- curr_y = curr_y + 30 + bar_h
+ final = transform_point_coords (data[-1], ybase, yscale)
- # render second chart
- draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+ if fill:
+ ctx.cr.set_line_width(0.0)
+ ctx.cr.stroke_preserve()
+ ctx.cr.line_to(final[0], chart_bounds[1]+chart_bounds[3])
+ ctx.cr.line_to(first[0], chart_bounds[1]+chart_bounds[3])
+ ctx.cr.line_to(first[0], first[1])
+ ctx.cr.fill()
+ else:
+ ctx.cr.stroke()
+ ctx.cr.set_line_width(1.0)
+ return max_y
+
+def in_chart_X_margin(proc_tree):
+ return proc_tree.sample_period
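+    # One sample period of horizontal padding: _time_origin_drawn() subtracts it on the
+    # left of the first sample and extents() adds it to the right of the last one.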
+
+# A _pure_ function of its arguments -- writes to no global state nor object.
+# Called from gui.py and batch.py, before instantiation of
+# DrawContext and first call to render(), then every time xscale
+# changes.
+# Returns (w, h), the maximum useful x, y user coordinates -- minimums are 0, 0.
+# (w) will get bigger if xscale does.
+def extents(ctx, xscale, trace):
+ '''arg "options" is a RenderOptions object'''
+ proc_tree = ctx.proc_tree(trace)
+
+ w = int (_csec_to_xscaled(trace.cpu_stats[-1].time + in_chart_X_margin(proc_tree),
+ _time_origin_drawn(ctx, trace),
+ _sec_w(xscale)) \
+ + 2*C.off_x)
+
+ h = C.proc_h * proc_tree.num_proc + 2 * C.off_y
+ if ctx.charts:
+ h += 110 + (2 + len(trace.disk_stats)) * (30 + C.bar_h) + 1 * (30 + meminfo_bar_h)
+ if proc_tree.taskstats and ctx.cumulative:
+ h += C.CUML_HEIGHT + 4 * C.off_y
+ return (w, h) # includes C.off_x, C.off_y
+
+def render_charts(ctx, trace, curr_y, w, h):
+ proc_tree = ctx.proc_tree(trace)
+ x_onscreen = max(0, ctx.cr.device_to_user(0, 0)[0])
+
+ max_procs_blocked = \
+ max([sample.procs_blocked for sample in trace.cpu_stats])
+ max_procs_running = \
+ max([sample.procs_running for sample in trace.cpu_stats])
+
+ if ctx.app_options.show_legends:
+ # render bar legend
+ ctx.cr.set_font_size(LEGEND_FONT_SIZE)
+ curr_y += 20
+ curr_x = C.legend_indent + x_onscreen
+ curr_x += 20 + draw_legend_box(ctx.cr, "CPU (user)", CPU_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box(ctx.cr, "CPU (sys)", CPU_SYS_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box(ctx.cr, "I/O (wait)", IO_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += draw_legend_diamond(ctx.cr, str(max_procs_running) + " Runnable threads",
+ PROCS_RUNNING_COLOR, curr_x +10, curr_y, C.leg_s, C.leg_s)
+ curr_x += draw_legend_diamond(ctx.cr, str(max_procs_blocked) + " Blocked threads -- Uninterruptible Syscall",
+ PROCS_BLOCKED_COLOR, curr_x +70, curr_y, C.leg_s, C.leg_s)
+
+ chart_rect = (0, curr_y+10+USER_HALF, w, C.bar_h)
+ draw_box_1 (ctx, chart_rect)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ # render I/O wait -- a backwards delta
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+ proc_tree,
+ [0, 1],
+ plot_square)
+ # render CPU load -- a backwards delta
+ draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+ proc_tree,
+ [0, 1],
+ plot_square)
+ # superimpose "sys time", the fraction of CPU load spent in kernel -- a backwards delta
+ draw_chart (ctx, CPU_SYS_COLOR, True, chart_rect, \
+ [(sample.time, sample.sys) for sample in trace.cpu_stats], \
+ proc_tree,
+ [0, 1],
+ plot_square)
+
+ # instantaneous sample
+ draw_chart (ctx, PROCS_BLOCKED_COLOR, False, chart_rect,
+ [(sample.time, sample.procs_blocked) for sample in trace.cpu_stats], \
+ proc_tree, [0, max(max_procs_blocked, max_procs_running)], plot_scatter_positive_big)
+
+ # instantaneous sample
+ draw_chart (ctx, PROCS_RUNNING_COLOR, False, chart_rect,
+ [(sample.time, sample.procs_running) for sample in trace.cpu_stats], \
+ proc_tree, [0, max(max_procs_blocked, max_procs_running)], plot_scatter_positive_small)
+
+ curr_y += 8 + C.bar_h
+
+ # XXX Assume single device for now.
+ # XX Generate an IOStat containing max'es of all stats, instead?
+ max_whole_device_IOStat = IOStat._make(
+ getattr( max( trace.disk_stats[0].part_deltas,
+ key = lambda part_delta: getattr(part_delta.s.iostat, f)).s.iostat,
+ f)
+ for f in IOStat._fields)
+ max_whole_device_util = \
+ max( trace.disk_stats[0].part_deltas,
+ key = lambda part_delta: part_delta.util).util
+
+ if ctx.app_options.show_ops_not_bytes:
+ read_field = 'nreads'
+ write_field = 'nwrites'
+ else:
+ read_field = 'nsectors_read'
+ write_field = 'nsectors_write'
+
+ max_whole_device_read_or_write = max(getattr(max_whole_device_IOStat, write_field),
+ getattr(max_whole_device_IOStat, read_field))
+
+ if ctx.app_options.show_legends:
+ curr_y += 34
+ curr_x = C.legend_indent+x_onscreen
+ # render second chart
+ draw_legend_box(ctx.cr,
+ "Disk utilization -- fraction of sample interval I/O queue was not empty",
+ IO_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += 457
+
+ def draw_RW_legend(x, field, color, plot):
+ label = str(getattr(max_whole_device_IOStat, field)) + " " + field
+ cr = ctx.cr
+ cr.set_source_rgba(*color)
+ cr.move_to(x, -1)
+ PLOT_WIDTH = 30
+ plot(cr, [0,1], x+PLOT_WIDTH, curr_y-4)
+ x += PLOT_WIDTH+5
+ x += draw_text(cr, label, TEXT_COLOR, x, curr_y)
+ return x
+
+ curr_x = 10 + draw_RW_legend(curr_x,
+ read_field, DISK_READ_COLOR, plot_segment_thin)
+ curr_x = draw_RW_legend(curr_x,
+ write_field, DISK_WRITE_COLOR, plot_segment_fat)
+ if ctx.app_options.show_ops_not_bytes:
+ curr_x = draw_RW_legend(curr_x,
+ "nio_in_progress", BLACK, plot_scatter_positive_big)
# render I/O utilization
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.util) for sample in trace.disk_stats], \
- proc_tree, None)
-
- # render disk throughput
- max_sample = max (trace.disk_stats, key = lambda s: s.tput)
- if clip_visible (clip, chart_rect):
- draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
- [(sample.time, sample.tput) for sample in trace.disk_stats], \
- proc_tree, None)
-
- pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
-
- shift_x, shift_y = -20, 20
- if (pos_x < off_x + 245):
- shift_x, shift_y = 5, 40
-
- label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
- draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+ # No correction for non-constant sample.time -- but see sample-coalescing code in parsing.py.
+ for partition in trace.disk_stats:
+ if partition.hide:
+ continue
+ draw_text(ctx.cr, partition.label, TEXT_COLOR, C.legend_indent+x_onscreen, curr_y+18)
- curr_y = curr_y + 30 + bar_h
+ # utilization -- inherently normalized [0,1]
+ chart_rect = (0, curr_y+18+5+USER_HALF, w, C.bar_h)
+ draw_box_1 (ctx, chart_rect)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ # a backwards delta
+ draw_chart (ctx, IO_COLOR, True, chart_rect,
+ [(sample.s.time, sample.util) for sample in partition.part_deltas],
+ proc_tree, [0, max_whole_device_util], plot_square)
+
+ # write throughput -- a backwards delta
+ draw_chart (ctx, DISK_WRITE_COLOR, False, chart_rect,
+ [(sample.s.time, getattr(sample.s.iostat, write_field)) for sample in partition.part_deltas],
+ proc_tree, [0, max_whole_device_read_or_write], plot_segment_fat)
+
+    # overlay read throughput -- any overlapping read will protrude around the edges
+ draw_chart (ctx, DISK_READ_COLOR, False, chart_rect,
+ [(sample.s.time, getattr(sample.s.iostat, read_field)) for sample in partition.part_deltas],
+ proc_tree, [0, max_whole_device_read_or_write], plot_segment_thin)
+
+ # overlay instantaneous count of number of I/O operations in progress, only if comparable to
+ # the read/write stats currently shown (ops).
+ if ctx.app_options.show_ops_not_bytes:
+ draw_chart (ctx, BLACK, False, chart_rect,
+ [(sample.s.time, sample.nio_in_progress) for sample in partition.part_deltas],
+ proc_tree,
+ [0, max(max_whole_device_read_or_write,
+ max_whole_device_IOStat.nio_in_progress)],
+ plot_scatter_positive_small)
+
+ curr_y += 18+C.bar_h
# render mem usage
- chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
+ chart_rect = (0, curr_y+30+USER_HALF, w, meminfo_bar_h)
mem_stats = trace.mem_stats
- if mem_stats and clip_visible (clip, chart_rect):
+ if mem_stats:
mem_scale = max(sample.records['MemTotal'] - sample.records['MemFree'] for sample in mem_stats)
- draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
- draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
- draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.records['SwapTotal'] - sample.records['SwapFree'])/1024 for sample in mem_stats]), \
- MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_annotations(ctx, proc_tree, trace.times, chart_rect)
+ if ctx.app_options.show_legends:
+ curr_y += 20
+ draw_legend_box(ctx.cr, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, curr_y, C.leg_s)
+ draw_legend_box(ctx.cr, "Used", MEM_USED_COLOR, 240, curr_y, C.leg_s)
+ draw_legend_box(ctx.cr, "Buffers", MEM_BUFFERS_COLOR, 360, curr_y, C.leg_s)
+ draw_legend_line(ctx.cr, "Swap (scale: %u MiB)" % max([(sample.records['SwapTotal'] - sample.records['SwapFree'])/1024 for sample in mem_stats]), \
+ MEM_SWAP_COLOR, 480, curr_y, C.leg_s)
+ draw_box_1 (ctx, chart_rect)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
[(sample.time, sample.records['MemTotal'] - sample.records['MemFree']) for sample in trace.mem_stats], \
- proc_tree, [0, mem_scale])
+ proc_tree, [0, mem_scale], plot_square)
draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
[(sample.time, sample.records['MemTotal'] - sample.records['MemFree'] - sample.records['Buffers']) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
+ proc_tree, [0, mem_scale], plot_square)
draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
[(sample.time, sample.records['Cached']) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
+ proc_tree, [0, mem_scale], plot_square)
draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
[(sample.time, float(sample.records['SwapTotal'] - sample.records['SwapFree'])) for sample in mem_stats], \
- proc_tree, None)
+ proc_tree, None, plot_square)
curr_y = curr_y + meminfo_bar_h
return curr_y
+def late_init_transform(cr):
+ cr.translate(C.off_x, 0) # current window-coord clip shrinks with loss of the C.off_x-wide strip on left
+
#
-# Render the chart.
+# Render the chart. Main entry point of this module.
#
-def render(ctx, options, xscale, trace):
- (w, h) = extents (options, xscale, trace)
- global OPTIONS
- OPTIONS = options.app_options
-
- proc_tree = options.proc_tree (trace)
+def render(cr, ctx, xscale, trace, sweep_csec = None, hide_process_y = None):
+ '''
+ "cr" is the Cairo drawing context -- the transform matrix it carries already has
+ panning translation and "zoom" scaling applied.
+ The asymmetrical "xscale" arg is not applied globally to "cr", because
+ it would distort letterforms of text output.
+ "ctx" is a DrawContext object.
+ '''
+ #traceback.print_stack()
+ ctx.per_render_init(cr, ctx, trace, _time_origin_drawn(ctx, trace), _sec_w(xscale), sweep_csec)
+ (w, h) = extents(ctx, xscale, trace)
+
+ ctx.cr.set_line_width(1.0)
+ ctx.cr.select_font_face(FONT_NAME)
+ draw_fill_rect(ctx.cr, WHITE, (0, 0, max(w, C.MIN_IMG_W), h))
+
+ ctx.cr.save()
+ late_init_transform(ctx.cr)
+
+ proc_tree = ctx.proc_tree (trace)
- # x, y, w, h
- clip = ctx.clip_extents()
-
- sec_w = int (xscale * sec_w_base)
- ctx.set_line_width(1.0)
- ctx.select_font_face(FONT_NAME)
- draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
- w -= 2*off_x
# draw the title and headers
if proc_tree.idle:
duration = proc_tree.idle
else:
- duration = proc_tree.duration
+ duration = proc_tree.duration()
- if not options.kernel_only:
+ if not ctx.kernel_only:
curr_y = draw_header (ctx, trace.headers, duration)
else:
- curr_y = off_y;
+ curr_y = C.off_y;
+
+ w -= 2*C.off_x
+ if ctx.charts:
+ curr_y = render_charts (ctx, trace, curr_y, w, h)
- if options.charts:
- curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+ if ctx.app_options.show_legends and not ctx.kernel_only:
+ curr_y = draw_process_bar_chart_legends(ctx, curr_y)
# draw process boxes
proc_height = h
- if proc_tree.taskstats and options.cumulative:
- proc_height -= CUML_HEIGHT
-
- draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
- curr_y, w, proc_height, sec_w)
-
- curr_y = proc_height
- ctx.set_font_size(SIG_FONT_SIZE)
- draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
-
- # draw a cumulative CPU-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
-
- # draw a cumulative I/O-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
-
-def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
- header_size = 0
- if not options.kernel_only:
- draw_legend_box (ctx, "Running (%cpu)",
- PROC_COLOR_R, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Unint.sleep (I/O)",
- PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Sleeping",
- PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Zombie",
- PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
- header_size = 45
-
- chart_rect = [off_x, curr_y + header_size + 15,
- w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
- ctx.set_font_size (PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks (ctx, chart_rect, sec_w)
- if sec_w > 100:
+ if proc_tree.taskstats and ctx.cumulative:
+ proc_height -= C.CUML_HEIGHT
+
+ curr_y += ctx.M_HEIGHT
+
+ # curr_y points to the *top* of the first per-process line
+ if hide_process_y and hide_process_y[0] > (curr_y - C.proc_h/4):
+ hide_mod_proc_h = (hide_process_y[0] - curr_y) % C.proc_h
+ # if first button-down (hide_process_y[0]) falls in middle half of any process bar, then set up for hiding
+ if hide_mod_proc_h >= C.proc_h/4 and hide_mod_proc_h < C.proc_h*3/4:
+ hide_process_y.sort()
+ ctx.hide_process_y = hide_process_y
+ ctx.unhide_process_y = None
+ else: # unhide
+ ctx.hide_process_y = None
+ ctx.unhide_process_y = hide_process_y[0]
+
+ curr_y = draw_process_bar_chart(ctx, proc_tree, trace.times,
+ curr_y, w, proc_height)
+
+ if proc_tree.taskstats and ctx.cumulative:
+ # draw a cumulative CPU-time-per-process graph
+ cuml_rect = (0, proc_height + C.off_y, w, C.CUML_HEIGHT/2 - C.off_y * 2)
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, STAT_TYPE_CPU)
+
+ # draw a cumulative I/O-time-per-process graph
+ cuml_rect = (0, proc_height + C.off_y * 100, w, C.CUML_HEIGHT/2 - C.off_y * 2)
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, STAT_TYPE_IO)
+
+ if ctx.SWEEP_CSEC:
+ draw_sweep(ctx)
+
+ ctx.cr.restore()
+
+    if ctx.event_dump_list is None:
+ return
+
+ print # blank, separator line
+ # FIXME: Support of raw log dump of multiple log files is hard -- disable for now.
+ if ctx.app_options.dump_raw_event_context and False:
+ # dump all raw log lines between events, where "between" means merely textually,
+ # irrespective of time.
+ ctx.event_dump_list.sort(key = lambda e: e.raw_log_seek)
+ if len(ctx.event_dump_list):
+ # find first and last events of the list that contain a valid raw_log_seek
+ for event0 in ctx.event_dump_list:
+ if event0.raw_log_seek:
+ break
+ for eventN in reversed(ctx.event_dump_list):
+ if eventN.raw_log_seek:
+ break
+ if event0.raw_log_seek and eventN.raw_log_seek:
+ event0.raw_log_file.seek(event0.raw_log_seek)
+ eventN = ctx.event_dump_list[-1]
+ # for line in event0.raw_log_file.readline():
+ while event0.raw_log_file.tell() <= eventN.raw_log_seek:
+ print event0.raw_log_file.readline().rstrip()
+ else: # dump only digestible events
+ ctx.event_dump_list.sort(key = lambda ev: ev.time_usec)
+ for ev in ctx.event_dump_list:
+ print ev.dump_format()
+
+ ctx.event_dump_list = None
+
+def draw_sweep(ctx):
+ def draw_shading(cr, rect):
+        # the rgba alpha value is a compromise between on-screen appearance and printed screenshots
+ cr.set_source_rgba(0.0, 0.0, 0.0, 0.08)
+ cr.set_line_width(0.0)
+ cr.rectangle(rect)
+ cr.fill()
+ def draw_vertical(ctx, time, x):
+ cr = ctx.cr
+ cr.set_dash([1, 3])
+ cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
+ cr.set_line_width(1.0)
+ cr.move_to(x, 0)
+ cr.line_to(x, height)
+ cr.stroke()
+
+ height = int(ctx.cr.device_to_user(0,2000)[1])
+ x_itime = [None, None]
+ for i_time, time in enumerate(ctx.SWEEP_CSEC):
+ time = ctx.SWEEP_CSEC[i_time]
+ x = csec_to_xscaled(ctx, time)
+ draw_shading(ctx.cr, (int(x),0,int(ctx.cr.clip_extents()[i_time*2]-x),height))
+ draw_vertical(ctx, time, x)
+ x_itime[i_time] = x
+
+ top = ctx.cr.device_to_user(0, 0)[1]
+ origin = 0 if ctx.app_options.absolute_uptime_event_times else \
+ ctx.time_origin_drawn + ctx.trace.proc_tree.sample_period
+ draw_label_on_bg(ctx.cr, NOTEPAD_YELLOW, BLACK,
+ "{0:.6f}".format((ctx.SWEEP_CSEC[0] - origin)/C.CSEC),
+ top,
+ x_itime[0] + ctx.n_WIDTH/2)
+ if i_time == 0:
+ return
+ draw_label_on_bg(ctx.cr, NOTEPAD_YELLOW, BLACK,
+ "+{0:.6f}".format((ctx.SWEEP_CSEC[1] - ctx.SWEEP_CSEC[0])/C.CSEC),
+ top + ctx.M_HEIGHT*2,
+ x_itime[1] + ctx.n_WIDTH/2)
+
+def draw_process_bar_chart_legends(ctx, curr_y):
+ x_onscreen = max(0, ctx.cr.device_to_user(0, 0)[0])
+
+ def draw_process_or_thread_legend(color, label, x, y):
+ label_w = ctx.cr.text_extents(label)[2]
+ return ctx.draw_process_label_in_box(
+ PROC_TEXT_COLOR,
+ color,
+ label,
+ x, y,
+ -1,
+ ctx.cr.device_to_user(0, 0)[0],
+ ctx.cr.clip_extents()[2])
+
+ curr_y += 10
+ curr_x = draw_process_or_thread_legend(NOTEPAD_PINK, "[PPID:PID]", x_onscreen, curr_y)
+ curr_y += 16
+ curr_x = draw_process_or_thread_legend(NOTEPAD_YELLOW, "[PPID:PID:TID:NICE]", x_onscreen, curr_y)
+
+ curr_y += 15
+ curr_x += C.legend_indent
+ curr_x += 30 + draw_legend_diamond (ctx.cr, "Runnable",
+ PROCS_RUNNING_COLOR, curr_x, curr_y, C.leg_s*3/4, C.proc_h)
+ curr_x += 30 + draw_legend_diamond (ctx.cr, "Uninterruptible Syscall",
+ PROC_COLOR_D, curr_x, curr_y, C.leg_s*3/4, C.proc_h)
+ curr_x += 20 + draw_legend_box (ctx.cr, "Running (user)",
+ PROC_COLOR_R, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box (ctx.cr, "Running (sys)",
+ CPU_SYS_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box (ctx.cr, "I/O wait",
+ IO_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box (ctx.cr, "Child CPU time lost, charged to parent",
+ CPU_CHILD_COLOR, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box (ctx.cr, "Sleeping",
+ PROC_COLOR_S, curr_x, curr_y, C.leg_s)
+ curr_x += 20 + draw_legend_box (ctx.cr, "Zombie",
+ PROC_COLOR_Z, curr_x, curr_y, C.leg_s)
+ curr_y -= 15
+ return curr_y
+
+def draw_process_bar_chart(ctx, proc_tree, times, curr_y, w, h):
+ chart_rect = [-1, -1, -1, -1]
+ ctx.cr.set_font_size (PROC_TEXT_FONT_SIZE)
+
+ if ctx.SEC_W > 100:
nsec = 1
else:
nsec = 5
- draw_sec_labels (ctx, chart_rect, sec_w, nsec)
+ #draw_sec_labels (ctx.cr, chart_rect, nsec)
draw_annotations (ctx, proc_tree, times, chart_rect)
- y = curr_y + 60
+ curr_y += 15
for root in proc_tree.process_tree:
- draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
- y = y + proc_h * proc_tree.num_nodes([root])
-
+ if not root.lwp():
+ curr_y = draw_processes_recursively(ctx, root, proc_tree, curr_y)[1]
+ if ctx.proc_above_was_hidden:
+ draw_hidden_process_separator(ctx, curr_y)
+ ctx.proc_above_was_hidden = False
+ return curr_y
def draw_header (ctx, headers, duration):
toshow = [
@@ -471,119 +929,410 @@ def draw_header (ctx, headers, duration):
('system.cpu', 'CPU', lambda s: re.sub('model name\s*:\s*', '', s, 1)),
('system.kernel.options', 'kernel options', lambda s: s),
]
+ toshow = []
- header_y = ctx.font_extents()[2] + 10
- ctx.set_font_size(TITLE_FONT_SIZE)
- draw_text(ctx, headers['title'], TEXT_COLOR, off_x, header_y)
- ctx.set_font_size(TEXT_FONT_SIZE)
+ cr = ctx.cr
+ header_y = cr.font_extents()[2] + 10
+ cr.set_font_size(TITLE_FONT_SIZE)
+ x_onscreen = C.legend_indent + max(0, ctx.cr.device_to_user(0, 0)[0])
+ draw_text(cr, headers['title'], TEXT_COLOR, x_onscreen, header_y)
+ cr.set_font_size(TEXT_FONT_SIZE)
for (headerkey, headertitle, mangle) in toshow:
- header_y += ctx.font_extents()[2]
+ header_y += cr.font_extents()[2]
if headerkey in headers:
value = headers.get(headerkey)
else:
value = ""
txt = headertitle + ': ' + mangle(value)
- draw_text(ctx, txt, TEXT_COLOR, off_x, header_y)
-
- dur = duration / 100.0
- txt = 'time : %02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60))
- if headers.get('system.maxpid') is not None:
- txt = txt + ' max pid: %s' % (headers.get('system.maxpid'))
+ draw_text(cr, txt, TEXT_COLOR, x_onscreen, header_y)
- header_y += ctx.font_extents()[2]
- draw_text (ctx, txt, TEXT_COLOR, off_x, header_y)
+# dur = duration / 100.0
+# txt = 'time : %02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60))
+# if headers.get('system.maxpid') is not None:
+# txt = txt + ' max pid: %s' % (headers.get('system.maxpid'))
+#
+# header_y += cr.font_extents()[2]
+# draw_text (cr, txt, TEXT_COLOR, 0, header_y)
return header_y
-def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
- x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
- w = ((proc.duration) * rect[2] / proc_tree.duration)
-
- draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- ipid = int(proc.pid)
- if not OPTIONS.show_all:
- cmdString = proc.cmd
+# Cairo draws lines "on-center", so to draw a one-pixel width horizontal line
+# using the (default) 1:1 transform from user-space to device-space,
+# the Y coordinate must be offset by 1/2 user-coord.
+SEPARATOR_THICKNESS = 1.0
+SEP_HALF = SEPARATOR_THICKNESS / 2
+USER_HALF = 0.5
+BAR_HEIGHT = C.proc_h - SEPARATOR_THICKNESS
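+# Illustrative sketch (added) of the half-unit offset described above -- a crisp
+# one-device-pixel horizontal line at the default 1:1 transform:
+#   cr.set_line_width(1.0)
+#   cr.move_to(x0, y + USER_HALF)
+#   cr.line_to(x1, y + USER_HALF)
+#   cr.stroke()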
+
+def draw_visible_process_separator(ctx, proc, x, y, w):
+ ctx.cr.save()
+ ctx.cr.set_source_rgba(*PROC_BORDER_COLOR)
+ ctx.cr.set_line_width(SEPARATOR_THICKNESS)
+ ctx.cr.move_to(x+w, y)
+ ctx.cr.rel_line_to(-w, 0)
+ if proc.start_time < ctx.time_origin_drawn:
+ ctx.cr.stroke()
+ ctx.cr.move_to(x, y+C.proc_h)
else:
+ # XX No attempt to align the vertical line with the device pixel grid
+ ctx.cr.rel_line_to(0, C.proc_h)
+ ctx.cr.rel_line_to(w, 0)
+ ctx.cr.stroke()
+ ctx.cr.restore()
+
+def draw_hidden_process_separator(ctx, y):
+    WHITE_BASE = 1.0, 1.0, 1.0    # solid base pass, drawn under the dashed grey
+ GREY = 0.3, 0.3, 0.3
+ ctx.cr.save()
+ ctx.cr.set_source_rgb(0.0, 1.0, 0.0)
+ ctx.cr.set_line_width(SEPARATOR_THICKNESS)
+ def draw_again():
+ ctx.cr.move_to(ctx.cr.clip_extents()[0], y)
+ ctx.cr.line_to(ctx.cr.clip_extents()[2], y)
+ ctx.cr.stroke()
+    ctx.cr.set_source_rgb(*WHITE_BASE)
+ draw_again()
+ ctx.cr.set_source_rgb(*GREY)
+ ctx.cr.set_dash([1, 4])
+ draw_again()
+ ctx.cr.restore()
+
+def draw_process(ctx, proc, proc_tree, x, y, w):
+ draw_process_activity_colors(ctx, proc, proc_tree, x, y, w)
+
+ # Do not draw right-hand vertical border -- process exit never exactly known
+ draw_visible_process_separator(ctx, proc, x, y, w)
+
+ if proc.lwp() or not ctx.trace.ps_threads_stats:
+ draw_process_state_colors(ctx, proc, proc_tree, x, y, w)
+
+ # Event ticks step on the rectangle painted by draw_process_state_colors(),
+ # e.g. for non-interruptible wait.
+ # User can work around this by toggling off the event ticks.
+ n_highlighted_events = 0 if ctx.app_options.hide_events else \
+ draw_process_events(ctx, proc, proc_tree, x, y)
+
+ if proc_tree.taskstats and ctx.app_options.show_all:
cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
- cmdString = cmdString + " [" + str(ipid / 1000) + "]"
- if OPTIONS.show_all:
- if proc.args:
- cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
+ else:
+ cmdString = proc.cmd
+        if (ctx.app_options.show_pid or ctx.app_options.show_all) and proc.pid != 0:
+ prefix = " ["
+ if ctx.app_options.show_all:
+ prefix += str(proc.ppid / 1000) + ":"
+ prefix += str(proc.pid / 1000)
+ if proc.lwp():
+ prefix += ":" + str(proc.tid / 1000)
+ prefix += ":" + str(proc.nice)
+ prefix += "]"
+ cmdString = prefix + cmdString
+ if ctx.app_options.show_all and proc.args:
+ cmdString += " '" + "' '".join(proc.args) + "'"
+
+ ctx.draw_process_label_in_box( PROC_TEXT_COLOR,
+ NOTEPAD_YELLOW if proc.lwp() else NOTEPAD_PINK,
+ cmdString,
+ csec_to_xscaled(ctx, max(proc.start_time,ctx.time_origin_drawn)),
+ y,
+ w,
+ ctx.cr.device_to_user(0, 0)[0],
+ ctx.cr.clip_extents()[2])
+ return n_highlighted_events
+
+def draw_processes_recursively(ctx, proc, proc_tree, y):
+ xmin = ctx.cr.device_to_user(0, 0)[0] # work around numeric overflow at high xscale factors
+ xmin = max(xmin, 0)
+
+ def draw_process_and_separator(ctx, proc, proc_tree, y):
+ x = max(xmin, csec_to_xscaled(ctx, proc.start_time))
+ w = max(xmin, csec_to_xscaled(ctx, proc.start_time + proc.duration)) - x
+ if ctx.hide_process_y and y+C.proc_h > ctx.hide_process_y[0] and proc.draw:
+ proc.draw = False
+ ctx.hide_process_y[1] -= C.proc_h
+ if y > (ctx.hide_process_y[1]) / C.proc_h * C.proc_h:
+ ctx.hide_process_y = None
+
+ elif ctx.unhide_process_y and y+C.proc_h*3/4 > ctx.unhide_process_y:
+ if proc.draw: # found end of run of hidden processes
+ ctx.unhide_process_y = None
+ else:
+ proc.draw = True
+ ctx.unhide_process_y += C.proc_h
+
+ if not proc.draw:
+ ctx.proc_above_was_hidden = True
+ return x, y
else:
- cmdString = cmdString + " " + proc.exe
+ n_highlighted_events = draw_process(ctx, proc, proc_tree, x, y+USER_HALF, w)
+ if ctx.proc_above_was_hidden:
+ draw_hidden_process_separator(ctx, y+USER_HALF)
+ ctx.proc_above_was_hidden = False
+ return x, y + C.proc_h*(1 if n_highlighted_events <= 0 else 2)
+
+ # Hide proc_ps_stat.log "processes" for the kernel-daemon children of kthreadd, provided that
+ # corresponding lwp entries are available from proc_ps_thread.log
+ if proc.ppid / PID_SCALE != KTHREADD_PID or not proc.lwp_list:
+ x, child_y = draw_process_and_separator(ctx, proc, proc_tree, y)
+ else:
+ x, child_y = 0, y
- draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
+ if proc.lwp_list is not None:
+ for lwp in proc.lwp_list:
+ x, child_y = draw_process_and_separator(ctx, lwp, proc_tree, child_y)
- next_y = y + proc_h
+ elder_sibling_y = None
for child in proc.child_list:
- if next_y > clip[1] + clip[3]:
- break
- child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
- draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
- next_y = next_y + proc_h * proc_tree.num_nodes([child])
-
- return x, y
-
-
-def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
+ # Processes draw their "own" LWPs, contrary to formal parentage relationship
+ if child.lwp_list is None:
+ continue
- if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
- return
+ child_x, next_y = draw_processes_recursively(ctx, child, proc_tree, child_y)
+ if proc.draw and child.draw:
+ # draw upward from child to elder sibling or parent (proc)
+ # XX draws lines on top of the process name label
+ #draw_process_connecting_lines(ctx, x, y, child_x, child_y, elder_sibling_y)
+ elder_sibling_y = child_y
+ child_y = next_y
+ return x, child_y
+
+def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w):
+ draw_fill_rect(ctx.cr, PROC_COLOR_S, (x, y, w, C.proc_h))
+
+ ctx_save__csec_to_xscaled(ctx)
+ ctx.cr.set_line_width(0.0)
+ # draw visual reminder of unknowability of thread end time
+ ctx.cr.move_to(proc.samples[-1].time + proc_tree.sample_period, y+C.proc_h/2)
+ ctx.cr.line_to(proc.samples[-1].time, y+C.proc_h)
+ ctx.cr.line_to(proc.samples[-1].time, y)
+ ctx.cr.close_path()
+ ctx.cr.fill()
+
+ # cases:
+ # 1. proc started before sampling did
+ # XX should look up time of previous sample, not assume 'proc_tree.sample_period'
+ # 2. proc start after sampling
+ last_time = max(proc.start_time,
+ proc.samples[0].time - proc_tree.sample_period)
+
+ for sample in proc.samples:
+ cpu_self = sample.cpu_sample.user + sample.cpu_sample.sys
+ cpu_exited_child = 0 # XXXX sample.exited_child_user + sample.exited_child_sys
+ width = sample.time - last_time
+
+ if cpu_exited_child > 0:
+ height = (cpu_exited_child + cpu_self) * BAR_HEIGHT
+ draw_fill_rect(ctx.cr, CPU_CHILD_COLOR, (last_time, y+C.proc_h-SEP_HALF, width, -height))
+
+ if cpu_exited_child != 0:
+ print "cpu_exited_child == " + str(cpu_exited_child)
+
+ # XX For whole processes, the cpu_sample.io stat is the sum of waits for all threads.
+
+ # Inspection of kernel code shows that tick counters are rounded to nearest,
+ # so overflow is to be expected.
+ OVERFLOW_LIMIT=1.0
+ # XX What's the upper bound on rounding-induced overflow?
+ if sample.cpu_sample.io + cpu_self > 0:
+ height = min(OVERFLOW_LIMIT, (sample.cpu_sample.io + cpu_self)) * BAR_HEIGHT
+ draw_fill_rect(ctx.cr, IO_COLOR, (last_time, y+C.proc_h-SEP_HALF, width, -height))
+
+ for (cpu_field, color) in [(sample.cpu_sample.user + sample.cpu_sample.sys, PROC_COLOR_R),
+ (sample.cpu_sample.sys, CPU_SYS_COLOR)]:
+ # If this test fails -- no time ticks -- then skip changing of color.
+ if cpu_field > 0:
+ height = min(OVERFLOW_LIMIT, cpu_field) * BAR_HEIGHT
+ draw_fill_rect(ctx.cr, color, (last_time, y+C.proc_h-SEP_HALF, width, -height))
+ if cpu_field < cpu_self:
+ # draw a separator between the bar segments, to aid the eye in
+ # resolving the boundary
+ ctx.cr.save()
+ ctx.cr.move_to(last_time, y+C.proc_h-SEP_HALF-height)
+ ctx.cr.rel_line_to(width,0)
+ ctx.cr.set_source_rgba(*PROC_COLOR_S)
+ ctx.cr.set_line_width(DEP_STROKE/2)
+ ctx.cr.stroke()
+ ctx.cr.restore()
+
+ # If thread ran at all, draw a "speed bump", in the last used color, to help the user
+ # with rects that are too short to resolve.
+ tick_height = C.proc_h/5
+ ctx.cr.arc((last_time + sample.time)/2, y+C.proc_h-SEP_HALF, tick_height, math.pi, 0.0)
+ ctx.cr.close_path()
+ ctx.cr.fill()
+
+ if cpu_self > 1.0:
+ writer.info("process CPU+I/O time overflow: time {0:5d}, start_time {1:5d}, tid {2:5d}, width {3:2d}, cpu_self {4: >5.2f}".format(
+ sample.time, proc.start_time, proc.tid/1000, width, cpu_self))
+ OVERFLOW_BAR_HEIGHT=2
+ draw_fill_rect(ctx.cr, PURPLE, (last_time, y+SEP_HALF, width, OVERFLOW_BAR_HEIGHT))
+ cpu_self = 1.0 - float(OVERFLOW_BAR_HEIGHT)/C.proc_h
+
+ last_time = sample.time
+ ctx.cr.restore()
+
+def usec_to_csec(usec):
+ '''would drop precision without the float() cast'''
+ return float(usec) / 1000 / 10
+
+def csec_to_usec(csec):
+ return csec * 1000 * 10
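+    # e.g. usec_to_csec(1500000) == 150.0 (1.5 s); csec_to_usec(150) == 1500000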
+
+def draw_event_label(ctx, label, tx, y):
+ return draw_label_at_time(ctx.cr, HIGHLIGHT_EVENT_COLOR, label, y, tx)
+
+def format_label_time(ctx, delta):
+ if ctx.SWEEP_CSEC:
+ if abs(delta) < C.CSEC:
+ # less than a second, so format as whole milliseconds
+ return '{0:d}'.format(int(delta*10))
+ else:
+ # format as seconds, plus a variable number of digits after the decimal point
+ return '{0:.{prec}f}'.format(float(delta)/C.CSEC,
+ prec=min(3, max(1, abs(int(3*C.CSEC/delta)))))
+ else:
+ # formatting is independent of delta value
+ return '{0:.{prec}f}'.format(float(delta)/C.CSEC,
+                                     prec=min(3, max(0, int(ctx.SEC_W/100))))
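+    # e.g. (added) with a sweep active: delta == 50 csec prints as '500' (whole ms),
+    # delta == 250 csec as '2.5' (seconds, precision shrinking as delta grows)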
+
+def print_event_times(ctx, y, ev_list):
+ last_x_touched = 0
+ last_label_str = None
+ for (ev, tx) in ev_list:
+ if not ctx.app_options.synthesize_sample_start_events and ev.raw_log_line == "pseudo-raw_log_line":
+ continue
+ m_highlight = ev.event_match_any
+ delta = usec_to_csec(ev.time_usec) - ctx.time_origin_relative
- draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
+ label_str = format_label_time(ctx, delta)
+ white_space = 8
+ if tx < last_x_touched + white_space:
+ continue
+ if m_highlight or label_str != last_label_str:
+ if m_highlight:
+ last_x_touched = tx + draw_label_on_bg(
+ ctx.cr,
+ WHITE,
+ HIGHLIGHT_EVENT_COLOR, label_str, y, tx)
+ else:
+ last_x_touched = tx + draw_label_on_bg(
+ ctx.cr,
+ TRANSPARENT,
+ DIM_EVENT_COLOR, label_str, y, tx)
+ last_label_str = label_str
+
+def draw_process_events(ctx, proc, proc_tree, x, y):
+ ctx.cr.save()
+ ctx.cr.set_line_width(0)
+
+ n_highlighted_events = 0
+ last_tx_plus_width_drawn = 0
+ ev_list = [(ev, csec_to_xscaled(ctx, usec_to_csec(ev.time_usec)))
+ for ev in proc.events]
+
+ # draw numbers
+ if ctx.app_options.print_event_times:
+ print_event_times(ctx, y, ev_list)
+
+ # draw ticks, maybe add to dump list
+ for (ev, tx) in ev_list:
+ if not ctx.app_options.synthesize_sample_start_events and ev.raw_log_line == "pseudo-raw_log_line":
+ continue
+ last_m = ev.event_match_any
+ if last_m:
+ ctx.cr.set_source_rgb(*HIGHLIGHT_EVENT_COLOR)
+ W = 2
+ if last_m.lastindex:
+ groups_concat = ""
+ for g in last_m.groups():
+ groups_concat += str(g)
+ else:
+ groups_concat = last_m.group(0)
+ if last_tx_plus_width_drawn < tx:
+ # draw bottom half of tick mark
+ tick_depth = 10
+ ctx.cr.move_to(tx, y +C.proc_h +tick_depth) # bottom
+
+ ctx.cr.rel_line_to(-W, -tick_depth -0.5) # top-left
+ ctx.cr.rel_line_to(2*W, 0) # top-right
+ ctx.cr.close_path()
+ ctx.cr.fill()
+
+ clear = 1.0 # clearance between down-tick and string
+ last_tx_plus_width_drawn = \
+ tx + clear + \
+ draw_event_label(ctx,
+ groups_concat,
+ tx + clear, y+2*C.proc_h-4)
+ n_highlighted_events += 1
+ else:
+ ctx.cr.set_source_rgb(*DIM_EVENT_COLOR)
+ W = 1
+
+ # If an interval bar should start at tx, record the time.
+ last_m = ev.event_match_0
+ if last_m:
+ proc.event_interval_0_tx = tx
+
+ # Draw interval bar that terminates at tx, if any.
+ if proc.event_interval_0_tx != None:
+ last_m = ev.event_match_1
+ if last_m:
+ ctx.cr.rectangle(proc.event_interval_0_tx, y+C.proc_h+0.5,
+ tx-proc.event_interval_0_tx, 1.5)
+ ctx.cr.fill()
+ proc.event_interval_0_tx = None
+
+ if ctx.event_dump_list != None and ctx.SWEEP_CSEC \
+ and ev.raw_log_file: # don't dump synthetic events
+ ev_time_csec = usec_to_csec(ev.time_usec)
+ if ev_time_csec >= ctx.SWEEP_CSEC[0] and ev_time_csec < ctx.SWEEP_CSEC[1]:
+ ctx.event_dump_list.append(ev)
+
+ # draw top half of tick mark
+ ctx.cr.move_to(tx, y+C.proc_h-6.5) # top
+
+ ctx.cr.rel_line_to(-W,6) # bottom-left
+ ctx.cr.rel_line_to(2*W,0) # bottom-right
+ ctx.cr.close_path()
+ ctx.cr.fill()
+
+ ctx.cr.restore()
+ return n_highlighted_events
+
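+# For each sample in which the thread was in STATE_RUNNING or STATE_WAITING,
+# draw a small diamond at that sample's x position; samples in other states
+# leave no mark.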
+def draw_process_state_colors(ctx, proc, proc_tree, x, y, w):
last_tx = -1
for sample in proc.samples :
- tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
-
- # samples are sorted chronologically
- if tx < clip[0]:
- continue
- if tx > clip[0] + clip[2]:
- break
-
- tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
- if last_tx != -1 and abs(last_tx - tx) <= tw:
- tw -= last_tx - tx
- tx = last_tx
- tw = max (tw, 1) # nice to see at least something
-
- last_tx = tx + tw
+ tx = csec_to_xscaled(ctx, sample.time)
state = get_proc_state( sample.state )
-
- color = STATE_COLORS[state]
- if state == STATE_RUNNING:
- alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
- color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
-# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
- elif state == STATE_SLEEPING:
- continue
-
- draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
-
-def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
- ctx.set_source_rgba(*DEP_COLOR)
- ctx.set_dash([2, 2])
- if abs(px - x) < 3:
- dep_off_x = 3
- dep_off_y = proc_h / 4
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, py - dep_off_y)
- ctx.line_to(px, py - dep_off_y)
+ if state == STATE_WAITING or state == STATE_RUNNING:
+ color = STATE_COLORS[state]
+ ctx.cr.set_source_rgba(*color)
+ draw_diamond(ctx.cr, tx, y + C.proc_h/2, 2.5, C.proc_h)
+
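+# Draw the dashed parent/child dependency elbow: from the child's mid-height
+# leftward past the parent's x, then up to the elder sibling's row if one has
+# already been connected (otherwise up to the parent's row), then a short stub
+# back rightward.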
+def draw_process_connecting_lines(ctx, px, py, x, y, elder_sibling_y):
+ ON = 1
+ OFF = 2
+ DASH_LENGTH = ON + OFF
+
+ ctx.cr.save()
+ ctx.cr.set_source_rgba(*DEP_COLOR)
+ ctx.cr.set_dash([ON, OFF]) # repeated draws are not phase-synchronized, resulting in a solid line
+ ctx.cr.set_line_width(DEP_STROKE)
+
+ ctx.cr.move_to(x, y + C.proc_h / 2) # child's center
+ # exdent the connecting lines; otherwise the horizontal would be too short to see
+ dep_off_x = 3
+ dep_off_y = 0 # C.proc_h / 4
+ ctx.cr.line_to(px - dep_off_x, y + C.proc_h / 2) # leftward
+ if elder_sibling_y is not None:
+ ctx.cr.line_to(px - dep_off_x, elder_sibling_y + C.proc_h/2) # upward
else:
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px, y + proc_h / 2)
- ctx.line_to(px, py)
- ctx.stroke()
- ctx.set_dash([])
-
-# elide the bootchart collector - it is quite distorting
-def elide_bootchart(proc):
- return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
+ ctx.cr.line_to(px - dep_off_x, py + C.proc_h/2) # upward
+ ctx.cr.rel_line_to(dep_off_x, 0) # rightward
+ ctx.cr.stroke()
+ ctx.cr.restore()
class CumlSample:
def __init__(self, proc):
@@ -613,8 +1362,8 @@ def get_color(self):
self.color = (c[0], c[1], c[2], 1.0)
return self.color
-
-def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
+# taskstats-specific
+def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, stat_type):
global palette_idx
palette_idx = 0
@@ -627,9 +1376,6 @@ def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
else:
sample_value = 'io'
for proc in proc_tree.process_list:
- if elide_bootchart(proc):
- continue
-
for sample in proc.samples:
total_time += getattr(sample.cpu_sample, sample_value)
if not sample.time in time_hash:
@@ -662,7 +1408,7 @@ def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
# same colors each time we render
random.seed (0)
- ctx.set_line_width(1)
+ ctx.cr.set_line_width(1)
legends = []
labels = []
@@ -679,15 +1425,11 @@ def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
process_total_time = cuml
- # hide really tiny processes
- if cuml * pix_per_ns <= 2:
- continue
-
last_time = times[0]
y = last_below = below[last_time]
last_cuml = cuml = 0.0
- ctx.set_source_rgba(*cs.get_color())
+ ctx.cr.set_source_rgba(*cs.get_color())
for time in times:
render_seg = False
@@ -711,32 +1453,32 @@ def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
# draw the trailing rectangle from the last time to
# before now, at the height of the last segment.
if render_seg:
- w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
- ctx.fill()
-# ctx.stroke()
+ w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration()) + 1
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration())
+ ctx.cr.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
+ ctx.cr.fill()
+# ctx.cr.stroke()
last_time = time
y = below [time] - cuml
row[time] = y
# render the last segment
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration())
y = below[last_time] - cuml
- ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
- ctx.fill()
-# ctx.stroke()
+ ctx.cr.rectangle (x, y, chart_bounds[2] - x, cuml)
+ ctx.cr.fill()
+# ctx.cr.stroke()
# render legend if it will fit
if cuml > 8:
label = cs.cmd
- extnts = ctx.text_extents(label)
+ extnts = ctx.cr.text_extents(label)
label_w = extnts[2]
label_h = extnts[3]
# print "Text extents %g by %g" % (label_w, label_h)
labels.append((label,
- chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
+ chart_bounds[0] + chart_bounds[2] - label_w,
y + (cuml + label_h) / 2))
if cs in legends:
print("ARGH - duplicate process in list !")
@@ -745,19 +1487,18 @@ def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
below = row
- # render grid-lines over the top
- draw_box_ticks(ctx, chart_bounds, sec_w)
+ draw_box_1 (ctx, chart_bounds)
# render labels
for l in labels:
- draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
+ draw_text(ctx.cr, l[0], TEXT_COLOR, l[1], l[2])
# Render legends
font_height = 20
label_width = 300
LEGENDS_PER_COL = 15
LEGENDS_TOTAL = 45
- ctx.set_font_size (TITLE_FONT_SIZE)
+ ctx.cr.set_font_size (TITLE_FONT_SIZE)
dur_secs = duration / 100
cpu_secs = total_time / 1000000000
@@ -770,19 +1511,19 @@ def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
label = "Cumulative I/O usage, by process; total I/O: " \
" %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
- draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
+ draw_text(ctx.cr, label, TEXT_COLOR, chart_bounds[0],
chart_bounds[1] + font_height)
i = 0
legends.sort(lambda a, b: cmp (b[1], a[1]))
- ctx.set_font_size(TEXT_FONT_SIZE)
+ ctx.cr.set_font_size(TEXT_FONT_SIZE)
for t in legends:
cs = t[0]
time = t[1]
- x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
+ x = chart_bounds[0] + int (i/LEGENDS_PER_COL) * label_width
y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
- draw_legend_box(ctx, str, cs.color, x, y, leg_s)
+ draw_legend_box(ctx.cr, str, cs.color, x, y, C.leg_s)
i = i + 1
if i >= LEGENDS_TOTAL:
break
diff --git a/pybootchartgui/gui.py b/pybootchartgui/gui.py
index ddeb88c..4051164 100644
--- a/pybootchartgui/gui.py
+++ b/pybootchartgui/gui.py
@@ -17,8 +17,9 @@
import gtk
import gtk.gdk
import gtk.keysyms
+import math
from . import draw
-from .draw import RenderOptions
+from .draw import DrawContext
class PyBootchartWidget(gtk.DrawingArea):
__gsignals__ = {
@@ -28,11 +29,14 @@ class PyBootchartWidget(gtk.DrawingArea):
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
- def __init__(self, trace, options, xscale):
+ def __init__(self, trace, drawctx, xscale):
gtk.DrawingArea.__init__(self)
+ # large values cause toolbar to morph into a dropdown
+ self.MAX_STATUS_LABEL_CHARS = 50 # XX gross hack
+
self.trace = trace
- self.options = options
+ self.drawctx = drawctx
self.set_flags(gtk.CAN_FOCUS)
@@ -43,6 +47,7 @@ def __init__(self, trace, options, xscale):
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
+ self.connect('key-release-event', self.on_key_release_event)
self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
@@ -52,59 +57,100 @@ def __init__(self, trace, options, xscale):
self.xscale = xscale
self.x, self.y = 0.0, 0.0
- self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
+ self.chart_width, self.chart_height = draw.extents(self.drawctx, self.xscale, self.trace)
self.hadj = None
self.vadj = None
self.hadj_changed_signal_id = None
self.vadj_changed_signal_id = None
- def do_expose_event(self, event):
- cr = self.window.cairo_create()
+ self.sweep_csec = None
- # set a clip region for the expose event
- cr.rectangle(
- event.area.x, event.area.y,
- event.area.width, event.area.height
- )
- cr.clip()
- self.draw(cr, self.get_allocation())
+ self.hide_process_y = [] # XX valid only between self.on_area_button_press() and self.draw()
+
+ def do_expose_event(self, event): # XX called on mouse entering or leaving window -- can these be disabled?
+ cr = self.window.cairo_create()
+ self.draw(cr)
return False
- def draw(self, cr, rect):
- cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
- cr.paint()
+ def cr_set_up_transform(self, cr):
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
- draw.render(cr, self.options, self.xscale, self.trace)
+
+ def draw(self, cr):
+ cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
+ cr.paint() # fill whole DrawingArea with white
+ self.cr_set_up_transform(cr)
+ draw.render(cr, self.drawctx, self.xscale, self.trace,
+ list(self.sweep_csec) if self.sweep_csec else None, # pass by value not ref
+ self.hide_process_y)
+ self.hide_process_y = []
def position_changed(self):
self.emit("position-changed", self.x, self.y)
- ZOOM_INCREMENT = 1.25
+ def device_to_csec_user_y(self, dx, dy):
+ cr = self.window.cairo_create()
+ self.cr_set_up_transform(cr) # depends on (self.x, self.y)
+ ux, uy = cr.device_to_user(dx, dy)
+ #self.chart_width, self.chart_height = draw.extents(self.drawctx, self.xscale, self.trace)
+ return draw._xscaled_to_csec(ux,
+ draw._sec_w(self.xscale),
+ draw._time_origin_drawn(self.drawctx, self.trace)), \
+ uy
+
+ # back-transform center of widget to (time, chart_height) coords
+ def current_center (self):
+ (wx, wy, ww, wh) = self.get_allocation()
+ return self.device_to_csec_user_y (ww/2, wh/2)
+
+ # Assuming all object attributes except self.x and self.y are now valid,
+ # and that a new zoom_ratio or xscale has been set, correspondingly
+ # set top-left user-space corner position (self.x, self.y) so that
+ # (csec_x, user_y) will be at window center
+ def set_center (self, ctr_csec_x, ctr_user_y):
+ ctr_user_x = draw.C.off_x + draw._csec_to_xscaled(ctr_csec_x,
+ draw._time_origin_drawn(self.drawctx, self.trace),
+ draw._sec_w(self.xscale))
+ # XX Use cr.device_to_user_distance() here ?
+ # Subtract off from the center a vector to the top-left corner.
+ self.x = (ctr_user_x - float(self.get_allocation()[2])/self.zoom_ratio/2)
+ self.y = (ctr_user_y - float(self.get_allocation()[3])/self.zoom_ratio/2)
+ self.position_changed()
+ ZOOM_INCREMENT = 2
+ ZOOM_HALF_INCREMENT = math.sqrt(2)
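+ # two "half" wheel-zoom steps compose roughly one full ZOOM_INCREMENT step (sqrt(2)**2 ~= 2)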
+ # Zoom maintaining the content at window's current center untranslated.
+ # "Center" is irrespective of any occlusion.
def zoom_image (self, zoom_ratio):
+ old_csec, old_y = self.current_center ()
+
self.zoom_ratio = zoom_ratio
+
+ self.set_center(old_csec, old_y)
self._set_scroll_adjustments (self.hadj, self.vadj)
self.queue_draw()
- def zoom_to_rect (self, rect):
+ def zoom_to_rect (self, rect): # rename "zoom_to_window_width"?
zoom_ratio = float(rect.width)/float(self.chart_width)
self.zoom_image(zoom_ratio)
self.x = 0
self.position_changed()
def set_xscale(self, xscale):
- old_mid_x = self.x + self.hadj.page_size / 2
+ old_csec, old_y = self.current_center ()
+
self.xscale = xscale
- self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- new_x = old_mid_x
- self.zoom_image (self.zoom_ratio)
+ self.chart_width, self.chart_height = draw.extents(self.drawctx, self.xscale, self.trace)
+
+ self.set_center(old_csec, old_y)
+ self._set_scroll_adjustments (self.hadj, self.vadj)
+ self.queue_draw()
def on_expand(self, action):
- self.set_xscale (self.xscale * 1.5)
+ self.set_xscale (self.xscale * 1.1)
def on_contract(self, action):
- self.set_xscale (self.xscale / 1.5)
+ self.set_xscale (self.xscale / 1.1)
def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
@@ -116,16 +162,69 @@ def on_zoom_fit(self, action):
self.zoom_to_rect(self.get_allocation())
def on_zoom_100(self, action):
- self.zoom_image(1.0)
+ self.zoom_image(1.0) # XX replace with: self.zoom_ratio = 1.0 \ self.x = 0 \ self.y = 0
self.set_xscale(1.0)
- def show_toggled(self, button):
- self.options.app_options.show_all = button.get_property ('active')
+ def absolute_uptime_event_times(self, action, current):
+ self.drawctx.app_options.absolute_uptime_event_times = action.get_current_value()
+ self.queue_draw()
+
+ def show_IO_ops(self, action, current):
+ self.drawctx.app_options.show_ops_not_bytes = action.get_current_value()
+ self.queue_draw()
+
+ def show_thread_details(self, action):
+ self.drawctx.app_options.show_all = action.get_active()
+ self.queue_draw()
+
+ def hide_events(self, action):
+ self.drawctx.app_options.hide_events = not action.get_active()
+ self.queue_draw()
+
+ def print_event_times(self, action):
+ self.drawctx.app_options.print_event_times = action.get_active()
+ self.queue_draw()
+
+ def event_source_toggle(self, action):
+ self.drawctx.app_options.event_source[action.get_name()].enable = action.get_active()
+ self.drawctx.ps_event_lists_valid = False
+ self.queue_draw()
+
+ def _toggle_EventColor_by_label(self, ecl, action):
+ for ec in ecl:
+ if ec.label == action.get_name():
+ break
+ ec.enable = action.get_active()
+ self.drawctx.ps_event_lists_valid = False
+ self.queue_draw()
+
+ def event_toggle(self, action):
+ self._toggle_EventColor_by_label(
+ self.drawctx.app_options.event_color, action)
+
+ def event_interval_toggle(self, action):
+ self._toggle_EventColor_by_label(
+ self.drawctx.app_options.event_interval_color, action)
+
+ def dump_raw_event_context(self, action):
+ self.drawctx.app_options.dump_raw_event_context = action.get_active()
+ self.queue_draw()
+
+ def show_legends(self, action):
+ self.drawctx.app_options.show_legends = action.get_active()
self.queue_draw()
POS_INCREMENT = 100
+ def _set_italic_label(self, text):
+ self.status_label.set_markup("{0:s}".format(text))
+
+ # file:///usr/share/doc/libgtk2.0-doc/gtk/GtkWidget.html says:
+ # Returns :
+ # TRUE to stop other handlers from being invoked for the event.
+ # FALSE to propagate the event further
def on_key_press_event(self, widget, event):
+ # print str(event)
if event.keyval == gtk.keysyms.Left:
self.x -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Right:
@@ -134,50 +233,93 @@ def on_key_press_event(self, widget, event):
self.y -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == gtk.keysyms.Down:
self.y += self.POS_INCREMENT/self.zoom_ratio
+ elif event.keyval == gtk.keysyms.Control_L or event.keyval == gtk.keysyms.Control_R:
+ self._set_italic_label("time dilation w/ mouse wheel")
+ elif event.keyval == gtk.keysyms.Alt_L or event.keyval == gtk.keysyms.Alt_R:
+ self._set_italic_label("zoom w/ mouse wheel")
else:
return False
- self.queue_draw()
+ #self.queue_draw()
self.position_changed()
return True
+ def on_key_release_event(self, widget, event):
+ self._set_italic_label("")
+ return True
+
def on_area_button_press(self, area, event):
- if event.button == 2 or event.button == 1:
+ # cancel any pending action based on an earlier button pressed and now held down
+ self.hide_process_y = []
+
+ if event.button == 1:
+ self._set_italic_label("pan across chart")
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
+ elif event.button == 2 and len(self.hide_process_y) == 0:
+ self._set_italic_label("hide/unhide threads/processes")
+ self.hide_process_y.append( self.device_to_csec_user_y(event.x, event.y)[1])
+ elif event.button == 3:
+ if not self.sweep_csec:
+ self._set_italic_label("dump events within time range to stdout")
+ self.sweep_csec = [self.device_to_csec_user_y(event.x, 0)[0],
+ self.device_to_csec_user_y(self.trace.end_time, 0)[0]]
+ else:
+ self._set_italic_label("")
+ self.sweep_csec = None
+ self.queue_draw()
if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
return False
- return False
+ return True
def on_area_button_release(self, area, event):
- if event.button == 2 or event.button == 1:
+ self.status_label.set_markup("")
+ if event.button == 1:
area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
- return False
+ elif event.button == 2 and len(self.hide_process_y) == 1:
+ self.hide_process_y.append( self.device_to_csec_user_y(event.x, event.y)[1])
+ self.queue_draw()
+ elif event.button == 3:
+ if self.sweep_csec and \
+ self.sweep_csec[1] != self.device_to_csec_user_y(self.trace.end_time, 0)[0]:
+ self.drawctx.event_dump_list = [] # XX
+ self.queue_draw()
+ return True
def on_area_scroll_event(self, area, event):
if event.state & gtk.gdk.CONTROL_MASK:
if event.direction == gtk.gdk.SCROLL_UP:
- self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
+ self.on_expand(None)
return True
if event.direction == gtk.gdk.SCROLL_DOWN:
- self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
+ self.on_contract(None)
+ return True
+ elif event.state & gtk.gdk.MOD1_MASK:
+ if event.direction == gtk.gdk.SCROLL_UP:
+ self.zoom_image(self.zoom_ratio * self.ZOOM_HALF_INCREMENT)
+ return True
+ if event.direction == gtk.gdk.SCROLL_DOWN:
+ self.zoom_image(self.zoom_ratio / self.ZOOM_HALF_INCREMENT)
return True
- return False
def on_area_motion_notify(self, area, event):
state = event.state
- if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
+ if state & gtk.gdk.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
+ if self.prevmousex==None or self.prevmousey==None:
+ return True
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
- self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
+ elif state & gtk.gdk.BUTTON3_MASK and self.sweep_csec:
+ self.sweep_csec[1] = self.device_to_csec_user_y(event.x, 0)[0]
+ #self.queue_draw()
return True
def on_set_scroll_adjustments(self, area, hadj, vadj):
@@ -207,6 +349,7 @@ def _set_adj_upper(self, adj, upper):
if value_changed:
adj.value_changed()
+ # sets scroll bars to correct position and length -- no direct effect on image
def _set_scroll_adjustments(self, hadj, vadj):
if hadj == None:
hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
@@ -223,7 +366,7 @@ def _set_scroll_adjustments(self, hadj, vadj):
if hadj != None:
self.hadj = hadj
self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
- self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
+ self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed) # XX leaky?
if vadj != None:
self.vadj = vadj
@@ -242,8 +385,16 @@ def on_position_changed(self, widget, x, y):
PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
class PyBootchartShell(gtk.VBox):
- ui = '''
-
+ def __init__(self, window, trace, drawctx, xscale):
+ gtk.VBox.__init__(self)
+
+ self.widget = PyBootchartWidget(trace, drawctx, xscale)
+
+ # Create a UIManager instance
+ uimanager = self.uimanager = gtk.UIManager()
+
+ uimanager.add_ui_from_string('''
+
@@ -253,58 +404,227 @@ class PyBootchartShell(gtk.VBox):
-
- '''
- def __init__(self, window, trace, options, xscale):
- gtk.VBox.__init__(self)
-
- self.widget = PyBootchartWidget(trace, options, xscale)
+
+
+
+
+
+ ''')
- # Create a UIManager instance
- uimanager = self.uimanager = gtk.UIManager()
+ actiongroup = gtk.ActionGroup('Actions')
- # Add the accelerator group to the toplevel window
- accelgroup = uimanager.get_accel_group()
- window.add_accel_group(accelgroup)
+ # Zooming buttons
+ actiongroup.add_actions((
+ ('Expand', gtk.STOCK_ORIENTATION_LANDSCAPE, None, None, "widen", self.widget.on_expand),
+ ('Contract', gtk.STOCK_ORIENTATION_PORTRAIT, None, None, "narrow", self.widget.on_contract),
+ ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, "zoom in", self.widget.on_zoom_in),
+ ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, "zoom out", self.widget.on_zoom_out),
+ ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, "zoom-to-fit while preserving aspect ratio", self.widget.on_zoom_fit),
+ ('Zoom100', gtk.STOCK_ZOOM_100, None, None, "zoom to best image quality", self.widget.on_zoom_100),
+ ))
- # Create an ActionGroup
- actiongroup = gtk.ActionGroup('Actions')
- self.actiongroup = actiongroup
+ # "Time" drop-down menu
+ actiongroup.add_radio_actions([
+ ("bootchart daemon start", None, "bootchart daemon start -- startup of the collector daemon on the target", None, None, False),
+ ("system boot", None, "system boot i.e. 'uptime'", None, None, True),
+ ], 1 if drawctx.app_options.absolute_uptime_event_times else 0, self.widget.absolute_uptime_event_times
+ )
+ actiongroup.add_actions((
+ ("Time", None, "Time", None, "XXX why does this not show up??? time-origin selection for display"),
+ ))
- # Create actions
+ # I/O dropdown
+ actiongroup.add_radio_actions([
+ ("hardware operations", None, "hardware operations", None, None, True),
+ ("512-byte sectors", None, "512-byte sectors", None, None, False),
+ ], drawctx.app_options.show_ops_not_bytes, self.widget.show_IO_ops
+ )
actiongroup.add_actions((
- ('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
- ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
- ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
- ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
- ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
- ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
+ ("I/O", None, "I/O", None, ""),
))
- # Add the actiongroup to the uimanager
- uimanager.insert_action_group(actiongroup, 0)
+ # Threads dropdown
+ if not drawctx.kernel_only:
+ uimanager.add_ui_from_string('''
+
+
+
+
+
+ ''')
+ actiongroup.add_toggle_actions([
+ ('details', None, "details", None, None, self.widget.show_thread_details, drawctx.app_options.show_all),
+ ])
+ actiongroup.add_actions([
+ ("Threads", None, "Threads", None, ""),
+ ])
+
+
+ # Events dropdown
+ ui_Events = '''
+
+
+
+
+
+ '''
+ actiongroup.add_toggle_actions([
+ ('show', None, "show", None, None, self.widget.hide_events, not drawctx.app_options.hide_events),
+ ('times', None, "show times", None, None, self.widget.print_event_times, drawctx.app_options.print_event_times),
+ ])
+ uimanager.add_ui_from_string(ui_Events)
+ actiongroup.add_actions([
+ ("Events", None, "Events", None, ""),
+ ])
+
+
+ # Event Source dropdown
+ ui_Event_Source = '''
+
+
+
+
+
+ '''
+ uimanager.add_ui_from_string(ui_Event_Source)
+ actiongroup.add_actions([
+ ("Event_Source", None, "Ev-Sources", None, ""),
+ ])
+
+
+ # Event Color dropdown
+ ui_Event_Color = '''
+
+
+
+
+
+ '''
+ uimanager.add_ui_from_string(ui_Event_Color)
+ actiongroup.add_actions([
+ ("Event_Color", None, "Ev-Color", None, ""),
+ ])
+
+ # Stdout dropdown
+ # FIXME: Support of raw log dump of multiple log files is hard -- disable for now.
+ if False:
+ uimanager.add_ui_from_string( '''
+
+
+
+
+
+ ''')
+ actiongroup.add_toggle_actions([
+ ('dump raw', None, "dump raw log lines, with context", None, None,
+ self.widget.dump_raw_event_context, drawctx.app_options.dump_raw_event_context),
+ ])
+ actiongroup.add_actions([
+ ("Stdout", None, "Stdout", None, ""),
+ ])
+
+ # Help dropdown
+ uimanager.add_ui_from_string( '''
+
+
+
+
+
+ ''')
+ actiongroup.add_toggle_actions([
+ ('show legends', None, "show legends", None, None,
+ self.widget.show_legends, drawctx.app_options.show_legends),
+ ])
+ actiongroup.add_actions([
+ ("Help", None, "Help", None, ""),
+ ])
- # Add a UI description
- uimanager.add_ui_from_string(self.ui)
+ uimanager.insert_action_group(actiongroup, 0)
- # Scrolled window
- scrolled = gtk.ScrolledWindow()
- scrolled.add(self.widget)
# toolbar / h-box
- hbox = gtk.HBox(False, 8)
+ hbox = gtk.HBox(False, 0)
+ hbox.set_homogeneous(False)
- # Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
- hbox.pack_start(toolbar, True, True)
-
- if not options.kernel_only:
- # Misc. options
- button = gtk.CheckButton("Show more")
- button.connect ('toggled', self.widget.show_toggled)
- hbox.pack_start (button, False, True)
+ toolbar.set_style(gtk.TOOLBAR_ICONS) # !? no effect
+ toolbar.set_icon_size(gtk.ICON_SIZE_SMALL_TOOLBAR) # !? no effect
+ toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL)
+ #toolbar.set_alignment(0.0, 0.5)
+ hbox.pack_start(toolbar, expand=True, fill=True) # if expand== False, a dropdown menu appears
+
+ menubar = uimanager.get_widget("/MenuBar")
+ #menubar.set_alignment(0.0, 0.5)
+ hbox.pack_start(menubar, expand=False)
+
+ self.widget.status_label = gtk.Label("")
+ self.widget.status_label.set_width_chars(self.widget.MAX_STATUS_LABEL_CHARS)
+ self.widget.status_label.set_single_line_mode(True)
+ self.widget.status_label.set_alignment(0.98, 0.5)
+ hbox.pack_end(self.widget.status_label, expand=False, fill=True, padding=2)
self.pack_start(hbox, False)
+
+ # Scrolled window
+ scrolled = gtk.ScrolledWindow()
+ scrolled.add(self.widget)
+
self.pack_start(scrolled)
self.show_all()
@@ -314,34 +634,41 @@ def grab_focus(self, window):
class PyBootchartWindow(gtk.Window):
- def __init__(self, trace, app_options):
+ def __init__(self, app_options, trace):
gtk.Window.__init__(self)
window = self
window.set_title("Bootchart %s" % trace.filename)
- window.set_default_size(750, 550)
-
- tab_page = gtk.Notebook()
- tab_page.show()
- window.add(tab_page)
+ screen = window.get_screen()
+ window.set_default_size(screen.get_width() * 95/100,
+ screen.get_height() * 95/100)
+
+ full_drawctx = DrawContext(app_options, trace)
+ full_tree = PyBootchartShell(window, trace, full_drawctx,
+ # XX "1.7" is a hack
+ float(window.get_default_size()[0]) * 1.7 / \
+ ((trace.end_time - trace.start_time) + \
+ 2 * trace.ps_stats.sample_period))
+ # FIXME: Permanently disable top-level tabs?
+ if True:
+ window.add(full_tree)
+ else:
+ tab_page = gtk.Notebook()
+ tab_page.show()
+ window.add(tab_page)
- full_opts = RenderOptions(app_options)
- full_tree = PyBootchartShell(window, trace, full_opts, 1.0)
- tab_page.append_page (full_tree, gtk.Label("Full tree"))
+ tab_page.append_page (full_tree, gtk.Label("Full tree"))
- if trace.kernel is not None and len (trace.kernel) > 2:
- kernel_opts = RenderOptions(app_options)
- kernel_opts.cumulative = False
- kernel_opts.charts = False
- kernel_opts.kernel_only = True
- kernel_tree = PyBootchartShell(window, trace, kernel_opts, 5.0)
- tab_page.append_page (kernel_tree, gtk.Label("Kernel boot"))
+ if trace.kernel is not None and len (trace.kernel) > 2:
+ kernel_drawctx = DrawContext(app_options, trace, cumulative = False, charts = False, kernel_only = True)
+ kernel_tree = PyBootchartShell(window, trace, kernel_drawctx, 5.0)
+ tab_page.append_page (kernel_tree, gtk.Label("Kernel boot"))
full_tree.grab_focus(self)
self.show()
-def show(trace, options):
- win = PyBootchartWindow(trace, options)
+def show(trace, app_options):
+ win = PyBootchartWindow(app_options, trace)
win.connect('destroy', gtk.main_quit)
gtk.main()
diff --git a/pybootchartgui/main.py.in b/pybootchartgui/main.py.in
index 57f71dc..b063c55 100644
--- a/pybootchartgui/main.py.in
+++ b/pybootchartgui/main.py.in
@@ -20,72 +20,143 @@ from __future__ import print_function
import sys
import os
-import optparse
+import argparse
+import string
+
+from copy import copy
from . import parsing
from . import batch
+from . import writer
+from .samples import EventSource, EventColor
def _mk_options_parser():
- """Make an options parser."""
- usage = "%prog [options] PATH, ..., PATH"
- version = "%prog v@VER@"
- parser = optparse.OptionParser(usage, version=version)
- parser.add_option("-i", "--interactive", action="store_true", dest="interactive", default=False,
- help="start in active mode")
- parser.add_option("-f", "--format", dest="format", default="png", choices=["png", "svg", "pdf"],
- help="image format (png, svg, pdf); default format png")
- parser.add_option("-o", "--output", dest="output", metavar="PATH", default=None,
- help="output path (file or directory) where charts are stored")
- parser.add_option("-n", "--no-prune", action="store_false", dest="prune", default=True,
- help="do not prune the process tree")
- parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
- help="suppress informational messages")
- parser.add_option("-t", "--boot-time", action="store_true", dest="boottime", default=False,
- help="only display the boot time of the boot in text format (stdout)")
- parser.add_option("--very-quiet", action="store_true", dest="veryquiet", default=False,
- help="suppress all messages except errors")
- parser.add_option("--verbose", action="store_true", dest="verbose", default=False,
- help="print all messages")
- parser.add_option("--profile", action="store_true", dest="profile", default=False,
- help="profile rendering of chart (only useful when in batch mode indicated by -f)")
- parser.add_option("--show-pid", action="store_true", dest="show_pid", default=False,
- help="show process ids in the bootchart as 'processname [pid]'")
- parser.add_option("--show-all", action="store_true", dest="show_all", default=False,
- help="show all process information in the bootchart as '/process/path/exe [pid] [args]'")
- parser.add_option("--crop-after", dest="crop_after", metavar="PROCESS", default=None,
- help="crop chart when idle after PROCESS is started")
- parser.add_option("--annotate", action="append", dest="annotate", metavar="PROCESS", default=None,
- help="annotate position where PROCESS is started; can be specified multiple times. " +
- "To create a single annotation when any one of a set of processes is started, use commas to separate the names")
- parser.add_option("--annotate-file", dest="annotate_file", metavar="FILENAME", default=None,
- help="filename to write annotation points to")
+ usage = "%(prog)s [options] PATH"
+ parser = argparse.ArgumentParser(usage=usage,
+ description="PATH must point to a bootchart*.tgz",
+ epilog=None)
+
+ parser.add_argument('--version', action='version', version="%(prog)s v@VER@")
+ parser.add_argument("--absolute-uptime-event-times", action="store_true", default=False,
+ dest="absolute_uptime_event_times",
+ help="event times shown are relative to system boot, rather than beginning of sampling")
+ parser.add_argument("-H", "--hide-low-CPU", dest="hide_low_CPU", type=int, default=0, metavar="CSEC",
+ help="Hide any thread consuming less than CSEC of CPU time," +
+ " unless it otherwise shows itself to be of interest." +
+ " Hiding means the thread is not shown on initial rendering, but can be shown by a mouse click." +
+ " A value of 0 may be specified, meaning the special case of showing threads that report 0 CSECs" +
+ " consumed, but are known to have executed due to having been born, died, or reported some" +
+ " state other than 'S' when sampled by the collector." +
+ " A negative value causes all threads to be shown.")
+ parser.add_argument("-q", "--quiet", action="store_true", dest="quiet", default=False,
+ help="suppress informational messages")
+ parser.add_argument("-v", "--verbose", action="count", dest="verbose", default=0,
+ help="Print debugging messages. Issuing twice will print yet more messages.")
+ parser.add_argument("--show-legends", action="store_true", dest="show_legends", default=False,
+ help="show legend text offering keys to chart symbols")
+ parser.add_argument("--show-pid", action="store_true", dest="show_pid", default=False,
+ help="show process ids in the bootchart as 'processname [pid]'")
+ parser.add_argument("--show-all", action="store_true", dest="show_all", default=False,
+ help="show all process information in the bootchart as '/process/path/exe [pid] [args]'")
+ parser.add_argument("-j", "--justify", dest="justify", default="left", choices=["left", "center"],
+ help="relocate the text within process bars (left, center)")
+
+ # disk stats
+ pg_IO = parser.add_argument_group("I/O-specific",
+ "Options specific to graphing of I/O activity, as gathered from /proc/diskstats")
+ pg_IO.add_argument("--partition", action="append", dest="partitions", default=[],
+ metavar="PARTITION",
+ help="draw a disk stat chart for any block device partition whose basename matches PARTITION")
+ pg_IO.add_argument("--relabel-partition", action="append", dest="partition_labels", default=[],
+ help="list of per-partition strings, each replacing the raw per-partition device name" +
+ "in the corresponding position")
+ pg_IO.add_argument("--show-ops-not-bytes", action="store_true", dest="show_ops_not_bytes", default=False,
+ help="chart number of I/O operations handed to driver, rather than bytes transferred per sample")
+
+ parser.add_argument_group(pg_IO)
+
+ # event plotting
+ parser.set_defaults(event_source = {})
+ pg_Events = parser.add_argument_group( "Event-specific",
+ "Options specific to plotting of instantaneous, asynchronous events, as gathered from" +
+ " target-specific logs." +
+ " A few internally generated pseudo-events are also available." +
+ " N.B.: The '_' character is unusable in label strings, as the PyGTK toolkit silently discards it.")
+ pg_Events.add_argument("--hide-events", action="store_true", dest="hide_events", default=False,
+ help="hide all event-related info, even the small black triangles of unhighlighted events.")
+ pg_Events.add_argument("--no-print-event-times", action="store_false", default=True, dest="print_event_times",
+ help="suppress printing time of each event, inside the box of the reporting process")
+ pg_Events.add_argument("--event-source", metavar=("label", "filename", "regex"),
+ action=handle_event_source_option, nargs=3,
+ help="Filename should contain event records, one per line of text." +
+ " Regex should contain assigments using (?P) syntax to initialize variables" +
+ " CLOCK_REALTIME_usec, pid, tid, and comm.")
+ pg_Events.add_argument("--dump-raw-event-context", action="store_true", dest="dump_raw_event_context", default=False,
+ help="in addition to log lines for selected events, dump log lines as well," +
+ " retaining original log sort order, which may not be temporally correct.")
+
+ pg_Events.add_argument("--no-synthesize-sample-start-events", action="store_false", default=True,
+ dest="synthesize_sample_start_events",
+ help="disable synthesis of an event marking each boundary between sample periods." +
+ "These are helpful in analyzing collector timing issues.")
+ parser.add_argument_group(pg_Events)
+
+ pg_EventHighlighting = parser.add_argument_group( "Event Highlighting",
+ "Select events to be highlighted in color, and annotated" +
+ " with text specific to the event, space permitting.")
+ parser.set_defaults(event_color = [])
+ pg_EventHighlighting.add_argument("-e", "--events", metavar=("label", "regex"), nargs=2,
+ action=handle_event_color,
+ help="Highlight events matching regex." +
+ " Regex syntax is similar to grep 'extended' REs." +
+ " To match FOO or BAR anywhere on the log line, use 'FOO|BAR'." +
+ " (file:///usr/share/doc/python/html/library/re.htm)")
+ parser.set_defaults(event_interval_color = [])
+ pg_EventHighlighting.add_argument("--events-interval", metavar=("label", "regex_start", "regex_end"), nargs=3,
+ action=handle_event_interval_color,
+ help="In addition to highlighting the events matching the two regex arguments," +
+ " highlight the interval of time between them.")
+ pg_EventHighlighting.add_argument('-d', "--events-disable", metavar=("label"),
+ action=handle_events_disable, nargs=1,
+ help="Disable event-parsing regex matching (label) -- its checkbox on the Ev-Color menu will be cleared." +
+ " Must appear after the option that specified (label).")
+ parser.add_argument_group(pg_EventHighlighting)
+
+ pg_Scripting = parser.add_argument_group("Scripting support",
+ "Options most useful in scripted processing of tgz batches")
+
+ pg_Scripting.add_argument("--batch", action="store_false", dest="interactive", default=True,
+ help="print a bootchart image, rather than starting the interactive GUI")
+ pg_Scripting.add_argument("--merge", dest="merge", default=False,
+ help="Prune the process tree of all activity from dull children: " +
+ "stats are added to parent, child process is lost in drawing of chart.")
+ pg_Scripting.add_argument("--prehistory", action="store_true", dest="prehistory", default=False,
+ help="extend process bars to the recorded start time of each, even if before any samples were collected")
+ pg_Scripting.add_argument("-t", "--boot-time", action="store_true", dest="boottime", default=False,
+ help="only display the boot time of the boot in text format (stdout)")
+ pg_Scripting.add_argument("-f", "--format", dest="format", default="png", choices=["png", "svg", "pdf"],
+ help="image format (png, svg, pdf); default format png")
+ pg_Scripting.add_argument("--very-quiet", action="store_true", dest="veryquiet", default=False,
+ help="suppress all messages except errors")
+ pg_Scripting.add_argument("-o", "--output", dest="output", metavar=("PATH"), default=None,
+ help="output path (file or directory) where charts are stored")
+ pg_Scripting.add_argument("--profile", action="store_true", dest="profile", default=False,
+ help="profile rendering of chart (only useful when in batch mode indicated by -f)")
+ pg_Scripting.add_argument("--crop-after", dest="crop_after", metavar=("PROCESS"), default=None,
+ help="crop chart when idle after PROCESS is started")
+ pg_Scripting.add_argument("--annotate", action="append", dest="annotate", metavar=("PROCESS"), default=None,
+ help="annotate position where PROCESS is started; can be specified multiple times. " +
+ "To create a single annotation when any one of a set of processes is started," +
+ " use commas to separate the names")
+ pg_Scripting.add_argument("--annotate-file", dest="annotate_file", metavar=("FILENAME"), default=None,
+ help="filename to write annotation points to")
+ parser.add_argument_group(pg_Scripting)
+
+ parser.add_argument(dest="paths", action="append",
+ help="bootchart.tgz")
+
return parser
-class Writer:
- def __init__(self, write, options):
- self.write = write
- self.options = options
-
- def error(self, msg):
- self.write(msg)
-
- def warn(self, msg):
- if not self.options.quiet:
- self.write(msg)
-
- def info(self, msg):
- if self.options.verbose:
- self.write(msg)
-
- def status(self, msg):
- if not self.options.quiet:
- self.write(msg)
-
-def _mk_writer(options):
- def write(s):
- print(s)
- return Writer(write, options)
-
def _get_filename(paths, options):
"""Construct a usable filename for outputs based on the paths and options given on the commandline."""
dname = ""
@@ -108,20 +179,45 @@ def _get_filename(paths, options):
fname = os.path.split(fname)[-1]
return os.path.join (dname, fname + "." + options.format)
+class handle_event_source_option(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ # XX superfluous: note that pdb `print values[1]` will double-print '\' characters
+ # fix = string.replace(values[1], r"\\", "\x5c")
+ es = EventSource(values[0], values[1], values[2])
+ namespace.event_source[es.label] = es
+
+class handle_event_color(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ ec = EventColor(values[0], values[1], None, True)
+ namespace.event_color.append(ec)
+
+class handle_event_interval_color(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ ec = EventColor(values[0], values[1], values[2], True)
+ namespace.event_interval_color.append(ec)
+
+class handle_events_disable(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ for ec in namespace.event_color + namespace.event_interval_color:
+ if ec.label == values[0]:
+ ec.enable = False
+ return
+ raise parsing.ParseError("Event-Color label {1:s} not found\n\t{0:s}".format(
+ option_string, values[0]))
+
def main(argv=None):
try:
if argv is None:
argv = sys.argv[1:]
parser = _mk_options_parser()
- options, args = parser.parse_args(argv)
- writer = _mk_writer(options)
- if len(args) == 0:
- print("No path given, trying /var/log/bootchart.tgz")
- args = [ "/var/log/bootchart.tgz" ]
+ # XX 'argparse' documentation prefers to name the variable 'args' not 'options'
+ options = parser.parse_args(
+ ['--events', 'PC samples', '\A(0x[0-9a-f]+|\w+\+0x[0-9a-f]+)\Z'] + argv)
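+ # (the injected '--events' rule highlights the PC-sample pseudo-events
+ #  synthesized from kstkeip/wchan, i.e. tokens like '0x00400f3a' or 'name+0x1a')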
+ writer.init(options)
- res = parsing.Trace(writer, args, options)
+ res = parsing.Trace(options)
if options.interactive:
from . import gui
@@ -132,7 +228,7 @@ def main(argv=None):
if proc_tree.idle:
duration = proc_tree.idle
else:
- duration = proc_tree.duration
+ duration = proc_tree.duration()
dur = duration / 100.0
print('%02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60)))
else:
@@ -147,9 +243,9 @@ def main(argv=None):
print(file=f)
finally:
f.close()
- filename = _get_filename(args, options)
+ filename = options.filename
def render():
- batch.render(writer, res, options, filename)
+ batch.render(res, options, filename)
if options.profile:
import cProfile
import pstats
diff --git a/pybootchartgui/parsing.py b/pybootchartgui/parsing.py
index 16d0d24..7f47fe7 100644
--- a/pybootchartgui/parsing.py
+++ b/pybootchartgui/parsing.py
@@ -20,22 +20,29 @@
import string
import re
import tarfile
+import struct
+import bisect
from time import clock
+import collections
from collections import defaultdict
from functools import reduce
+from types import *
+from .draw import usec_to_csec, csec_to_usec
from .samples import *
from .process_tree import ProcessTree
+from . import writer
# Parsing produces as its end result a 'Trace'
class Trace:
- def __init__(self, writer, paths, options):
+ def __init__(self, options):
self.headers = None
self.disk_stats = None
self.ps_stats = None
+ self.ps_threads_stats = None
self.taskstats = None
- self.cpu_stats = None
+ self.cpu_stats = None # from /proc/stat
self.cmdline = None
self.kernel = None
self.kernel_tree = None
@@ -43,18 +50,26 @@ def __init__(self, writer, paths, options):
self.parent_map = None
self.mem_stats = None
- parse_paths (writer, self, paths)
+ # Read in all files, parse each into a time-ordered list, init many of the attributes above
+ parse_paths (self, options.paths, options)
+
+ assert self.ps_stats, "data sets that contain no proc_ps.log, only proc_ps_threads.log are now deprecated"
+ if self.ps_threads_stats:
+ for (k,v) in self.ps_threads_stats.process_map.iteritems():
+ self.ps_stats.process_map[k] = v
+ self.ps_threads_stats = True
+
if not self.valid():
raise ParseError("empty state: '%s' does not contain a valid bootchart" % ", ".join(paths))
# Turn that parsed information into something more useful
# link processes into a tree of pointers, calculate statistics
- self.compile(writer)
+ self.adorn_process_map(options)
# Crop the chart to the end of the first idle period after the given
# process
if options.crop_after:
- idle = self.crop (writer, options.crop_after)
+ idle = self.crop (options.crop_after)
else:
idle = None
@@ -70,23 +85,37 @@ def __init__(self, writer, paths, options):
else:
self.times.append(None)
- self.proc_tree = ProcessTree(writer, self.kernel, self.ps_stats,
+ self.proc_tree = ProcessTree(self.kernel, self.ps_stats,
self.ps_stats.sample_period,
self.headers.get("profile.process"),
- options.prune, idle, self.taskstats,
+ options, idle, self.taskstats,
self.parent_map is not None)
if self.kernel is not None:
- self.kernel_tree = ProcessTree(writer, self.kernel, None, 0,
+ self.kernel_tree = ProcessTree(self.kernel, None, 0,
self.headers.get("profile.process"),
- False, None, None, True)
+ options, None, None, True)
+
+ self._generate_sample_start_pseudo_events(options)
+
+ def _generate_sample_start_pseudo_events(self, options):
+ es = EventSource(" ~ bootchartd sample start points", "", "") # empty regex
+ es.parsed = []
+ es.enable = False
+ init_pid = 1
+ for cpu in self.cpu_stats:
+ # assign to the init process's bar, for lack of any better
+ ev = EventSample(csec_to_usec(cpu.time),
+ init_pid, init_pid,
+ "comm", None, None, "")
+ es.parsed.append(ev)
+ options.event_source[es.label] = es
def valid(self):
return self.headers != None and self.disk_stats != None and \
self.ps_stats != None and self.cpu_stats != None
-
- def compile(self, writer):
+ def adorn_process_map(self, options):
def find_parent_id_for(pid):
if pid is 0:
@@ -118,13 +147,14 @@ def find_parent_id_for(pid):
# print "proc %d '%s' not in cmdline" % (rpid, proc.exe)
# re-parent any stray orphans if we can
- if self.parent_map is not None:
+ if self.parent_map is not None: # requires either "kernel_pacct" or "paternity.log"
for process in self.ps_stats.process_map.values():
ppid = find_parent_id_for (int(process.pid / 1000))
if ppid:
process.ppid = ppid * 1000
- # stitch the tree together with pointers
+ # Init the upward "parent" pointers.
+ # Downward child pointers stored in a list -- remains empty until the ProcessTree is inited.
for process in self.ps_stats.process_map.values():
process.set_parent (self.ps_stats.process_map)
@@ -132,7 +162,7 @@ def find_parent_id_for(pid):
for process in self.ps_stats.process_map.values():
process.calc_stats (self.ps_stats.sample_period)
- def crop(self, writer, crop_after):
+ def crop(self, crop_after):
def is_idle_at(util, start, j):
k = j + 1
@@ -191,7 +221,7 @@ def is_idle(util, start):
and self.disk_stats[-1].time > crop_at:
self.disk_stats.pop()
- self.ps_stats.end_time = crop_at
+ self.end_time = crop_at
cropped_map = {}
for key, value in self.ps_stats.process_map.items():
@@ -245,7 +275,168 @@ def parse(block):
blocks = file.read().split('\n\n')
return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
-def _parse_proc_ps_log(writer, file):
+def _save_PC_sample(trace, es, time, pid, tid, comm, addr):
+ ev = EventSample(csec_to_usec(time), pid, tid, comm, None, None, "0x{0:08x}".format(addr))
+ es.parsed.append(ev)
+
+# Cases to handle:
+# 1. run starting (ltime==None)
+# 1.1 thread started in preceding sample_period
+# 1.2 thread started earlier
+# 2. run continuing
+# 2.1 thread continues (tid in processMap)
+# 2.2 thread starts
+def _handle_sample(options, trace, processMap, ltime, time,
+ pid, tid, lwp, nice, cmd, state, ppid, userCpu, sysCpu,
+ kstkeip, wchan, delayacct_blkio_ticks, c_user, c_sys, starttime,
+ num_cpus):
+ assert(type(c_user) is IntType)
+ assert(type(c_sys) is IntType)
+
+ if tid in processMap:
+ proc = processMap[tid]
+ else:
+ if time < starttime:
+ # large values signify a collector problem, e.g. resource starvation
+ writer.info("time (%dcs) < starttime (%dcs), diff %d -- TID %d" %
+ (time, starttime, time-starttime, tid/1000))
+
+ proc = Process(pid, tid, lwp, cmd, ppid, starttime)
+ if ltime: # process is starting during profiling run
+ proc.user_cpu_ticks[0] = 0
+ proc.sys_cpu_ticks [0] = 0
+ proc.delayacct_blkio_ticks [0] = 0
+ ltime = starttime
+ else:
+ proc.user_cpu_ticks[0] = userCpu
+ proc.sys_cpu_ticks [0] = sysCpu
+ proc.delayacct_blkio_ticks [0] = delayacct_blkio_ticks
+ ltime = -100000 # XX hacky way of forcing reported load toward zero
+ proc.user_cpu_ticks[-1] = proc.user_cpu_ticks[0]
+ proc.sys_cpu_ticks [-1] = proc.sys_cpu_ticks [0]
+ proc.delayacct_blkio_ticks[-1] = proc.delayacct_blkio_ticks[0]
+ processMap[tid] = proc # insert new process into the dict
+
+ proc.set_cmd(cmd) # over-write values from earlier samples
+ proc.set_nice(nice) # over-write values from earlier samples
+
+ userCpuLoad, sysCpuLoad, delayacctBlkioLoad = proc.calc_load(userCpu, sysCpu, delayacct_blkio_ticks,
+ max(1, time - ltime), num_cpus)
+
+ cpuSample = ProcessCPUSample('null', userCpuLoad, sysCpuLoad, c_user, c_sys, delayacctBlkioLoad, 0.0)
+ proc.samples.append(ProcessSample(time, state, cpuSample))
+
+ # per-tid store for use by a later phase of parsing of samples gathered at this 'time'
+ proc.user_cpu_ticks[-1] = userCpu
+ proc.sys_cpu_ticks[-1] = sysCpu
+ proc.delayacct_blkio_ticks[-1] = delayacct_blkio_ticks
+
+ if state == "R" and kstkeip != 0 and kstkeip != 0xffffffff:
+ _save_PC_sample(trace, options.event_source[" ~ current PC, if thread Runnable"],
+ time, pid/1000, tid/1000, cmd, kstkeip)
+ if state == "D" and kstkeip != 0 and kstkeip != 0xffffffff:
+ _save_PC_sample(trace, options.event_source[" ~ current PC, if thread in non-interruptible D-wait"],
+ time, pid/1000, tid/1000, cmd, kstkeip)
+ if state == "D" and wchan != 0:
+ _save_PC_sample(trace, options.event_source[" ~ kernel function wchan, if thread in non-interruptible D-wait"],
+ time, pid/1000, tid/1000, cmd, wchan)
+ return processMap
+
+# Consider this case: a process P and three waited-for children C1, C2, C3.
+# The collector daemon bootchartd takes samples at times t-2 and t-1.
+#
+# t-2 t-2+e t-1-e t-1 -->
+# . . . .
+# bootchartd sample . . sample
+# . . . .
+# P R R R R
+# C1 -- long-lived R R R R
+# C2 -- exited R exit - -
+# C3 -- phantom - fork exit -
+#
+# C1's CPU usage will be reported at both sample times t-2 and t-1 in the
+# {utime,stime} field of /proc/PID/stat, in units of clock ticks.
+# C2's usage will be reported at t-2. Any whole clock ticks thereafter will be
+# accumulated by the kernel, and reported to bootchartd at t-1 in the
+# {cutime,cstime} fields of its parent P, along with the rest of the clock ticks
+# charged to C2 during its entire lifetime.
+# C3's clock ticks will never be seen directly in any sample taken by
+# bootchartd, rather only in increments to P's {cutime,cstime} fields as
+# reported at t-1.
+#
+# We wish to graph on P's process bar at time t-1 all clock ticks consumed by
+# any of its children between t-2 and t-1 that cannot be reported on the
+# children's process bars -- C2's process bar ends at t-2 and C3 has none at
+# all. We'll call it "lost child time". The lost clock ticks may be counted
+# so:
+#
+# P{cutime,cstime}(t-1) - P{cutime,cstime}(t-2) - C2{utime,stime}(t-2)
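+#
+# Worked example with illustrative numbers: if P's cutime+cstime is 30 ticks at
+# t-2 and 50 ticks at t-1, and C2's own utime+stime stood at 15 ticks when it was
+# last sampled at t-2, then 50 - 30 - 15 = 5 ticks are "lost child time" charged
+# to P's bar at t-1 (C2's post-t-2 ticks plus all of phantom C3's ticks).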
+
+def accumulate_missing_child_ltime(processMap, ltime):
+ """ For only whole-process children found to have gone missing between 'ltime' and 'time' i.e. now,
+ accumulate clock ticks of each child's lifetime total to a counter
+ in the parent's Process"""
+ for p_p in processMap.itervalues():
+ p_p.missing_child_ticks = 0
+
+ for c_p in processMap.itervalues():
+ if c_p.ppid != 0 and \
+ c_p.tid == c_p.pid and \
+ c_p.samples[-1].time == ltime: # must have exited at 'time'
+ p_p = processMap[c_p.ppid]
+ p_p.missing_child_ticks += c_p.user_cpu_ticks[1] + c_p.sys_cpu_ticks[1]
+ continue
+ print "gone_missing,_last_seen_at", ltime, \
+ c_p.ppid/1000, ":", c_p.pid/1000, ":", c_p.tid/1000, p_p.missing_child_ticks
+
+def compute_lost_child_times(processMap, ltime, time):
+ """ For each parent process live at 'time', find clock ticks reported by
+ children exiting between 'ltime' and 'time'.
+ calculate CPU consumption during
+ the sample period of newly-lost children.
+ Insert time-weighted value into current sample."""
+ interval = time - ltime
+ for p_p in processMap.itervalues():
+ if p_p.pid != p_p.tid or \
+ p_p.samples[-1].time != time or \
+ len(p_p.samples) < 2:
+ continue
+ def total_c_ticks(sample):
+ return sample.cpu_sample.c_user + sample.cpu_sample.c_sys
+ parent_c_tick_delta = total_c_ticks(p_p.samples[-1]) \
+ - total_c_ticks(p_p.samples[-2])
+ # See this line in the diagram and comment above.
+ # P{cutime,cstime}(t-1) - P{cutime,cstime}(t-2) - C2{utime,stime}(t-2)
+ # XX Aggregating user and sys at this phase, before stuffing result into a per-sample
+ # object. Some other time might be better.
+ lost_child_ticks = parent_c_tick_delta - p_p.missing_child_ticks
+
+ p_p.samples[-1].lost_child = float(lost_child_ticks)/interval
+ if (parent_c_tick_delta != 0 or p_p.missing_child_ticks != 0):
+ print "compute_lost_child_times()", time, p_p.pid/1000, \
+ parent_c_tick_delta, p_p.missing_child_ticks, lost_child_ticks, interval #, p_p.samples[-1].lost_child
+
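+# Walk each thread's samples newest-to-oldest; wherever user + sys + io exceeds
+# 1.0 (100% of a CPU for the sample), push the excess I/O-wait load back into
+# the preceding (earlier) sample, since delayacct blkio ticks can be reported a
+# sample late.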
+def _distribute_belatedly_reported_delayacct_blkio_ticks(processMap):
+ for p in processMap.itervalues():
+ io_acc = 0.0
+ for s in p.samples[-1:0:-1]:
+ s.cpu_sample.io += io_acc
+ io_acc = 0.0
+ if s.cpu_sample.io + s.cpu_sample.user + s.cpu_sample.sys > 1.0:
+ io_acc = s.cpu_sample.io + s.cpu_sample.user + s.cpu_sample.sys - 1.0
+ s.cpu_sample.io -= io_acc
+
+def _init_pseudo_EventSources_for_PC_samples(options):
+ for label, enable in [
+ (" ~ current PC, if thread Runnable", False),
+ (" ~ current PC, if thread in non-interruptible D-wait", False),
+ (" ~ kernel function wchan, if thread in non-interruptible D-wait", True)]:
+ es = EventSource(label, "", "") # empty regex
+ es.enable = enable
+ es.parsed = []
+ options.event_source[label] = es # XX options.event_source must be used because the Trace object is not yet instantiated
+
+def _parse_proc_ps_log(options, trace, file, num_cpus):
"""
* See proc(5) for details.
*
@@ -253,8 +444,10 @@ def _parse_proc_ps_log(writer, file):
* cutime, cstime, priority, nice, 0, itrealvalue, starttime, vsize, rss, rlim, startcode, endcode, startstack,
* kstkesp, kstkeip}
"""
+ _init_pseudo_EventSources_for_PC_samples(options)
+
processMap = {}
- ltime = 0
+ ltime = None
timed_blocks = _parse_timed_blocks(file)
for time, lines in timed_blocks:
for line in lines:
@@ -265,25 +458,97 @@ def _parse_proc_ps_log(writer, file):
offset = [index for index, token in enumerate(tokens[1:]) if token[-1] == ')'][0]
pid, cmd, state, ppid = int(tokens[0]), ' '.join(tokens[1:2+offset]), tokens[2+offset], int(tokens[3+offset])
- userCpu, sysCpu, stime = int(tokens[13+offset]), int(tokens[14+offset]), int(tokens[21+offset])
+ userCpu, sysCpu = int(tokens[13+offset]), int(tokens[14+offset])
+ c_user, c_sys = int(tokens[15+offset]), int(tokens[16+offset])
+ nice = int(tokens[18+offset])
+ starttime = int(tokens[21+offset])
+ kstkeip = int(tokens[29+offset])
+ wchan = int(tokens[34+offset])
+ delayacct_blkio_ticks = int(tokens[41+offset])
# magic fixed point-ness ...
pid *= 1000
ppid *= 1000
- if pid in processMap:
- process = processMap[pid]
- process.cmd = cmd.strip('()') # why rename after latest name??
- else:
- process = Process(writer, pid, cmd.strip('()'), ppid, min(time, stime))
- processMap[pid] = process
+ processMap = _handle_sample(options, trace, processMap, ltime, time,
+ pid, pid, False, nice, cmd, state, ppid,
+ userCpu, sysCpu, kstkeip, wchan, delayacct_blkio_ticks, c_user, c_sys, starttime,
+ num_cpus)
+ if ltime:
+ accumulate_missing_child_ltime(processMap, ltime)
+ # compute_lost_child_times(processMap, ltime, time)
+ ltime = time
- if process.last_user_cpu_time is not None and process.last_sys_cpu_time is not None and ltime is not None:
- userCpuLoad, sysCpuLoad = process.calc_load(userCpu, sysCpu, max(1, time - ltime))
- cpuSample = CPUSample('null', userCpuLoad, sysCpuLoad, 0.0)
- process.samples.append(ProcessSample(time, state, cpuSample))
+ if len (timed_blocks) < 2:
+ return None
+
+ startTime = timed_blocks[0][0]
+ avgSampleLength = (ltime - startTime)/(len (timed_blocks) - 1)
+
+ _distribute_belatedly_reported_delayacct_blkio_ticks(processMap)
+
+ return ProcessStats (processMap, len (timed_blocks), avgSampleLength)
+
+def _parse_proc_ps_threads_log(options, trace, file, lwp):
+ """
+ * 0* pid -- inserted here from value in /proc/*pid*/task/. Not to be found in /proc/*pid*/task/*tid*/stat.
+ * Not the same as pgrp, session, or tpgid. Refer to collector daemon source code for details.
+ * 1 tid
+ * 2 comm
+ * 3 state
+ * 4 ppid
+ * 5 flags
+ * 6 majflt
+ * 7 utime
+ * 8 stime
+ * 9 cutime
+ * 10 cstime
+ * 11 priority
+ * 12 nice
+ * 13 time_in_jiffies_the_process_started_after_system_boot
+ * 14 current_EIP_instruction_pointer
+ * 15 wchan
+ * 16 scheduling_policy
+ * 17* delayacct_blkio_ticks -- in proc_ps_threads-2.log only, requires CONFIG_TASK_DELAY_ACCT
+ """
+ _init_pseudo_EventSources_for_PC_samples(options)
+
+ processMap = {}
+ ltime = None
+ timed_blocks = _parse_timed_blocks(file)
+ for time, lines in timed_blocks:
+ for line in lines:
+ if line == '': continue
+ tokens = line.split(' ')
+ if len(tokens) < 17:
+ writer.status("misformatted line at time {0:d}:\n\t{1:s}".format(time,line))
+ continue
+
+ offset = [index for index, token in enumerate(tokens[2:]) if (len(token) > 0 and token[-1] == ')')][0]
+ pid, tid, cmd, state, ppid = int(tokens[0]), int(tokens[1]), ' '.join(tokens[2:3+offset]), tokens[3+offset], int(tokens[4+offset])
+ userCpu, sysCpu = int(tokens[7+offset]), int(tokens[8+offset])
+ c_user, c_sys = int(tokens[9+offset]), int(tokens[10+offset])
+ nice = int(tokens[12+offset])
+ kstkeip = int(tokens[14+offset])
+ wchan = int(tokens[15+offset])
+ delayacct_blkio_ticks = int(tokens[17+offset]) if len(tokens) == 18+offset else 0
+
+ assert(type(c_user) is IntType)
+ assert(type(c_sys) is IntType)
+ starttime = int(tokens[13+offset])
+
+ pid *= PID_SCALE
+ tid *= PID_SCALE
+ if lwp:
+ # force sorting later than whole-process records from /proc/PID/stat
+ pid += LWP_OFFSET
+ tid += LWP_OFFSET
- process.last_user_cpu_time = userCpu
- process.last_sys_cpu_time = sysCpu
+ ppid *= PID_SCALE
+
+ processMap = _handle_sample(options, trace, processMap, ltime, time,
+ pid, tid, lwp, nice, cmd, state, ppid,
+ userCpu, sysCpu, kstkeip, wchan, delayacct_blkio_ticks, c_user, c_sys, starttime,
+ 1)
ltime = time
if len (timed_blocks) < 2:
@@ -292,9 +557,11 @@ def _parse_proc_ps_log(writer, file):
startTime = timed_blocks[0][0]
avgSampleLength = (ltime - startTime)/(len (timed_blocks) - 1)
- return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)
+ _distribute_belatedly_reported_delayacct_blkio_ticks(processMap)
+
+ return ProcessStats (processMap, len (timed_blocks), avgSampleLength)
-def _parse_taskstats_log(writer, file):
+def _parse_taskstats_log(file):
"""
* See bootchart-collector.c for details.
*
@@ -306,9 +573,9 @@ def _parse_taskstats_log(writer, file):
ltime = None
timed_blocks = _parse_timed_blocks(file)
for time, lines in timed_blocks:
- # we have no 'stime' from taskstats, so prep 'init'
+ # we have no 'starttime' from taskstats, so prep 'init'
if ltime is None:
- process = Process(writer, 1, '[init]', 0, 0)
+ process = Process(1, 1, False, '[init]', 0, 0)
processMap[1000] = process
ltime = time
# continue
@@ -336,12 +603,12 @@ def _parse_taskstats_log(writer, file):
pid += 1
pidRewrites[opid] = pid
# print "process mutation ! '%s' vs '%s' pid %s -> pid %s\n" % (process.cmd, cmd, opid, pid)
- process = process.split (writer, pid, cmd, ppid, time)
+ process = process.split (pid, cmd, ppid, time)
processMap[pid] = process
else:
process.cmd = cmd;
else:
- process = Process(writer, pid, cmd, ppid, time)
+ process = Process(pid, pid, False, cmd, ppid, time)
processMap[pid] = process
delta_cpu_ns = (float) (cpu_ns - process.last_cpu_ns)
@@ -356,11 +623,12 @@ def _parse_taskstats_log(writer, file):
else:
state = "S"
- # retain the ns timing information into a CPUSample - that tries
+ # retain the ns timing information into a ProcessCPUSample - that tries
# with the old-style to be a %age of CPU used in this time-slice.
if delta_cpu_ns + delta_blkio_delay_ns + delta_swapin_delay_ns > 0:
# print "proc %s cpu_ns %g delta_cpu %g" % (cmd, cpu_ns, delta_cpu_ns)
- cpuSample = CPUSample('null', delta_cpu_ns, 0.0,
+ cpuSample = ProcessCPUSample('null', delta_cpu_ns, 0.0,
+ 0, 0,
delta_blkio_delay_ns,
delta_swapin_delay_ns)
process.samples.append(ProcessSample(time, state, cpuSample))
@@ -376,16 +644,27 @@ def _parse_taskstats_log(writer, file):
startTime = timed_blocks[0][0]
avgSampleLength = (ltime - startTime)/(len(timed_blocks)-1)
- return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)
+ return ProcessStats (processMap, len (timed_blocks), avgSampleLength)
+
+def get_num_cpus(file):
+ """Get the number of CPUs from the ps_stats file."""
+ num_cpus = -1
+ for time, lines in _parse_timed_blocks(file):
+ for l in lines:
+ if l.split(' ')[0] == "intr":
+ file.seek(0)
+ return num_cpus
+ num_cpus += 1
+ assert(False)
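+# Example (made-up counter values): a proc_stat.log block starting
+#   cpu  2904 0 1413 ... / cpu0 1485 ... / cpu1 1419 ... / intr ...
+# makes get_num_cpus() return 2; the aggregate "cpu" line is not counted.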
def _parse_proc_stat_log(file):
samples = []
ltimes = None
for time, lines in _parse_timed_blocks(file):
- # skip emtpy lines
+ # skip empty lines
if not lines:
continue
- # CPU times {user, nice, system, idle, io_wait, irq, softirq}
+ # CPU times {user, nice, system, idle, io_wait, irq, softirq} summed over all cores.
tokens = lines[0].split()
times = [ int(token) for token in tokens[1:] ]
if ltimes:
@@ -393,54 +672,82 @@ def _parse_proc_stat_log(file):
system = float((times[2] + times[5] + times[6]) - (ltimes[2] + ltimes[5] + ltimes[6]))
idle = float(times[3] - ltimes[3])
iowait = float(times[4] - ltimes[4])
-
aSum = max(user + system + idle + iowait, 1)
- samples.append( CPUSample(time, user/aSum, system/aSum, iowait/aSum) )
+ procs_running = 0
+ procs_blocked = 0
+ for line in lines:
+ tokens = line.split()
+ if tokens[0] == 'procs_running':
+ procs_running = int(tokens[1])
+ if tokens[0] == 'procs_blocked':
+ procs_blocked = int(tokens[1])
+ if ltimes:
+ samples.append( SystemCPUSample(time, user/aSum, system/aSum, iowait/aSum, procs_running, procs_blocked) )
+ else:
+ samples.append( SystemCPUSample(time, 0.0, 0.0, 0.0, procs_running, procs_blocked) )
ltimes = times
- # skip the rest of statistics lines
return samples
-def _parse_proc_disk_stat_log(file, numCpu):
- """
- Parse file for disk stats, but only look at the whole device, eg. sda,
- not sda1, sda2 etc. The format of relevant lines should be:
- {major minor name rio rmerge rsect ruse wio wmerge wsect wuse running use aveq}
- """
- disk_regex_re = re.compile ('^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
+# Matched against the device-name field only, not against the whole line.
+# XX Rework to be not a whitelist but rather a blacklist of uninteresting devices e.g. "loop*"
+part_name_re = re.compile('^([hsv]d.|mtdblock\d|mmcblk\d.*|cciss/c\d+d\d+.*)$')
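+# e.g. matches "sda", "vdb", "mmcblk0", "mmcblk0p1" and "cciss/c0d0",
+# but not "sda1", "loop0" or "ram0"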
- # this gets called an awful lot.
- def is_relevant_line(linetokens):
- if len(linetokens) != 14:
- return False
- disk = linetokens[2]
- return disk_regex_re.match(disk)
+def _parse_proc_disk_stat_log(file, options, numCpu):
+ """
+ Input file is organized:
+ [(time, [(major, minor, partition_name, iostats[])])]
+ Output form, as required for drawing:
+ [(partition_name, [(time, iostat_deltas[])])]
- disk_stat_samples = []
+ Finally, produce pseudo-partition sample sets containing sums
+ over each physical storage device, e.g. sda, mmcblk0.
- for time, lines in _parse_timed_blocks(file):
- sample = DiskStatSample(time)
- relevant_tokens = [linetokens for linetokens in map (lambda x: x.split(),lines) if is_relevant_line(linetokens)]
-
- for tokens in relevant_tokens:
- disk, rsect, wsect, use = tokens[2], int(tokens[5]), int(tokens[9]), int(tokens[12])
- sample.add_diskdata([rsect, wsect, use])
+ The input file was generated by block/genhd.c .
+ """
- disk_stat_samples.append(sample)
+ strip_slash_dev_slash = re.compile("/dev/(.*)$")
+# this_partition_regex_re = re.compile('^' + part + '.*$')
- disk_stats = []
- for sample1, sample2 in zip(disk_stat_samples[:-1], disk_stat_samples[1:]):
- interval = sample1.time - sample2.time
- if interval == 0:
- interval = 1
- sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ]
- readTput = sums[0] / 2.0 * 100.0 / interval
- writeTput = sums[1] / 2.0 * 100.0 / interval
- util = float( sums[2] ) / 10 / interval / numCpu
- util = max(0.0, min(1.0, util))
- disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util))
+ parts_dict = {}
+ # for every timed_block, collect per-part lists of samples in 'parts_dict'
+ for time, lines in _parse_timed_blocks(file):
+ for line in lines:
+ def line_to_tokens(line):
+ linetokens = line.split()
+ return linetokens if len(linetokens) == 14 else None
- return disk_stats
+ tokenized_partition = line_to_tokens(line)
+ if not tokenized_partition:
+ continue
+ sample = PartitionSample( time, IOStat_make(tokenized_partition))
+ if not part_name_re.match(sample.iostat.name):
+ continue
+ if not sample.iostat.name in parts_dict:
+ parts_dict[sample.iostat.name] = []
+ parts_dict[sample.iostat.name].append(sample)
+
+ # take deltas, discard original (cumulative) samples
+ partitions = []
+ WHOLE_DEV = 0
+ for part_name, samples in parts_dict.iteritems():
+ partitions.append( PartitionDeltas(
+ samples, numCpu,
+ part_name,
+ part_name # possibly to be replaced with partition_labels from command line
+ ))
+ partitions.sort(key = lambda p: p.name)
+ partitions[WHOLE_DEV].hide = False # whole device
+
+ if len(options.partitions) > 0:
+ for opt_name in options.partitions:
+ for part in partitions:
+ if part.name == opt_name:
+ part.hide = False
+ part.label = options.partition_labels[0]
+ options.partition_labels = options.partition_labels[1:]
+
+ return partitions
def _parse_proc_meminfo_log(file):
"""
@@ -478,13 +785,13 @@ def _parse_proc_meminfo_log(file):
# ...
# [ 0.039993] calling migration_init+0x0/0x6b @ 1
# [ 0.039993] initcall migration_init+0x0/0x6b returned 1 after 0 usecs
-def _parse_dmesg(writer, file):
+def _parse_dmesg(file):
timestamp_re = re.compile ("^\[\s*(\d+\.\d+)\s*]\s+(.*)$")
split_re = re.compile ("^(\S+)\s+([\S\+_-]+) (.*)$")
processMap = {}
idx = 0
inc = 1.0 / 1000000
- kernel = Process(writer, idx, "k-boot", 0, 0.1)
+ kernel = Process(idx, idx, False, "k-boot", 0, 0.1)
processMap['k-boot'] = kernel
base_ts = False
max_ts = 0
@@ -531,7 +838,7 @@ def _parse_dmesg(writer, file):
# print "match: '%s' ('%g') at '%s'" % (func, ppid, time_ms)
name = func.split ('+', 1) [0]
idx += inc
- processMap[func] = Process(writer, ppid + idx, name, ppid, time_ms / 10)
+ processMap[func] = Process(ppid + idx, ppid + idx, False, name, ppid, time_ms / 10)
elif type == "initcall":
# print "finished: '%s' at '%s'" % (func, time_ms)
if func in processMap:
@@ -545,11 +852,72 @@ def _parse_dmesg(writer, file):
return processMap.values()
+def get_boot_relative_usec(state, boot_time_as_usecs_since_epoch, time_usec):
+ boot_relative_usec = time_usec - boot_time_as_usecs_since_epoch
+ if boot_relative_usec < csec_to_usec(state.start_time):
+ return None
+ if boot_relative_usec > csec_to_usec(state.end_time):
+ return None
+ return boot_relative_usec
+
+def parse_raw_log(state, boot_time_as_usecs_since_epoch, log_file, fields_re):
+ '''
+ Parse variously-formatted logs containing recorded events, as guided by a
+ set of target-specific regexps provided on the command line.
+ Eventual output is per-process lists of events in temporal order.
+ '''
+ fields_re_c = re.compile(fields_re)
+
+ samples = []
+ for line in log_file:
+ if line == '':
+ continue
+ m = fields_re_c.search(line)
+ if m is None:
+ continue
+
+ time_usec = float(m.group('CLOCK_REALTIME_usec')) # See `man 3 clock_gettime`
+ # tolerate any loss of precision in the timestamp, by rounding down
+ # FIXME: Don't simply round down -- show the user the (min,max) interval
+ # corresponding to the low-precision number.
+ while time_usec < 1300*1000*1000*1000*1000:
+ time_usec *= 10.0
+ while time_usec > 1300*1000*1000*1000*1000 * 2:
+ time_usec /= 10.0
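+ # e.g. a timestamp that was logged in whole seconds, say 1.4e9, is scaled up by
+ # powers of ten until it falls in the plausible microseconds-since-epoch range
+ # [1.3e15, 2.6e15), i.e. it becomes 1.4e15 usec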
+
+ try:
+ pid = int(m.group('pid'))
+ except IndexError:
+ # "inherited" by parent's per-thread/process bar
+ pid = 1
+
+ try:
+ tid = int(m.group('tid'))
+ except IndexError:
+ # "inherited" by parent's per-thread/process bar
+ tid = pid
+
+ try:
+ comm = m.group('comm')
+ except IndexError:
+ comm = ""
+
+ raw_log_seek = log_file.tell() - len(line)
+
+ boot_relative_usec = get_boot_relative_usec(
+ state, boot_time_as_usecs_since_epoch, time_usec)
+ if boot_relative_usec is not None:
+ samples.append( EventSample(
+ boot_relative_usec,
+ pid, tid, comm,
+ log_file, raw_log_seek, line))
+ return samples
+
#
# Parse binary pacct accounting file output if we have one
# cf. /usr/include/linux/acct.h
#
-def _parse_pacct(writer, file):
+def _parse_pacct(file):
# read LE int32
def _read_le_int32(file):
byts = file.read(4)
@@ -573,7 +941,7 @@ def _read_le_int32(file):
file.seek (16, 1) # acct_comm
return parent_map
-def _parse_paternity_log(writer, file):
+def _parse_paternity_log(file):
parent_map = {}
parent_map[0] = 0
for line in file.read().split('\n'):
@@ -585,7 +953,7 @@ def _parse_paternity_log(writer, file):
print("Odd paternity line '%s'" % (line))
return parent_map
-def _parse_cmdline_log(writer, file):
+def _parse_cmdline_log(file):
cmdLines = {}
for block in file.read().split('\n\n'):
lines = block.split('\n')
@@ -600,85 +968,100 @@ def _parse_cmdline_log(writer, file):
cmdLines[pid] = values
return cmdLines
-def get_num_cpus(headers):
- """Get the number of CPUs from the system.cpu header property. As the
- CPU utilization graphs are relative, the number of CPUs currently makes
- no difference."""
- if headers is None:
- return 1
- if headers.get("system.cpu.num"):
- return max (int (headers.get("system.cpu.num")), 1)
- cpu_model = headers.get("system.cpu")
- if cpu_model is None:
- return 1
- mat = re.match(".*\\((\\d+)\\)", cpu_model)
- if mat is None:
- return 1
- return max (int(mat.group(1)), 1)
-
-def _do_parse(writer, state, name, file):
+def _do_parse(state, tf, name, file, options):
writer.status("parsing '%s'" % name)
t1 = clock()
if name == "header":
state.headers = _parse_headers(file)
elif name == "proc_diskstats.log":
- state.disk_stats = _parse_proc_disk_stat_log(file, get_num_cpus(state.headers))
+ state.disk_stats = _parse_proc_disk_stat_log(file, options, state.num_cpus)
elif name == "taskstats.log":
- state.ps_stats = _parse_taskstats_log(writer, file)
+ state.ps_stats = _parse_taskstats_log(file)
state.taskstats = True
elif name == "proc_stat.log":
+ state.num_cpus = get_num_cpus(file)
state.cpu_stats = _parse_proc_stat_log(file)
+ state.start_time = state.cpu_stats[0].time
+ state.end_time = state.cpu_stats[-1].time
elif name == "proc_meminfo.log":
state.mem_stats = _parse_proc_meminfo_log(file)
elif name == "dmesg":
- state.kernel = _parse_dmesg(writer, file)
+ state.kernel = _parse_dmesg(file)
elif name == "cmdline2.log":
- state.cmdline = _parse_cmdline_log(writer, file)
+ state.cmdline = _parse_cmdline_log(file)
elif name == "paternity.log":
- state.parent_map = _parse_paternity_log(writer, file)
+ state.parent_map = _parse_paternity_log(file)
elif name == "proc_ps.log": # obsoleted by TASKSTATS
- state.ps_stats = _parse_proc_ps_log(writer, file)
+ state.ps_stats = _parse_proc_ps_log(options, state, file, state.num_cpus)
+ elif name == "proc_ps_compact.log":
+ state.ps_stats = _parse_proc_ps_threads_log(options, state, file, False)
+ elif name == "proc_ps_threads.log" or name == "proc_ps_threads-2.log" :
+ state.ps_threads_stats = _parse_proc_ps_threads_log(options, state, file, True)
elif name == "kernel_pacct": # obsoleted by PROC_EVENTS
- state.parent_map = _parse_pacct(writer, file)
+ state.parent_map = _parse_pacct(file)
+ elif hasattr(options, "event_source"):
+ boot_t = state.headers.get("boot_time_as_usecs_since_epoch")
+ assert boot_t, NotImplementedError
+ for es in options.event_source.itervalues():
+ if name == es.filename:
+ parser = parse_raw_log
+ es.parsed = parser(state, long(boot_t), file, es.regex)
+ es.enable = len(es.parsed) > 0
+ file.seek(0) # file will be re-scanned for each regex
+ writer.info("parsed {0:5d} events from {1:16s} using {2:s}".format(
+ len(es.parsed), file.name, es.regex))
+ else:
+ pass # unknown file in tarball
t2 = clock()
writer.info(" %s seconds" % str(t2-t1))
return state
-def parse_file(writer, state, filename):
+def parse_file(state, filename, options):
if state.filename is None:
state.filename = filename
basename = os.path.basename(filename)
with open(filename, "rb") as file:
- return _do_parse(writer, state, basename, file)
+ return _do_parse(state, None, basename, file, options)
-def parse_paths(writer, state, paths):
+def parse_paths(state, paths, options):
for path in paths:
root, extension = os.path.splitext(path)
if not(os.path.exists(path)):
- writer.warn("warning: path '%s' does not exist, ignoring." % path)
- continue
+ raise ParseError("\n\tpath '%s' does not exist" % path)
state.filename = path
if os.path.isdir(path):
files = [ f for f in [os.path.join(path, f) for f in os.listdir(path)] if os.path.isfile(f) ]
files.sort()
- state = parse_paths(writer, state, files)
+ state = parse_paths(state, files, options)
elif extension in [".tar", ".tgz", ".gz"]:
if extension == ".gz":
root, extension = os.path.splitext(root)
if extension != ".tar":
writer.warn("warning: can only handle zipped tar files, not zipped '%s'-files; ignoring" % extension)
continue
- tf = None
+ state.tf = None
try:
- writer.status("parsing '%s'" % path)
- tf = tarfile.open(path, 'r:*')
- for name in tf.getnames():
- state = _do_parse(writer, state, name, tf.extractfile(name))
+ state.tf = tarfile.open(path, 'r:*')
+
+ # parsing of other files depends on these
+ early_opens = ["header", "proc_stat.log"]
+ def not_in_early_opens(name):
+ return name not in early_opens
+
+ for name in early_opens + filter(not_in_early_opens, state.tf.getnames()):
+ # Extracted file should be seekable, presumably a decompressed copy.
+ # file:///usr/share/doc/python2.6/html/library/tarfile.html?highlight=extractfile#tarfile.TarFile.extractfile
+ # XX Python 2.6 extractfile() assumes file contains lines of text, not binary :-(
+ extracted_file = state.tf.extractfile(name)
+ if not extracted_file:
+ continue
+ state = _do_parse(state, state.tf, name, extracted_file, options)
except tarfile.ReadError as error:
raise ParseError("error: could not read tarfile '%s': %s." % (path, error))
- finally:
- if tf != None:
- tf.close()
else:
- state = parse_file(writer, state, path)
+ state = parse_file(state, path, options)
+
+ for es in options.event_source.itervalues():
+ if es.parsed is None:
+ raise ParseError("\n\tevents file found on command line but not in tarball: {0}\n".format(es.filename))
return state
diff --git a/pybootchartgui/process_tree.py b/pybootchartgui/process_tree.py
index 58bc2dd..f5c3622 100644
--- a/pybootchartgui/process_tree.py
+++ b/pybootchartgui/process_tree.py
@@ -13,11 +13,19 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see .
+MAX_PID=100000
+
+from . import writer
+from samples import PID_SCALE, LWP_OFFSET
+
+def sort_func(proc):
+ return long(proc.pid) / PID_SCALE * MAX_PID + proc.tid / PID_SCALE
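+# e.g. a process with pid == tid == 423*PID_SCALE sorts on key 423*MAX_PID + 423;
+# its LWPs fall into the same pid bucket and are ordered among themselves by tid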
+
class ProcessTree:
"""ProcessTree encapsulates a process tree. The tree is built from log files
- retrieved during the boot process. When building the process tree, it is
- pruned and merged in order to be able to visualize it in a comprehensible
- manner.
+ retrieved during the boot process. When building the process tree, it may be
+ 'pruned', i.e. processes merged, so that the whole can be visualized
+ in a comprehensible manner.
The following pruning techniques are used:
@@ -37,51 +45,72 @@ class ProcessTree:
LOGGER_PROC = 'bootchart-colle'
EXPLODER_PROCESSES = set(['hwup'])
- def __init__(self, writer, kernel, psstats, sample_period,
- monitoredApp, prune, idle, taskstats,
+ def __init__(self, kernel, ps_stats, sample_period,
+ monitoredApp, options, idle, taskstats,
accurate_parentage, for_testing = False):
- self.writer = writer
self.process_tree = []
self.taskstats = taskstats
- if psstats is None:
+ if ps_stats is None:
process_list = kernel
elif kernel is None:
- process_list = psstats.process_map.values()
+ process_list = ps_stats.process_map.values()
else:
- process_list = kernel + psstats.process_map.values()
- self.process_list = sorted(process_list, key = lambda p: p.pid)
+ process_list = kernel + ps_stats.process_map.values()
+ self.process_list = sorted(process_list, key = sort_func)
self.sample_period = sample_period
+ # LWPs get appended to a list in their ps_stats Process
+ for proc in self.process_list[:]: # iterate over a copy: the body may remove entries
+ if proc.lwp():
+ try:
+ ps_stats.process_map[proc.pid / PID_SCALE * PID_SCALE].lwp_list.append(proc)
+ except KeyError:
+ writer.info("LWP (thread) {0:d} dropped - no matching process found\n".format(proc.tid))
+ self.process_list.remove(proc)
+ for proc in self.process_list:
+ if not proc.lwp():
+ proc.lwp_list.sort(key = sort_func)
+
self.build()
if not accurate_parentage:
self.update_ppids_for_daemons(self.process_list)
+ self.init_lost_child_times() # time delta
+
self.start_time = self.get_start_time(self.process_tree)
self.end_time = self.get_end_time(self.process_tree)
- self.duration = self.end_time - self.start_time
+ self.options = options
self.idle = idle
if for_testing:
return
- removed = self.merge_logger(self.process_tree, self.LOGGER_PROC, monitoredApp, False)
- writer.status("merged %i logger processes" % removed)
+ # XX Specific to bootchart2 collector; not executed for known /proc-sampling collectors.
+ # XX Test bootchart2 with this disabled.
+ if False: # XX disable this dubious code entirely for now
+ removed = self.merge_logger(self.process_tree, self.LOGGER_PROC, monitoredApp, False)
+ writer.status("merged %i logger processes" % removed)
+
+ p_processes = self.prune(self.process_tree, None, self.is_inactive_process)
+ writer.status("hid %i processes" % p_processes)
- if prune:
- p_processes = self.prune(self.process_tree, None)
+ if options.merge:
p_exploders = self.merge_exploders(self.process_tree, self.EXPLODER_PROCESSES)
p_threads = self.merge_siblings(self.process_tree)
p_runs = self.merge_runs(self.process_tree)
- writer.status("pruned %i process, %i exploders, %i threads, and %i runs" % (p_processes, p_exploders, p_threads, p_runs))
+ writer.status("pruned %i exploders, %i threads, and %i runs" %
+ (p_exploders, p_threads, p_runs))
self.sort(self.process_tree)
self.start_time = self.get_start_time(self.process_tree)
self.end_time = self.get_end_time(self.process_tree)
- self.duration = self.end_time - self.start_time
self.num_proc = self.num_nodes(self.process_tree)
+ def duration(self):
+ return self.end_time - self.start_time
+
def build(self):
"""Build the process tree from the list of top samples."""
self.process_tree = []
@@ -91,19 +120,40 @@ def build(self):
else:
proc.parent.child_list.append(proc)
+ def init_lost_child_times(self):
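+ # XX stub: the loop below has no effect, and the code after the early
+ # 'return' is unreachable ('proc' there is undefined)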
+ for c in self.process_list:
+ p = c.parent
+ return
+ for p in self.process_list:
+ for s in p.samples:
+ child_user_ticks = 0
+ child_sys_ticks = 0
+ for c in proc.child_list:
+ child_user_ticks += c.samples[s.time].user
+ child_sys_ticks += c.samples[s.time].sys
+
def sort(self, process_subtree):
"""Sort process tree."""
for p in process_subtree:
- p.child_list.sort(key = lambda p: p.pid)
+ p.child_list.sort(key = sort_func)
self.sort(p.child_list)
def num_nodes(self, process_list):
"Counts the number of nodes in the specified process tree."""
nodes = 0
for proc in process_list:
- nodes = nodes + self.num_nodes(proc.child_list)
+ nodes += self.num_nodes(proc.child_list)
return nodes + len(process_list)
+ def num_nodes_drawn(self, process_list):
+ "Counts the number of nodes in the specified process tree."""
+ nodes = 0
+ for proc in process_list:
+ nodes += self.num_nodes_drawn(proc.child_list)
+ if proc.draw:
+ nodes += 1
+ return nodes
+
def get_start_time(self, process_subtree):
"""Returns the start time of the process subtree. This is the start
time of the earliest process.
@@ -152,47 +202,31 @@ def update_ppids_for_daemons(self, process_list):
p.child_list = []
self.build()
- def prune(self, process_subtree, parent):
- """Prunes the process tree by removing idle processes and processes
- that only live for the duration of a single top sample. Sibling
- processes with the same command line (i.e. threads) are merged
- together. This filters out sleepy background processes, short-lived
- processes and bootcharts' analysis tools.
- """
- def is_idle_background_process_without_children(p):
- process_end = p.start_time + p.duration
- return not p.active and \
- process_end >= self.start_time + self.duration and \
- p.start_time > self.start_time and \
- p.duration > 0.9 * self.duration and \
- self.num_nodes(p.child_list) == 0
+ def is_active_process(self, p):
+ # (self.options.hide_low_CPU == 0) is a special case, documented in the usage message.
+ return p.cpu_tick_count_during_run() > self.options.hide_low_CPU or \
+ (self.options.hide_low_CPU == 0 and \
+ p.sleepingCount != len(p.samples)) # born, died, or did not sleep for the duration
- num_removed = 0
+ def is_inactive_process(self, p):
+ return not self.is_active_process(p)
+
+ def is_inactive_process_without_children(self, p):
+ return self.is_inactive_process(p) and \
+ self.num_nodes(p.child_list) == 0
+
+ def prune(self, process_subtree, parent, pruning_test):
+ n_pruned = 0
idx = 0
while idx < len(process_subtree):
p = process_subtree[idx]
- if parent != None or len(p.child_list) == 0:
-
- prune = False
- if is_idle_background_process_without_children(p):
- prune = True
- elif p.duration <= 2 * self.sample_period:
- # short-lived process
- prune = True
-
- if prune:
- process_subtree.pop(idx)
- for c in p.child_list:
- process_subtree.insert(idx, c)
- num_removed += 1
- continue
- else:
- num_removed += self.prune(p.child_list, p)
- else:
- num_removed += self.prune(p.child_list, p)
+ if pruning_test(p):
+ p.draw = False
+ n_pruned += 1
+ n_pruned += self.prune(p.child_list, p, pruning_test)
idx += 1
- return num_removed
+ return n_pruned
def merge_logger(self, process_subtree, logger_proc, monitored_app, app_tree):
"""Merges the logger's process subtree. The logger will typically
@@ -254,7 +288,7 @@ def merge_siblings(self, process_subtree):
idx -= 1
num_removed += 1
p.child_list.extend(nextp.child_list)
- self.merge_processes(p, nextp)
+ self.subsume_process(p, nextp)
num_removed += self.merge_siblings(p.child_list)
idx += 1
if len(process_subtree) > 0:
@@ -274,16 +308,17 @@ def merge_runs(self, process_subtree):
if len(p.child_list) == 1 and p.child_list[0].cmd == p.cmd:
child = p.child_list[0]
p.child_list = list(child.child_list)
- self.merge_processes(p, child)
+ self.subsume_process(p, child)
num_removed += 1
continue
num_removed += self.merge_runs(p.child_list)
idx += 1
return num_removed
- def merge_processes(self, p1, p2):
- """Merges two process' samples."""
- p1.samples.extend(p2.samples)
+ # XX return a new instance instead, so that start_time and end_time can be made immutable?
+ def subsume_process(self, p1, p2):
+ """Subsume process p2 into p1. Attributes of p2 other than samples[], start_time and end_time are lost."""
+ p1.samples.extend(p2.samples) # result no longer necessarily in temporal order
p1.samples.sort( key = lambda p: p.time )
p1time = p1.start_time
p2time = p2.start_time
diff --git a/pybootchartgui/samples.py b/pybootchartgui/samples.py
index ce703b8..cd71216 100644
--- a/pybootchartgui/samples.py
+++ b/pybootchartgui/samples.py
@@ -13,21 +13,137 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see .
+from types import *
+import collections
+import re
-class DiskStatSample:
- def __init__(self, time):
+from . import writer
+
+# To understand this, see comment "Fedora hack"
+PID_SCALE = 1000
+LWP_OFFSET = 1
+
+KTHREADD_PID = 2
+
+class EventSource:
+ """ Extract (EventSample)s from some disjoint subset of the available log entries """
+ def __init__(self, label, filename, regex):
+ self.label = label # descriptive name for GUI
+ self.filename = filename
+ self.regex = regex
+ self.parsed = None # list of EventSamples parsed from filename
+ self.enable = None # initially set to True iff at least one sample was parsed; maintained by gui.py thereafter
+
+class EventSample:
+ def dump_format(self):
+ return "{0:10f} {1:5d} {2:5d} {3} {4}".format( \
+ float(self.time_usec)/1000/1000, self.pid, self.tid, self.comm, self.raw_log_line.rstrip())
+
+ def __init__(self, time_usec, pid, tid, comm, raw_log_file, raw_log_seek, raw_log_line):
+ self.time_usec = time_usec
+ self.pid = pid
+ self.tid = tid
+ self.comm = comm
+ self.raw_log_file = raw_log_file
+ self.raw_log_seek = raw_log_seek
+ self.raw_log_line = raw_log_line
+ if pid != 1:
+ writer.debug(self.dump_format())
+
+class EventColor:
+ def __init__(self, label, regex0, regex1, enable):
+ self.label = label
+ self.color_regex = []
+ self.color_regex.append(re.compile(regex0))
+ if regex1 is not None:
+ self.color_regex.append(re.compile(regex1))
+ self.enable = enable
+
+# See Documentation/iostats.txt.
+IOStat_field_names = ['major', 'minor', 'name',
+ 'nreads', 'nreads_merged', 'nsectors_read', 'nread_time_msec',
+ 'nwrites', 'nwrites_merged', 'nsectors_write', 'nwrite_time_msec',
+ 'nio_in_progress', # not an accumulator
+ 'io_msec', 'io_weighted_msec']
+IOStat = collections.namedtuple('typename_IOStat', IOStat_field_names)
+
+# wrapper necessary to produce desired 'int' rather than 'string' types
+def IOStat_make(fields_as_list):
+ return IOStat._make(fields_as_list[:3] + [int(a) for a in fields_as_list[3:]])
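+# e.g. (made-up counters) IOStat_make("8 0 sda 100 2 824 36 50 1 412 20 0 52 56".split())
+# yields an IOStat with name == 'sda' and the eleven counters converted to int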
+
+def IOStat_op(sa, sb, f):
+ la = list(sa.iostat)
+ lb = list(sb.iostat)
+ preamble = [sa.iostat.major, sa.iostat.minor, sa.iostat.name]
+ return IOStat._make(preamble +
+ [f(int(a),int(b)) for a, b in zip(la[3:], lb[3:])])
+
+def IOStat_diff(sa, sb):
+ return IOStat_op(sa, sb, lambda a,b: a - b)
+def IOStat_sum(sa, sb):
+ return IOStat_op(sa, sb, lambda a,b: a + b)
+def IOStat_max2(sa, sb):
+ return IOStat_op(sa, sb, lambda a,b: max(a, b))
+
+class PartitionSample:
+ def __init__(self, time, IOStat):
self.time = time
- self.diskdata = [0, 0, 0]
- def add_diskdata(self, new_diskdata):
- self.diskdata = [ a + b for a, b in zip(self.diskdata, new_diskdata) ]
+ self.iostat = IOStat
-class CPUSample:
- def __init__(self, time, user, sys, io = 0.0, swap = 0.0):
+class PartitionDelta:
+ def __init__(self, time, IOStat, util, nio_in_progress):
+ self.util = util # computed, not a simple delta
+ self.nio_in_progress = int(nio_in_progress) # an instantaneous count, not a delta
+ self.s = PartitionSample(time, IOStat)
+
+class PartitionDeltas:
+ def __init__(self, partSamples, numCpu, name, label):
+ assert( type(partSamples) is list)
+
+ self.name = name
+ self.label = label # drawn on the chart for this PartitionDeltas object
+ self.numCpu = numCpu
+ self.hide = True
+ self.part_deltas = []
+
+ COALESCE_THRESHOLD = 1 # XX needs synchronization with other graphs
+ partSamples_coalesced = [(partSamples[0])]
+ for sample in partSamples:
+ if sample.time - partSamples_coalesced[-1].time < COALESCE_THRESHOLD:
+ continue
+ partSamples_coalesced.append(sample)
+
+ for sample1, sample2 in zip(partSamples_coalesced[:-1], partSamples_coalesced[1:]):
+ interval = sample2.time - sample1.time
+ diff = IOStat_diff(sample2, sample1)
+ util = float(diff.io_msec) / 10 / interval / numCpu
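+ # e.g. (made-up numbers) 150 ms of I/O over a 10-csec interval on a 2-CPU
+ # machine gives util == 150/10/10/2 == 0.75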
+ self.part_deltas.append( PartitionDelta(sample2.time, diff,
+ util, sample2.iostat.nio_in_progress))
+
+ # Very short intervals amplify round-off under division by time delta, so coalesce now.
+ # XX scaling issue for high-efficiency collector!
+
+class SystemCPUSample:
+ def __init__(self, time, user, sys, io, procs_running, procs_blocked):
self.time = time
self.user = user
self.sys = sys
self.io = io
- self.swap = swap
+ self.procs_running = procs_running
+ self.procs_blocked = procs_blocked
+
+class ProcessCPUSample:
+ def __init__(self, time, user, sys, c_user, c_sys, io, swap):
+ self.time = time
+ self.user = user
+ self.sys = sys
+ self.c_user = c_user # directly from /proc: accumulates upon exit of waited-for child
+ self.c_sys = c_sys
+ self.io = io # taskstats-specific
+ self.swap = swap # taskstats-specific
+
+ assert(type(self.c_user) is IntType)
+ assert(type(self.c_sys) is IntType)
@property
def cpu(self):
@@ -37,6 +153,18 @@ def __str__(self):
return str(self.time) + "\t" + str(self.user) + "\t" + \
str(self.sys) + "\t" + str(self.io) + "\t" + str (self.swap)
+class ProcessSample:
+ def __init__(self, time, state, cpu_sample):
+ self.time = time
+ self.state = state
+ self.cpu_sample = cpu_sample # ProcessCPUSample
+
+ # delta per sample interval. Computed later in parsing.
+ self.lost_child = None
+
+ def __str__(self):
+ return str(self.time) + "\t" + str(self.state) + "\t" + str(self.cpu_sample)
+
class MemSample:
def __init__(self, time):
self.time = time
@@ -45,50 +173,62 @@ def __init__(self, time):
def add_value(self, name, value):
self.records[name] = value
-class ProcessSample:
- def __init__(self, time, state, cpu_sample):
- self.time = time
- self.state = state
- self.cpu_sample = cpu_sample
-
- def __str__(self):
- return str(self.time) + "\t" + str(self.state) + "\t" + str(self.cpu_sample)
class ProcessStats:
- def __init__(self, writer, process_map, sample_count, sample_period, start_time, end_time):
+ """stats over the collection of all processes, all samples"""
+ def __init__(self, process_map, sample_count, sample_period):
self.process_map = process_map
self.sample_count = sample_count
self.sample_period = sample_period
- self.start_time = start_time
- self.end_time = end_time
writer.info ("%d samples, avg. sample length %f" % (self.sample_count, self.sample_period))
writer.info ("process list size: %d" % len (self.process_map.values()))
class Process:
- def __init__(self, writer, pid, cmd, ppid, start_time):
- self.writer = writer
+ def __init__(self, pid, tid, lwp, cmd, ppid, start_time):
self.pid = pid
- self.cmd = cmd
- self.exe = cmd
+ self.tid = tid
+ assert(type(lwp) is BooleanType)
+ self.lwp_list = None if lwp else []
+ self.exe = cmd # may be overwritten later
self.args = []
self.ppid = ppid
+ self.set_cmd(cmd) # XX depends on self.ppid
self.start_time = start_time
self.duration = 0
- self.samples = []
+ self.samples = [] # list of ProcessCPUSample
+ self.events = [] # time-ordered list of EventSample
+ self.event_interval_0_tx = None
self.parent = None
self.child_list = []
- self.active = None
- self.last_user_cpu_time = None
- self.last_sys_cpu_time = None
+ self.user_cpu_ticks = [None, 0] # [first, last]
+ self.sys_cpu_ticks = [None, 0]
+ self.delayacct_blkio_ticks = [None, 0]
+
+ # For transient use as an accumulator during early parsing -- when
+ # concurrent samples of all threads can be accessed O(1).
+ self.missing_child_ticks = None
self.last_cpu_ns = 0
self.last_blkio_delay_ns = 0
self.last_swapin_delay_ns = 0
+ # dynamic, view-dependent per-process state boolean
+ self.draw = True
+
+ # Is this an LWP a/k/a pthread?
+ def lwp(self):
+ return self.lwp_list == None
+
+ def cpu_tick_count_during_run(self):
+ ''' total CPU clock ticks reported for this process during the profiling run'''
+ return self.user_cpu_ticks[-1] + self.sys_cpu_ticks[-1] \
+ - (self.user_cpu_ticks[0] + self.sys_cpu_ticks[0])
+
# split this process' run - triggered by a name change
- def split(self, writer, pid, cmd, ppid, start_time):
- split = Process (writer, pid, cmd, ppid, start_time)
+ # XX called only if taskstats.log is provided (bootchart2 daemon)
+ def split(self, pid, cmd, ppid, start_time):
+ split = Process (pid, pid, False, cmd, ppid, start_time)
split.last_cpu_ns = self.last_cpu_ns
split.last_blkio_delay_ns = self.last_blkio_delay_ns
@@ -104,40 +244,38 @@ def calc_stats(self, samplePeriod):
firstSample = self.samples[0]
lastSample = self.samples[-1]
self.start_time = min(firstSample.time, self.start_time)
- self.duration = lastSample.time - self.start_time + samplePeriod
-
- activeCount = sum( [1 for sample in self.samples if sample.cpu_sample and sample.cpu_sample.sys + sample.cpu_sample.user + sample.cpu_sample.io > 0.0] )
- activeCount = activeCount + sum( [1 for sample in self.samples if sample.state == 'D'] )
- self.active = (activeCount>2)
-
- def calc_load(self, userCpu, sysCpu, interval):
- userCpuLoad = float(userCpu - self.last_user_cpu_time) / interval
- sysCpuLoad = float(sysCpu - self.last_sys_cpu_time) / interval
- cpuLoad = userCpuLoad + sysCpuLoad
- # normalize
- if cpuLoad > 1.0:
- userCpuLoad = userCpuLoad / cpuLoad
- sysCpuLoad = sysCpuLoad / cpuLoad
- return (userCpuLoad, sysCpuLoad)
+ # self.duration is the _minimum_ known duration of the thread
+ self.duration = lastSample.time - self.start_time
+
+ self.sleepingCount = sum([1 for sample in self.samples if sample.state == 'S'])
+
+ def calc_load(self, userCpu, sysCpu, delayacct_blkio_ticks, interval, num_cpus):
+ downscale = interval * num_cpus
+ # all args in units of clock ticks
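+ # e.g. (made-up numbers) user_cpu_ticks[-1] == 40, userCpu == 50, interval == 20,
+ # num_cpus == 2 -> userCpuLoad == (50-40)/(20*2) == 0.25 of total CPU capacity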
+ userCpuLoad = float(userCpu - self.user_cpu_ticks[-1]) / downscale
+ sysCpuLoad = float(sysCpu - self.sys_cpu_ticks[-1]) / downscale
+ delayacctBlkioLoad = float(delayacct_blkio_ticks - self.delayacct_blkio_ticks[-1]) / downscale
+ return (userCpuLoad, sysCpuLoad, delayacctBlkioLoad)
def set_parent(self, processMap):
if self.ppid != None:
self.parent = processMap.get (self.ppid)
if self.parent == None and self.pid / 1000 > 1 and \
- not (self.ppid == 2000 or self.pid == 2000): # kernel threads: ppid=2
- self.writer.warn("Missing CONFIG_PROC_EVENTS: no parent for pid '%i' ('%s') with ppid '%i'" \
- % (self.pid,self.cmd,self.ppid))
+ not (self.ppid/PID_SCALE == 2 or self.pid/PID_SCALE == 2): # kernel threads: ppid=2
+ writer.warn("Missing CONFIG_PROC_EVENTS: no parent for pid '%i' ('%s') with ppid '%i'" \
+ % (self.pid,self.cmd,self.ppid))
def get_end_time(self):
return self.start_time + self.duration
-class DiskSample:
- def __init__(self, time, read, write, util):
- self.time = time
- self.read = read
- self.write = write
- self.util = util
- self.tput = read + write
+ def set_cmd(self, cmd):
+ cmd = cmd.strip('()')
+ # If the collector wrote the executable's pathname into the 'comm' field of
+ # /proc/[pid]/stat, strip off all but the basename to preserve screen space.
+ # Some kernel threads, however, have names like 'ksoftirqd/0', so exempt all
+ # children of 'kthreadd' from the stripping.
+ kthreadd_pid = KTHREADD_PID * PID_SCALE
+ self.cmd = cmd.split('/')[-1] if self.ppid != kthreadd_pid else cmd
- def __str__(self):
- return "\t".join([str(self.time), str(self.read), str(self.write), str(self.util)])
+ def set_nice(self, nice):
+ self.nice = nice
diff --git a/pybootchartgui/writer.py b/pybootchartgui/writer.py
new file mode 100644
index 0000000..3d9b8d0
--- /dev/null
+++ b/pybootchartgui/writer.py
@@ -0,0 +1,38 @@
+import sys
+
+_writer = None
+
+class Writer:
+ def __init__(self, options):
+ self.options = options
+
+ def _write(self, s):
+ sys.stderr.write(s + "\n")
+
+def init(options):
+ global _writer
+ _writer = Writer(options)
+
+def fatal(msg):
+ _writer._write(msg)
+ sys.exit(1)
+
+def error(msg):
+ _writer._write(msg)
+
+def warn(msg):
+ if not _writer.options.quiet:
+ _writer._write(msg)
+
+def info(msg):
+ if _writer.options.verbose > 0:
+ _writer._write(msg)
+
+def debug(msg):
+ if _writer.options.verbose > 1:
+ _writer._write(msg)
+
+def status(msg):
+ if not _writer.options.quiet:
+ _writer._write(msg)
+