|
// This file contains the code for reading Go custom labels (goroutine
// profiler labels): it walks the current goroutine's labels map in the
// profiled process and copies the key/value pairs into the per-CPU record.
| 4 | + |
| 5 | +#include "bpfdefs.h" |
| 6 | +#include "util.h" |
| 7 | +#include "hash.h" |
| 8 | +#include "kernel.h" |
| 9 | +#include "tracemgmt.h" |
| 10 | +#include "tsd.h" |
| 11 | +#include "types.h" |
| 12 | + |
| 13 | +static inline __attribute__((__always_inline__)) |
| 14 | +void process_value(GoMapBucket *map_value, CustomLabelsArray *out, unsigned i) { |
| 15 | + if (map_value->tophash[i] == 0) |
| 16 | + return; |
| 17 | + if (out->len >= MAX_CUSTOM_LABELS) |
| 18 | + return; |
| 19 | + CustomLabel *lbl = &out->labels[out->len]; |
| 20 | + if (map_value->keys[i].str != NULL) { |
| 21 | + long res = bpf_probe_read_user(lbl->key.key_bytes, CUSTOM_LABEL_MAX_KEY_LEN, map_value->keys[i].str); |
| 22 | + if (res) { |
| 23 | + DEBUG_PRINT("cl: failed to read key for custom label (%lx): %ld", (unsigned long) map_value->keys[i].str, res); |
| 24 | + return; |
| 25 | + } |
| 26 | + res = bpf_probe_read_user(lbl->val.val_bytes, CUSTOM_LABEL_MAX_VAL_LEN, map_value->values[i].str); |
| 27 | + if (res) { |
| 28 | + DEBUG_PRINT("cl: failed to read value for custom label: %ld", res); |
| 29 | + return; |
| 30 | + } |
| 31 | + lbl->key_len = map_value->keys[i].len; |
| 32 | + lbl->val_len = map_value->values[i].len; |
| 33 | + } |
| 34 | + out->len++; |
| 35 | +} |
| 36 | + |
| 37 | +static inline __attribute__((__always_inline__)) |
| 38 | +bool process_bucket(PerCPURecord *record, void *label_buckets, int j) { |
| 39 | + CustomLabelsArray *out = &record->customLabelsState.cla; |
| 40 | + GoMapBucket *map_value = &record->goMapBucket; |
| 41 | + long res = bpf_probe_read(map_value, sizeof(GoMapBucket), label_buckets + (j * sizeof(GoMapBucket))); |
| 42 | + if (res < 0) { |
| 43 | + return false; |
| 44 | + } |
| 45 | + |
| 46 | + process_value(map_value, out, 0); |
| 47 | + process_value(map_value, out, 1); |
| 48 | + process_value(map_value, out, 2); |
| 49 | + process_value(map_value, out, 3); |
| 50 | + process_value(map_value, out, 4); |
| 51 | + process_value(map_value, out, 5); |
| 52 | + process_value(map_value, out, 6); |
| 53 | + process_value(map_value, out, 7); |
| 54 | + |
| 55 | + return false; |
| 56 | +} |
| 57 | + |
| 58 | +// Go processes store the current goroutine in thread local store. From there |
| 59 | +// this reads the g (aka goroutine) struct, then the m (the actual operating |
| 60 | +// system thread) of that goroutine, and finally curg (current goroutine). This |
| 61 | +// chain is necessary because getg().m.curg points to the current user g |
| 62 | +// assigned to the thread (curg == getg() when not on the system stack). curg |
| 63 | +// may be nil if there is no user g, such as when running in the scheduler. If |
| 64 | +// curg is nil, then g is either a system stack (called g0) or a signal handler |
| 65 | +// g (gsignal). Neither one will ever have label. |
| 66 | +static inline __attribute__((__always_inline__)) |
| 67 | +bool get_go_custom_labels(struct pt_regs *ctx, PerCPURecord *record, GoCustomLabelsOffsets *offs) { |
| 68 | + long res; |
| 69 | + |
| 70 | + size_t curg_ptr_addr; |
| 71 | + res = bpf_probe_read_user(&curg_ptr_addr, sizeof(void *), (void *)(record->customLabelsState.go_m_ptr + offs->curg)); |
| 72 | + if (res < 0) { |
| 73 | + DEBUG_PRINT("cl: failed to read value for m_ptr->curg: %ld", res); |
| 74 | + return false; |
| 75 | + } |
| 76 | + |
| 77 | + void *labels_map_ptr_ptr; |
| 78 | + res = bpf_probe_read_user(&labels_map_ptr_ptr, sizeof(void *), (void *)(curg_ptr_addr + offs->labels)); |
| 79 | + if (res < 0) { |
| 80 | + DEBUG_PRINT("cl: failed to read value for curg->labels (%lx->%lx): %ld", (unsigned long)curg_ptr_addr, |
| 81 | + (unsigned long) offs->labels, res); |
| 82 | + return false; |
| 83 | + } |
| 84 | + |
| 85 | + void *labels_map_ptr; |
| 86 | + res = bpf_probe_read(&labels_map_ptr, sizeof(labels_map_ptr), labels_map_ptr_ptr); |
| 87 | + if (res < 0) { |
| 88 | + DEBUG_PRINT("cl: failed to read value for labels_map_ptr (%lx): %ld", (unsigned long)labels_map_ptr_ptr, res); |
| 89 | + return false; |
| 90 | + } |
| 91 | + |
| 92 | + u64 labels_count = 0; |
| 93 | + res = bpf_probe_read(&labels_count, sizeof(labels_count), labels_map_ptr + offs->hmap_count); |
| 94 | + if (res < 0) { |
| 95 | + DEBUG_PRINT("cl: failed to read value for labels_count: %ld", res); |
| 96 | + return false; |
| 97 | + } |
| 98 | + if (labels_count == 0) { |
| 99 | + DEBUG_PRINT("cl: no labels"); |
| 100 | + return false; |
| 101 | + } |
| 102 | + |
| 103 | + unsigned char log_2_bucket_count; |
| 104 | + res = bpf_probe_read(&log_2_bucket_count, sizeof(log_2_bucket_count), labels_map_ptr + offs->hmap_log2_bucket_count); |
| 105 | + if (res < 0) { |
| 106 | + DEBUG_PRINT("cl: failed to read value for bucket_count: %ld", res); |
| 107 | + return false; |
| 108 | + } |
| 109 | + void *label_buckets; |
| 110 | + res = bpf_probe_read(&label_buckets, sizeof(label_buckets), labels_map_ptr + offs->hmap_buckets); |
| 111 | + if (res < 0) { |
| 112 | + DEBUG_PRINT("cl: failed to read value for label_buckets: %ld", res); |
| 113 | + return false; |
| 114 | + } |
| 115 | + |
| 116 | + // Manually unroll loop to support 4.19 kernel, auto unroll doesn't work as well |
| 117 | + // and we can't support as many buckets. |
| 118 | + u64 bucket_count = MIN(MAX_CUSTOM_LABELS, 1 << log_2_bucket_count); |
| 119 | + switch (bucket_count) { |
| 120 | + case 14: if (process_bucket(record, label_buckets, 13)) return true; |
| 121 | + case 13: if (process_bucket(record, label_buckets, 12)) return true; |
| 122 | + case 12: if (process_bucket(record, label_buckets, 11)) return true; |
| 123 | + case 11: if (process_bucket(record, label_buckets, 10)) return true; |
| 124 | + case 10: if (process_bucket(record, label_buckets, 9)) return true; |
| 125 | + case 9: if (process_bucket(record, label_buckets, 8)) return true; |
| 126 | + case 8: if (process_bucket(record, label_buckets, 7)) return true; |
| 127 | + case 7: if (process_bucket(record, label_buckets, 6)) return true; |
| 128 | + case 6: if (process_bucket(record, label_buckets, 5)) return true; |
| 129 | + case 5: if (process_bucket(record, label_buckets, 4)) return true; |
| 130 | + case 4: if (process_bucket(record, label_buckets, 3)) return true; |
| 131 | + case 3: if (process_bucket(record, label_buckets, 2)) return true; |
| 132 | + case 2: if (process_bucket(record, label_buckets, 1)) return true; |
| 133 | + case 1: if (process_bucket(record, label_buckets, 0)) return true; |
| 134 | + } |
| 135 | + |
| 136 | + return false; |
| 137 | +} |
| 138 | + |
| 139 | + |
| 140 | +SEC("perf_event/go_labels") |
| 141 | +int go_labels(struct pt_regs *ctx) { |
| 142 | + PerCPURecord *record = get_per_cpu_record(); |
| 143 | + if (!record) |
| 144 | + return -1; |
| 145 | + |
| 146 | + u32 pid = record->trace.pid; |
| 147 | + GoCustomLabelsOffsets *offsets = bpf_map_lookup_elem(&go_procs, &pid); |
| 148 | + if (!offsets) { |
| 149 | + DEBUG_PRINT("cl: no offsets, %d not recognized as a go binary", pid); |
| 150 | + return -1; |
| 151 | + } |
| 152 | + DEBUG_PRINT("cl: go offsets found, %d recognized as a go binary: m_ptr: %lx", pid, (unsigned long)record->customLabelsState.go_m_ptr); |
| 153 | + bool success = get_go_custom_labels(ctx, record, offsets); |
| 154 | + if (!success) { |
| 155 | + increment_metric(metricID_UnwindGoCustomLabelsFailures); |
| 156 | + } |
| 157 | + |
| 158 | + tail_call(ctx, PROG_UNWIND_STOP); |
| 159 | + return 0; |
| 160 | +} |