@@ -43,13 +43,40 @@ struct {
 	__uint(value_size, sizeof(struct bpf_perf_event_value));
 } cgrp_readings SEC(".maps");
 
+/* new kernel cgroup definition */
+struct cgroup___new {
+	int level;
+	struct cgroup *ancestors[];
+} __attribute__((preserve_access_index));
+
+/* old kernel cgroup definition */
+struct cgroup___old {
+	int level;
+	u64 ancestor_ids[];
+} __attribute__((preserve_access_index));
+
 const volatile __u32 num_events = 1;
 const volatile __u32 num_cpus = 1;
 
 int enabled = 0;
 int use_cgroup_v2 = 0;
 int perf_subsys_id = -1;
 
+static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
+{
+	/* recast pointer to capture new type for compiler */
+	struct cgroup___new *cgrp_new = (void *)cgrp;
+
+	if (bpf_core_field_exists(cgrp_new->ancestors)) {
+		return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
+	} else {
+		/* recast pointer to capture old type for compiler */
+		struct cgroup___old *cgrp_old = (void *)cgrp;
+
+		return BPF_CORE_READ(cgrp_old, ancestor_ids[level]);
+	}
+}
+
 static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
 {
 	struct task_struct *p = (void *)bpf_get_current_task();
@@ -77,7 +104,7 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
 		break;
 
 		// convert cgroup-id to a map index
-		cgrp_id = BPF_CORE_READ(cgrp, ancestors[i], kn, id);
+		cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
 		elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
 		if (!elem)
 			continue;
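
Note (not part of the patch): names like cgroup___new and cgroup___old use libbpf's CO-RE "struct flavor" convention. Everything from the triple underscore onward is ignored when the local type is matched against the running kernel's BTF, and __attribute__((preserve_access_index)) makes the field accesses relocatable at load time, so one BPF object can probe which layout the kernel actually has via bpf_core_field_exists(). A minimal standalone sketch of the same detect-and-read pattern, using the task_struct state/__state rename as the probed field; the helper name read_task_state and the flavor names task_struct___with_state/task_struct___old are illustrative, not from the patch, and vmlinux.h plus libbpf's bpf_core_read.h are assumed to be available:

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>

/* flavor matched against the kernel's struct task_struct;
 * the ___with_state suffix is stripped during CO-RE relocation */
struct task_struct___with_state {
	unsigned int __state;
} __attribute__((preserve_access_index));

/* flavor describing the older field name/type */
struct task_struct___old {
	long state;
} __attribute__((preserve_access_index));

static inline unsigned int read_task_state(struct task_struct *p)
{
	struct task_struct___with_state *t_new = (void *)p;

	/* true only if the running kernel's task_struct has a
	 * __state member; otherwise fall back to the old name */
	if (bpf_core_field_exists(t_new->__state)) {
		return BPF_CORE_READ(t_new, __state);
	} else {
		struct task_struct___old *t_old = (void *)p;

		return (unsigned int)BPF_CORE_READ(t_old, state);
	}
}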