|
| 1 | +const CgroupsManager = @This(); |
| 2 | + |
| 3 | +const std = @import("std"); |
| 4 | +const testing = std.testing; |
| 5 | +const mem = std.mem; |
| 6 | +const meta = std.meta; |
| 7 | +const fmt = std.fmt; |
| 8 | +const fs = std.fs; |
| 9 | +const os = std.os; |
| 10 | +const Allocator = mem.Allocator; |
| 11 | + |
| 12 | +const util = @import("util.zig"); |
| 13 | + |
// Name of the cgroup v1 interface file listing member PIDs; writing a PID
// to it moves that process into the cgroup. The kernel names this file
// "cgroup.procs" (singular), not "cgroups.procs".
const cgroup_procs_file_name = "cgroup.procs";
| 15 | + |
| 16 | +// Currently, runzigc only supports cgroup v1. |
| 17 | + |
| 18 | +// TODO(musaprg): refactor this file more generic, avoid dirty implementation |
| 19 | + |
/// Absolute per-subsystem cgroup v1 directories for one container,
/// e.g. "/sys/fs/cgroup/<subsystem>/runzigc/<container_id>".
/// Each field is an allocator-owned slice; see deinit() for release.
const CgroupPaths = struct {
    // TODO(musaprg): enable all subsystem
    // blkio: []const u8 = "",
    cpu: []const u8,
    cpuacct: []const u8,
    // cpuset: []const u8,
    devices: []const u8,
    freezer: []const u8,
    // hugetlb: []const u8,
    memory: []const u8,
    // net_cls: []const u8,
    // net_prio: []const u8,
    // perf_event: []const u8,
    //pids: []const u8,
    //rdma: []const u8,
};
| 36 | + |
/// Allocator that owns every path slice in `cgroup_paths`.
allocator: Allocator,
/// Per-subsystem cgroup directories for this container; freed in deinit().
cgroup_paths: CgroupPaths,
| 39 | + |
/// Builds a CgroupsManager with one path per supported cgroup v1 subsystem
/// for `container_id`. All paths are allocated with `allocator`; the caller
/// releases them via deinit(). On failure, paths allocated so far are freed.
pub fn new(allocator: Allocator, container_id: []const u8) !CgroupsManager {
    // TODO(musaprg): Cgroup v2
    // TODO(musaprg): enable all subsystem
    // NOTE: only fields that actually exist on CgroupPaths are initialized;
    // the original also set `.blkio`, which is commented out of the struct.
    const cpu = try generate_cgroups_path(allocator, "cpu", container_id);
    errdefer allocator.free(cpu);
    const cpuacct = try generate_cgroups_path(allocator, "cpuacct", container_id);
    errdefer allocator.free(cpuacct);
    const devices = try generate_cgroups_path(allocator, "devices", container_id);
    errdefer allocator.free(devices);
    const freezer = try generate_cgroups_path(allocator, "freezer", container_id);
    errdefer allocator.free(freezer);
    const memory = try generate_cgroups_path(allocator, "memory", container_id);
    errdefer allocator.free(memory);
    return CgroupsManager{
        .allocator = allocator,
        .cgroup_paths = CgroupPaths{
            .cpu = cpu,
            .cpuacct = cpuacct,
            .devices = devices,
            .freezer = freezer,
            .memory = memory,
        },
    };
}
| 62 | + |
/// Moves `pid` into this container's cgroup for every supported subsystem:
/// creates each subsystem directory (the kernel then materializes the
/// interface files) and writes the PID into its procs file.
pub fn join(self: *CgroupsManager, pid: os.pid_t) !void {
    inline for (std.meta.fields(@TypeOf(self.cgroup_paths))) |f| {
        // Original referenced an undefined `x`; the paths live on self.
        const subsystem_path = @as(f.field_type, @field(self.cgroup_paths, f.name));
        try util.mkdirAll(subsystem_path, 0o755);
        const cgroup_procs_path = try fs.path.join(self.allocator, &[_][]const u8{ subsystem_path, cgroup_procs_file_name });
        defer self.allocator.free(cgroup_procs_path);
        // Original opened an undefined `cgroup_cpu_tasks_path` here.
        const cgroup_procs = try fs.cwd().openFile(cgroup_procs_path, .{ .write = true });
        defer cgroup_procs.close();
        const cgroup_procs_content = try fmt.allocPrint(self.allocator, "{d}\n", .{pid});
        defer self.allocator.free(cgroup_procs_content);
        try cgroup_procs.writer().writeAll(cgroup_procs_content);
    }
}
| 75 | + |
/// Freeze every process in the container via the freezer subsystem
/// (presumably by writing to freezer.state — TODO confirm when implemented).
/// Currently a no-op stub.
pub fn freeze(self: *CgroupsManager) !void {
    // FIXME(musaprg): Implement me
}
| 80 | + |
/// Remove the container's cgroup directories from every subsystem
/// hierarchy. Currently a no-op stub.
pub fn destroy(self: *CgroupsManager) !void {
    // TODO(musaprg): backoff retry
    // FIXME(musaprg): Implement me
}
| 86 | + |
/// Frees every subsystem path allocated by new(). Iterates the fields of
/// CgroupPaths at comptime (same pattern as join()) so newly enabled
/// subsystems are freed automatically instead of by hand-maintained lists.
pub fn deinit(self: *CgroupsManager) void {
    inline for (std.meta.fields(@TypeOf(self.cgroup_paths))) |f| {
        self.allocator.free(@field(self.cgroup_paths, f.name));
    }
}
| 104 | + |
/// Joins the cgroup v1 mount root, subsystem `name`, the "runzigc" scope
/// directory, and `container_id` into one absolute path.
/// Caller owns the returned slice.
fn generate_cgroups_path(allocator: Allocator, name: []const u8, container_id: []const u8) ![]const u8 {
    const cgroup_root = "/sys/fs/cgroup";
    const components = [_][]const u8{ cgroup_root, name, "runzigc", container_id };
    return fs.path.join(allocator, &components);
}
| 108 | + |
// Verifies new() builds "<root>/<subsystem>/runzigc/<id>" for declared
// subsystems. The original asserted on `.blkio`, a field that is commented
// out of CgroupPaths and therefore does not compile.
test "new" {
    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();
    const allocator = arena.allocator();
    const cgroups_manager = try CgroupsManager.new(allocator, "hoge");
    try testing.expect(mem.eql(u8, "/sys/fs/cgroup/cpu/runzigc/hoge", cgroups_manager.cgroup_paths.cpu));
    try testing.expect(mem.eql(u8, "/sys/fs/cgroup/memory/runzigc/hoge", cgroups_manager.cgroup_paths.memory));
    // TODO(musaprg): write for each field case
}
0 commit comments