From 200c173b5314ea6e6ae8d46a875bcd800170a833 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 9 Mar 2025 22:45:42 -0500 Subject: [PATCH] Zig 0.14 Functionally the only changes are with build.zig: I removed the ability to target individual examples/benchmarks (easy to add back later) and I changed the flags to `emit-` prefixes to match typical Zig usage. --- .github/workflows/test.yml | 15 +- README.md | 4 +- build.zig | 420 +++++++++++++++++-------------------- build.zig.zon | 7 +- flake.lock | 6 +- flake.nix | 2 +- nix/package.nix | 2 +- src/ThreadPool.zig | 2 +- src/backend/epoll.zig | 50 +++-- src/backend/io_uring.zig | 22 +- src/backend/iocp.zig | 2 +- src/backend/kqueue.zig | 109 +++++----- src/build/ScdocStep.zig | 163 -------------- src/c_api.zig | 8 +- src/darwin.zig | 376 +++++++++++++++++++++++++++++++++ src/dynamic.zig | 6 +- src/linux/timerfd.zig | 21 +- src/queue.zig | 2 +- src/queue_mpsc.zig | 2 +- src/watcher/async.zig | 35 ++-- src/watcher/process.zig | 8 +- src/watcher/stream.zig | 2 +- src/windows.zig | 7 +- 23 files changed, 722 insertions(+), 549 deletions(-) delete mode 100644 src/build/ScdocStep.zig create mode 100644 src/darwin.zig diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 530d1bad..beedf260 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,9 +26,6 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 - with: - submodules: recursive - fetch-depth: 0 # Install Nix and use that to run our tests so our environment matches exactly. - uses: cachix/install-nix-action@v30 @@ -51,9 +48,6 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 - with: - submodules: recursive - fetch-depth: 0 # Install Nix and use that to run our tests so our environment matches exactly. - uses: cachix/install-nix-action@v30 @@ -72,7 +66,7 @@ jobs: # run: nix develop -c zig build test -Dtarget=wasm32-wasi -fwasmtime --summary all - name: build all benchmarks and examples - run: nix develop -c zig build -Dexample -Dbench --summary all + run: nix develop -c zig build -Demit-example -Demit-bench --summary all # Run a full build to ensure that works - run: nix build @@ -85,17 +79,14 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 - with: - submodules: recursive - fetch-depth: 0 - name: Install zig uses: goto-bus-stop/setup-zig@v2 with: - version: 0.13.0 + version: 0.14.0 - name: test run: zig build test --summary all - name: build all benchmarks and examples - run: zig build -Dexample -Dbench --summary all + run: zig build -Demit-example -Demit-bench --summary all diff --git a/README.md b/README.md index 1cec3d59..36323562 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ further makes it compatible with any language out there that can communicate with C APIs). **Project Status: Stable for most use cases.** libxev is in daily use by -large projects such as [Ghostty](https://ghostty.org), +large projects such as [Ghostty](https://ghostty.org), [zml](https://github.com/zml/zml), and more. For most use cases, libxev has been shown to be stable at scale. libxev has a broad featureset and there are likely less well-used corners of the library, but for most @@ -271,7 +271,7 @@ directory. # Build -Build requires the installation of the Zig 0.13.0. libxev follows stable +Build requires the installation of the Zig 0.14.0. libxev follows stable Zig releases and generally does not support nightly builds. When a stable release is imminent we may have a branch that supports it. 
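As a quick usage sketch of the renamed options this patch introduces (the flag names below are taken from the CI workflow and the Nix package in this same diff, not from additional documentation):

```sh
# Run the test suite
zig build test --summary all

# Install example and benchmark binaries into zig-out
# (renamed from -Dexample / -Dbench to the emit- prefix)
zig build -Demit-example -Demit-bench --summary all

# Build and install man pages (requires scdoc), as the Nix package does
zig build -Demit-man-pages
```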
**libxev has no other build dependencies.** diff --git a/build.zig b/build.zig index 0acf29f3..43294bb1 100644 --- a/build.zig +++ b/build.zig @@ -1,153 +1,75 @@ const std = @import("std"); -const CompileStep = std.build.Step.Compile; -const ScdocStep = @import("src/build/ScdocStep.zig"); - +const Step = std.Build.Step; + +/// A note on my build.zig style: I try to create all the artifacts first, +/// unattached to any steps. At the end of the build() function, I create +/// steps or attach unattached artifacts to predefined steps such as +/// install. This means the only thing affecting the `zig build` user +/// interaction is at the end of the build() file and makes it easier +/// to reason about the structure. pub fn build(b: *std.Build) !void { const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); _ = b.addModule("xev", .{ .root_source_file = b.path("src/main.zig") }); - const man_pages = b.option( + const emit_man = b.option( bool, - "man-pages", + "emit-man-pages", "Set to true to build man pages. Requires scdoc. Defaults to true if scdoc is found.", - ) orelse if (b.findProgram(&[_][]const u8{"scdoc"}, &[_][]const u8{})) |_| + ) orelse if (b.findProgram( + &[_][]const u8{"scdoc"}, + &[_][]const u8{}, + )) |_| true else |err| switch (err) { error.FileNotFound => false, else => return err, }; - const bench_name = b.option( - []const u8, - "bench-name", - "Build and install a single benchmark", - ); - - const bench_install = b.option( - bool, - "bench", - "Install the benchmark binaries to zig-out/bench", - ) orelse (bench_name != null); - - const example_name = b.option( - []const u8, - "example-name", - "Build and install a single example", - ); - - const example_install = b.option( + const emit_bench = b.option( bool, - "example", - "Install the example binaries to zig-out/example", - ) orelse (example_name != null); + "emit-bench", + "Install the benchmark binaries to zig-out", + ) orelse false; - const test_install = b.option( + const emit_examples = b.option( bool, - "install-tests", - "Install the test binaries into zig-out", + "emit-example", + "Install the example binaries to zig-out", ) orelse false; - // Our tests require libc on Linux and Mac. Note that libxev itself - // does NOT require libc. - const test_libc = switch (target.result.os.tag) { - .linux, .macos => true, - else => false, - }; - - const test_filter = b.option([]const u8, "test-filter", "Filter for test"); - - // We always build our test exe as part of `zig build` so that - // we can easily run it manually without digging through the cache. - const test_exe = b.addTest(.{ - .name = "xev-test", - .root_source_file = b.path("src/main.zig"), - .target = target, - .optimize = optimize, - .filter = test_filter, - }); - if (test_libc) test_exe.linkLibC(); // Tests depend on libc, libxev does not - if (test_install) b.installArtifact(test_exe); - - // zig build test test binary and runner. 
- const tests_run = b.addRunArtifact(test_exe); - const test_step = b.step("test", "Run tests"); - test_step.dependOn(&tests_run.step); - // Static C lib - const static_c_lib: ?*std.Build.Step.Compile = if (target.result.os.tag != .wasi) lib: { + const static_lib: ?*Step.Compile = lib: { + if (target.result.os.tag == .wasi) break :lib null; + const static_lib = b.addStaticLibrary(.{ .name = "xev", .root_source_file = b.path("src/c_api.zig"), .target = target, .optimize = optimize, }); - static_lib.linkLibC(); - - // Link required libraries if targeting Windows if (target.result.os.tag == .windows) { static_lib.linkSystemLibrary("ws2_32"); static_lib.linkSystemLibrary("mswsock"); } - - b.installArtifact(static_lib); - b.default_step.dependOn(&static_lib.step); - - const static_binding_test = b.addExecutable(.{ - .name = "static-binding-test", - .target = target, - .optimize = optimize, - }); - static_binding_test.linkLibC(); - static_binding_test.addIncludePath(b.path("include")); - static_binding_test.addCSourceFile(.{ - .file = b.path("examples/_basic.c"), - .flags = &[_][]const u8{ "-Wall", "-Wextra", "-pedantic", "-std=c99", "-D_POSIX_C_SOURCE=199309L" }, - }); - static_binding_test.linkLibrary(static_lib); - if (test_install) b.installArtifact(static_binding_test); - - if (target.query.isNative()) { - const static_binding_test_run = b.addRunArtifact(static_binding_test); - test_step.dependOn(&static_binding_test_run.step); - } - break :lib static_lib; - } else null; + }; - // Dynamic C lib. We only build this if this is the native target so we - // can link to libxml2 on our native system. - if (target.query.isNative()) { - const dynamic_lib_name = "xev"; + // Dynamic C lib + const dynamic_lib: ?*Step.Compile = lib: { + // We require native so we can link to libxml2 + if (!target.query.isNative()) break :lib null; const dynamic_lib = b.addSharedLibrary(.{ - .name = dynamic_lib_name, + .name = "xev", .root_source_file = b.path("src/c_api.zig"), .target = target, .optimize = optimize, }); - b.installArtifact(dynamic_lib); - b.default_step.dependOn(&dynamic_lib.step); - - const dynamic_binding_test = b.addExecutable(.{ - .name = "dynamic-binding-test", - .target = target, - .optimize = optimize, - }); - dynamic_binding_test.linkLibC(); - dynamic_binding_test.addIncludePath(b.path("include")); - dynamic_binding_test.addCSourceFile(.{ - .file = b.path("examples/_basic.c"), - .flags = &[_][]const u8{ "-Wall", "-Wextra", "-pedantic", "-std=c99" }, - }); - dynamic_binding_test.linkLibrary(dynamic_lib); - if (test_install) b.installArtifact(dynamic_binding_test); - - const dynamic_binding_test_run = b.addRunArtifact(dynamic_binding_test); - test_step.dependOn(&dynamic_binding_test_run.step); - } + break :lib dynamic_lib; + }; // C Headers const c_header = b.addInstallFileWithDir( @@ -155,15 +77,10 @@ pub fn build(b: *std.Build) !void { .header, "xev.h", ); - b.getInstallStep().dependOn(&c_header.step); // pkg-config - { - const file = try b.cache_root.join(b.allocator, &[_][]const u8{"libxev.pc"}); - const pkgconfig_file = try std.fs.cwd().createFile(file, .{}); - - const writer = pkgconfig_file.writer(); - try writer.print( + const pc: *Step.InstallFile = pc: { + const file = b.addWriteFile("libxev.pc", b.fmt( \\prefix={s} \\includedir=${{prefix}}/include \\libdir=${{prefix}}/lib @@ -174,147 +91,177 @@ pub fn build(b: *std.Build) !void { \\Version: 0.1.0 \\Cflags: -I${{includedir}} \\Libs: -L${{libdir}} -lxev - , .{b.install_prefix}); - defer pkgconfig_file.close(); - - 
b.getInstallStep().dependOn(&b.addInstallFileWithDir( - .{ .cwd_relative = file }, + , .{b.install_prefix})); + break :pc b.addInstallFileWithDir( + file.getDirectory().path(b, "libxev.pc"), .prefix, "share/pkgconfig/libxev.pc", - ).step); - } + ); + }; - // Benchmarks - _ = try benchTargets(b, target, optimize, bench_install, bench_name); + // Man pages + const man = try manPages(b); + + // Benchmarks and examples + const benchmarks = try buildBenchmarks(b, target); + const examples = try buildExamples(b, target, optimize, static_lib); + + // Test Executable + const test_exe: *Step.Compile = test_exe: { + const test_filter = b.option( + []const u8, + "test-filter", + "Filter for test", + ); + const test_exe = b.addTest(.{ + .name = "xev-test", + .root_source_file = b.path("src/main.zig"), + .target = target, + .optimize = optimize, + .filter = test_filter, + }); + switch (target.result.os.tag) { + .linux, .macos => test_exe.linkLibC(), + else => {}, + } + break :test_exe test_exe; + }; - // Examples - _ = try exampleTargets(b, target, optimize, static_c_lib, example_install, example_name); + // "test" Step + { + const tests_run = b.addRunArtifact(test_exe); + const test_step = b.step("test", "Run tests"); + test_step.dependOn(&tests_run.step); + } - // Man pages - if (man_pages) { - const scdoc_step = ScdocStep.create(b); - try scdoc_step.install(); + if (static_lib) |v| b.installArtifact(v); + if (dynamic_lib) |v| b.installArtifact(v); + b.getInstallStep().dependOn(&c_header.step); + b.getInstallStep().dependOn(&pc.step); + b.installArtifact(test_exe); + if (emit_man) { + for (man) |step| b.getInstallStep().dependOn(step); } + if (emit_bench) for (benchmarks) |exe| { + b.getInstallStep().dependOn(&b.addInstallArtifact( + exe, + .{ .dest_dir = .{ .override = .{ + .custom = "bin/bench", + } } }, + ).step); + }; + if (emit_examples) for (examples) |exe| { + b.getInstallStep().dependOn(&b.addInstallArtifact( + exe, + .{ .dest_dir = .{ .override = .{ + .custom = "bin/example", + } } }, + ).step); + }; } -fn benchTargets( +fn buildBenchmarks( b: *std.Build, target: std.Build.ResolvedTarget, - mode: std.builtin.OptimizeMode, - install: bool, - install_name: ?[]const u8, -) !std.StringHashMap(*std.Build.Step.Compile) { - _ = mode; - - var map = std.StringHashMap(*std.Build.Step.Compile).init(b.allocator); +) ![]const *Step.Compile { + var steps = std.ArrayList(*Step.Compile).init(b.allocator); + defer steps.deinit(); - // Open the directory - const c_dir_path = "src/bench"; - var c_dir = try std.fs.cwd().openDir(comptime thisDir() ++ "/" ++ c_dir_path, .{ .iterate = true }); - defer c_dir.close(); + var dir = try std.fs.cwd().openDir(try b.build_root.join( + b.allocator, + &.{ "src", "bench" }, + ), .{ .iterate = true }); + defer dir.close(); // Go through and add each as a step - var c_dir_it = c_dir.iterate(); - while (try c_dir_it.next()) |entry| { + var it = dir.iterate(); + while (try it.next()) |entry| { // Get the index of the last '.' so we can strip the extension. - const index = std.mem.lastIndexOfScalar(u8, entry.name, '.') orelse continue; + const index = std.mem.lastIndexOfScalar( + u8, + entry.name, + '.', + ) orelse continue; if (index == 0) continue; // Name of the app and full path to the entrypoint. const name = entry.name[0..index]; - const path = try std.fs.path.join(b.allocator, &[_][]const u8{ - c_dir_path, - entry.name, - }); - - // If we have specified a specific name, only install that one. 
- if (install_name) |n| { - if (!std.mem.eql(u8, n, name)) continue; - } // Executable builder. - const c_exe = b.addExecutable(.{ + const exe = b.addExecutable(.{ .name = name, - .root_source_file = b.path(path), + .root_source_file = b.path(b.fmt( + "src/bench/{s}", + .{entry.name}, + )), .target = target, .optimize = .ReleaseFast, // benchmarks are always release fast }); - c_exe.root_module.addImport("xev", b.modules.get("xev").?); - if (install) { - const install_step = b.addInstallArtifact(c_exe, .{ - .dest_dir = .{ .override = .{ .custom = "bench" } }, - }); - b.getInstallStep().dependOn(&install_step.step); - } + exe.root_module.addImport("xev", b.modules.get("xev").?); // Store the mapping - try map.put(try b.allocator.dupe(u8, name), c_exe); + try steps.append(exe); } - return map; + return try steps.toOwnedSlice(); } -fn exampleTargets( +fn buildExamples( b: *std.Build, target: std.Build.ResolvedTarget, optimize: std.builtin.OptimizeMode, - c_lib_: ?*std.Build.Step.Compile, - install: bool, - install_name: ?[]const u8, -) !void { - // Ignore if we're not installing - if (!install) return; - - // Open the directory - const c_dir_path = (comptime thisDir()) ++ "/examples"; - var c_dir = try std.fs.cwd().openDir(c_dir_path, .{ .iterate = true }); - defer c_dir.close(); + c_lib_: ?*Step.Compile, +) ![]const *Step.Compile { + var steps = std.ArrayList(*Step.Compile).init(b.allocator); + defer steps.deinit(); + + var dir = try std.fs.cwd().openDir(try b.build_root.join( + b.allocator, + &.{"examples"}, + ), .{ .iterate = true }); + defer dir.close(); // Go through and add each as a step - var c_dir_it = c_dir.iterate(); - while (try c_dir_it.next()) |entry| { + var it = dir.iterate(); + while (try it.next()) |entry| { // Get the index of the last '.' so we can strip the extension. - const index = std.mem.lastIndexOfScalar(u8, entry.name, '.') orelse continue; + const index = std.mem.lastIndexOfScalar( + u8, + entry.name, + '.', + ) orelse continue; if (index == 0) continue; - // If we have specified a specific name, only install that one. - if (install_name) |n| { - if (!std.mem.eql(u8, n, entry.name)) continue; - } - // Name of the app and full path to the entrypoint. 
const name = entry.name[0..index]; - const path = try std.fs.path.join(b.allocator, &[_][]const u8{ - c_dir_path, - entry.name, - }); const is_zig = std.mem.eql(u8, entry.name[index + 1 ..], "zig"); - if (is_zig) { - const c_exe = b.addExecutable(.{ + const exe: *Step.Compile = if (is_zig) exe: { + const exe = b.addExecutable(.{ .name = name, - .root_source_file = .{ .cwd_relative = path }, + .root_source_file = b.path(b.fmt( + "examples/{s}", + .{entry.name}, + )), .target = target, .optimize = optimize, }); - c_exe.root_module.addImport("xev", b.modules.get("xev").?); - if (install) { - const install_step = b.addInstallArtifact(c_exe, .{ - .dest_dir = .{ .override = .{ .custom = "example" } }, - }); - b.getInstallStep().dependOn(&install_step.step); - } - } else { + exe.root_module.addImport("xev", b.modules.get("xev").?); + break :exe exe; + } else exe: { const c_lib = c_lib_ orelse return error.UnsupportedPlatform; - const c_exe = b.addExecutable(.{ + const exe = b.addExecutable(.{ .name = name, .target = target, .optimize = optimize, }); - c_exe.linkLibC(); - c_exe.addIncludePath(b.path("include")); - c_exe.addCSourceFile(.{ - .file = .{ .cwd_relative = path }, + exe.linkLibC(); + exe.addIncludePath(b.path("include")); + exe.addCSourceFile(.{ + .file = b.path(b.fmt( + "examples/{s}", + .{entry.name}, + )), .flags = &[_][]const u8{ "-Wall", "-Wextra", @@ -323,31 +270,44 @@ fn exampleTargets( "-D_POSIX_C_SOURCE=199309L", }, }); - c_exe.linkLibrary(c_lib); - if (install) { - const install_step = b.addInstallArtifact(c_exe, .{ - .dest_dir = .{ .override = .{ .custom = "example" } }, - }); - b.getInstallStep().dependOn(&install_step.step); - } - } + exe.linkLibrary(c_lib); + break :exe exe; + }; - // If we have specified a specific name, only install that one. - if (install_name) |_| break; - } else { - if (install_name) |n| { - std.debug.print("No example file named: {s}\n", .{n}); - std.debug.print("Choices:\n", .{}); - var c_dir_it2 = c_dir.iterate(); - while (try c_dir_it2.next()) |entry| { - std.debug.print("\t{s}\n", .{entry.name}); - } - return error.InvalidExampleName; - } + // Store the mapping + try steps.append(exe); } + + return try steps.toOwnedSlice(); } -/// Path to the directory with the build.zig. -fn thisDir() []const u8 { - return std.fs.path.dirname(@src().file) orelse unreachable; +fn manPages(b: *std.Build) ![]const *Step { + var steps = std.ArrayList(*Step).init(b.allocator); + defer steps.deinit(); + + var dir = try std.fs.cwd().openDir(try b.build_root.join( + b.allocator, + &.{"docs"}, + ), .{ .iterate = true }); + defer dir.close(); + + var it = dir.iterate(); + while (try it.next()) |*entry| { + // Filenames must end in "{section}.scd" and sections are + // single numerals. + const base = entry.name[0 .. 
entry.name.len - 4]; + const section = base[base.len - 1 ..]; + + const cmd = b.addSystemCommand(&.{"scdoc"}); + cmd.setStdIn(.{ .lazy_path = b.path( + b.fmt("docs/{s}", .{entry.name}), + ) }); + + try steps.append(&b.addInstallFile( + cmd.captureStdOut(), + b.fmt("share/man/man{s}/{s}", .{ section, base }), + ).step); + } + + return try steps.toOwnedSlice(); } diff --git a/build.zig.zon b/build.zig.zon index 0881bdbb..40a33e63 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,6 +1,7 @@ .{ - .name = "libxev", - .minimum_zig_version = "0.12.0-dev.3191+9cf28d1e9", - .paths = .{""}, + .name = .libxev, + .minimum_zig_version = "0.14.0", .version = "0.0.0", + .fingerprint = 0x30f7363573edabf3, + .paths = .{""}, } diff --git a/flake.lock b/flake.lock index 72e7c05f..6139d98f 100644 --- a/flake.lock +++ b/flake.lock @@ -144,11 +144,11 @@ "nixpkgs": "nixpkgs_2" }, "locked": { - "lastModified": 1717848532, - "narHash": "sha256-d+xIUvSTreHl8pAmU1fnmkfDTGQYCn2Rb/zOwByxS2M=", + "lastModified": 1741566430, + "narHash": "sha256-BEKycplUAd9A0KBKIKrV2tw11+JmXEbVU8zMpe4AJ38=", "owner": "mitchellh", "repo": "zig-overlay", - "rev": "02fc5cc555fc14fda40c42d7c3250efa43812b43", + "rev": "b2897fe1ff741b627cbb8c6e41531fe9d5a4ed47", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 26498242..849ce0d2 100644 --- a/flake.nix +++ b/flake.nix @@ -24,7 +24,7 @@ # Other overlays (final: prev: rec { zigpkgs = inputs.zig.packages.${prev.system}; - zig = inputs.zig.packages.${prev.system}."0.13.0"; + zig = inputs.zig.packages.${prev.system}."0.14.0"; # Latest versions wasmtime = inputs.nixpkgs-unstable.legacyPackages.${prev.system}.wasmtime; diff --git a/nix/package.nix b/nix/package.nix index db4d4fb0..4817154d 100644 --- a/nix/package.nix +++ b/nix/package.nix @@ -24,7 +24,7 @@ stdenv.mkDerivation rec { installPhase = '' runHook preInstall - zig build -Doptimize=ReleaseFast -Dman-pages --prefix $out install + zig build -Doptimize=ReleaseFast -Demit-man-pages --prefix $out install runHook postInstall ''; diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig index aec09d74..8f601e14 100644 --- a/src/ThreadPool.zig +++ b/src/ThreadPool.zig @@ -453,7 +453,7 @@ const Event = struct { // Acquire barrier to ensure operations before the shutdown() are seen after the wait(). // Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common. if (state == SHUTDOWN) { - @fence(.acquire); + _ = self.state.load(.acquire); return; } diff --git a/src/backend/epoll.zig b/src/backend/epoll.zig index 16e1de70..d8ad2fc5 100644 --- a/src/backend/epoll.zig +++ b/src/backend/epoll.zig @@ -173,14 +173,18 @@ pub const Loop = struct { // Calculate all the values, being careful about overflows in order // to just return the maximum value. - const sec = std.math.mul(isize, self.cached_now.tv_sec, std.time.ms_per_s) catch return max; - const nsec = @divFloor(self.cached_now.tv_nsec, std.time.ns_per_ms); + const sec = std.math.mul(isize, self.cached_now.sec, std.time.ms_per_s) catch return max; + const nsec = @divFloor(self.cached_now.nsec, std.time.ns_per_ms); return std.math.lossyCast(i64, sec +| nsec); } /// Update the cached time. pub fn update_now(self: *Loop) void { - posix.clock_gettime(posix.CLOCK.MONOTONIC, &self.cached_now) catch {}; + if (posix.clock_gettime(posix.CLOCK.MONOTONIC)) |new_time| { + self.cached_now = new_time; + } else |_| { + // Errors are ignored. + } } /// Add a timer to the loop. The timer will execute in "next_ms". 
This @@ -252,8 +256,8 @@ pub const Loop = struct { // There are lots of failure scenarios here in math. If we see any // of them we just use the maximum value. const max: posix.timespec = .{ - .tv_sec = std.math.maxInt(isize), - .tv_nsec = std.math.maxInt(isize), + .sec = std.math.maxInt(isize), + .nsec = std.math.maxInt(isize), }; const next_s = std.math.cast(isize, next_ms / std.time.ms_per_s) orelse @@ -264,9 +268,9 @@ pub const Loop = struct { ) orelse return max; return .{ - .tv_sec = std.math.add(isize, self.cached_now.tv_sec, next_s) catch + .sec = std.math.add(isize, self.cached_now.sec, next_s) catch return max, - .tv_nsec = std.math.add(isize, self.cached_now.tv_nsec, next_ns) catch + .nsec = std.math.add(isize, self.cached_now.nsec, next_ns) catch return max, }; } @@ -403,10 +407,10 @@ pub const Loop = struct { const t = self.timers.peek() orelse break :timeout -1; // Determine the time in milliseconds. - const ms_now = @as(u64, @intCast(self.cached_now.tv_sec)) * std.time.ms_per_s + - @as(u64, @intCast(self.cached_now.tv_nsec)) / std.time.ns_per_ms; - const ms_next = @as(u64, @intCast(t.next.tv_sec)) * std.time.ms_per_s + - @as(u64, @intCast(t.next.tv_nsec)) / std.time.ns_per_ms; + const ms_now = @as(u64, @intCast(self.cached_now.sec)) * std.time.ms_per_s + + @as(u64, @intCast(self.cached_now.nsec)) / std.time.ns_per_ms; + const ms_next = @as(u64, @intCast(t.next.sec)) * std.time.ms_per_s + + @as(u64, @intCast(t.next.nsec)) / std.time.ns_per_ms; break :timeout @as(i32, @intCast(ms_next -| ms_now)); }; @@ -1248,16 +1252,16 @@ pub const Operation = union(OperationType) { /// any software is running in 584 years waiting on this timer... /// shame on me I guess... but I'll be dead. fn ns(self: *const Timer) u64 { - assert(self.next.tv_sec >= 0); - assert(self.next.tv_nsec >= 0); + assert(self.next.sec >= 0); + assert(self.next.nsec >= 0); const max = std.math.maxInt(u64); const s_ns = std.math.mul( u64, - @as(u64, @intCast(self.next.tv_sec)), + @as(u64, @intCast(self.next.sec)), std.time.ns_per_s, ) catch return max; - return std.math.add(u64, s_ns, @as(u64, @intCast(self.next.tv_nsec))) catch + return std.math.add(u64, s_ns, @as(u64, @intCast(self.next.nsec))) catch return max; } }; @@ -1336,10 +1340,10 @@ pub const ReadError = ThreadPoolError || posix.EpollCtlError || posix.PReadError || posix.RecvFromError || error{ - DupFailed, - EOF, - Unknown, -}; + DupFailed, + EOF, + Unknown, + }; pub const WriteError = ThreadPoolError || posix.EpollCtlError || posix.WriteError || @@ -1347,9 +1351,9 @@ pub const WriteError = ThreadPoolError || posix.EpollCtlError || posix.SendError || posix.SendMsgError || error{ - DupFailed, - Unknown, -}; + DupFailed, + Unknown, + }; pub const TimerError = error{ Unexpected, @@ -1620,7 +1624,7 @@ test "epoll: timerfd" { // We'll try with a simple timerfd const Timerfd = @import("../linux/timerfd.zig").Timerfd; - var t = try Timerfd.init(.monotonic, .{}); + var t = try Timerfd.init(.MONOTONIC, .{}); defer t.deinit(); try t.set(.{}, &.{ .value = .{ .nanoseconds = 1 } }, null); diff --git a/src/backend/io_uring.zig b/src/backend/io_uring.zig index 416a3ad1..18d6c4ee 100644 --- a/src/backend/io_uring.zig +++ b/src/backend/io_uring.zig @@ -114,15 +114,19 @@ pub const Loop = struct { // Calculate all the values, being careful about overflows in order // to just return the maximum value. 
- const sec = std.math.mul(isize, self.cached_now.tv_sec, std.time.ms_per_s) catch return max; - const nsec = @divFloor(self.cached_now.tv_nsec, std.time.ns_per_ms); + const sec = std.math.mul(isize, self.cached_now.sec, std.time.ms_per_s) catch return max; + const nsec = @divFloor(self.cached_now.nsec, std.time.ns_per_ms); return std.math.lossyCast(i64, sec +| nsec); } /// Update the cached time. pub fn update_now(self: *Loop) void { - posix.clock_gettime(posix.CLOCK.MONOTONIC, &self.cached_now) catch {}; - self.flags.now_outdated = false; + if (posix.clock_gettime(posix.CLOCK.MONOTONIC)) |new_time| { + self.cached_now = new_time; + self.flags.now_outdated = false; + } else |_| { + // Errors are ignored. + } } /// Tick the loop. The mode is comptime so we can do some tricks to @@ -315,8 +319,8 @@ pub const Loop = struct { // There are lots of failure scenarios here in math. If we see any // of them we just use the maximum value. const max: linux.kernel_timespec = .{ - .tv_sec = std.math.maxInt(isize), - .tv_nsec = std.math.maxInt(isize), + .sec = std.math.maxInt(isize), + .nsec = std.math.maxInt(isize), }; const next_s = std.math.cast(isize, next_ms / std.time.ms_per_s) orelse @@ -329,9 +333,9 @@ pub const Loop = struct { if (self.flags.now_outdated) self.update_now(); return .{ - .tv_sec = std.math.add(isize, self.cached_now.tv_sec, next_s) catch + .sec = std.math.add(isize, self.cached_now.sec, next_s) catch return max, - .tv_nsec = std.math.add(isize, self.cached_now.tv_nsec, next_ns) catch + .nsec = std.math.add(isize, self.cached_now.nsec, next_ns) catch return max, }; } @@ -1170,7 +1174,7 @@ test "io_uring: timerfd" { // We'll try with a simple timerfd const Timerfd = @import("../linux/timerfd.zig").Timerfd; - var t = try Timerfd.init(.monotonic, .{}); + var t = try Timerfd.init(.MONOTONIC, .{}); defer t.deinit(); try t.set(.{}, &.{ .value = .{ .nanoseconds = 1 } }, null); diff --git a/src/backend/iocp.zig b/src/backend/iocp.zig index dd7a026c..5d841226 100644 --- a/src/backend/iocp.zig +++ b/src/backend/iocp.zig @@ -973,7 +973,7 @@ pub const Completion = struct { if (result != windows.TRUE) { const err = windows.ws2_32.WSAGetLastError(); - const r = .{ + const r: Result = .{ .accept = switch (err) { windows.ws2_32.WinsockError.WSA_OPERATION_ABORTED => error.Canceled, else => windows.unexpectedWSAError(err), diff --git a/src/backend/kqueue.zig b/src/backend/kqueue.zig index 2a85f2f8..7b228e98 100644 --- a/src/backend/kqueue.zig +++ b/src/backend/kqueue.zig @@ -4,6 +4,7 @@ const std = @import("std"); const builtin = @import("builtin"); const assert = std.debug.assert; const posix = std.posix; +const darwin = @import("../darwin.zig"); const queue = @import("../queue.zig"); const queue_mpsc = @import("../queue_mpsc.zig"); const heap = @import("../heap.zig"); @@ -188,7 +189,7 @@ pub const Loop = struct { self.completions.push(c); events[events_len] = ev; - events[events_len].flags = posix.system.EV_DELETE; + events[events_len].flags = std.c.EV.DELETE; events_len += 1; if (events_len >= events.len) break :queue_pop; }, @@ -230,14 +231,14 @@ pub const Loop = struct { if (ev.udata == 0) continue; // We handle deletions separately. - if (ev.flags & posix.system.EV_DELETE != 0) continue; + if (ev.flags & std.c.EV.DELETE != 0) continue; const c: *Completion = @ptrFromInt(@as(usize, @intCast(ev.udata))); // If EV_ERROR is set, then submission failed for this // completion. We get the syscall errorcode from data and // store it. 
- if (ev.flags & posix.system.EV_ERROR != 0) { + if (ev.flags & std.c.EV.ERROR != 0) { c.result = c.syscall_result(-@as(i32, @intCast(ev.data))); } else { // No error, means that this completion is ready to work. @@ -311,9 +312,9 @@ pub const Loop = struct { // event. We have to add here because we need a stable self pointer. const events = [_]Kevent{.{ .ident = @as(usize, @intCast(self.mach_port.port)), - .filter = posix.system.EVFILT_MACHPORT, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, - .fflags = posix.system.MACH_RCV_MSG, + .filter = std.c.EVFILT.MACHPORT, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, + .fflags = darwin.MACH_RCV_MSG, .data = 0, .udata = 0, .ext = .{ @@ -448,7 +449,7 @@ pub const Loop = struct { .disarm => { if (disarm_ev) |ev| { events[changes] = ev; - events[changes].flags = posix.system.EV_DELETE; + events[changes].flags = std.c.EV.DELETE; events[changes].udata = 0; changes += 1; assert(changes <= events.len); @@ -474,16 +475,16 @@ pub const Loop = struct { const t = self.timers.peek() orelse break :timeout null; // Determine the time in milliseconds. - const ms_now = @as(u64, @intCast(self.cached_now.tv_sec)) * std.time.ms_per_s + - @as(u64, @intCast(self.cached_now.tv_nsec)) / std.time.ns_per_ms; - const ms_next = @as(u64, @intCast(t.next.tv_sec)) * std.time.ms_per_s + - @as(u64, @intCast(t.next.tv_nsec)) / std.time.ns_per_ms; + const ms_now = @as(u64, @intCast(self.cached_now.sec)) * std.time.ms_per_s + + @as(u64, @intCast(self.cached_now.nsec)) / std.time.ns_per_ms; + const ms_next = @as(u64, @intCast(t.next.sec)) * std.time.ms_per_s + + @as(u64, @intCast(t.next.nsec)) / std.time.ns_per_ms; const ms = ms_next -| ms_now; // Convert to s/ns for the timespec const sec = ms / std.time.ms_per_s; const nsec = (ms % std.time.ms_per_s) * std.time.ns_per_ms; - break :timeout .{ .tv_sec = @intCast(sec), .tv_nsec = @intCast(nsec) }; + break :timeout .{ .sec = @intCast(sec), .nsec = @intCast(nsec) }; }; // Wait for changes. Note that we ALWAYS attempt to get completions @@ -521,13 +522,13 @@ pub const Loop = struct { // Ignore any successful deletions. This can only happen // from disarms below and in that case we already processed // their callback. - if (ev.flags & posix.system.EV_DELETE != 0) continue; + if (ev.flags & std.c.EV.DELETE != 0) continue; // This can only be set during changelist processing so // that means that this event was never actually active. // Therefore, we only decrement the waiters by 1 if we // processed an active change. - if (ev.flags & posix.system.EV_ERROR != 0) { + if (ev.flags & std.c.EV.ERROR != 0) { // We cannot use c here because c is already dead // at this point for this event. continue; @@ -547,7 +548,7 @@ pub const Loop = struct { // Mark this event for deletion, it'll happen // on the next tick. events[changes] = ev; - events[changes].flags = posix.system.EV_DELETE; + events[changes].flags = std.c.EV.DELETE; events[changes].udata = 0; changes += 1; assert(changes <= events.len); @@ -585,14 +586,18 @@ pub const Loop = struct { // Calculate all the values, being careful about overflows in order // to just return the maximum value. - const sec = std.math.mul(isize, self.cached_now.tv_sec, std.time.ms_per_s) catch return max; - const nsec = @divFloor(self.cached_now.tv_nsec, std.time.ns_per_ms); + const sec = std.math.mul(isize, self.cached_now.sec, std.time.ms_per_s) catch return max; + const nsec = @divFloor(self.cached_now.nsec, std.time.ns_per_ms); return std.math.lossyCast(i64, sec +| nsec); } /// Update the cached time. 
pub fn update_now(self: *Loop) void { - posix.clock_gettime(posix.CLOCK.MONOTONIC, &self.cached_now) catch {}; + if (posix.clock_gettime(posix.CLOCK.MONOTONIC)) |new_time| { + self.cached_now = new_time; + } else |_| { + // Errors are ignored. + } } /// Add a timer to the loop. The timer will execute in "next_ms". This @@ -664,8 +669,8 @@ pub const Loop = struct { // There are lots of failure scenarios here in math. If we see any // of them we just use the maximum value. const max: posix.timespec = .{ - .tv_sec = std.math.maxInt(isize), - .tv_nsec = std.math.maxInt(isize), + .sec = std.math.maxInt(isize), + .nsec = std.math.maxInt(isize), }; const next_s = std.math.cast(isize, next_ms / std.time.ms_per_s) orelse @@ -676,9 +681,9 @@ pub const Loop = struct { ) orelse return max; return .{ - .tv_sec = std.math.add(isize, self.cached_now.tv_sec, next_s) catch + .sec = std.math.add(isize, self.cached_now.sec, next_s) catch return max, - .tv_nsec = std.math.add(isize, self.cached_now.tv_nsec, next_ns) catch + .nsec = std.math.add(isize, self.cached_now.nsec, next_ns) catch return max, }; } @@ -1052,8 +1057,8 @@ pub const Completion = struct { .accept => |v| kevent_init(.{ .ident = @intCast(v.socket), - .filter = posix.system.EVFILT_READ, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, + .filter = std.c.EVFILT.READ, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, .fflags = 0, .data = 0, .udata = @intFromPtr(self), @@ -1061,8 +1066,8 @@ pub const Completion = struct { .connect => |v| kevent_init(.{ .ident = @intCast(v.socket), - .filter = posix.system.EVFILT_WRITE, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, + .filter = std.c.EVFILT.WRITE, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, .fflags = 0, .data = 0, .udata = @intFromPtr(self), @@ -1082,9 +1087,9 @@ pub const Completion = struct { // buffer since MACH_RCV_MSG is set. break :kevent .{ .ident = @intCast(v.port), - .filter = posix.system.EVFILT_MACHPORT, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, - .fflags = posix.system.MACH_RCV_MSG, + .filter = std.c.EVFILT.MACHPORT, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, + .fflags = darwin.MACH_RCV_MSG, .data = 0, .udata = @intFromPtr(self), .ext = .{ @intFromPtr(slice.ptr), slice.len }, @@ -1093,8 +1098,8 @@ pub const Completion = struct { .proc => |v| kevent_init(.{ .ident = @intCast(v.pid), - .filter = posix.system.EVFILT_PROC, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, + .filter = std.c.EVFILT.PROC, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, .fflags = v.flags, .data = 0, .udata = @intFromPtr(self), @@ -1102,8 +1107,8 @@ pub const Completion = struct { inline .write, .pwrite, .send, .sendto => |v| kevent_init(.{ .ident = @intCast(v.fd), - .filter = posix.system.EVFILT_WRITE, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, + .filter = std.c.EVFILT.WRITE, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, .fflags = 0, .data = 0, .udata = @intFromPtr(self), @@ -1111,8 +1116,8 @@ pub const Completion = struct { inline .read, .pread, .recv, .recvfrom => |v| kevent_init(.{ .ident = @intCast(v.fd), - .filter = posix.system.EVFILT_READ, - .flags = posix.system.EV_ADD | posix.system.EV_ENABLE, + .filter = std.c.EVFILT.READ, + .flags = std.c.EV.ADD | std.c.EV.ENABLE, .fflags = 0, .data = 0, .udata = @intFromPtr(self), @@ -1261,7 +1266,7 @@ pub const Completion = struct { const ev = ev_ orelse break :res .{ .proc = ProcError.MissingKevent }; // If we have the exit status, we read it. 
- if (ev.fflags & (posix.system.NOTE_EXIT | posix.system.NOTE_EXITSTATUS) > 0) { + if (ev.fflags & (std.c.NOTE.EXIT | std.c.NOTE.EXITSTATUS) > 0) { const data: u32 = @intCast(ev.data); if (posix.W.IFEXITED(data)) break :res .{ .proc = posix.W.EXITSTATUS(data), @@ -1537,7 +1542,7 @@ pub const Operation = union(OperationType) { proc: struct { pid: posix.pid_t, - flags: u32 = posix.system.NOTE_EXIT | posix.system.NOTE_EXITSTATUS, + flags: u32 = std.c.NOTE.EXIT | std.c.NOTE.EXITSTATUS, }, }; @@ -1711,16 +1716,16 @@ const Timer = struct { /// any software is running in 584 years waiting on this timer... /// shame on me I guess... but I'll be dead. fn ns(self: *const Timer) u64 { - assert(self.next.tv_sec >= 0); - assert(self.next.tv_nsec >= 0); + assert(self.next.sec >= 0); + assert(self.next.nsec >= 0); const max = std.math.maxInt(u64); const s_ns = std.math.mul( u64, - @as(u64, @intCast(self.next.tv_sec)), + @as(u64, @intCast(self.next.sec)), std.time.ns_per_s, ) catch return max; - return std.math.add(u64, s_ns, @as(u64, @intCast(self.next.tv_nsec))) catch + return std.math.add(u64, s_ns, @as(u64, @intCast(self.next.nsec))) catch return max; } }; @@ -2514,8 +2519,8 @@ test "kqueue: mach port" { const mach_self = posix.system.mach_task_self(); var mach_port: posix.system.mach_port_name_t = undefined; try testing.expectEqual( - posix.system.KernE.SUCCESS, - posix.system.getKernError(posix.system.mach_port_allocate( + darwin.KernE.SUCCESS, + darwin.getKernError(posix.system.mach_port_allocate( mach_self, @intFromEnum(posix.system.MACH_PORT_RIGHT.RECEIVE), &mach_port, @@ -2557,23 +2562,23 @@ test "kqueue: mach port" { try testing.expect(!called); // Send a message to the port - var msg: posix.system.mach_msg_header_t = .{ + var msg: darwin.mach_msg_header_t = .{ .msgh_bits = @intFromEnum(posix.system.MACH_MSG_TYPE.MAKE_SEND_ONCE), - .msgh_size = @sizeOf(posix.system.mach_msg_header_t), + .msgh_size = @sizeOf(darwin.mach_msg_header_t), .msgh_remote_port = mach_port, - .msgh_local_port = posix.system.MACH_PORT_NULL, + .msgh_local_port = darwin.MACH_PORT_NULL, .msgh_voucher_port = undefined, .msgh_id = undefined, }; - try testing.expectEqual(posix.system.MachMsgE.SUCCESS, posix.system.getMachMsgError( - posix.system.mach_msg( + try testing.expectEqual(darwin.MachMsgE.SUCCESS, darwin.getMachMsgError( + darwin.mach_msg( &msg, - posix.system.MACH_SEND_MSG, + darwin.MACH_SEND_MSG, msg.msgh_size, 0, - posix.system.MACH_PORT_NULL, - posix.system.MACH_MSG_TIMEOUT_NONE, - posix.system.MACH_PORT_NULL, + darwin.MACH_PORT_NULL, + darwin.MACH_MSG_TIMEOUT_NONE, + darwin.MACH_PORT_NULL, ), )); diff --git a/src/build/ScdocStep.zig b/src/build/ScdocStep.zig deleted file mode 100644 index 47d38d16..00000000 --- a/src/build/ScdocStep.zig +++ /dev/null @@ -1,163 +0,0 @@ -const std = @import("std"); -const mem = std.mem; -const fs = std.fs; -const Step = std.Build.Step; -const Build = std.Build; - -/// ScdocStep generates man pages using scdoc(1). -/// -/// It reads all the raw pages from src_path and writes them to out_path. -/// src_path is typically "docs/" relative to the build root and out_path is -/// the build cache. -/// -/// The man pages can be installed by calling install() on the step. -const ScdocStep = @This(); - -step: Step, -builder: *Build, - -/// path to read man page sources from, defaults to the "doc/" subdirectory -/// from the build.zig file. This must be an absolute path. -src_path: []const u8, - -/// path where the generated man pages will be written (NOT installed). 
This -/// defaults to build cache root. -out_path: []const u8, - -pub fn create(builder: *Build) *ScdocStep { - const self = builder.allocator.create(ScdocStep) catch unreachable; - self.* = init(builder); - return self; -} - -pub fn init(builder: *Build) ScdocStep { - return ScdocStep{ - .builder = builder, - .step = Step.init(.{ - .id = .custom, - .name = "generate man pages", - .owner = builder, - .makeFn = make, - }), - .src_path = builder.pathFromRoot("docs/"), - .out_path = builder.cache_root.join(builder.allocator, &[_][]const u8{ - "man", - }) catch unreachable, - }; -} - -fn make(step: *std.Build.Step, _: std.Progress.Node) !void { - const self: *ScdocStep = @fieldParentPtr("step", step); - - // Create our cache path - // TODO(mitchellh): ideally this would be pure zig - { - const command = try std.fmt.allocPrint( - self.builder.allocator, - "rm -f {[path]s}/* && mkdir -p {[path]s}", - .{ .path = self.out_path }, - ); - _ = self.builder.run(&[_][]const u8{ "sh", "-c", command }); - } - - // Find all our man pages which are in our src path ending with ".scd". - var dir = try fs.openDirAbsolute(self.src_path, .{ .iterate = true }); - defer dir.close(); - - var iter = dir.iterate(); - while (try iter.next()) |*entry| { - // We only want "scd" files to generate. - if (!mem.eql(u8, fs.path.extension(entry.name), ".scd")) { - continue; - } - - const src = try fs.path.join( - self.builder.allocator, - &[_][]const u8{ self.src_path, entry.name }, - ); - - const dst = try fs.path.join( - self.builder.allocator, - &[_][]const u8{ self.out_path, entry.name[0..(entry.name.len - 4)] }, - ); - - const command = try std.fmt.allocPrint( - self.builder.allocator, - "scdoc < {s} > {s}", - .{ src, dst }, - ); - _ = self.builder.run(&[_][]const u8{ "sh", "-c", command }); - } -} - -pub fn install(self: *ScdocStep) !void { - // Ensure that `zig build install` depends on our generation step first. - self.builder.getInstallStep().dependOn(&self.step); - - // Then run our install step which looks at what we made out of our - // generation and moves it to the install prefix. - const install_step = InstallStep.create(self.builder, self); - self.builder.getInstallStep().dependOn(&install_step.step); -} - -/// Install man pages, create using install() on ScdocStep. -const InstallStep = struct { - step: Step, - builder: *Build, - scdoc: *ScdocStep, - - pub fn create(builder: *Build, scdoc: *ScdocStep) *InstallStep { - const self = builder.allocator.create(InstallStep) catch unreachable; - self.* = InstallStep.init(builder, scdoc); - self.step.dependOn(&scdoc.step); - return self; - } - - fn init(builder: *Build, scdoc: *ScdocStep) InstallStep { - return InstallStep{ - .builder = builder, - .step = Step.init(.{ - .id = .custom, - .name = "install man pages", - .owner = builder, - .makeFn = InstallStep.make, - }), - .scdoc = scdoc, - }; - } - - fn make(step: *Step, progress: std.Progress.Node) !void { - const self: *InstallStep = @fieldParentPtr("step", step); - - // Get our absolute output path - var path = self.scdoc.out_path; - if (!fs.path.isAbsolute(path)) { - path = self.builder.pathFromRoot(path); - } - - // Find all our man pages which are in our src path ending with ".scd". 
- var dir = try fs.openDirAbsolute(path, .{ .iterate = true }); - defer dir.close(); - var iter = dir.iterate(); - while (try iter.next()) |*entry| { - // We expect filenames to be "foo.3" and this gets us "3" - const section = entry.name[(entry.name.len - 1)..]; - - const src = try fs.path.join( - self.builder.allocator, - &[_][]const u8{ path, entry.name }, - ); - const output = try std.fmt.allocPrint( - self.builder.allocator, - "share/man/man{s}/{s}", - .{ section, entry.name }, - ); - - const fileStep = self.builder.addInstallFile( - .{ .cwd_relative = src }, - output, - ); - try fileStep.step.make(progress); - } - } -}; diff --git a/src/c_api.zig b/src/c_api.zig index c5172fff..c83c8cc9 100644 --- a/src/c_api.zig +++ b/src/c_api.zig @@ -150,7 +150,7 @@ export fn xev_timer_run( ?*anyopaque, ) callconv(.C) xev.CallbackAction, ) void { - const Callback = @typeInfo(@TypeOf(cb)).Pointer.child; + const Callback = @typeInfo(@TypeOf(cb)).pointer.child; const extern_c = @as(*Completion, @ptrCast(@alignCast(c))); extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb)); @@ -190,7 +190,7 @@ export fn xev_timer_reset( ?*anyopaque, ) callconv(.C) xev.CallbackAction, ) void { - const Callback = @typeInfo(@TypeOf(cb)).Pointer.child; + const Callback = @typeInfo(@TypeOf(cb)).pointer.child; const extern_c = @as(*Completion, @ptrCast(@alignCast(c))); extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb)); @@ -229,7 +229,7 @@ export fn xev_timer_cancel( ?*anyopaque, ) callconv(.C) xev.CallbackAction, ) void { - const Callback = @typeInfo(@TypeOf(cb)).Pointer.child; + const Callback = @typeInfo(@TypeOf(cb)).pointer.child; const extern_c = @as(*Completion, @ptrCast(@alignCast(c_cancel))); extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb)); @@ -284,7 +284,7 @@ export fn xev_async_wait( ?*anyopaque, ) callconv(.C) xev.CallbackAction, ) void { - const Callback = @typeInfo(@TypeOf(cb)).Pointer.child; + const Callback = @typeInfo(@TypeOf(cb)).pointer.child; const extern_c = @as(*Completion, @ptrCast(@alignCast(c))); extern_c.c_callback = @as(*const anyopaque, @ptrCast(cb)); diff --git a/src/darwin.zig b/src/darwin.zig new file mode 100644 index 00000000..6f95c2e0 --- /dev/null +++ b/src/darwin.zig @@ -0,0 +1,376 @@ +//! These are copied from Zig's stdlib as of Zig 0.14 because +//! they are no longer exported. This is probably going to be +//! 
fixed in the future: https://github.com/ziglang/zig/pull/21218 +const std = @import("std"); + +pub const MACH_SEND_MSG = 0x00000001; +pub const MACH_RCV_MSG = 0x00000002; + +pub const MACH_SEND_TIMEOUT = 0x00000010; +pub const MACH_SEND_OVERRIDE = 0x00000020; +pub const MACH_SEND_INTERRUPT = 0x00000040; +pub const MACH_SEND_NOTIFY = 0x00000080; +pub const MACH_SEND_ALWAYS = 0x00010000; +pub const MACH_SEND_FILTER_NONFATAL = 0x00010000; +pub const MACH_SEND_TRAILER = 0x00020000; +pub const MACH_SEND_NOIMPORTANCE = 0x00040000; +pub const MACH_SEND_NODENAP = MACH_SEND_NOIMPORTANCE; +pub const MACH_SEND_IMPORTANCE = 0x00080000; +pub const MACH_SEND_SYNC_OVERRIDE = 0x00100000; +pub const MACH_SEND_PROPAGATE_QOS = 0x00200000; +pub const MACH_SEND_SYNC_USE_THRPRI = MACH_SEND_PROPAGATE_QOS; +pub const MACH_SEND_KERNEL = 0x00400000; +pub const MACH_SEND_SYNC_BOOTSTRAP_CHECKIN = 0x00800000; + +pub const MACH_RCV_TIMEOUT = 0x00000100; +pub const MACH_RCV_NOTIFY = 0x00000000; +pub const MACH_RCV_INTERRUPT = 0x00000400; +pub const MACH_RCV_VOUCHER = 0x00000800; +pub const MACH_RCV_OVERWRITE = 0x00000000; +pub const MACH_RCV_GUARDED_DESC = 0x00001000; +pub const MACH_RCV_SYNC_WAIT = 0x00004000; +pub const MACH_RCV_SYNC_PEEK = 0x00008000; + +pub const MACH_PORT_NULL: mach_port_t = 0; +pub const MACH_MSG_TIMEOUT_NONE: mach_msg_timeout_t = 0; + +pub const natural_t = c_uint; +pub const integer_t = c_int; +pub const mach_port_t = c_uint; +pub const mach_port_name_t = natural_t; +pub const mach_msg_bits_t = c_uint; +pub const mach_msg_id_t = integer_t; +pub const mach_msg_type_number_t = natural_t; +pub const mach_msg_type_name_t = c_uint; +pub const mach_msg_option_t = integer_t; +pub const mach_msg_size_t = natural_t; +pub const mach_msg_timeout_t = natural_t; +pub const mach_msg_return_t = std.c.kern_return_t; + +pub const mach_msg_header_t = extern struct { + msgh_bits: mach_msg_bits_t, + msgh_size: mach_msg_size_t, + msgh_remote_port: mach_port_t, + msgh_local_port: mach_port_t, + msgh_voucher_port: mach_port_name_t, + msgh_id: mach_msg_id_t, +}; + +pub extern "c" fn mach_msg( + msg: ?*mach_msg_header_t, + option: mach_msg_option_t, + send_size: mach_msg_size_t, + rcv_size: mach_msg_size_t, + rcv_name: mach_port_name_t, + timeout: mach_msg_timeout_t, + notify: mach_port_name_t, +) std.c.kern_return_t; + +pub fn getKernError(err: std.c.kern_return_t) KernE { + return @as(KernE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err)))))); +} + +/// Kernel return values +pub const KernE = enum(u32) { + SUCCESS = 0, + + /// Specified address is not currently valid + INVALID_ADDRESS = 1, + + /// Specified memory is valid, but does not permit the + /// required forms of access. + PROTECTION_FAILURE = 2, + + /// The address range specified is already in use, or + /// no address range of the size specified could be + /// found. + NO_SPACE = 3, + + /// The function requested was not applicable to this + /// type of argument, or an argument is invalid + INVALID_ARGUMENT = 4, + + /// The function could not be performed. A catch-all. + FAILURE = 5, + + /// A system resource could not be allocated to fulfill + /// this request. This failure may not be permanent. + RESOURCE_SHORTAGE = 6, + + /// The task in question does not hold receive rights + /// for the port argument. + NOT_RECEIVER = 7, + + /// Bogus access restriction. + NO_ACCESS = 8, + + /// During a page fault, the target address refers to a + /// memory object that has been destroyed. This + /// failure is permanent. 
+ MEMORY_FAILURE = 9, + + /// During a page fault, the memory object indicated + /// that the data could not be returned. This failure + /// may be temporary; future attempts to access this + /// same data may succeed, as defined by the memory + /// object. + MEMORY_ERROR = 10, + + /// The receive right is already a member of the portset. + ALREADY_IN_SET = 11, + + /// The receive right is not a member of a port set. + NOT_IN_SET = 12, + + /// The name already denotes a right in the task. + NAME_EXISTS = 13, + + /// The operation was aborted. Ipc code will + /// catch this and reflect it as a message error. + ABORTED = 14, + + /// The name doesn't denote a right in the task. + INVALID_NAME = 15, + + /// Target task isn't an active task. + INVALID_TASK = 16, + + /// The name denotes a right, but not an appropriate right. + INVALID_RIGHT = 17, + + /// A blatant range error. + INVALID_VALUE = 18, + + /// Operation would overflow limit on user-references. + UREFS_OVERFLOW = 19, + + /// The supplied (port) capability is improper. + INVALID_CAPABILITY = 20, + + /// The task already has send or receive rights + /// for the port under another name. + RIGHT_EXISTS = 21, + + /// Target host isn't actually a host. + INVALID_HOST = 22, + + /// An attempt was made to supply "precious" data + /// for memory that is already present in a + /// memory object. + MEMORY_PRESENT = 23, + + /// A page was requested of a memory manager via + /// memory_object_data_request for an object using + /// a MEMORY_OBJECT_COPY_CALL strategy, with the + /// VM_PROT_WANTS_COPY flag being used to specify + /// that the page desired is for a copy of the + /// object, and the memory manager has detected + /// the page was pushed into a copy of the object + /// while the kernel was walking the shadow chain + /// from the copy to the object. This error code + /// is delivered via memory_object_data_error + /// and is handled by the kernel (it forces the + /// kernel to restart the fault). It will not be + /// seen by users. + MEMORY_DATA_MOVED = 24, + + /// A strategic copy was attempted of an object + /// upon which a quicker copy is now possible. + /// The caller should retry the copy using + /// vm_object_copy_quickly. This error code + /// is seen only by the kernel. + MEMORY_RESTART_COPY = 25, + + /// An argument applied to assert processor set privilege + /// was not a processor set control port. + INVALID_PROCESSOR_SET = 26, + + /// The specified scheduling attributes exceed the thread's + /// limits. + POLICY_LIMIT = 27, + + /// The specified scheduling policy is not currently + /// enabled for the processor set. + INVALID_POLICY = 28, + + /// The external memory manager failed to initialize the + /// memory object. + INVALID_OBJECT = 29, + + /// A thread is attempting to wait for an event for which + /// there is already a waiting thread. + ALREADY_WAITING = 30, + + /// An attempt was made to destroy the default processor + /// set. + DEFAULT_SET = 31, + + /// An attempt was made to fetch an exception port that is + /// protected, or to abort a thread while processing a + /// protected exception. + EXCEPTION_PROTECTED = 32, + + /// A ledger was required but not supplied. + INVALID_LEDGER = 33, + + /// The port was not a memory cache control port. + INVALID_MEMORY_CONTROL = 34, + + /// An argument supplied to assert security privilege + /// was not a host security port. + INVALID_SECURITY = 35, + + /// thread_depress_abort was called on a thread which + /// was not currently depressed. 
+ NOT_DEPRESSED = 36, + + /// Object has been terminated and is no longer available + TERMINATED = 37, + + /// Lock set has been destroyed and is no longer available. + LOCK_SET_DESTROYED = 38, + + /// The thread holding the lock terminated before releasing + /// the lock + LOCK_UNSTABLE = 39, + + /// The lock is already owned by another thread + LOCK_OWNED = 40, + + /// The lock is already owned by the calling thread + LOCK_OWNED_SELF = 41, + + /// Semaphore has been destroyed and is no longer available. + SEMAPHORE_DESTROYED = 42, + + /// Return from RPC indicating the target server was + /// terminated before it successfully replied + RPC_SERVER_TERMINATED = 43, + + /// Terminate an orphaned activation. + RPC_TERMINATE_ORPHAN = 44, + + /// Allow an orphaned activation to continue executing. + RPC_CONTINUE_ORPHAN = 45, + + /// Empty thread activation (No thread linked to it) + NOT_SUPPORTED = 46, + + /// Remote node down or inaccessible. + NODE_DOWN = 47, + + /// A signalled thread was not actually waiting. + NOT_WAITING = 48, + + /// Some thread-oriented operation (semaphore_wait) timed out + OPERATION_TIMED_OUT = 49, + + /// During a page fault, indicates that the page was rejected + /// as a result of a signature check. + CODESIGN_ERROR = 50, + + /// The requested property cannot be changed at this time. + POLICY_STATIC = 51, + + /// The provided buffer is of insufficient size for the requested data. + INSUFFICIENT_BUFFER_SIZE = 52, + + /// Denied by security policy + DENIED = 53, + + /// The KC on which the function is operating is missing + MISSING_KC = 54, + + /// The KC on which the function is operating is invalid + INVALID_KC = 55, + + /// A search or query operation did not return a result + NOT_FOUND = 56, + + _, +}; + +pub fn getMachMsgError(err: mach_msg_return_t) MachMsgE { + return @as(MachMsgE, @enumFromInt(@as(u32, @truncate(@as(usize, @intCast(err)))))); +} + +/// Mach msg return values +pub const MachMsgE = enum(u32) { + SUCCESS = 0x00000000, + + /// Thread is waiting to send. (Internal use only.) + SEND_IN_PROGRESS = 0x10000001, + /// Bogus in-line data. + SEND_INVALID_DATA = 0x10000002, + /// Bogus destination port. + SEND_INVALID_DEST = 0x10000003, + /// Message not sent before timeout expired. + SEND_TIMED_OUT = 0x10000004, + /// Bogus voucher port. + SEND_INVALID_VOUCHER = 0x10000005, + /// Software interrupt. + SEND_INTERRUPTED = 0x10000007, + /// Data doesn't contain a complete message. + SEND_MSG_TOO_SMALL = 0x10000008, + /// Bogus reply port. + SEND_INVALID_REPLY = 0x10000009, + /// Bogus port rights in the message body. + SEND_INVALID_RIGHT = 0x1000000a, + /// Bogus notify port argument. + SEND_INVALID_NOTIFY = 0x1000000b, + /// Invalid out-of-line memory pointer. + SEND_INVALID_MEMORY = 0x1000000c, + /// No message buffer is available. + SEND_NO_BUFFER = 0x1000000d, + /// Send is too large for port + SEND_TOO_LARGE = 0x1000000e, + /// Invalid msg-type specification. + SEND_INVALID_TYPE = 0x1000000f, + /// A field in the header had a bad value. + SEND_INVALID_HEADER = 0x10000010, + /// The trailer to be sent does not match kernel format. 
+ SEND_INVALID_TRAILER = 0x10000011, + /// The sending thread context did not match the context on the dest port + SEND_INVALID_CONTEXT = 0x10000012, + /// compatibility: no longer a returned error + SEND_INVALID_RT_OOL_SIZE = 0x10000015, + /// The destination port doesn't accept ports in body + SEND_NO_GRANT_DEST = 0x10000016, + /// Message send was rejected by message filter + SEND_MSG_FILTERED = 0x10000017, + + /// Thread is waiting for receive. (Internal use only.) + RCV_IN_PROGRESS = 0x10004001, + /// Bogus name for receive port/port-set. + RCV_INVALID_NAME = 0x10004002, + /// Didn't get a message within the timeout value. + RCV_TIMED_OUT = 0x10004003, + /// Message buffer is not large enough for inline data. + RCV_TOO_LARGE = 0x10004004, + /// Software interrupt. + RCV_INTERRUPTED = 0x10004005, + /// compatibility: no longer a returned error + RCV_PORT_CHANGED = 0x10004006, + /// Bogus notify port argument. + RCV_INVALID_NOTIFY = 0x10004007, + /// Bogus message buffer for inline data. + RCV_INVALID_DATA = 0x10004008, + /// Port/set was sent away/died during receive. + RCV_PORT_DIED = 0x10004009, + /// compatibility: no longer a returned error + RCV_IN_SET = 0x1000400a, + /// Error receiving message header. See special bits. + RCV_HEADER_ERROR = 0x1000400b, + /// Error receiving message body. See special bits. + RCV_BODY_ERROR = 0x1000400c, + /// Invalid msg-type specification in scatter list. + RCV_INVALID_TYPE = 0x1000400d, + /// Out-of-line overwrite region is not large enough + RCV_SCATTER_SMALL = 0x1000400e, + /// trailer type or number of trailer elements not supported + RCV_INVALID_TRAILER = 0x1000400f, + /// Waiting for receive with timeout. (Internal use only.) + RCV_IN_PROGRESS_TIMED = 0x10004011, + /// invalid reply port used in a STRICT_REPLY message + RCV_INVALID_REPLY = 0x10004012, +}; diff --git a/src/dynamic.zig b/src/dynamic.zig index 6d9a0f27..7e5c497c 100644 --- a/src/dynamic.zig +++ b/src/dynamic.zig @@ -433,8 +433,8 @@ fn EnumSubset(comptime T: type, comptime values: []const T) type { .value = @intFromEnum(value), }; - return @Type(.{ .Enum = .{ - .tag_type = @typeInfo(T).Enum.tag_type, + return @Type(.{ .@"enum" = .{ + .tag_type = @typeInfo(T).@"enum".tag_type, .fields = &fields, .decls = &.{}, .is_exhaustive = true, @@ -491,7 +491,7 @@ fn Union( } return @Type(.{ - .Union = .{ + .@"union" = .{ .layout = .auto, .tag_type = if (tagged) EnumSubset( AllBackend, diff --git a/src/linux/timerfd.zig b/src/linux/timerfd.zig index 5e5bd4a4..1bb73262 100644 --- a/src/linux/timerfd.zig +++ b/src/linux/timerfd.zig @@ -14,8 +14,11 @@ pub const Timerfd = struct { fd: i32, /// timerfd_create - pub fn init(clock: Clock, flags: linux.TFD) !Timerfd { - const res = linux.timerfd_create(@intFromEnum(clock), flags); + pub fn init( + clock: linux.timerfd_clockid_t, + flags: linux.TFD, + ) !Timerfd { + const res = linux.timerfd_create(clock, flags); return switch (posix.errno(res)) { .SUCCESS => .{ .fd = @as(i32, @intCast(res)) }, else => error.UnknownError, @@ -56,18 +59,6 @@ pub const Timerfd = struct { }; } - /// The clocks available for a Timerfd. This is a non-exhaustive enum - /// so that unsupported values can be attempted to be passed into the - /// system calls. 
- pub const Clock = enum(i32) { - realtime = 0, - monotonic = 1, - boottime = 7, - realtime_alarm = 8, - boottime_alarm = 9, - _, - }; - /// itimerspec pub const Spec = extern struct { interval: TimeSpec = .{}, @@ -84,7 +75,7 @@ pub const Timerfd = struct { test Timerfd { const testing = std.testing; - var t = try Timerfd.init(.monotonic, .{}); + var t = try Timerfd.init(.MONOTONIC, .{}); defer t.deinit(); // Set diff --git a/src/queue.zig b/src/queue.zig index aa3dfe58..7cafda37 100644 --- a/src/queue.zig +++ b/src/queue.zig @@ -73,7 +73,7 @@ test Intrusive { try testing.expect(q.empty()); // Elems - var elems: [10]Elem = .{.{}} ** 10; + var elems: [10]Elem = .{Elem{}} ** 10; // One try testing.expect(q.pop() == null); diff --git a/src/queue_mpsc.zig b/src/queue_mpsc.zig index 68f4e8f3..3a9f4cbb 100644 --- a/src/queue_mpsc.zig +++ b/src/queue_mpsc.zig @@ -90,7 +90,7 @@ test Intrusive { q.init(); // Elems - var elems: [10]Elem = .{.{}} ** 10; + var elems: [10]Elem = .{Elem{}} ** 10; // One try testing.expect(q.pop() == null); diff --git a/src/watcher/async.zig b/src/watcher/async.zig index c344d560..f911a495 100644 --- a/src/watcher/async.zig +++ b/src/watcher/async.zig @@ -4,6 +4,7 @@ const builtin = @import("builtin"); const assert = std.debug.assert; const posix = std.posix; const common = @import("common.zig"); +const darwin = @import("../darwin.zig"); pub fn Async(comptime xev: type) type { if (xev.dynamic) return AsyncDynamic(xev); @@ -147,7 +148,7 @@ fn AsyncMachPort(comptime xev: type) type { name: posix.system.mach_port_name_t, flavor: mach_port_flavor_t, info: *anyopaque, - count: posix.system.mach_msg_type_number_t, + count: darwin.mach_msg_type_number_t, ) posix.system.kern_return_t; extern "c" fn mach_port_destroy( task: posix.system.ipc_space_t, @@ -164,7 +165,7 @@ fn AsyncMachPort(comptime xev: type) type { // Allocate the port var mach_port: posix.system.mach_port_name_t = undefined; - switch (posix.system.getKernError(posix.system.mach_port_allocate( + switch (darwin.getKernError(posix.system.mach_port_allocate( mach_self, @intFromEnum(posix.system.MACH_PORT_RIGHT.RECEIVE), &mach_port, @@ -175,7 +176,7 @@ fn AsyncMachPort(comptime xev: type) type { errdefer _ = mach_port_destroy(mach_self, mach_port); // Insert a send right into the port since we also use this to send - switch (posix.system.getKernError(posix.system.mach_port_insert_right( + switch (darwin.getKernError(posix.system.mach_port_insert_right( mach_self, mach_port, mach_port, @@ -188,7 +189,7 @@ fn AsyncMachPort(comptime xev: type) type { // Modify the port queue size to be 1 because we are only // using it for notifications and not for any other purpose. var limits: mach_port_limits = .{ .mpl_qlimit = 1 }; - switch (posix.system.getKernError(mach_port_set_attributes( + switch (darwin.getKernError(mach_port_set_attributes( mach_self, mach_port, MACH_PORT_LIMITS_INFO, @@ -272,18 +273,18 @@ fn AsyncMachPort(comptime xev: type) type { /// Drain the given mach port. All message bodies are discarded. 
fn drain(port: posix.system.mach_port_name_t) void { var message: struct { - header: posix.system.mach_msg_header_t, + header: darwin.mach_msg_header_t, } = undefined; while (true) { - switch (posix.system.getMachMsgError(posix.system.mach_msg( + switch (darwin.getMachMsgError(darwin.mach_msg( &message.header, - posix.system.MACH_RCV_MSG | posix.system.MACH_RCV_TIMEOUT, + darwin.MACH_RCV_MSG | darwin.MACH_RCV_TIMEOUT, 0, @sizeOf(@TypeOf(message)), port, - posix.system.MACH_MSG_TIMEOUT_NONE, - posix.system.MACH_PORT_NULL, + darwin.MACH_MSG_TIMEOUT_NONE, + darwin.MACH_PORT_NULL, ))) { // This means a read would've blocked, so we drained. .RCV_TIMED_OUT => return, @@ -309,26 +310,26 @@ fn AsyncMachPort(comptime xev: type) type { /// ticking or not). pub fn notify(self: Self) !void { // This constructs an empty mach message. It has no data. - var msg: posix.system.mach_msg_header_t = .{ + var msg: darwin.mach_msg_header_t = .{ // We use COPY_SEND which will not increment any send ref // counts because it'll reuse the existing send right. .msgh_bits = @intFromEnum(posix.system.MACH_MSG_TYPE.COPY_SEND), - .msgh_size = @sizeOf(posix.system.mach_msg_header_t), + .msgh_size = @sizeOf(darwin.mach_msg_header_t), .msgh_remote_port = self.port, - .msgh_local_port = posix.system.MACH_PORT_NULL, + .msgh_local_port = darwin.MACH_PORT_NULL, .msgh_voucher_port = undefined, .msgh_id = undefined, }; - return switch (posix.system.getMachMsgError( - posix.system.mach_msg( + return switch (darwin.getMachMsgError( + darwin.mach_msg( &msg, - posix.system.MACH_SEND_MSG | posix.system.MACH_SEND_TIMEOUT, + darwin.MACH_SEND_MSG | darwin.MACH_SEND_TIMEOUT, msg.msgh_size, 0, - posix.system.MACH_PORT_NULL, + darwin.MACH_PORT_NULL, 0, // Fail instantly if the port is full - posix.system.MACH_PORT_NULL, + darwin.MACH_PORT_NULL, ), )) { .SUCCESS => {}, diff --git a/src/watcher/process.zig b/src/watcher/process.zig index df1e2bd4..eec59254 100644 --- a/src/watcher/process.zig +++ b/src/watcher/process.zig @@ -183,7 +183,7 @@ fn ProcessKqueue(comptime xev: type) type { .op = .{ .proc = .{ .pid = self.pid, - .flags = posix.system.NOTE_EXIT | posix.system.NOTE_EXITSTATUS, + .flags = std.c.NOTE.EXIT | std.c.NOTE.EXITSTATUS, }, }, @@ -251,7 +251,7 @@ fn ProcessIocp(comptime xev: type) type { if (dup_result == 0) return windows.unexpectedError(windows.kernel32.GetLastError()); const job = try windows.exp.CreateJobObject(null, null); - errdefer _ = windows.kernel32.CloseHandle(job); + errdefer _ = windows.CloseHandle(job); try windows.exp.AssignProcessToJobObject(job, dup_process); @@ -262,8 +262,8 @@ fn ProcessIocp(comptime xev: type) type { } pub fn deinit(self: *Self) void { - _ = windows.kernel32.CloseHandle(self.job); - _ = windows.kernel32.CloseHandle(self.process); + _ = windows.CloseHandle(self.job); + _ = windows.CloseHandle(self.process); } pub fn wait( diff --git a/src/watcher/stream.zig b/src/watcher/stream.zig index 2c1ccbca..a947244d 100644 --- a/src/watcher/stream.zig +++ b/src/watcher/stream.zig @@ -129,7 +129,7 @@ fn Pollable(comptime xev: type, comptime T: type, comptime options: Options) typ comptime { for (xev.candidates) |be| { const CandidateT = @field(be.Api(), options.type.?); - const info = @typeInfo(CandidateT).Struct; + const info = @typeInfo(CandidateT).@"struct"; for (info.decls) |decl| { if (std.mem.eql(u8, decl.name, "poll")) break; } else return struct {}; diff --git a/src/windows.zig b/src/windows.zig index 7716e9e5..c2b2fa02 100644 --- a/src/windows.zig +++ b/src/windows.zig @@ -1,10 +1,13 @@ +//! 
Namespace containing missing utils from std + const std = @import("std"); const windows = std.os.windows; const posix = std.posix; pub usingnamespace std.os.windows; -/// Namespace containing missing utils from std +pub extern "kernel32" fn DeleteFileW(lpFileName: [*:0]const u16) callconv(windows.WINAPI) windows.BOOL; + pub const exp = struct { pub const STATUS_PENDING = 0x00000103; pub const STILL_ACTIVE = STATUS_PENDING; @@ -165,7 +168,7 @@ pub const exp = struct { pub const DeleteFileError = error{} || posix.UnexpectedError; pub fn DeleteFile(name: [*:0]const u16) DeleteFileError!void { - const result: windows.BOOL = windows.kernel32.DeleteFileW(name); + const result: windows.BOOL = DeleteFileW(name); if (result == windows.FALSE) { const err = windows.kernel32.GetLastError(); return switch (err) {