termio, flatpak: implement process watcher with xev

This allows `termio.Exec` to track processes spawned via
`FlatpakHostCommand`, finally allowing Ghostty to function as a
Flatpak.

Alongside this are a few bug fixes:

* Don't add ghostty to PATH when running in flatpak mode since it's
  unreachable.
* Correctly handle the exit status returned by Flatpak. Previously it
  was not processed and contained extra status bits (see the sketch
  after this list).
* Use correct type for PID returned by Flatpak.
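
For context on the status-bits bullet, here is a minimal sketch of the decoding involved, assuming (as the diff below does) that Flatpak's HostCommandExited signal reports a raw wait(2)-style status word; the raw value here is hypothetical:

const std = @import("std");
const posix = std.posix;

pub fn main() void {
    // A wait(2)-style status word packs the exit code into bits 8..15;
    // the low byte carries signal and core-dump information. A process
    // that exited with code 3 is therefore reported as 0x0300, not 3.
    const raw: u32 = 0x0300; // hypothetical raw status from Flatpak
    const code = posix.W.EXITSTATUS(raw);
    std.debug.print("exit code: {}\n", .{code}); // prints 3
}

This is why the diff below extracts the status with posix.W.EXITSTATUS rather than truncating the raw value.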
Author: Leorize
Date: 2025-03-10 00:37:03 -05:00
Committed by: Mitchell Hashimoto
Parent: f8f9f7041a
Commit: 009b53c45e
3 changed files with 233 additions and 37 deletions


@@ -11,6 +11,7 @@ jobs:
runs-on: namespace-profile-ghostty-sm
needs:
- build-bench
- build-flatpak
- build-linux
- build-linux-libghostty
- build-nix
@@ -79,6 +80,40 @@ jobs:
- name: Build Benchmarks
run: nix develop -c zig build -Dapp-runtime=glfw -Demit-bench
build-flatpak:
strategy:
fail-fast: false
runs-on: namespace-profile-ghostty-sm
needs: test
env:
ZIG_LOCAL_CACHE_DIR: /zig/local-cache
ZIG_GLOBAL_CACHE_DIR: /zig/global-cache
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Cache
uses: namespacelabs/nscloud-cache-action@v1.2.0
with:
path: |
/nix
/zig
# Install Nix and use that to run our tests so our environment matches exactly.
- uses: cachix/install-nix-action@v30
with:
nix_path: nixpkgs=channel:nixos-unstable
- uses: cachix/cachix-action@v15
with:
name: ghostty
authToken: "${{ secrets.CACHIX_AUTH_TOKEN }}"
- name: Build with Flatpak
run: |
nix develop -c \
zig build \
-Dflatpak=true
build-linux:
strategy:
fail-fast: false


@@ -3,6 +3,7 @@ const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const builtin = @import("builtin");
const posix = std.posix;
const xev = @import("../global.zig").xev;
const log = std.log.scoped(.flatpak);
@@ -71,18 +72,28 @@ pub const FlatpakHostCommand = struct {
/// Process started with the given pid on the host.
started: struct {
pid: c_int,
pid: u32,
loop_xev: ?*xev.Loop,
completion: ?*Completion,
subscription: c.guint,
loop: *c.GMainLoop,
},
/// Process exited
exited: struct {
pid: c_int,
pid: u32,
status: u8,
},
};
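/// Tracks one asynchronous wait registered via waitXev below. The
/// embedded xev.Completion lets timer callbacks recover this struct
/// with @fieldParentPtr, and `result` caches the exit status (or
/// error) until the event loop delivers it to `callback`.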
pub const Completion = struct {
callback: *const fn (ud: ?*anyopaque, l: *xev.Loop, c: *Completion, r: WaitError!u8) void = noopCallback,
c_xev: xev.Completion = .{},
userdata: ?*anyopaque = null,
timer: ?xev.Timer = null,
result: ?WaitError!u8 = null,
};
/// Errors that are possible from us.
pub const Error = error{
FlatpakMustBeStarted,
@@ -91,12 +102,14 @@ pub const FlatpakHostCommand = struct {
FlatpakRPCFail,
};
pub const WaitError = xev.Timer.RunError || Error;
/// Spawn the command. This will start the host command. On return,
/// the pid will be available. This must only be called with the
/// state in "init".
///
/// Precondition: The self pointer MUST be stable.
pub fn spawn(self: *FlatpakHostCommand, alloc: Allocator) !c_int {
pub fn spawn(self: *FlatpakHostCommand, alloc: Allocator) !u32 {
const thread = try std.Thread.spawn(.{}, threadMain, .{ self, alloc });
thread.setName("flatpak-host-command") catch {};
@@ -135,6 +148,77 @@ pub const FlatpakHostCommand = struct {
}
}
/// Wait for the process to end asynchronously via libxev. This
/// can only be called ONCE.
pub fn waitXev(
self: *FlatpakHostCommand,
loop: *xev.Loop,
completion: *Completion,
comptime Userdata: type,
userdata: ?*Userdata,
comptime cb: *const fn (
ud: ?*Userdata,
l: *xev.Loop,
c: *Completion,
r: WaitError!u8,
) void,
) void {
self.state_mutex.lock();
defer self.state_mutex.unlock();
completion.* = .{
.callback = (struct {
fn callback(
ud_: ?*anyopaque,
l_inner: *xev.Loop,
c_inner: *Completion,
r: WaitError!u8,
) void {
const ud = @as(?*Userdata, if (Userdata == void) null else @ptrCast(@alignCast(ud_)));
@call(.always_inline, cb, .{ ud, l_inner, c_inner, r });
}
}).callback,
.userdata = userdata,
.timer = xev.Timer.init() catch unreachable, // not great, but xev timer can't fail atm
};
switch (self.state) {
.init => completion.result = Error.FlatpakMustBeStarted,
.err => completion.result = Error.FlatpakSpawnFail,
.started => |*v| {
v.loop_xev = loop;
v.completion = completion;
return;
},
.exited => |v| {
completion.result = v.status;
},
}
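// At this point the result is already known (never started, spawn
// failed, or already exited), so arm a zero-duration timer to deliver
// it from the event loop rather than invoking the callback here.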
completion.timer.?.run(
loop,
&completion.c_xev,
0,
anyopaque,
completion.userdata,
(struct {
fn callback(
ud: ?*anyopaque,
l_inner: *xev.Loop,
c_inner: *xev.Completion,
r: xev.Timer.RunError!void,
) xev.CallbackAction {
const c_outer: *Completion = @fieldParentPtr("c_xev", c_inner);
defer if (c_outer.timer) |*t| t.deinit();
const result = if (r) |_| c_outer.result.? else |err| err;
c_outer.callback(ud, l_inner, c_outer, result);
return .disarm;
}
}).callback,
);
}
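
To make the calling convention concrete, a minimal caller-side sketch; the wrapper function and import paths are hypothetical, while waitXev and its types come from the code above:

const std = @import("std");
const xev = @import("xev"); // hypothetical import path
const FlatpakHostCommand = @import("flatpak.zig").FlatpakHostCommand; // hypothetical

// Watch a spawned host command; `completion` must outlive the wait.
fn watchHostCommand(
    cmd: *FlatpakHostCommand,
    loop: *xev.Loop,
    completion: *FlatpakHostCommand.Completion,
) void {
    cmd.waitXev(loop, completion, void, null, (struct {
        fn onExit(
            _: ?*void,
            _: *xev.Loop,
            _: *FlatpakHostCommand.Completion,
            r: FlatpakHostCommand.WaitError!u8,
        ) void {
            // r carries the exit status, or a spawn/timer error.
            const status = r catch 255;
            std.log.info("host command exited status={}", .{status});
        }
    }).onExit);
}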
/// Send a signal to the started command. This does nothing if the
/// command is not in the started state.
pub fn signal(self: *FlatpakHostCommand, sig: u8, pg: bool) !void {
@@ -326,7 +410,7 @@ pub const FlatpakHostCommand = struct {
};
defer c.g_variant_unref(reply);
var pid: c_int = 0;
var pid: u32 = 0;
c.g_variant_get(reply, "(u)", &pid);
log.debug("HostCommand started pid={} subscription={}", .{
pid,
@@ -338,6 +422,8 @@ pub const FlatpakHostCommand = struct {
.pid = pid,
.subscription = subscription_id,
.loop = loop,
.completion = null,
.loop_xev = null,
},
});
}
@@ -366,18 +452,44 @@ pub const FlatpakHostCommand = struct {
break :state self.state.started;
};
var pid: c_int = 0;
var exit_status: c_int = 0;
c.g_variant_get(params.?, "(uu)", &pid, &exit_status);
var pid: u32 = 0;
var exit_status_raw: u32 = 0;
c.g_variant_get(params.?, "(uu)", &pid, &exit_status_raw);
if (state.pid != pid) return;
const exit_status = posix.W.EXITSTATUS(exit_status_raw);
// Update our state
self.updateState(.{
.exited = .{
.pid = pid,
.status = std.math.cast(u8, exit_status) orelse 255,
.status = exit_status,
},
});
if (state.completion) |completion| {
completion.result = exit_status;
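// Same zero-duration-timer trick as in waitXev: this code runs on
// the GLib D-Bus signal thread, so bounce the user callback through
// the xev loop instead of calling it directly.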
completion.timer.?.run(
state.loop_xev.?,
&completion.c_xev,
0,
anyopaque,
completion.userdata,
(struct {
fn callback(
ud_inner: ?*anyopaque,
l_inner: *xev.Loop,
c_inner: *xev.Completion,
r: xev.Timer.RunError!void,
) xev.CallbackAction {
const c_outer: *Completion = @fieldParentPtr("c_xev", c_inner);
defer if (c_outer.timer) |*t| t.deinit();
const result = if (r) |_| c_outer.result.? else |err| err;
c_outer.callback(ud_inner, l_inner, c_outer, result);
return .disarm;
}
}).callback,
);
}
log.debug("HostCommand exited pid={} status={}", .{ pid, exit_status });
// We're done now, so we can unsubscribe
@@ -386,4 +498,6 @@ pub const FlatpakHostCommand = struct {
// We are also done with our loop so we can exit.
c.g_main_loop_quit(state.loop);
}
fn noopCallback(_: ?*anyopaque, _: *xev.Loop, _: *Completion, _: WaitError!u8) void {}
};


@@ -31,6 +31,12 @@ const log = std.log.scoped(.io_exec);
/// The termios poll rate in milliseconds.
const TERMIOS_POLL_MS = 200;
/// If we build with flatpak support then we have to keep track of
/// a potential execution on the host.
const FlatpakHostCommand = if (!build_config.flatpak) struct {
pub const Completion = struct {};
} else internal_os.FlatpakHostCommand;
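
The conditional type above is a common Zig pattern: when a feature is compiled out, substitute a stand-in whose members are zero-sized, so fields such as flatpak_wait_c (declared further down) still compile and cost nothing. A self-contained sketch of the pattern; the names enable_flatpak, HostCommand, and RealHostCommand are illustrative, not Ghostty's:

const std = @import("std");

const enable_flatpak = false; // stands in for build_config.flatpak

// When the feature is disabled, expose an empty namespace whose
// Completion is zero-sized, so `wait_c: HostCommand.Completion = .{}`
// still compiles and occupies no memory in the containing struct.
const HostCommand = if (enable_flatpak) RealHostCommand else struct {
    pub const Completion = struct {};
};

const RealHostCommand = struct {
    pub const Completion = struct {
        status: ?u8 = null,
    };
};

test "stand-in completion is zero-sized" {
    const c: HostCommand.Completion = .{};
    _ = c;
    try std.testing.expectEqual(@as(usize, 0), @sizeOf(HostCommand.Completion));
}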
/// The subprocess state for our exec backend.
subprocess: Subprocess,
@@ -95,11 +101,26 @@ pub fn threadEnter(
};
errdefer self.subprocess.stop();
// Watcher to detect subprocess exit
var process: ?xev.Process = process: {
// If we're executing via Flatpak then we can't do
traditional process watching (it's implemented
// as a special case in os/flatpak.zig) since the
// command is on the host.
if (comptime build_config.flatpak) {
if (self.subprocess.flatpak_command != null) {
break :process null;
}
}
// Get the pid from the subprocess
const pid = pid: {
const command = self.subprocess.command orelse return error.ProcessNotStarted;
break :pid command.pid orelse return error.ProcessNoPid;
const command = self.subprocess.command orelse
return error.ProcessNotStarted;
const pid = command.pid orelse
return error.ProcessNoPid;
break :process try xev.Process.init(pid);
};
errdefer if (process) |*p| p.deinit();
// Track our process start time for abnormal exits
const process_start = try std.time.Instant.now();
@@ -114,10 +135,6 @@ pub fn threadEnter(
var stream = xev.Stream.initFd(pty_fds.write);
errdefer stream.deinit();
// Watcher to detect subprocess exit
var process = try xev.Process.init(pid);
errdefer process.deinit();
// Start our timer to read termios state changes. This is used
// to detect things such as when password input is being done
// so we can render the terminal in a different way.
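
As an aside, a rough sketch of the kind of check such a poll can drive; this is my illustration, not the actual detection code, and assumes a Linux-flavored std.posix.termios. Password prompts normally disable terminal echo, which shows up in the local flags:

const std = @import("std");
const posix = std.posix;

/// Returns true if the terminal at `fd` currently has echo disabled,
/// which is a strong hint that a password prompt is active.
fn passwordInputLikely(fd: posix.fd_t) !bool {
    const t = try posix.tcgetattr(fd);
    return !t.lflag.ECHO;
}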
@@ -145,14 +162,26 @@ pub fn threadEnter(
.termios_timer = termios_timer,
} };
// Start our process watcher
process.wait(
// Start our process watcher. If we have an xev.Process, use it.
if (process) |*p| p.wait(
td.loop,
&td.backend.exec.process_wait_c,
termio.Termio.ThreadData,
td,
processExit,
) else if (comptime build_config.flatpak) {
// If we're in flatpak and we have a flatpak command
// then we can run the special flatpak logic for watching.
if (self.subprocess.flatpak_command) |*c| {
c.waitXev(
td.loop,
&td.backend.exec.flatpak_wait_c,
termio.Termio.ThreadData,
td,
flatpakExit,
);
}
}
// Start our termios timer. We don't support this on Windows.
// Fundamentally, we could support this on Windows so we're just
@@ -339,15 +368,7 @@ fn execFailedInChild() !void {
_ = try reader.read(&buf);
}
fn processExit(
td_: ?*termio.Termio.ThreadData,
_: *xev.Loop,
_: *xev.Completion,
r: xev.Process.WaitError!u32,
) xev.CallbackAction {
const exit_code = r catch unreachable;
const td = td_.?;
fn processExitCommon(td: *termio.Termio.ThreadData, exit_code: u32) void {
assert(td.backend == .exec);
const execdata = &td.backend.exec;
execdata.exited = true;
@@ -393,7 +414,7 @@ fn processExit(
}, null);
td.mailbox.notify();
return .disarm;
return;
}
// If we're purposely waiting then we just return since the process
@@ -413,17 +434,36 @@ fn processExit(
t.modes.set(.cursor_visible, false);
}
return .disarm;
return;
}
// Notify our surface we want to close
_ = td.surface_mailbox.push(.{
.child_exited = {},
}, .{ .forever = {} });
}
fn processExit(
td_: ?*termio.Termio.ThreadData,
_: *xev.Loop,
_: *xev.Completion,
r: xev.Process.WaitError!u32,
) xev.CallbackAction {
const exit_code = r catch unreachable;
processExitCommon(td_.?, exit_code);
return .disarm;
}
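// Flatpak's waiter has a different callback shape (u8 status, no
// xev.CallbackAction return value), so it gets its own thin wrapper
// over processExitCommon instead of reusing processExit.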
fn flatpakExit(
td_: ?*termio.Termio.ThreadData,
_: *xev.Loop,
_: *FlatpakHostCommand.Completion,
r: FlatpakHostCommand.WaitError!u8,
) void {
const exit_code = r catch unreachable;
processExitCommon(td_.?, exit_code);
}
fn termiosTimer(
td_: ?*termio.Termio.ThreadData,
_: *xev.Loop,
@@ -630,7 +670,7 @@ pub const ThreadData = struct {
write_stream: xev.Stream,
/// The process watcher
process: xev.Process,
process: ?xev.Process,
/// This is the pool of available (unused) write requests. If you grab
/// one from the pool, you must put it back when you're done!
@@ -646,6 +686,10 @@ pub const ThreadData = struct {
/// subsequently to wait for the data_stream to close.
process_wait_c: xev.Completion = .{},
/// The completion specific to Flatpak process waiting. If
/// we aren't compiling with Flatpak support, this is zero-sized.
flatpak_wait_c: FlatpakHostCommand.Completion = .{},
/// Reader thread state
read_thread: std.Thread,
read_thread_pipe: posix.fd_t,
@@ -670,7 +714,7 @@ pub const ThreadData = struct {
self.write_buf_pool.deinit(alloc);
// Stop our process watcher
self.process.deinit();
if (self.process) |*p| p.deinit();
// Stop our write stream
self.write_stream.deinit();
@@ -693,10 +737,6 @@ pub const Config = struct {
};
const Subprocess = struct {
/// If we build with flatpak support then we have to keep track of
/// a potential execution on the host.
const FlatpakHostCommand = if (build_config.flatpak) internal_os.FlatpakHostCommand else void;
const c = @cImport({
@cInclude("errno.h");
@cInclude("signal.h");
@@ -763,6 +803,13 @@ const Subprocess = struct {
// Add our binary to the path if we can find it.
ghostty_path: {
// Skip this for flatpak since the host cannot reach it
if ((comptime build_config.flatpak) and
internal_os.isFlatpak())
{
break :ghostty_path;
}
var exe_buf: [std.fs.max_path_bytes]u8 = undefined;
const exe_bin_path = std.fs.selfExePath(&exe_buf) catch |err| {
log.warn("failed to get ghostty exe path err={}", .{err});
@@ -1229,7 +1276,7 @@ const Subprocess = struct {
}
// Kill our Flatpak command
if (FlatpakHostCommand != void) {
if (comptime build_config.flatpak) {
if (self.flatpak_command) |*cmd| {
killCommandFlatpak(cmd) catch |err|
log.err("error sending SIGHUP to command, may hang: {}", .{err});