Merge pull request #247 from mitchellh/gtk-single-instance

GTK single instance, fix UB in termio read thread termination
Mitchell Hashimoto authored 2023-08-07 21:43:08 -07:00, committed by GitHub
4 changed files with 120 additions and 42 deletions

@@ -65,6 +65,14 @@ pub const App = struct {
         // up when we take a pass at cleaning up the dev mode.
         if (DevMode.enabled) DevMode.instance.config = config;
 
+        // Queue a single new window that starts on launch
+        _ = core_app.mailbox.push(.{
+            .new_window = .{},
+        }, .{ .forever = {} });
+
+        // We want the event loop to wake up instantly so we can process our tick.
+        glfw.postEmptyEvent();
+
         return .{
             .app = core_app,
             .config = config,
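
The ordering here matters: the message is pushed before the event loop is woken, so the loop can never wake to an empty mailbox. A std-only analogue of the pattern (a hypothetical sketch; `pending` and `wake` stand in for the mailbox and glfw.postEmptyEvent, assuming the Zig 0.11-era standard library this code targets):

    const std = @import("std");

    var pending = std.atomic.Atomic(usize).init(0); // stands in for the mailbox
    var wake = std.Thread.ResetEvent{}; // stands in for glfw.postEmptyEvent

    fn producer() void {
        _ = pending.fetchAdd(1, .SeqCst); // publish the message first...
        wake.set(); // ...then wake the event loop
    }

    pub fn main() !void {
        const t = try std.Thread.spawn(.{}, producer, .{});
        defer t.join();

        wake.wait(); // blocks like glfwWaitEvents until the producer signals
        while (pending.load(.SeqCst) > 0) {
            _ = pending.fetchSub(1, .SeqCst);
            std.log.info("processing queued message (e.g. new window)", .{});
        }
    }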

@@ -31,11 +31,7 @@ const log = std.log.scoped(.gtk);
 /// application frameworks also have this restriction so it simplifies
 /// the assumptions.
 pub const App = struct {
-    pub const Options = struct {
-        /// GTK app ID. This is currently unused but should remain populated
-        /// for the future.
-        id: [:0]const u8 = "com.mitchellh.ghostty",
-    };
+    pub const Options = struct {};
 
     core_app: *CoreApp,
     config: Config,
@@ -62,9 +58,14 @@ pub const App = struct {
         var config = try Config.load(core_app.alloc);
         errdefer config.deinit();
 
+        // Our uniqueness ID is based on whether we're in a debug mode or not.
+        // In debug mode we want to be separate so we can develop Ghostty in
+        // Ghostty.
+        const uniqueness_id = "com.mitchellh.ghostty" ++ if (builtin.mode == .Debug) "-debug" else "";
+
         // Create our GTK Application which encapsulates our process.
         const app = @as(?*c.GtkApplication, @ptrCast(c.gtk_application_new(
-            null,
+            uniqueness_id,
 
             // GTK >= 2.74
             if (@hasDecl(c, "G_APPLICATION_DEFAULT_FLAGS"))
@@ -77,7 +78,7 @@ pub const App = struct {
             app,
             "activate",
             c.G_CALLBACK(&activate),
-            null,
+            core_app,
             null,
             G_CONNECT_DEFAULT,
         );
@@ -121,6 +122,11 @@ pub const App = struct {
             .ctx = ctx,
             .cursor_default = cursor_default,
             .cursor_ibeam = cursor_ibeam,
+
+            // If we are NOT the primary instance, then we never want to run.
+            // This means that another instance of the GTK app is running and
+            // our "activate" call above will open a window.
+            .running = c.g_application_get_is_remote(gapp) == 0,
         };
     }
@@ -261,15 +267,18 @@ pub const App = struct {
         }.callback, null);
     }
 
+    /// This is called by the "activate" signal. This is sent on program
+    /// startup and also when a secondary instance launches and requests
+    /// a new window.
    fn activate(app: *c.GtkApplication, ud: ?*anyopaque) callconv(.C) void {
        _ = app;
-       _ = ud;
 
-       // We purposely don't do anything on activation right now. We have
-       // this callback because if we don't then GTK emits a warning to
-       // stderr that we don't want. We emit a debug log just so that we know
-       // we reached this point.
-       log.debug("application activated", .{});
+       const core_app: *CoreApp = @ptrCast(@alignCast(ud orelse return));
+
+       // Queue a new window
+       _ = core_app.mailbox.push(.{
+           .new_window = .{},
+       }, .{ .forever = {} });
    }
 };
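
The single-instance behavior rests on GApplication uniqueness: every process constructs a GtkApplication with the same ID, but only the first to register on the session bus becomes the primary instance. In the others, g_application_get_is_remote() returns TRUE and their "activate" is forwarded to the primary process, which is why the hunk above queues the window from the activate callback. A minimal sketch of the check (hypothetical helper; assumes the same GTK C bindings this file imports):

    const c = @cImport(@cInclude("gtk/gtk.h"));

    /// Returns true if this process owns the application ID, i.e. it is
    /// the instance that should actually run an event loop.
    fn isPrimaryInstance(app: *c.GtkApplication) bool {
        const gapp = @as(*c.GApplication, @ptrCast(app));
        // Only meaningful after the application has registered.
        return c.g_application_get_is_remote(gapp) == 0;
    }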

@@ -34,9 +34,6 @@ pub fn main() !void {
     var app_runtime = try apprt.App.init(app, .{});
     defer app_runtime.terminate();
 
-    // Create an initial window
-    try app_runtime.newWindow(null);
-
     // Run the GUI event loop
     try app_runtime.run();
 }

@@ -139,6 +139,12 @@ pub fn threadEnter(self: *Exec, thread: *termio.Thread) !ThreadData {
         break :pid command.pid orelse return error.ProcessNoPid;
     };
 
+    // Create our pipe that we'll use to kill our read thread.
+    // pipe[0] is the read end, pipe[1] is the write end.
+    const pipe = try std.os.pipe();
+    errdefer std.os.close(pipe[0]);
+    errdefer std.os.close(pipe[1]);
+
     // Setup our data that is used for callbacks
     var ev_data_ptr = try alloc.create(EventData);
     errdefer alloc.destroy(ev_data_ptr);
@@ -194,7 +200,7 @@ pub fn threadEnter(self: *Exec, thread: *termio.Thread) !ThreadData {
     const read_thread = try std.Thread.spawn(
         .{},
         ReadThread.threadMain,
-        .{ master_fd, ev_data_ptr },
+        .{ master_fd, ev_data_ptr, pipe[0] },
     );
     read_thread.setName("io-reader") catch {};
@@ -203,6 +209,7 @@ pub fn threadEnter(self: *Exec, thread: *termio.Thread) !ThreadData {
         .alloc = alloc,
         .ev = ev_data_ptr,
         .read_thread = read_thread,
+        .read_thread_pipe = pipe[1],
     };
 }
@@ -214,7 +221,11 @@ pub fn threadExit(self: *Exec, data: ThreadData) void {
     if (data.ev.process_exited) self.subprocess.externalExit();
     self.subprocess.stop();
 
-    // Wait for our reader thread to end
+    // Quit our read thread after exiting the subprocess so that
+    // we don't get stuck waiting for data to stop flowing if it is
+    // a particularly noisy process.
+    _ = std.os.write(data.read_thread_pipe, "x") catch |err|
+        log.warn("error writing to read thread quit pipe err={}", .{err});
+
     data.read_thread.join();
 }
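
This write/join pair is one half of a self-pipe handshake; the other half is the poll loop added to ReadThread below. A runnable sketch of the handshake in isolation (assuming the same Zig 0.11-era std.os API the diff itself uses):

    const std = @import("std");

    pub fn main() !void {
        // pipe[0] is the read end, pipe[1] is the write end.
        const pipe = try std.os.pipe();
        defer std.os.close(pipe[1]); // the reader closes pipe[0] itself

        const reader = try std.Thread.spawn(.{}, struct {
            fn run(quit: std.os.fd_t) void {
                defer std.os.close(quit);
                var fds = [_]std.os.pollfd{
                    .{ .fd = quit, .events = std.os.POLL.IN, .revents = undefined },
                };
                // Block until the quit byte arrives (-1 means no timeout).
                _ = std.os.poll(&fds, -1) catch return;
                if (fds[0].revents & std.os.POLL.IN != 0) {
                    std.log.info("reader got quit signal", .{});
                }
            }
        }.run, .{pipe[0]});

        // Signal and reap the reader, exactly as threadExit does above.
        _ = try std.os.write(pipe[1], "x");
        reader.join();
    }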
@@ -338,8 +349,10 @@ const ThreadData = struct {
     /// Our read thread
     read_thread: std.Thread,
+    read_thread_pipe: std.os.fd_t,
 
     pub fn deinit(self: *ThreadData) void {
+        std.os.close(self.read_thread_pipe);
         self.ev.deinit(self.alloc);
         self.alloc.destroy(self.ev);
         self.* = undefined;
@@ -659,6 +672,7 @@ const Subprocess = struct {
     /// Clean up the subprocess. This will stop the subprocess if it is started.
     pub fn deinit(self: *Subprocess) void {
         self.stop();
+        if (self.pty) |*pty| pty.deinit();
         self.arena.deinit();
         self.* = undefined;
     }
@@ -767,7 +781,8 @@ const Subprocess = struct {
     }
 
     /// Stop the subprocess. This is safe to call anytime. This will wait
-    /// for the subprocess to end so it will block.
+    /// for the subprocess to end so it will block. This does not close
+    /// the pty.
     pub fn stop(self: *Subprocess) void {
         // Kill our command
         if (self.command) |*cmd| {
@@ -788,14 +803,6 @@ const Subprocess = struct {
                 self.flatpak_command = null;
             }
         }
-
-        // Close our PTY. We do this after killing our command because on
-        // macOS, close will block until all blocking operations read/write
-        // are done with it and our reader thread is probably still alive.
-        if (self.pty) |*pty| {
-            pty.deinit();
-            self.pty = null;
-        }
     }
 
     /// Resize the pty subprocess. This is safe to call anytime.
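
Together these hunks fix the undefined behavior named in the PR title: previously stop() closed the pty while the read thread could still be blocked in read() on it. The resulting shutdown order, as a summary sketch (hypothetical function that does not exist in this diff; the names come from the hunks above):

    fn teardown(exec: *Exec, data: ThreadData) void {
        exec.subprocess.stop(); // kill and reap the child; the pty stays open
        _ = std.os.write(data.read_thread_pipe, "x") catch {}; // wake the reader
        data.read_thread.join(); // reader exits via the quit fd (or EOF/poll)
        exec.subprocess.deinit(); // only now is the pty closed
    }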
@@ -934,10 +941,43 @@ const Subprocess = struct {
 /// This is also empirically fast compared to putting the read into
 /// an async mechanism like io_uring/epoll because the reads are generally
 /// small.
+///
+/// We use a basic poll syscall here because we are only monitoring two
+/// fds and this is still much faster and lower overhead than any async
+/// mechanism.
 const ReadThread = struct {
     /// The main entrypoint for the thread.
-    fn threadMain(fd: std.os.fd_t, ev: *EventData) void {
+    fn threadMain(fd: std.os.fd_t, ev: *EventData, quit: std.os.fd_t) void {
+        // Always close our end of the pipe when we exit.
+        defer std.os.close(quit);
+
+        // First thing, we want to set the fd to non-blocking. We do this
+        // so that we can try to read from the fd in a tight loop and only
+        // check the quit fd occasionally.
+        if (std.os.fcntl(fd, std.os.F.GETFL, 0)) |flags| {
+            _ = std.os.fcntl(fd, std.os.F.SETFL, flags | std.os.O.NONBLOCK) catch |err| {
+                log.warn("read thread failed to set flags err={}", .{err});
+                log.warn("this isn't a fatal error, but may cause performance issues", .{});
+            };
+        } else |err| {
+            log.warn("read thread failed to get flags err={}", .{err});
+            log.warn("this isn't a fatal error, but may cause performance issues", .{});
+        }
+
+        // Build up the list of fds we're going to poll. We are looking
+        // for data on the pty and our quit notification.
+        var pollfds: [2]std.os.pollfd = .{
+            .{ .fd = fd, .events = std.os.POLL.IN, .revents = undefined },
+            .{ .fd = quit, .events = std.os.POLL.IN, .revents = undefined },
+        };
+
         var buf: [1024]u8 = undefined;
+        while (true) {
+            // We try to read from the file descriptor as long as possible
+            // to maximize performance. We only check the quit fd if the
+            // main fd blocks. This optimizes for the realistic scenario that
+            // the data will eventually stop while we're trying to quit. This
+            // is always true because we kill the process.
             while (true) {
                 const n = std.os.read(fd, &buf) catch |err| {
                     switch (err) {
@@ -945,19 +985,43 @@ const ReadThread = struct {
                         // gracefully shutting down.
                         error.NotOpenForReading,
                         error.InputOutput,
-                        => log.info("io reader exiting", .{}),
+                        => {
+                            log.info("io reader exiting", .{});
+                            return;
+                        },
+
+                        // No more data, fall back to poll and check for
+                        // exit conditions.
+                        error.WouldBlock => break,
 
                         else => {
                             log.err("io reader error err={}", .{err});
                             unreachable;
                         },
                     }
-
-                    return;
                 };
+
+                // This happens on macOS instead of WouldBlock when the
+                // child process dies. To be safe, we just break the loop
+                // and let our poll happen.
+                if (n == 0) break;
 
                 // log.info("DATA: {d}", .{n});
                 @call(.always_inline, process, .{ ev, buf[0..n] });
             }
+
+            // Wait for data (-1 blocks until an fd is ready; a zero timeout
+            // here would busy-loop against the non-blocking pty fd).
+            _ = std.os.poll(&pollfds, -1) catch |err| {
+                log.warn("poll failed on read thread, exiting early err={}", .{err});
+                return;
+            };
+
+            // If our quit fd is set, we're done.
+            if (pollfds[1].revents & std.os.POLL.IN != 0) {
+                log.info("read thread got quit signal", .{});
+                return;
+            }
+        }
     }
 
     fn process(