From 60a36ca5cc8e96658ad2349420f4076d21c65c76 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 19:26:57 -0700
Subject: [PATCH 1/7] apprt/gtk: enable single instance mode

---
 src/apprt/gtk.zig | 40 +++++++++++++++++++++++++++-------------
 src/main.zig      |  3 ---
 2 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/src/apprt/gtk.zig b/src/apprt/gtk.zig
index 4c3471979..64d48be5a 100644
--- a/src/apprt/gtk.zig
+++ b/src/apprt/gtk.zig
@@ -31,11 +31,7 @@ const log = std.log.scoped(.gtk);
 /// application frameworks also have this restriction so it simplifies
 /// the assumptions.
 pub const App = struct {
-    pub const Options = struct {
-        /// GTK app ID. This is currently unused but should remain populated
-        /// for the future.
-        id: [:0]const u8 = "com.mitchellh.ghostty",
-    };
+    pub const Options = struct {};
 
     core_app: *CoreApp,
     config: Config,
@@ -62,9 +58,14 @@ pub const App = struct {
         var config = try Config.load(core_app.alloc);
         errdefer config.deinit();
 
+        // Our uniqueness ID is based on whether we're in a debug mode or not.
+        // In debug mode we want to be separate so we can develop Ghostty in
+        // Ghostty.
+        const uniqueness_id = "com.mitchellh.ghostty" ++ if (builtin.mode == .Debug) "-debug" else "";
+
         // Create our GTK Application which encapsulates our process.
         const app = @as(?*c.GtkApplication, @ptrCast(c.gtk_application_new(
-            null,
+            uniqueness_id,
 
             // GTK >= 2.74
             if (@hasDecl(c, "G_APPLICATION_DEFAULT_FLAGS"))
@@ -77,7 +78,7 @@ pub const App = struct {
             app,
             "activate",
             c.G_CALLBACK(&activate),
-            null,
+            core_app,
             null,
             G_CONNECT_DEFAULT,
         );
@@ -121,12 +122,19 @@ pub const App = struct {
             .ctx = ctx,
             .cursor_default = cursor_default,
             .cursor_ibeam = cursor_ibeam,
+
+            // If we are NOT the primary instance, then we never want to run.
+            // This means that another instance of the GTK app is running and
+            // our "activate" call above will open a window.
+            .running = c.g_application_get_is_remote(gapp) == 0,
         };
     }
 
     // Terminate the application. The application will not be restarted after
     // this so all global state can be cleaned up.
     pub fn terminate(self: *App) void {
+        c.g_signal_emit(self.app, c.g_signal_lookup("shutdown", c.g_application_get_type()), 0);
+        c.g_settings_sync();
         while (c.g_main_context_iteration(self.ctx, 0) != 0) {}
         c.g_main_context_release(self.ctx);
@@ -183,6 +191,9 @@ pub const App = struct {
         _ = parent_;
         const alloc = self.core_app.alloc;
 
+        // If we're trying to quit, then do not open any new windows.
+        if (!self.running) return;
+
         // Allocate a fixed pointer for our window. We try to minimize
         // allocations but windows and other GUI requirements are so minimal
         // compared to the steady-state terminal operation so we use heap
@@ -261,15 +272,18 @@ pub const App = struct {
         }.callback, null);
     }
 
+    /// This is called by the "activate" signal. This is sent on program
+    /// startup and also when a secondary instance launches and requests
+    /// a new window.
     fn activate(app: *c.GtkApplication, ud: ?*anyopaque) callconv(.C) void {
         _ = app;
-        _ = ud;
 
-        // We purposely don't do anything on activation right now. We have
-        // this callback because if we don't then GTK emits a warning to
-        // stderr that we don't want. We emit a debug log just so that we know
-        // we reached this point.
-        log.debug("application activated", .{});
+        const core_app: *CoreApp = @ptrCast(@alignCast(ud orelse return));
+
+        // Queue a new window
+        _ = core_app.mailbox.push(.{
+            .new_window = .{},
+        }, .{ .instant = {} });
     }
 };
diff --git a/src/main.zig b/src/main.zig
index 2ba800b0e..1f030a150 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -34,9 +34,6 @@ pub fn main() !void {
     var app_runtime = try apprt.App.init(app, .{});
     defer app_runtime.terminate();
 
-    // Create an initial window
-    try app_runtime.newWindow(null);
-
     // Run the GUI event loop
     try app_runtime.run();
 }

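A note on the uniqueness ID introduced above: builtin.mode is a compile-time constant, so the ++ concatenation is resolved during compilation and a debug build registers a different application ID than a release build, which is what lets you develop Ghostty inside Ghostty without the debug instance being folded into the running release instance. A minimal standalone illustration of that comptime mechanic (an assumed test file, not part of the patch):

    const std = @import("std");
    const builtin = @import("builtin");

    // builtin.mode is known at compile time, so both the branch and the
    // ++ concatenation happen during compilation; the resulting ID is a
    // plain string constant baked into the binary.
    const uniqueness_id = "com.mitchellh.ghostty" ++ if (builtin.mode == .Debug) "-debug" else "";

    test "debug builds register their own application id" {
        if (builtin.mode == .Debug) {
            try std.testing.expectEqualStrings("com.mitchellh.ghostty-debug", uniqueness_id);
        } else {
            try std.testing.expectEqualStrings("com.mitchellh.ghostty", uniqueness_id);
        }
    }
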
From 2e98d43a584a90d14172b631d519bad499ac8a75 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 19:28:48 -0700
Subject: [PATCH 2/7] apprt/glfw: launch window on startup

---
 src/apprt/glfw.zig | 8 ++++++++
 src/apprt/gtk.zig  | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/apprt/glfw.zig b/src/apprt/glfw.zig
index c3cb6d683..1c2d3b2c1 100644
--- a/src/apprt/glfw.zig
+++ b/src/apprt/glfw.zig
@@ -65,6 +65,14 @@ pub const App = struct {
         // up when we take a pass at cleaning up the dev mode.
         if (DevMode.enabled) DevMode.instance.config = config;
 
+        // Queue a single new window that starts on launch
+        _ = core_app.mailbox.push(.{
+            .new_window = .{},
+        }, .{ .forever = {} });
+
+        // We want the event loop to wake up instantly so we can process our tick.
+        glfw.postEmptyEvent();
+
         return .{
             .app = core_app,
             .config = config,
diff --git a/src/apprt/gtk.zig b/src/apprt/gtk.zig
index 64d48be5a..abb06842d 100644
--- a/src/apprt/gtk.zig
+++ b/src/apprt/gtk.zig
@@ -283,7 +283,7 @@ pub const App = struct {
         // Queue a new window
         _ = core_app.mailbox.push(.{
             .new_window = .{},
-        }, .{ .instant = {} });
+        }, .{ .forever = {} });
     }
 };

From f40a90e7aaf9d3914eaf63b0e54e7388a7703cb9 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 20:40:23 -0700
Subject: [PATCH 3/7] termio: use a pipe to notify the reader thread to quit

Simultaneously reading and closing a fd is UB. We need to ensure that
we quit, then close.
---
 src/termio/Exec.zig | 50 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 45 insertions(+), 5 deletions(-)

diff --git a/src/termio/Exec.zig b/src/termio/Exec.zig
index 08808e3f8..89ecf726c 100644
--- a/src/termio/Exec.zig
+++ b/src/termio/Exec.zig
@@ -139,6 +139,12 @@ pub fn threadEnter(self: *Exec, thread: *termio.Thread) !ThreadData {
         break :pid command.pid orelse return error.ProcessNoPid;
     };
 
+    // Create our pipe that we'll use to kill our read thread.
+    // pipe[0] is the read end, pipe[1] is the write end.
+    const pipe = try std.os.pipe();
+    errdefer std.os.close(pipe[0]);
+    errdefer std.os.close(pipe[1]);
+
     // Setup our data that is used for callbacks
     var ev_data_ptr = try alloc.create(EventData);
     errdefer alloc.destroy(ev_data_ptr);
@@ -194,7 +200,7 @@ pub fn threadEnter(self: *Exec, thread: *termio.Thread) !ThreadData {
     const read_thread = try std.Thread.spawn(
         .{},
         ReadThread.threadMain,
-        .{ master_fd, ev_data_ptr },
+        .{ master_fd, ev_data_ptr, pipe[0] },
     );
     read_thread.setName("io-reader") catch {};
 
@@ -203,6 +209,7 @@ pub fn threadEnter(self: *Exec, thread: *termio.Thread) !ThreadData {
         .alloc = alloc,
         .ev = ev_data_ptr,
         .read_thread = read_thread,
+        .read_thread_pipe = pipe[1],
     };
 }
 
@@ -210,12 +217,15 @@ pub fn threadExit(self: *Exec, data: ThreadData) void {
     // Clear out our data since we're not active anymore.
     self.data = null;
 
+    // Quit our read thread first so that we aren't simultaneously
+    // performing a read/close on the pty fd.
+    _ = std.os.write(data.read_thread_pipe, "x") catch |err|
+        log.warn("error writing to read thread quit pipe err={}", .{err});
+    data.read_thread.join();
+
     // Stop our subprocess
     if (data.ev.process_exited) self.subprocess.externalExit();
     self.subprocess.stop();
-
-    // Wait for our reader thread to end
-    data.read_thread.join();
 }
 
 /// Update the configuration.
@@ -338,8 +348,10 @@ const ThreadData = struct {
 
     /// Our read thread
     read_thread: std.Thread,
+    read_thread_pipe: std.os.fd_t,
 
     pub fn deinit(self: *ThreadData) void {
+        std.os.close(self.read_thread_pipe);
         self.ev.deinit(self.alloc);
         self.alloc.destroy(self.ev);
         self.* = undefined;
@@ -934,11 +946,39 @@ const Subprocess = struct {
 /// This is also empirically fast compared to putting the read into
 /// an async mechanism like io_uring/epoll because the reads are generally
 /// small.
+///
+/// We use a basic poll syscall here because we are only monitoring two
+/// fds and this is still much faster and lower overhead than any async
+/// mechanism.
 const ReadThread = struct {
     /// The main entrypoint for the thread.
-    fn threadMain(fd: std.os.fd_t, ev: *EventData) void {
+    fn threadMain(fd: std.os.fd_t, ev: *EventData, quit: std.os.fd_t) void {
+        // Always close our end of the pipe when we exit.
+        defer std.os.close(quit);
+
+        // Build up the list of fds we're going to poll. We are looking
+        // for data on the pty and our quit notification.
+        var pollfds: [2]std.os.pollfd = .{
+            .{ .fd = fd, .events = std.os.POLL.IN, .revents = undefined },
+            .{ .fd = quit, .events = std.os.POLL.IN, .revents = undefined },
+        };
+
         var buf: [1024]u8 = undefined;
         while (true) {
+            // Wait for data.
+            _ = std.os.poll(&pollfds, 0) catch |err| {
+                log.warn("poll failed on read thread, exiting early err={}", .{err});
+                return;
+            };
+
+            // If our quit fd is set, we're done.
+            if (pollfds[1].revents & std.os.POLL.IN != 0) {
+                log.info("read thread got quit signal", .{});
+                return;
+            }
+
+            // Ensure our pty has data.
+            if (pollfds[0].revents & std.os.POLL.IN == 0) continue;
             const n = std.os.read(fd, &buf) catch |err| {
                 switch (err) {
                     // This means our pty is closed. We're probably

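The commit message above is the key constraint: the reader thread has to stop reading before anyone closes the pty fd, and the pipe is simply a second fd the thread can poll to learn that it should stop. For reference, a self-contained sketch of that handshake, reusing the same std.os calls as the patch but with an ordinary pipe standing in for the pty and a blocking poll timeout for simplicity; this is an illustration, not code from the series:

    const std = @import("std");

    // Reader side: poll both the data fd and the quit fd, mirroring the
    // control flow of ReadThread.threadMain above.
    fn readerMain(data_fd: std.os.fd_t, quit_fd: std.os.fd_t) void {
        defer std.os.close(quit_fd);

        var pollfds: [2]std.os.pollfd = .{
            .{ .fd = data_fd, .events = std.os.POLL.IN, .revents = undefined },
            .{ .fd = quit_fd, .events = std.os.POLL.IN, .revents = undefined },
        };

        var buf: [1024]u8 = undefined;
        while (true) {
            // Block until either fd becomes readable (-1 = no timeout).
            _ = std.os.poll(&pollfds, -1) catch return;

            // The quit byte wins: stop reading before anyone closes data_fd.
            if (pollfds[1].revents & std.os.POLL.IN != 0) return;

            if (pollfds[0].revents & std.os.POLL.IN == 0) continue;
            const n = std.os.read(data_fd, &buf) catch return;
            if (n == 0) return;
            std.debug.print("read {d} bytes\n", .{n});
        }
    }

    pub fn main() !void {
        // A plain pipe stands in for the pty; a second pipe is the quit signal.
        const data = try std.os.pipe();
        const quit = try std.os.pipe();

        const reader = try std.Thread.spawn(.{}, readerMain, .{ data[0], quit[0] });

        _ = try std.os.write(data[1], "hello");
        std.time.sleep(10 * std.time.ns_per_ms);

        // Same ordering as threadExit: signal the reader, join it, and only
        // then close the fds it was reading from.
        _ = try std.os.write(quit[1], "x");
        reader.join();
        std.os.close(data[0]);
        std.os.close(data[1]);
        std.os.close(quit[1]);
    }
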
From 816cad07b9b735782577e4b6ed03c46fbc3c7ee6 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 20:55:01 -0700
Subject: [PATCH 4/7] termio/exec: set pty fd to non-blocking, try to read as long as possible

---
 src/termio/Exec.zig | 63 ++++++++++++++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 21 deletions(-)

diff --git a/src/termio/Exec.zig b/src/termio/Exec.zig
index 89ecf726c..85dd9a775 100644
--- a/src/termio/Exec.zig
+++ b/src/termio/Exec.zig
@@ -956,6 +956,16 @@ const ReadThread = struct {
         // Always close our end of the pipe when we exit.
         defer std.os.close(quit);
 
+        // First thing, we want to set the fd to non-blocking. We do this
+        // so that we can try to read from the fd in a tight loop and only
+        // check the quit fd occasionally.
+        if (std.os.fcntl(fd, std.os.F.GETFL, 0)) |flags| {
+            _ = std.os.fcntl(fd, std.os.F.SETFL, flags | std.os.O.NONBLOCK) catch |err| {
+                log.warn("read thread failed to set flags err={}", .{err});
+                log.warn("this isn't a fatal error, but may cause performance issues", .{});
+            };
+        } else |_| {}
+
         // Build up the list of fds we're going to poll. We are looking
         // for data on the pty and our quit notification.
         var pollfds: [2]std.os.pollfd = .{
@@ -965,6 +975,38 @@ const ReadThread = struct {
         };
 
         var buf: [1024]u8 = undefined;
         while (true) {
+            // We try to read from the file descriptor as long as possible
+            // to maximize performance. We only check the quit fd if the
+            // main fd blocks. This optimizes for the realistic scenario that
+            // the data will eventually stop while we're trying to quit. This
+            // is always true because we kill the process.
+            while (true) {
+                const n = std.os.read(fd, &buf) catch |err| {
+                    switch (err) {
+                        // This means our pty is closed. We're probably
+                        // gracefully shutting down.
+                        error.NotOpenForReading,
+                        error.InputOutput,
+                        => {
+                            log.info("io reader exiting", .{});
+                            return;
+                        },
+
+                        // No more data, fall back to poll and check for
+                        // exit conditions.
+                        error.WouldBlock => break,
+
+                        else => {
+                            log.err("io reader error err={}", .{err});
+                            unreachable;
+                        },
+                    }
+                };
+
+                // log.info("DATA: {d}", .{n});
+                @call(.always_inline, process, .{ ev, buf[0..n] });
+            }
+
             // Wait for data.
             _ = std.os.poll(&pollfds, 0) catch |err| {
                 log.warn("poll failed on read thread, exiting early err={}", .{err});
                 return;
             };
@@ -976,27 +1018,6 @@ const ReadThread = struct {
                 log.info("read thread got quit signal", .{});
                 return;
             }
-
-            // Ensure our pty has data.
-            if (pollfds[0].revents & std.os.POLL.IN == 0) continue;
-            const n = std.os.read(fd, &buf) catch |err| {
-                switch (err) {
-                    // This means our pty is closed. We're probably
-                    // gracefully shutting down.
-                    error.NotOpenForReading,
-                    error.InputOutput,
-                    => log.info("io reader exiting", .{}),
-
-                    else => {
-                        log.err("io reader error err={}", .{err});
-                        unreachable;
-                    },
-                }
-                return;
-            };
-
-            // log.info("DATA: {d}", .{n});
-            @call(.always_inline, process, .{ ev, buf[0..n] });
         }
     }

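The fcntl sequence in this patch is the standard F_GETFL/F_SETFL round trip. Reduced to a helper for reference (a sketch reusing the same std.os calls; the name setNonblock is invented here, and failure is deliberately non-fatal, matching the patch's behavior):

    const std = @import("std");

    /// Best-effort switch of a file descriptor to non-blocking mode. If
    /// either fcntl call fails the fd is left as-is: reads still work, only
    /// the tight-loop optimization is lost.
    fn setNonblock(fd: std.os.fd_t) void {
        const flags = std.os.fcntl(fd, std.os.F.GETFL, 0) catch return;
        _ = std.os.fcntl(fd, std.os.F.SETFL, flags | std.os.O.NONBLOCK) catch return;
    }
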
From b5bb3b373928f5967e50a1844e83a6f09549158b Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 21:10:11 -0700
Subject: [PATCH 5/7] termio/exec: if read thread gets 0 bytes then break the tight loop

---
 src/termio/Exec.zig | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/termio/Exec.zig b/src/termio/Exec.zig
index 85dd9a775..5e6cc2f3d 100644
--- a/src/termio/Exec.zig
+++ b/src/termio/Exec.zig
@@ -964,7 +964,10 @@ const ReadThread = struct {
                 log.warn("read thread failed to set flags err={}", .{err});
                 log.warn("this isn't a fatal error, but may cause performance issues", .{});
             };
-        } else |_| {}
+        } else |err| {
+            log.warn("read thread failed to get flags err={}", .{err});
+            log.warn("this isn't a fatal error, but may cause performance issues", .{});
+        }
 
         // Build up the list of fds we're going to poll. We are looking
         // for data on the pty and our quit notification.
@@ -1003,6 +1006,11 @@ const ReadThread = struct {
                 }
             };
 
+            // This happens on macOS instead of WouldBlock when the
+            // child process dies. To be safe, we just break the loop
+            // and let our poll happen.
+            if (n == 0) break;
+
            // log.info("DATA: {d}", .{n});
            @call(.always_inline, process, .{ ev, buf[0..n] });
        }

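Together with the previous patch, the non-blocking read in the tight loop now has three outcomes that matter: actual data (process it and keep reading), error.WouldBlock (nothing buffered, fall back to poll and check the quit pipe), and a zero-length read, which macOS reports instead of WouldBlock once the child dies. A reduced sketch of just that classification, with a hypothetical process callback standing in for the real handler:

    const std = @import("std");

    const ReadResult = enum { keep_reading, poll_again, closed };

    // One non-blocking read, classified the same way the patched loop does.
    fn readOnce(fd: std.os.fd_t, buf: []u8, process: *const fn ([]const u8) void) ReadResult {
        const n = std.os.read(fd, buf) catch |err| switch (err) {
            // Nothing buffered right now: go back to poll().
            error.WouldBlock => return .poll_again,
            // The pty is gone (or something unexpected happened).
            else => return .closed,
        };

        // Zero bytes: treat it like "go poll and re-evaluate" rather than
        // spinning on an fd that will never produce data again.
        if (n == 0) return .poll_again;

        process(buf[0..n]);
        return .keep_reading;
    }
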
From e2e876bfa16b615a1306294a655761dfd3c227b9 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 21:13:33 -0700
Subject: [PATCH 6/7] termio/exec: make sure that process is dead before killing read thread

---
 src/termio/Exec.zig | 25 ++++++++++---------------
 1 file changed, 10 insertions(+), 15 deletions(-)

diff --git a/src/termio/Exec.zig b/src/termio/Exec.zig
index 5e6cc2f3d..eed0c5e93 100644
--- a/src/termio/Exec.zig
+++ b/src/termio/Exec.zig
@@ -217,15 +217,16 @@ pub fn threadExit(self: *Exec, data: ThreadData) void {
     // Clear out our data since we're not active anymore.
     self.data = null;
 
-    // Quit our read thread first so that we aren't simultaneously
-    // performing a read/close on the pty fd.
-    _ = std.os.write(data.read_thread_pipe, "x") catch |err|
-        log.warn("error writing to read thread quit pipe err={}", .{err});
-    data.read_thread.join();
-
     // Stop our subprocess
     if (data.ev.process_exited) self.subprocess.externalExit();
     self.subprocess.stop();
+
+    // Quit our read thread after exiting the subprocess so that
+    // we don't get stuck waiting for data to stop flowing if it is
+    // a particularly noisy process.
+    _ = std.os.write(data.read_thread_pipe, "x") catch |err|
+        log.warn("error writing to read thread quit pipe err={}", .{err});
+    data.read_thread.join();
 }
 
 /// Update the configuration.
@@ -671,6 +672,7 @@ const Subprocess = struct {
     /// Clean up the subprocess. This will stop the subprocess if it is started.
     pub fn deinit(self: *Subprocess) void {
         self.stop();
+        if (self.pty) |*pty| pty.deinit();
         self.arena.deinit();
         self.* = undefined;
     }
@@ -779,7 +781,8 @@ const Subprocess = struct {
     }
 
     /// Stop the subprocess. This is safe to call anytime. This will wait
-    /// for the subprocess to end so it will block.
+    /// for the subprocess to end so it will block. This does not close
+    /// the pty.
     pub fn stop(self: *Subprocess) void {
         // Kill our command
         if (self.command) |*cmd| {
@@ -800,14 +803,6 @@ const Subprocess = struct {
                 self.flatpak_command = null;
             }
         }
-
-        // Close our PTY. We do this after killing our command because on
-        // macOS, close will block until all blocking operations read/write
-        // are done with it and our reader thread is probably still alive.
-        if (self.pty) |*pty| {
-            pty.deinit();
-            self.pty = null;
-        }
     }
 
     /// Resize the pty subprocess. This is safe to call anytime.

From 6d32d7499606633c882edfdd3cc477b09060e2b7 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 7 Aug 2023 21:15:42 -0700
Subject: [PATCH 7/7] apprt/gtk: remove useless calls

---
 src/apprt/gtk.zig | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/src/apprt/gtk.zig b/src/apprt/gtk.zig
index abb06842d..705880962 100644
--- a/src/apprt/gtk.zig
+++ b/src/apprt/gtk.zig
@@ -133,8 +133,6 @@ pub const App = struct {
     // Terminate the application. The application will not be restarted after
     // this so all global state can be cleaned up.
     pub fn terminate(self: *App) void {
-        c.g_signal_emit(self.app, c.g_signal_lookup("shutdown", c.g_application_get_type()), 0);
-        c.g_settings_sync();
         while (c.g_main_context_iteration(self.ctx, 0) != 0) {}
         c.g_main_context_release(self.ctx);
@@ -191,9 +189,6 @@ pub const App = struct {
         _ = parent_;
         const alloc = self.core_app.alloc;
 
-        // If we're trying to quit, then do not open any new windows.
-        if (!self.running) return;
-
         // Allocate a fixed pointer for our window. We try to minimize
         // allocations but windows and other GUI requirements are so minimal
         // compared to the steady-state terminal operation so we use heap