mirror of
https://github.com/ghostty-org/ghostty.git
synced 2025-07-16 16:56:09 +03:00
Move pty IO to dedicated thread
* This completes ours multi-threaded architecture started with #21 by moving pty IO to a dedicated thread. * A bounded, blocking queue implementation is introduced for inter-thread communication. * The Window thread (which previously also did IO) no longer uses libuv, and is purely a windowing event loop. ## Performance On IO heavy workloads such as `cat big-file.txt`, throughput has increased by more than 400%. In general, I'm noticing more consistent frame rates across all workloads, with dramatic differences in IO heavy workloads. ## Architectural Notes There are now three threads per window as shown below: ``` ┌─────────────────────────────────┐ │ │ │ Window │ │ │ └─────────────────────────────────┘ │ ┌───────────────────┴──────────────────┐ │ │ ▼ ▼ ┌─────────────────────────────────┐ ┌─────────────────────────────────┐ │ │ │ │ │ Renderer │◀───│ IO │ │ │ │ │ └─────────────────────────────────┘ └─────────────────────────────────┘ ``` Notes: * The window thread is responsible purely for windowing events: focus, mouse movement, keyboard input, etc. * The IO thread is responsible for pty IO such as reading and writing to the pty fd. This thread also owns the terminal state. * The renderer is responsible for turning terminal state into screen data, and auxiliary visual functions such as cursor blink. The arrows in the diagram above show how threads can communicate. Communication is done through one-way, MPSC (multi-producer single-consumer) bounded queues. The MPSC queue implementation is _not optimal_ and can be improved but our workload is not a message-heavy workload. Threads _also use shared memory_, as noted in #21. For large data structures such as terminal state, mutexes are utilized to avoid large copies.
This commit is contained in:
69
src/App.zig
69
src/App.zig
@ -7,7 +7,6 @@ const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const glfw = @import("glfw");
|
||||
const Window = @import("Window.zig");
|
||||
const libuv = @import("libuv");
|
||||
const tracy = @import("tracy");
|
||||
const Config = @import("config.zig").Config;
|
||||
|
||||
@ -20,11 +19,6 @@ alloc: Allocator,
|
||||
/// single window operations.
|
||||
window: *Window,
|
||||
|
||||
// The main event loop for the application. The user data of this loop
|
||||
// is always the allocator used to create the loop. This is a convenience
|
||||
// so that users of the loop always have an allocator.
|
||||
loop: libuv.Loop,
|
||||
|
||||
// The configuration for the app.
|
||||
config: *const Config,
|
||||
|
||||
@ -32,63 +26,23 @@ config: *const Config,
|
||||
/// up the renderer state, compiles the shaders, etc. This is the primary
|
||||
/// "startup" logic.
|
||||
pub fn init(alloc: Allocator, config: *const Config) !App {
|
||||
// Create the event loop
|
||||
var loop = try libuv.Loop.init(alloc);
|
||||
errdefer loop.deinit(alloc);
|
||||
|
||||
// We always store allocator pointer on the loop data so that
|
||||
// handles can use our global allocator.
|
||||
const allocPtr = try alloc.create(Allocator);
|
||||
errdefer alloc.destroy(allocPtr);
|
||||
allocPtr.* = alloc;
|
||||
loop.setData(allocPtr);
|
||||
|
||||
// Create the window
|
||||
var window = try Window.create(alloc, loop, config);
|
||||
var window = try Window.create(alloc, config);
|
||||
errdefer window.destroy();
|
||||
|
||||
return App{
|
||||
.alloc = alloc,
|
||||
.window = window,
|
||||
.loop = loop,
|
||||
.config = config,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *App) void {
|
||||
self.window.destroy();
|
||||
|
||||
// Run the loop one more time, because destroying our other things
|
||||
// like windows usually cancel all our event loop stuff and we need
|
||||
// one more run through to finalize all the closes.
|
||||
_ = self.loop.run(.default) catch |err|
|
||||
log.err("error finalizing event loop: {}", .{err});
|
||||
|
||||
// Dealloc our allocator copy
|
||||
self.alloc.destroy(self.loop.getData(Allocator).?);
|
||||
|
||||
self.loop.deinit(self.alloc);
|
||||
self.* = undefined;
|
||||
}
|
||||
|
||||
pub fn run(self: App) !void {
|
||||
// We are embedding two event loops: glfw and libuv. To do this, we
|
||||
// create a separate thread that watches for libuv events and notifies
|
||||
// glfw to wake up so we can run the libuv tick.
|
||||
var embed = try libuv.Embed.init(self.alloc, self.loop, (struct {
|
||||
fn callback() void {
|
||||
glfw.postEmptyEvent() catch unreachable;
|
||||
}
|
||||
}).callback);
|
||||
defer embed.deinit(self.alloc);
|
||||
try embed.start();
|
||||
|
||||
// This async handle is used to "wake up" the embed thread so we can
|
||||
// exit immediately once the windows want to close.
|
||||
var async_h = try libuv.Async.init(self.alloc, self.loop, (struct {
|
||||
fn callback(_: *libuv.Async) void {}
|
||||
}).callback);
|
||||
|
||||
while (!self.window.shouldClose()) {
|
||||
// Block for any glfw events. This may also be an "empty" event
|
||||
// posted by the libuv watcher so that we trigger a libuv loop tick.
|
||||
@ -96,26 +50,5 @@ pub fn run(self: App) !void {
|
||||
|
||||
// Mark this so we're in a totally different "frame"
|
||||
tracy.frameMark();
|
||||
|
||||
// Run the libuv loop
|
||||
const frame = tracy.frame("libuv");
|
||||
defer frame.end();
|
||||
try embed.loopRun();
|
||||
}
|
||||
|
||||
// Notify the embed thread to stop. We do this before we send on the
|
||||
// async handle so that when the thread goes around it exits.
|
||||
embed.stop();
|
||||
|
||||
// Wake up the event loop and schedule our close.
|
||||
try async_h.send();
|
||||
async_h.close((struct {
|
||||
fn callback(h: *libuv.Async) void {
|
||||
const alloc = h.loop().getData(Allocator).?.*;
|
||||
h.deinit(alloc);
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Wait for the thread to end which should be almost instant.
|
||||
try embed.join();
|
||||
}
|
||||
|
1006
src/Window.zig
1006
src/Window.zig
File diff suppressed because it is too large
Load Diff
251
src/blocking_queue.zig
Normal file
251
src/blocking_queue.zig
Normal file
@ -0,0 +1,251 @@
|
||||
//! Blocking queue implementation aimed primarily for message passing
|
||||
//! between threads.
|
||||
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// Returns a blocking queue implementation for type T.
|
||||
///
|
||||
/// This is tailor made for ghostty usage so it isn't meant to be maximally
|
||||
/// generic, but I'm happy to make it more generic over time. Traits of this
|
||||
/// queue that are specific to our usage:
|
||||
///
|
||||
/// - Fixed size. We expect our queue to quickly drain and also not be
|
||||
/// too large so we prefer a fixed size queue for now.
|
||||
/// - No blocking pop. We use an external event loop mechanism such as
|
||||
/// eventfd to notify our waiter that there is no data available so
|
||||
/// we don't need to implement a blocking pop.
|
||||
/// - Drain function. Most queues usually pop one at a time. We have
|
||||
/// a mechanism for draining since on every IO loop our TTY drains
|
||||
/// the full queue so we can get rid of the overhead of a ton of
|
||||
/// locks and bounds checking and do a one-time drain.
|
||||
///
|
||||
/// One key usage pattern is that our blocking queues are single producer
|
||||
/// single consumer (SPSC). This should let us do some interesting optimizations
|
||||
/// in the future. At the time of writing this, the blocking queue implementation
|
||||
/// is purposely naive to build something quickly, but we should benchmark
|
||||
/// and make this more optimized as necessary.
|
||||
pub fn BlockingQueue(
    comptime T: type,
    comptime capacity: usize,
) type {
    return struct {
        const Self = @This();

        // The type we use for queue size types. We can optimize this
        // in the future to be the correct bit-size for our preallocated
        // size for this queue.
        const Size = u32;

        // The bounds of this queue. We recast this to Size so we can do math.
        const bounds = @intCast(Size, capacity);

        /// Specifies the timeout for an operation.
        pub const Timeout = union(enum) {
            /// Fail instantly (non-blocking).
            instant: void,

            /// Run forever or until interrupted
            forever: void,

            /// Nanoseconds
            ns: u64,
        };

        /// Our data. The values are undefined until they are written.
        data: [bounds]T,

        /// The next location to write (next empty loc) and next location
        /// to read (next non-empty loc). The number of written elements.
        write: Size,
        read: Size,
        len: Size,

        /// The big mutex that must be held to read/write.
        mutex: std.Thread.Mutex,

        /// A CV for being notified when the queue is no longer full. This is
        /// used for writing. Note we DON'T have a CV for waiting on the
        /// queue not being EMPTY because we use external notifiers for that.
        cond_not_full: std.Thread.Condition,
        // Count of producers currently blocked in push() waiting for space.
        // Read under the mutex so consumers know whether to signal.
        not_full_waiters: usize,

        /// Allocate the blocking queue. Allocation must always happen on
        /// the heap due to shared concurrency state.
        pub fn create(alloc: Allocator) !*Self {
            const ptr = try alloc.create(Self);
            errdefer alloc.destroy(ptr);

            ptr.* = .{
                .data = undefined,
                .len = 0,
                .write = 0,
                .read = 0,
                .mutex = .{},
                .cond_not_full = .{},
                .not_full_waiters = 0,
            };

            return ptr;
        }

        /// Free all the resources for this queue. This should only be
        /// called once all producers and consumers have quit.
        pub fn destroy(self: *Self, alloc: Allocator) void {
            self.* = undefined;
            alloc.destroy(self);
        }

        /// Push a value to the queue. This returns the total size of the
        /// queue (unread items) after the push. A return value of zero
        /// means that the push failed.
        pub fn push(self: *Self, value: T, timeout: Timeout) Size {
            self.mutex.lock();
            defer self.mutex.unlock();

            // The queue is full: either fail immediately or wait on the
            // not-full condition according to the requested timeout.
            if (self.full()) {
                switch (timeout) {
                    // If we're not waiting, then we failed to write.
                    .instant => return 0,

                    .forever => {
                        self.not_full_waiters += 1;
                        defer self.not_full_waiters -= 1;
                        self.cond_not_full.wait(&self.mutex);
                    },

                    .ns => |ns| {
                        self.not_full_waiters += 1;
                        defer self.not_full_waiters -= 1;
                        self.cond_not_full.timedWait(&self.mutex, ns) catch return 0;
                    },
                }

                // If we're still full, then we failed to write. This can
                // happen in situations where we are interrupted.
                if (self.full()) return 0;
            }

            // Add our data and update our accounting
            self.data[self.write] = value;
            self.write += 1;
            if (self.write >= bounds) self.write -= bounds;
            self.len += 1;

            return self.len;
        }

        /// Pop a value from the queue without blocking.
        pub fn pop(self: *Self) ?T {
            self.mutex.lock();
            defer self.mutex.unlock();

            // If we're empty we have nothing
            if (self.len == 0) return null;

            // Get the index we're going to read data from and do some
            // accounting. We don't copy the value here to avoid copying twice.
            const n = self.read;
            self.read += 1;
            if (self.read >= bounds) self.read -= bounds;
            self.len -= 1;

            // If we have producers waiting on a full queue, notify one
            // that space is now available.
            if (self.not_full_waiters > 0) self.cond_not_full.signal();

            return self.data[n];
        }

        /// Pop all values from the queue. This will hold the big mutex
        /// until `deinit` is called on the return value. This is used if
        /// you know you're going to "pop" and utilize all the values
        /// quickly to avoid many locks, bounds checks, and cv signals.
        pub fn drain(self: *Self) DrainIterator {
            self.mutex.lock();
            return .{ .queue = self };
        }

        pub const DrainIterator = struct {
            queue: *Self,

            /// Return the next queued value, or null when the queue is
            /// empty. Must only be called while the drain holds the mutex.
            pub fn next(self: *DrainIterator) ?T {
                if (self.queue.len == 0) return null;

                // Read and account
                const n = self.queue.read;
                self.queue.read += 1;
                if (self.queue.read >= bounds) self.queue.read -= bounds;
                self.queue.len -= 1;

                return self.queue.data[n];
            }

            /// Release the mutex taken by `drain`. Must be called exactly
            /// once when done iterating.
            pub fn deinit(self: *DrainIterator) void {
                // If we have producers waiting on a full queue, notify one
                // that space is now available.
                if (self.queue.not_full_waiters > 0) self.queue.cond_not_full.signal();

                // Unlock
                self.queue.mutex.unlock();
            }
        };

        /// Returns true if the queue is full. This is not public because
        /// it requires the lock to be held.
        inline fn full(self: *Self) bool {
            return self.len == bounds;
        }
    };
}
|
||||
|
||||
test "basic push and pop" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Queue = BlockingQueue(u64, 4);
    const queue = try Queue.create(allocator);
    defer queue.destroy(allocator);

    // An empty queue pops nothing.
    try testing.expect(queue.pop() == null);

    // Fill to capacity: every successful push reports the new length,
    // and a push to a full queue fails by returning zero.
    try testing.expectEqual(@as(Queue.Size, 1), queue.push(1, .{ .instant = {} }));
    try testing.expectEqual(@as(Queue.Size, 2), queue.push(2, .{ .instant = {} }));
    try testing.expectEqual(@as(Queue.Size, 3), queue.push(3, .{ .instant = {} }));
    try testing.expectEqual(@as(Queue.Size, 4), queue.push(4, .{ .instant = {} }));
    try testing.expectEqual(@as(Queue.Size, 0), queue.push(5, .{ .instant = {} }));

    // Drain the values back out in FIFO order.
    var want: u64 = 1;
    while (want <= 4) : (want += 1) {
        try testing.expect(queue.pop().? == want);
    }
    try testing.expect(queue.pop() == null);

    // Draining an empty queue yields nothing.
    var iter = queue.drain();
    try testing.expect(iter.next() == null);
    iter.deinit();

    // The queue remains usable after the drain released the lock.
    try testing.expectEqual(@as(Queue.Size, 1), queue.push(1, .{ .instant = {} }));
}
|
||||
|
||||
test "timed push" {
    const testing = std.testing;
    const allocator = testing.allocator;

    const Queue = BlockingQueue(u64, 1);
    const queue = try Queue.create(allocator);
    defer queue.destroy(allocator);

    // The first push occupies our single slot; an instant push then fails.
    try testing.expectEqual(@as(Queue.Size, 1), queue.push(1, .{ .instant = {} }));
    try testing.expectEqual(@as(Queue.Size, 0), queue.push(2, .{ .instant = {} }));

    // With no consumer draining the queue, a timed push waits out its
    // timeout and then fails with zero as well.
    try testing.expectEqual(@as(Queue.Size, 0), queue.push(2, .{ .ns = 1000 }));
}
|
@ -196,6 +196,7 @@ test {
|
||||
_ = @import("font/main.zig");
|
||||
_ = @import("renderer.zig");
|
||||
_ = @import("terminal/Terminal.zig");
|
||||
_ = @import("termio.zig");
|
||||
_ = @import("input.zig");
|
||||
|
||||
// Libraries
|
||||
@ -203,6 +204,7 @@ test {
|
||||
_ = @import("terminal/main.zig");
|
||||
|
||||
// TODO
|
||||
_ = @import("blocking_queue.zig");
|
||||
_ = @import("config.zig");
|
||||
_ = @import("homedir.zig");
|
||||
_ = @import("passwd.zig");
|
||||
|
@ -10,6 +10,7 @@
|
||||
const builtin = @import("builtin");
|
||||
|
||||
pub usingnamespace @import("renderer/cursor.zig");
|
||||
pub usingnamespace @import("renderer/message.zig");
|
||||
pub usingnamespace @import("renderer/size.zig");
|
||||
pub const Metal = @import("renderer/Metal.zig");
|
||||
pub const OpenGL = @import("renderer/OpenGL.zig");
|
||||
|
@ -33,6 +33,9 @@ alloc: std.mem.Allocator,
|
||||
/// Current cell dimensions for this grid.
|
||||
cell_size: renderer.CellSize,
|
||||
|
||||
/// True if the window is focused
|
||||
focused: bool,
|
||||
|
||||
/// Whether the cursor is visible or not. This is used to control cursor
|
||||
/// blinking.
|
||||
cursor_visible: bool,
|
||||
@ -205,6 +208,7 @@ pub fn init(alloc: Allocator, font_group: *font.GroupCache) !Metal {
|
||||
.cell_size = .{ .width = metrics.cell_width, .height = metrics.cell_height },
|
||||
.background = .{ .r = 0, .g = 0, .b = 0 },
|
||||
.foreground = .{ .r = 255, .g = 255, .b = 255 },
|
||||
.focused = true,
|
||||
.cursor_visible = true,
|
||||
.cursor_style = .box,
|
||||
|
||||
@ -290,6 +294,16 @@ pub fn threadExit(self: *const Metal) void {
|
||||
// Metal requires no per-thread state.
|
||||
}
|
||||
|
||||
/// Callback when the focus changes for the terminal this is rendering.
|
||||
pub fn setFocus(self: *Metal, focus: bool) !void {
|
||||
self.focused = focus;
|
||||
}
|
||||
|
||||
/// Called to toggle the blink state of the cursor
|
||||
pub fn blinkCursor(self: *Metal, reset: bool) void {
|
||||
self.cursor_visible = reset or !self.cursor_visible;
|
||||
}
|
||||
|
||||
/// The primary render callback that is completely thread-safe.
|
||||
pub fn render(
|
||||
self: *Metal,
|
||||
@ -315,8 +329,8 @@ pub fn render(
|
||||
defer state.resize_screen = null;
|
||||
|
||||
// Setup our cursor state
|
||||
if (state.focused) {
|
||||
self.cursor_visible = state.cursor.visible and !state.cursor.blink;
|
||||
if (self.focused) {
|
||||
self.cursor_visible = self.cursor_visible and state.cursor.visible;
|
||||
self.cursor_style = renderer.CursorStyle.fromTerminal(state.cursor.style) orelse .box;
|
||||
} else {
|
||||
self.cursor_visible = true;
|
||||
|
@ -74,6 +74,9 @@ foreground: terminal.color.RGB,
|
||||
/// Default background color
|
||||
background: terminal.color.RGB,
|
||||
|
||||
/// True if the window is focused
|
||||
focused: bool,
|
||||
|
||||
/// The raw structure that maps directly to the buffer sent to the vertex shader.
|
||||
/// This must be "extern" so that the field order is not reordered by the
|
||||
/// Zig compiler.
|
||||
@ -292,6 +295,7 @@ pub fn init(alloc: Allocator, font_group: *font.GroupCache) !OpenGL {
|
||||
.cursor_style = .box,
|
||||
.background = .{ .r = 0, .g = 0, .b = 0 },
|
||||
.foreground = .{ .r = 255, .g = 255, .b = 255 },
|
||||
.focused = true,
|
||||
};
|
||||
}
|
||||
|
||||
@ -432,6 +436,16 @@ pub fn threadExit(self: *const OpenGL) void {
|
||||
glfw.makeContextCurrent(null) catch {};
|
||||
}
|
||||
|
||||
/// Callback when the focus changes for the terminal this is rendering.
|
||||
pub fn setFocus(self: *OpenGL, focus: bool) !void {
|
||||
self.focused = focus;
|
||||
}
|
||||
|
||||
/// Called to toggle the blink state of the cursor
|
||||
pub fn blinkCursor(self: *OpenGL, reset: bool) void {
|
||||
self.cursor_visible = reset or !self.cursor_visible;
|
||||
}
|
||||
|
||||
/// The primary render callback that is completely thread-safe.
|
||||
pub fn render(
|
||||
self: *OpenGL,
|
||||
@ -455,8 +469,8 @@ pub fn render(
|
||||
defer state.resize_screen = null;
|
||||
|
||||
// Setup our cursor state
|
||||
if (state.focused) {
|
||||
self.cursor_visible = state.cursor.visible and !state.cursor.blink;
|
||||
if (self.focused) {
|
||||
self.cursor_visible = self.cursor_visible and state.cursor.visible;
|
||||
self.cursor_style = renderer.CursorStyle.fromTerminal(state.cursor.style) orelse .box;
|
||||
} else {
|
||||
self.cursor_visible = true;
|
||||
|
@ -1,6 +1,7 @@
|
||||
//! This is the render state that is given to a renderer.
|
||||
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const DevMode = @import("../DevMode.zig");
|
||||
const terminal = @import("../terminal/main.zig");
|
||||
const renderer = @import("../renderer.zig");
|
||||
@ -11,9 +12,6 @@ const renderer = @import("../renderer.zig");
|
||||
/// state (i.e. the terminal, devmode, etc. values).
|
||||
mutex: *std.Thread.Mutex,
|
||||
|
||||
/// True if the window is focused
|
||||
focused: bool,
|
||||
|
||||
/// A new screen size if the screen was resized.
|
||||
resize_screen: ?renderer.ScreenSize,
|
||||
|
||||
@ -35,8 +33,4 @@ pub const Cursor = struct {
|
||||
/// "blink" settings, see "blink" for that. This is used to turn the
|
||||
/// cursor ON or OFF.
|
||||
visible: bool = true,
|
||||
|
||||
/// Whether the cursor is currently blinking. If it is blinking, then
|
||||
/// the cursor will not be rendered.
|
||||
blink: bool = false,
|
||||
};
|
||||
|
@ -7,11 +7,16 @@ const builtin = @import("builtin");
|
||||
const glfw = @import("glfw");
|
||||
const libuv = @import("libuv");
|
||||
const renderer = @import("../renderer.zig");
|
||||
const gl = @import("../opengl.zig");
|
||||
const BlockingQueue = @import("../blocking_queue.zig").BlockingQueue;
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
const log = std.log.scoped(.renderer_thread);
|
||||
|
||||
/// The type used for sending messages to the IO thread. For now this is
|
||||
/// hardcoded with a capacity. We can make this a comptime parameter in
|
||||
/// the future if we want it configurable.
|
||||
pub const Mailbox = BlockingQueue(renderer.Message, 64);
|
||||
|
||||
/// The main event loop for the application. The user data of this loop
|
||||
/// is always the allocator used to create the loop. This is a convenience
|
||||
/// so that users of the loop always have an allocator.
|
||||
@ -27,6 +32,9 @@ stop: libuv.Async,
|
||||
/// The timer used for rendering
|
||||
render_h: libuv.Timer,
|
||||
|
||||
/// The timer used for cursor blinking
|
||||
cursor_h: libuv.Timer,
|
||||
|
||||
/// The window we're rendering to.
|
||||
window: glfw.Window,
|
||||
|
||||
@ -36,6 +44,10 @@ renderer: *renderer.Renderer,
|
||||
/// Pointer to the shared state that is used to generate the final render.
|
||||
state: *renderer.State,
|
||||
|
||||
/// The mailbox that can be used to send this thread messages. Note
|
||||
/// this is a blocking queue so if it is full you will get errors (or block).
|
||||
mailbox: *Mailbox,
|
||||
|
||||
/// Initialize the thread. This does not START the thread. This only sets
|
||||
/// up all the internal state necessary prior to starting the thread. It
|
||||
/// is up to the caller to start the thread with the threadMain entrypoint.
|
||||
@ -83,14 +95,29 @@ pub fn init(
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Setup a timer for blinking the cursor
|
||||
var cursor_timer = try libuv.Timer.init(alloc, loop);
|
||||
errdefer cursor_timer.close((struct {
|
||||
fn callback(t: *libuv.Timer) void {
|
||||
const alloc_h = t.loop().getData(Allocator).?.*;
|
||||
t.deinit(alloc_h);
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// The mailbox for messaging this thread
|
||||
var mailbox = try Mailbox.create(alloc);
|
||||
errdefer mailbox.destroy(alloc);
|
||||
|
||||
return Thread{
|
||||
.loop = loop,
|
||||
.wakeup = wakeup_h,
|
||||
.stop = stop_h,
|
||||
.render_h = render_h,
|
||||
.cursor_h = cursor_timer,
|
||||
.window = window,
|
||||
.renderer = renderer_impl,
|
||||
.state = state,
|
||||
.mailbox = mailbox,
|
||||
};
|
||||
}
|
||||
|
||||
@ -120,6 +147,12 @@ pub fn deinit(self: *Thread) void {
|
||||
h.deinit(handle_alloc);
|
||||
}
|
||||
}).callback);
|
||||
self.cursor_h.close((struct {
|
||||
fn callback(h: *libuv.Timer) void {
|
||||
const handle_alloc = h.loop().getData(Allocator).?.*;
|
||||
h.deinit(handle_alloc);
|
||||
}
|
||||
}).callback);
|
||||
|
||||
// Run the loop one more time, because destroying our other things
|
||||
// like windows usually cancel all our event loop stuff and we need
|
||||
@ -127,6 +160,9 @@ pub fn deinit(self: *Thread) void {
|
||||
_ = self.loop.run(.default) catch |err|
|
||||
log.err("error finalizing event loop: {}", .{err});
|
||||
|
||||
// Nothing can possibly access the mailbox anymore, destroy it.
|
||||
self.mailbox.destroy(alloc);
|
||||
|
||||
// Dealloc our allocator copy
|
||||
alloc.destroy(alloc_ptr);
|
||||
|
||||
@ -158,12 +194,59 @@ fn threadMain_(self: *Thread) !void {
|
||||
defer self.render_h.setData(null);
|
||||
try self.wakeup.send();
|
||||
|
||||
// Setup a timer for blinking the cursor
|
||||
self.cursor_h.setData(self);
|
||||
try self.cursor_h.start(cursorTimerCallback, 600, 600);
|
||||
|
||||
// Run
|
||||
log.debug("starting renderer thread", .{});
|
||||
defer log.debug("exiting renderer thread", .{});
|
||||
_ = try self.loop.run(.default);
|
||||
}
|
||||
|
||||
/// Drain the mailbox: pop and handle every queued renderer message.
/// Called from the wakeup callback on the renderer thread.
fn drainMailbox(self: *Thread) !void {
    // This holds the mailbox lock for the duration of the drain. The
    // expectation is that all our message handlers will be non-blocking
    // ENOUGH to not mess up throughput on producers.

    var drain = self.mailbox.drain();
    // Releases the mailbox lock when we're done iterating.
    defer drain.deinit();

    while (drain.next()) |message| {
        log.debug("mailbox message={}", .{message});
        switch (message) {
            .focus => |v| {
                // Set it on the renderer
                try self.renderer.setFocus(v);

                if (!v) {
                    // If we're not focused, then we stop the cursor blink
                    try self.cursor_h.stop();
                } else {
                    // If we're focused, we immediately show the cursor again
                    // and then restart the timer.
                    if (!try self.cursor_h.isActive()) {
                        self.renderer.blinkCursor(true);
                        try self.cursor_h.start(
                            cursorTimerCallback,
                            self.cursor_h.getRepeat(),
                            self.cursor_h.getRepeat(),
                        );
                    }
                }
            },

            .reset_cursor_blink => {
                // Show the cursor immediately, then restart the blink
                // interval from now if the timer is running.
                self.renderer.blinkCursor(true);
                if (try self.cursor_h.isActive()) {
                    _ = try self.cursor_h.again();
                }
            },
        }
    }
}
|
||||
|
||||
fn wakeupCallback(h: *libuv.Async) void {
|
||||
const t = h.getData(Thread) orelse {
|
||||
// This shouldn't happen so we log it.
|
||||
@ -171,6 +254,11 @@ fn wakeupCallback(h: *libuv.Async) void {
|
||||
return;
|
||||
};
|
||||
|
||||
// When we wake up, we check the mailbox. Mailbox producers should
|
||||
// wake up our thread after publishing.
|
||||
t.drainMailbox() catch |err|
|
||||
log.err("error draining mailbox err={}", .{err});
|
||||
|
||||
// If the timer is already active then we don't have to do anything.
|
||||
const active = t.render_h.isActive() catch true;
|
||||
if (active) return;
|
||||
@ -191,6 +279,17 @@ fn renderCallback(h: *libuv.Timer) void {
|
||||
log.warn("error rendering err={}", .{err});
|
||||
}
|
||||
|
||||
/// Timer callback that toggles cursor blink state and wakes the
/// renderer so the change is painted.
fn cursorTimerCallback(h: *libuv.Timer) void {
    const t = h.getData(Thread) orelse {
        // This shouldn't happen so we log it.
        // Fixed: this message previously said "render callback", copied
        // from renderCallback; name the actual callback for debugging.
        log.warn("cursor timer callback fired without data set", .{});
        return;
    };

    // Toggle (not reset) the blink state, then nudge the render loop.
    // A failed wakeup just delays the repaint, so it is best-effort.
    t.renderer.blinkCursor(false);
    t.wakeup.send() catch {};
}
|
||||
|
||||
/// Async callback for the stop handle: halts the libuv loop, which
/// returns control from the blocking `loop.run` in the thread main.
fn stopCallback(h: *libuv.Async) void {
    h.loop().stop();
}
|
||||
|
15
src/renderer/message.zig
Normal file
15
src/renderer/message.zig
Normal file
@ -0,0 +1,15 @@
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// The messages that can be sent to a renderer thread. These are consumed
/// by the renderer thread's mailbox drain.
pub const Message = union(enum) {
    /// A change in state in the window focus that this renderer is
    /// rendering within. This is only sent when a change is detected so
    /// the renderer is expected to handle all of these.
    focus: bool,

    /// Reset the cursor blink by immediately showing the cursor then
    /// restarting the timer.
    reset_cursor_blink: void,
};
|
@ -75,6 +75,8 @@ modes: packed struct {
|
||||
mouse_event: MouseEvents = .none,
|
||||
mouse_format: MouseFormat = .x10,
|
||||
|
||||
bracketed_paste: bool = false, // 2004
|
||||
|
||||
test {
|
||||
// We have this here so that we explicitly fail when we change the
|
||||
// size of modes. The size of modes is NOT particularly important,
|
||||
|
@ -440,7 +440,7 @@ pub fn Stream(comptime Handler: type) type {
|
||||
}
|
||||
|
||||
fn configureCharset(
|
||||
self: Self,
|
||||
self: *Self,
|
||||
intermediates: []const u8,
|
||||
set: charsets.Charset,
|
||||
) !void {
|
||||
|
18
src/termio.zig
Normal file
18
src/termio.zig
Normal file
@ -0,0 +1,18 @@
|
||||
//! IO implementation and utilities. The IO implementation is responsible
|
||||
//! for taking the config, spinning up a child process, and handling IO
|
||||
//! with the terminal.
|
||||
|
||||
pub usingnamespace @import("termio/message.zig");
|
||||
pub const Exec = @import("termio/Exec.zig");
|
||||
pub const Options = @import("termio/Options.zig");
|
||||
pub const Thread = @import("termio/Thread.zig");
|
||||
|
||||
/// The implementation to use for the IO. This is just "exec" for now but
|
||||
/// this is somewhat pluggable so that in the future we can introduce other
|
||||
/// options for other platforms (i.e. wasm) or even potentially a vtable
|
||||
/// implementation for runtime polymorphism.
|
||||
pub const Impl = Exec;
|
||||
|
||||
test {
|
||||
@import("std").testing.refAllDecls(@This());
|
||||
}
|
718
src/termio/Exec.zig
Normal file
718
src/termio/Exec.zig
Normal file
@ -0,0 +1,718 @@
|
||||
//! Implementation of IO that uses child exec to talk to the child process.
|
||||
pub const Exec = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const termio = @import("../termio.zig");
|
||||
const Command = @import("../Command.zig");
|
||||
const Pty = @import("../Pty.zig");
|
||||
const SegmentedPool = @import("../segmented_pool.zig").SegmentedPool;
|
||||
const terminal = @import("../terminal/main.zig");
|
||||
const libuv = @import("libuv");
|
||||
const renderer = @import("../renderer.zig");
|
||||
|
||||
const log = std.log.scoped(.io_exec);
|
||||
|
||||
/// Allocator
|
||||
alloc: Allocator,
|
||||
|
||||
/// This is the pty fd created for the subcommand.
|
||||
pty: Pty,
|
||||
|
||||
/// This is the container for the subcommand.
|
||||
command: Command,
|
||||
|
||||
/// The terminal emulator internal state. This is the abstract "terminal"
|
||||
/// that manages input, grid updating, etc. and is renderer-agnostic. It
|
||||
/// just stores internal state about a grid.
|
||||
terminal: terminal.Terminal,
|
||||
|
||||
/// The stream parser. This parses the stream of escape codes and so on
|
||||
/// from the child process and calls callbacks in the stream handler.
|
||||
terminal_stream: terminal.Stream(StreamHandler),
|
||||
|
||||
/// The shared render state
|
||||
renderer_state: *renderer.State,
|
||||
|
||||
/// A handle to wake up the renderer. This hints to the renderer that that
|
||||
/// a repaint should happen.
|
||||
renderer_wakeup: libuv.Async,
|
||||
|
||||
/// The mailbox for notifying the renderer of things.
|
||||
renderer_mailbox: *renderer.Thread.Mailbox,
|
||||
|
||||
/// The cached grid size whenever a resize is called.
|
||||
grid_size: renderer.GridSize,
|
||||
|
||||
/// The data associated with the currently running thread.
|
||||
data: ?*EventData,
|
||||
|
||||
/// Initialize the exec implementation. This will also start the child
/// process. On success the caller owns the returned Exec and must call
/// `deinit` to close the pty and reap the child.
pub fn init(alloc: Allocator, opts: termio.Options) !Exec {
    // Create our pty, sized from the current grid and screen dimensions.
    var pty = try Pty.open(.{
        .ws_row = @intCast(u16, opts.grid_size.rows),
        .ws_col = @intCast(u16, opts.grid_size.columns),
        .ws_xpixel = @intCast(u16, opts.screen_size.width),
        .ws_ypixel = @intCast(u16, opts.screen_size.height),
    });
    errdefer pty.deinit();

    // Determine the path to the binary we're executing. Falls back to
    // "sh" when no command is configured.
    const path = (try Command.expandPath(alloc, opts.config.command orelse "sh")) orelse
        return error.CommandNotFound;
    defer alloc.free(path);

    // Set our env vars
    var env = try std.process.getEnvMap(alloc);
    defer env.deinit();
    try env.put("TERM", "xterm-256color");

    // Build our subcommand. NOTE(review): `.data = &pty` points at the
    // stack-local `pty`; it is only dereferenced in pre_exec during
    // `cmd.start` below, while this frame is still alive — confirm that
    // Command does not retain the pointer past start().
    var cmd: Command = .{
        .path = path,
        .args = &[_][]const u8{path},
        .env = &env,
        .cwd = opts.config.@"working-directory",
        .pre_exec = (struct {
            fn callback(c: *Command) void {
                const p = c.getData(Pty) orelse unreachable;
                p.childPreExec() catch |err|
                    log.err("error initializing child: {}", .{err});
            }
        }).callback,
        .data = &pty,
    };
    // note: can't set these in the struct initializer because it
    // sets the handle to "0". Probably a stage1 zig bug.
    cmd.stdin = std.fs.File{ .handle = pty.slave };
    cmd.stdout = cmd.stdin;
    cmd.stderr = cmd.stdin;
    try cmd.start(alloc);
    log.info("started subcommand path={s} pid={?}", .{ path, cmd.pid });

    // Create our terminal
    var term = try terminal.Terminal.init(alloc, opts.grid_size.columns, opts.grid_size.rows);
    errdefer term.deinit(alloc);

    return Exec{
        .alloc = alloc,
        .pty = pty,
        .command = cmd,
        .terminal = term,
        // NOTE(review): left undefined here; presumably initialized when
        // the IO thread starts (threadEnter) — confirm before reading.
        .terminal_stream = undefined,
        .renderer_state = opts.renderer_state,
        .renderer_wakeup = opts.renderer_wakeup,
        .renderer_mailbox = opts.renderer_mailbox,
        .grid_size = opts.grid_size,
        .data = null,
    };
}
||||
|
||||
/// Release the pty, reap the child process, and free terminal state.
/// Only safe to call after threadExit (no IO thread may be running).
pub fn deinit(self: *Exec) void {
    // Deinitialize the pty. This closes the pty handles. This should
    // cause a close in the our subprocess so just wait for that.
    self.pty.deinit();
    _ = self.command.wait() catch |err|
        log.err("error waiting for command to exit: {}", .{err});

    // Clean up our other members
    self.terminal.deinit(self.alloc);
}
|
||||
|
||||
/// Called on the IO thread when it starts. Registers the pty read stream
/// on the thread's libuv loop and allocates the per-thread EventData.
/// Returns the ThreadData handed back to threadExit / ThreadData.deinit.
pub fn threadEnter(self: *Exec, loop: libuv.Loop) !ThreadData {
    assert(self.data == null);

    // Get a copy to our allocator (the loop's user data is always an
    // *Allocator by convention — see termio.Thread.init).
    const alloc_ptr = loop.getData(Allocator).?;
    const alloc = alloc_ptr.*;

    // Setup our data that is used for callbacks
    var ev_data_ptr = try alloc.create(EventData);
    errdefer alloc.destroy(ev_data_ptr);

    // Read data. NOTE: readStart is called while ev_data_ptr is still
    // uninitialized; this relies on the read callbacks not firing until
    // the loop actually runs on the IO thread — confirm if the loop could
    // already be running here.
    var stream = try libuv.Tty.init(alloc, loop, self.pty.master);
    errdefer stream.deinit(alloc);
    stream.setData(ev_data_ptr);
    try stream.readStart(ttyReadAlloc, ttyRead);

    // Setup our event data before we start
    ev_data_ptr.* = .{
        .read_arena = std.heap.ArenaAllocator.init(alloc),
        .renderer_state = self.renderer_state,
        .renderer_wakeup = self.renderer_wakeup,
        .renderer_mailbox = self.renderer_mailbox,
        .data_stream = stream,
        .terminal_stream = .{
            .handler = .{
                .alloc = self.alloc,
                .ev = ev_data_ptr,
                .terminal = &self.terminal,
                .grid_size = &self.grid_size,
            },
        },
    };
    errdefer ev_data_ptr.deinit();

    // Store our data so our callbacks can access it
    self.data = ev_data_ptr;

    // Return our thread data
    return ThreadData{
        .alloc = alloc,
        .ev = ev_data_ptr,
    };
}
|
||||
|
||||
/// Called on the IO thread just before it ends. The event data itself is
/// freed via ThreadData.deinit; here we only drop our reference to it.
pub fn threadExit(self: *Exec, data: ThreadData) void {
    _ = data;
    self.data = null;
}
|
||||
|
||||
/// Resize the terminal.
///
/// Updates the pty window size, caches the new grid size, and then — while
/// holding the shared renderer mutex — publishes the pending screen size
/// and resizes the internal terminal grid.
pub fn resize(
    self: *Exec,
    grid_size: renderer.GridSize,
    screen_size: renderer.ScreenSize,
) !void {
    // Update the size of our pty
    try self.pty.setSize(.{
        .ws_row = @intCast(u16, grid_size.rows),
        .ws_col = @intCast(u16, grid_size.columns),
        .ws_xpixel = @intCast(u16, screen_size.width),
        .ws_ypixel = @intCast(u16, screen_size.height),
    });

    // Update our cached grid size
    self.grid_size = grid_size;

    // Enter the critical area that we want to keep small
    {
        self.renderer_state.mutex.lock();
        defer self.renderer_state.mutex.unlock();

        // We need to setup our render state to store our new pending size
        self.renderer_state.resize_screen = screen_size;

        // Update the size of our terminal state
        try self.terminal.resize(self.alloc, grid_size.columns, grid_size.rows);
    }
}
|
||||
|
||||
/// Queue data to be written to the pty. Asserts (via .?) that the IO
/// thread is running, i.e. threadEnter has been called.
pub inline fn queueWrite(self: *Exec, data: []const u8) !void {
    const ev = self.data.?;
    try ev.queueWrite(data);
}
|
||||
|
||||
/// Per-IO-thread state returned by threadEnter and destroyed after the
/// thread exits.
const ThreadData = struct {
    /// Allocator used for the event data
    alloc: Allocator,

    /// The data that is attached to the callbacks.
    ev: *EventData,

    pub fn deinit(self: *ThreadData) void {
        self.ev.deinit(self.alloc);
        self.alloc.destroy(self.ev);
        self.* = undefined;
    }
};
|
||||
|
||||
/// State shared by all libuv callbacks on the IO thread. A pointer to this
/// is stored as the user data of the pty Tty stream.
const EventData = struct {
    // The preallocation size for the write request pool. This should be big
    // enough to satisfy most write requests. It must be a power of 2.
    const WRITE_REQ_PREALLOC = std.math.pow(usize, 2, 5);

    /// This is the arena allocator used for IO read buffers. Since we use
    /// libuv under the covers, this lets us rarely heap allocate since we're
    /// usually just reusing buffers from this.
    read_arena: std.heap.ArenaAllocator,

    /// The stream parser. This parses the stream of escape codes and so on
    /// from the child process and calls callbacks in the stream handler.
    terminal_stream: terminal.Stream(StreamHandler),

    /// The shared render state
    renderer_state: *renderer.State,

    /// A handle to wake up the renderer. This hints to the renderer that
    /// a repaint should happen.
    renderer_wakeup: libuv.Async,

    /// The mailbox for notifying the renderer of things.
    renderer_mailbox: *renderer.Thread.Mailbox,

    /// The data stream is the main IO for the pty.
    data_stream: libuv.Tty,

    /// This is the pool of available (unused) write requests. If you grab
    /// one from the pool, you must put it back when you're done!
    write_req_pool: SegmentedPool(libuv.WriteReq.T, WRITE_REQ_PREALLOC) = .{},

    /// The pool of available buffers for writing to the pty.
    write_buf_pool: SegmentedPool([64]u8, WRITE_REQ_PREALLOC) = .{},

    pub fn deinit(self: *EventData, alloc: Allocator) void {
        self.read_arena.deinit();

        // Clear our write pools. We know we aren't ever going to do
        // any more IO since we stop our data stream below so we can just
        // drop this.
        self.write_req_pool.deinit(alloc);
        self.write_buf_pool.deinit(alloc);

        // Stop our data stream. The close is asynchronous: the handle
        // memory is freed in the close callback using the allocator
        // stored on the loop, which requires one more loop iteration
        // (see termio.Thread.deinit).
        self.data_stream.readStop();
        self.data_stream.close((struct {
            fn callback(h: *libuv.Tty) void {
                const handle_alloc = h.loop().getData(Allocator).?.*;
                h.deinit(handle_alloc);
            }
        }).callback);
    }

    /// This queues a render operation with the renderer thread. The render
    /// isn't guaranteed to happen immediately but it will happen as soon as
    /// practical.
    inline fn queueRender(self: *EventData) !void {
        try self.renderer_wakeup.send();
    }

    /// Queue a write to the pty. The data is copied into pooled 64-byte
    /// buffers, so the caller's slice need not outlive this call.
    fn queueWrite(self: *EventData, data: []const u8) !void {
        // We go through and chunk the data if necessary to fit into
        // our cached buffers that we can queue to the stream.
        var i: usize = 0;
        while (i < data.len) {
            const req = try self.write_req_pool.get();
            const buf = try self.write_buf_pool.get();
            const end = @min(data.len, i + buf.len);
            std.mem.copy(u8, buf, data[i..end]);
            // req and buf are returned to their pools in ttyWrite.
            try self.data_stream.write(
                .{ .req = req },
                &[1][]u8{buf[0..(end - i)]},
                ttyWrite,
            );

            i = end;
        }
    }
};
|
||||
|
||||
/// libuv write-completion callback: returns the request and its buffer to
/// their pools and logs (but otherwise ignores) any write error.
fn ttyWrite(req: *libuv.WriteReq, status: i32) void {
    const stream = req.handle(libuv.Tty).?;
    const ev = stream.getData(EventData).?;

    // The pools were drawn from in EventData.queueWrite; release them
    // whether or not the write succeeded.
    ev.write_req_pool.put();
    ev.write_buf_pool.put();

    libuv.convertError(status) catch |err|
        log.err("write error: {}", .{err});
}
|
||||
|
||||
/// libuv read-buffer allocation callback. Buffers come from the event
/// data's arena (freed per-read in ttyRead); returning null signals an
/// allocation failure to libuv.
fn ttyReadAlloc(t: *libuv.Tty, size: usize) ?[]u8 {
    const ev = t.getData(EventData) orelse return null;
    return ev.read_arena.allocator().alloc(u8, size) catch null;
}
|
||||
|
||||
/// libuv read callback for the pty stream. Feeds the received bytes into
/// the terminal stream parser (with a fast path for plain text) and
/// notifies the renderer. `buf` was allocated by ttyReadAlloc and is freed
/// here; `n < 0` carries a libuv error code.
fn ttyRead(t: *libuv.Tty, n: isize, buf: []const u8) void {
    const ev = t.getData(EventData).?;
    defer {
        const alloc = ev.read_arena.allocator();
        alloc.free(buf);
    }

    // First check for errors in the case n is less than 0.
    libuv.convertError(@intCast(i32, n)) catch |err| {
        switch (err) {
            // ignore EOF because it should end the process.
            libuv.Error.EOF => {},
            else => log.err("read error: {}", .{err}),
        }

        return;
    };

    // Whenever a character is typed, we ensure the cursor is in the
    // non-blink state so it is rendered if visible.
    _ = ev.renderer_mailbox.push(.{
        .reset_cursor_blink = {},
    }, .{ .forever = {} });

    // We are modifying terminal state from here on out
    ev.renderer_state.mutex.lock();
    defer ev.renderer_state.mutex.unlock();

    // Schedule a render. The async send can fail, so log instead of
    // asserting unreachable — a failed wakeup should not be UB; the
    // renderer will still repaint on its next wakeup.
    ev.queueRender() catch |err|
        log.err("error scheduling render: {}", .{err});

    // Process the terminal data. This is an extremely hot part of the
    // terminal emulator, so we do some abstraction leakage to avoid
    // function calls and unnecessary logic.
    //
    // The ground state is the only state that we can see and print/execute
    // ASCII, so we only execute this hot path if we're already in the ground
    // state.
    //
    // Empirically, this alone improved throughput of large text output by ~20%.
    var i: usize = 0;
    const end = @intCast(usize, n);
    if (ev.terminal_stream.parser.state == .ground) {
        for (buf[i..end]) |c| {
            switch (terminal.parse_table.table[c][@enumToInt(terminal.Parser.State.ground)].action) {
                // Print, call directly.
                .print => ev.terminal_stream.handler.print(@intCast(u21, c)) catch |err|
                    log.err("error processing terminal data: {}", .{err}),

                // C0 execute, let our stream handle this one but otherwise
                // continue since we're guaranteed to be back in ground.
                .execute => ev.terminal_stream.execute(c) catch |err|
                    log.err("error processing terminal data: {}", .{err}),

                // Otherwise, break out and go the slow path until we're
                // back in ground. There is a slight optimization here where
                // could try to find the next transition to ground but when
                // I implemented that it didn't materially change performance.
                else => break,
            }

            i += 1;
        }
    }

    // Slow path: hand whatever the fast path didn't consume to the full
    // stream parser.
    if (i < end) {
        ev.terminal_stream.nextSlice(buf[i..end]) catch |err|
            log.err("error processing terminal data: {}", .{err});
    }
}
|
||||
|
||||
/// This is used as the handler for the terminal.Stream type. This is
/// stateful and is expected to live for the entire lifetime of the terminal.
/// It is NOT VALID to stop a stream handler, create a new one, and use that
/// unless all of the member fields are copied.
const StreamHandler = struct {
    // The event data owning this handler (renderer access, pty writes).
    ev: *EventData,
    // Allocator used for terminal resize operations.
    alloc: Allocator,
    // Points at Exec.grid_size, shared with the owning Exec.
    grid_size: *renderer.GridSize,
    // Points at Exec.terminal; mutated by nearly every callback below.
    terminal: *terminal.Terminal,

    /// Hint the renderer thread that a repaint should happen.
    inline fn queueRender(self: *StreamHandler) !void {
        try self.ev.queueRender();
    }

    /// Queue a response write back to the pty (e.g. for status reports).
    inline fn queueWrite(self: *StreamHandler, data: []const u8) !void {
        try self.ev.queueWrite(data);
    }

    pub fn print(self: *StreamHandler, c: u21) !void {
        try self.terminal.print(c);
    }

    pub fn bell(self: StreamHandler) !void {
        _ = self;
        // Bell is not implemented yet; just log it.
        log.info("BELL", .{});
    }

    pub fn backspace(self: *StreamHandler) !void {
        self.terminal.backspace();
    }

    pub fn horizontalTab(self: *StreamHandler) !void {
        try self.terminal.horizontalTab();
    }

    pub fn linefeed(self: *StreamHandler) !void {
        // Small optimization: call index instead of linefeed because they're
        // identical and this avoids one layer of function call overhead.
        try self.terminal.index();
    }

    pub fn carriageReturn(self: *StreamHandler) !void {
        self.terminal.carriageReturn();
    }

    pub fn setCursorLeft(self: *StreamHandler, amount: u16) !void {
        self.terminal.cursorLeft(amount);
    }

    pub fn setCursorRight(self: *StreamHandler, amount: u16) !void {
        self.terminal.cursorRight(amount);
    }

    pub fn setCursorDown(self: *StreamHandler, amount: u16) !void {
        self.terminal.cursorDown(amount);
    }

    pub fn setCursorUp(self: *StreamHandler, amount: u16) !void {
        self.terminal.cursorUp(amount);
    }

    pub fn setCursorCol(self: *StreamHandler, col: u16) !void {
        self.terminal.setCursorColAbsolute(col);
    }

    pub fn setCursorRow(self: *StreamHandler, row: u16) !void {
        if (self.terminal.modes.origin) {
            // TODO
            // NOTE(review): this crashes (UB in release) whenever origin
            // mode is enabled and a program sets the cursor row — this is
            // reachable from untrusted terminal input and should be
            // implemented or degraded gracefully.
            log.err("setCursorRow: implement origin mode", .{});
            unreachable;
        }

        self.terminal.setCursorPos(row, self.terminal.screen.cursor.x + 1);
    }

    pub fn setCursorPos(self: *StreamHandler, row: u16, col: u16) !void {
        self.terminal.setCursorPos(row, col);
    }

    pub fn eraseDisplay(self: *StreamHandler, mode: terminal.EraseDisplay) !void {
        if (mode == .complete) {
            // Whenever we erase the full display, scroll to bottom.
            try self.terminal.scrollViewport(.{ .bottom = {} });
            try self.queueRender();
        }

        self.terminal.eraseDisplay(mode);
    }

    pub fn eraseLine(self: *StreamHandler, mode: terminal.EraseLine) !void {
        self.terminal.eraseLine(mode);
    }

    pub fn deleteChars(self: *StreamHandler, count: usize) !void {
        try self.terminal.deleteChars(count);
    }

    pub fn eraseChars(self: *StreamHandler, count: usize) !void {
        self.terminal.eraseChars(count);
    }

    pub fn insertLines(self: *StreamHandler, count: usize) !void {
        try self.terminal.insertLines(count);
    }

    pub fn insertBlanks(self: *StreamHandler, count: usize) !void {
        self.terminal.insertBlanks(count);
    }

    pub fn deleteLines(self: *StreamHandler, count: usize) !void {
        try self.terminal.deleteLines(count);
    }

    pub fn reverseIndex(self: *StreamHandler) !void {
        try self.terminal.reverseIndex();
    }

    pub fn index(self: *StreamHandler) !void {
        try self.terminal.index();
    }

    pub fn nextLine(self: *StreamHandler) !void {
        self.terminal.carriageReturn();
        try self.terminal.index();
    }

    pub fn setTopAndBottomMargin(self: *StreamHandler, top: u16, bot: u16) !void {
        self.terminal.setScrollingRegion(top, bot);
    }

    /// Set or reset a terminal mode (DECSET/DECRST and friends). Modes
    /// that change visible output also schedule a render.
    pub fn setMode(self: *StreamHandler, mode: terminal.Mode, enabled: bool) !void {
        switch (mode) {
            .reverse_colors => {
                self.terminal.modes.reverse_colors = enabled;

                // Schedule a render since we changed colors
                try self.queueRender();
            },

            .origin => {
                self.terminal.modes.origin = enabled;
                self.terminal.setCursorPos(1, 1);
            },

            .autowrap => {
                self.terminal.modes.autowrap = enabled;
            },

            .cursor_visible => {
                self.ev.renderer_state.cursor.visible = enabled;
            },

            .alt_screen_save_cursor_clear_enter => {
                const opts: terminal.Terminal.AlternateScreenOptions = .{
                    .cursor_save = true,
                    .clear_on_enter = true,
                };

                if (enabled)
                    self.terminal.alternateScreen(opts)
                else
                    self.terminal.primaryScreen(opts);

                // Schedule a render since we changed screens
                try self.queueRender();
            },

            .bracketed_paste => self.terminal.modes.bracketed_paste = enabled,

            .enable_mode_3 => {
                // Disable deccolm
                self.terminal.setDeccolmSupported(enabled);

                // Force resize back to the window size
                self.terminal.resize(self.alloc, self.grid_size.columns, self.grid_size.rows) catch |err|
                    log.err("error updating terminal size: {}", .{err});
            },

            .@"132_column" => try self.terminal.deccolm(
                self.alloc,
                if (enabled) .@"132_cols" else .@"80_cols",
            ),

            // Mouse events: only one event mode may be active at a time.
            .mouse_event_x10 => self.terminal.modes.mouse_event = if (enabled) .x10 else .none,
            .mouse_event_normal => self.terminal.modes.mouse_event = if (enabled) .normal else .none,
            .mouse_event_button => self.terminal.modes.mouse_event = if (enabled) .button else .none,
            .mouse_event_any => self.terminal.modes.mouse_event = if (enabled) .any else .none,

            // Mouse report formats: disabling any of them resets to x10.
            .mouse_format_utf8 => self.terminal.modes.mouse_format = if (enabled) .utf8 else .x10,
            .mouse_format_sgr => self.terminal.modes.mouse_format = if (enabled) .sgr else .x10,
            .mouse_format_urxvt => self.terminal.modes.mouse_format = if (enabled) .urxvt else .x10,
            .mouse_format_sgr_pixels => self.terminal.modes.mouse_format = if (enabled) .sgr_pixels else .x10,

            else => if (enabled) log.warn("unimplemented mode: {}", .{mode}),
        }
    }

    pub fn setAttribute(self: *StreamHandler, attr: terminal.Attribute) !void {
        switch (attr) {
            .unknown => |unk| log.warn("unimplemented or unknown attribute: {any}", .{unk}),

            else => self.terminal.setAttribute(attr) catch |err|
                log.warn("error setting attribute {}: {}", .{ attr, err }),
        }
    }

    /// Respond to a Device Attributes (DA) request by writing the answer
    /// back to the pty.
    pub fn deviceAttributes(
        self: *StreamHandler,
        req: terminal.DeviceAttributeReq,
        params: []const u16,
    ) !void {
        _ = params;

        switch (req) {
            // VT220
            .primary => self.queueWrite("\x1B[?62;c") catch |err|
                log.warn("error queueing device attr response: {}", .{err}),
            else => log.warn("unimplemented device attributes req: {}", .{req}),
        }
    }

    /// Respond to a Device Status Report (DSR) request.
    pub fn deviceStatusReport(
        self: *StreamHandler,
        req: terminal.DeviceStatusReq,
    ) !void {
        switch (req) {
            .operating_status => self.queueWrite("\x1B[0n") catch |err|
                log.warn("error queueing device attr response: {}", .{err}),

            .cursor_position => {
                // In origin mode the reported row is relative to the top
                // of the scrolling region.
                const pos: struct {
                    x: usize,
                    y: usize,
                } = if (self.terminal.modes.origin) .{
                    // TODO: what do we do if cursor is outside scrolling region?
                    .x = self.terminal.screen.cursor.x,
                    .y = self.terminal.screen.cursor.y -| self.terminal.scrolling_region.top,
                } else .{
                    .x = self.terminal.screen.cursor.x,
                    .y = self.terminal.screen.cursor.y,
                };

                // Response always is at least 4 chars, so this leaves the
                // remainder for the row/column as base-10 numbers. This
                // will support a very large terminal.
                var buf: [32]u8 = undefined;
                const resp = try std.fmt.bufPrint(&buf, "\x1B[{};{}R", .{
                    pos.y + 1,
                    pos.x + 1,
                });

                try self.queueWrite(resp);
            },

            else => log.warn("unimplemented device status req: {}", .{req}),
        }
    }

    pub fn setCursorStyle(
        self: *StreamHandler,
        style: terminal.CursorStyle,
    ) !void {
        self.ev.renderer_state.cursor.style = style;
    }

    pub fn decaln(self: *StreamHandler) !void {
        try self.terminal.decaln();
    }

    pub fn tabClear(self: *StreamHandler, cmd: terminal.TabClear) !void {
        self.terminal.tabClear(cmd);
    }

    pub fn tabSet(self: *StreamHandler) !void {
        self.terminal.tabSet();
    }

    pub fn saveCursor(self: *StreamHandler) !void {
        self.terminal.saveCursor();
    }

    pub fn restoreCursor(self: *StreamHandler) !void {
        self.terminal.restoreCursor();
    }

    pub fn enquiry(self: *StreamHandler) !void {
        // The answerback string is empty.
        try self.queueWrite("");
    }

    pub fn scrollDown(self: *StreamHandler, count: usize) !void {
        try self.terminal.scrollDown(count);
    }

    pub fn scrollUp(self: *StreamHandler, count: usize) !void {
        try self.terminal.scrollUp(count);
    }

    pub fn setActiveStatusDisplay(
        self: *StreamHandler,
        req: terminal.StatusDisplay,
    ) !void {
        self.terminal.status_display = req;
    }

    pub fn configureCharset(
        self: *StreamHandler,
        slot: terminal.CharsetSlot,
        set: terminal.Charset,
    ) !void {
        self.terminal.configureCharset(slot, set);
    }

    pub fn invokeCharset(
        self: *StreamHandler,
        active: terminal.CharsetActiveSlot,
        slot: terminal.CharsetSlot,
        single: bool,
    ) !void {
        self.terminal.invokeCharset(active, slot, single);
    }
};
|
27
src/termio/Options.zig
Normal file
27
src/termio/Options.zig
Normal file
@ -0,0 +1,27 @@
|
||||
//! The options that are used to configure a terminal IO implementation.

const libuv = @import("libuv");
const renderer = @import("../renderer.zig");
const Config = @import("../config.zig").Config;

/// The size of the terminal grid.
grid_size: renderer.GridSize,

/// The size of the viewport in pixels.
screen_size: renderer.ScreenSize,

/// The app configuration.
config: *const Config,

/// The render state. The IO implementation can modify anything here. The
/// window thread will setup the initial "terminal" pointer but the IO impl
/// is free to change that if that is useful (i.e. doing some sort of dual
/// terminal implementation.)
renderer_state: *renderer.State,

/// A handle to wake up the renderer. This hints to the renderer that
/// a repaint should happen.
renderer_wakeup: libuv.Async,

/// The mailbox for renderer messages.
renderer_mailbox: *renderer.Thread.Mailbox,
|
197
src/termio/Thread.zig
Normal file
197
src/termio/Thread.zig
Normal file
@ -0,0 +1,197 @@
|
||||
//! Represents the IO thread logic. The IO thread is responsible for
|
||||
//! the child process and pty management.
|
||||
pub const Thread = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const libuv = @import("libuv");
|
||||
const termio = @import("../termio.zig");
|
||||
const BlockingQueue = @import("../blocking_queue.zig").BlockingQueue;
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
const log = std.log.scoped(.io_thread);
|
||||
|
||||
/// The type used for sending messages to the IO thread. For now this is
/// hardcoded with a capacity. We can make this a comptime parameter in
/// the future if we want it configurable.
const Mailbox = BlockingQueue(termio.Message, 64);

/// The main event loop for the thread. The user data of this loop
/// is always the allocator used to create the loop. This is a convenience
/// so that users of the loop always have an allocator.
loop: libuv.Loop,

/// This can be used to wake up the thread (producers send on this after
/// pushing to the mailbox; see wakeupCallback).
wakeup: libuv.Async,

/// This can be used to stop the thread on the next loop iteration.
stop: libuv.Async,

/// The underlying IO implementation.
impl: *termio.Impl,

/// The mailbox that can be used to send this thread messages. Note
/// this is a blocking queue so if it is full you will get errors (or block).
mailbox: *Mailbox,
|
||||
|
||||
/// Initialize the thread. This does not START the thread. This only sets
/// up all the internal state necessary prior to starting the thread. It
/// is up to the caller to start the thread with the threadMain entrypoint.
pub fn init(
    alloc: Allocator,
    impl: *termio.Impl,
) !Thread {
    // We always store allocator pointer on the loop data so that
    // handles can use our global allocator.
    const allocPtr = try alloc.create(Allocator);
    errdefer alloc.destroy(allocPtr);
    allocPtr.* = alloc;

    // Create our event loop.
    var loop = try libuv.Loop.init(alloc);
    errdefer loop.deinit(alloc);
    loop.setData(allocPtr);

    // This async handle is used to wake up this IO thread so it can
    // drain its mailbox (see wakeupCallback).
    var wakeup_h = try libuv.Async.init(alloc, loop, wakeupCallback);
    errdefer wakeup_h.close((struct {
        fn callback(h: *libuv.Async) void {
            const loop_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(loop_alloc);
        }
    }).callback);

    // This async handle is used to stop the loop and force the thread to end.
    var stop_h = try libuv.Async.init(alloc, loop, stopCallback);
    errdefer stop_h.close((struct {
        fn callback(h: *libuv.Async) void {
            const loop_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(loop_alloc);
        }
    }).callback);

    // The mailbox for messaging this thread
    var mailbox = try Mailbox.create(alloc);
    errdefer mailbox.destroy(alloc);

    return Thread{
        .loop = loop,
        .wakeup = wakeup_h,
        .stop = stop_h,
        .impl = impl,
        .mailbox = mailbox,
    };
}
|
||||
|
||||
/// Clean up the thread. This is only safe to call once the thread
/// completes executing; the caller must join prior to this.
pub fn deinit(self: *Thread) void {
    // Get a copy to our allocator
    const alloc_ptr = self.loop.getData(Allocator).?;
    const alloc = alloc_ptr.*;

    // Schedule our handles to close. libuv closes are asynchronous; the
    // handle memory is freed in the close callbacks during the loop run
    // below.
    self.stop.close((struct {
        fn callback(h: *libuv.Async) void {
            const handle_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(handle_alloc);
        }
    }).callback);
    self.wakeup.close((struct {
        fn callback(h: *libuv.Async) void {
            const handle_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(handle_alloc);
        }
    }).callback);

    // Run the loop one more time, because destroying our other things
    // like windows usually cancel all our event loop stuff and we need
    // one more run through to finalize all the closes.
    _ = self.loop.run(.default) catch |err|
        log.err("error finalizing event loop: {}", .{err});

    // Nothing can possibly access the mailbox anymore, destroy it.
    self.mailbox.destroy(alloc);

    // Dealloc our allocator copy
    alloc.destroy(alloc_ptr);

    self.loop.deinit(alloc);
}
|
||||
|
||||
/// The main entrypoint for the thread. Never returns an error: failures
/// from the inner fallible function are logged instead.
pub fn threadMain(self: *Thread) void {
    if (self.threadMain_()) |_| {} else |err| {
        // In the future, we should expose this on the thread struct.
        log.warn("error in io thread err={}", .{err});
    }
}
|
||||
|
||||
// Fallible body of threadMain: sets up the IO implementation on this
// thread's loop and runs the loop until stopped.
fn threadMain_(self: *Thread) !void {
    // Run our thread start/end callbacks. This allows the implementation
    // to hook into the event loop as needed. Defers run LIFO: threadExit
    // executes first on the way out, then data.deinit.
    var data = try self.impl.threadEnter(self.loop);
    defer data.deinit();
    defer self.impl.threadExit(data);

    // Set up our async handler to support rendering
    self.wakeup.setData(self);
    defer self.wakeup.setData(null);

    // Run
    log.debug("starting IO thread", .{});
    defer log.debug("exiting IO thread", .{});
    _ = try self.loop.run(.default);
}
|
||||
|
||||
/// Drain the mailbox, handling all the messages in our terminal implementation.
fn drainMailbox(self: *Thread) !void {
    // This holds the mailbox lock for the duration of the drain. The
    // expectation is that all our message handlers will be non-blocking
    // ENOUGH to not mess up throughput on producers.
    var redraw: bool = false;
    {
        var drain = self.mailbox.drain();
        defer drain.deinit();

        while (drain.next()) |message| {
            // If we have a message we always redraw
            redraw = true;

            log.debug("mailbox message={}", .{message});
            switch (message) {
                .resize => |v| try self.impl.resize(v.grid_size, v.screen_size),
                .write_small => |v| try self.impl.queueWrite(v.data[0..v.len]),
                .write_stable => |v| try self.impl.queueWrite(v),
                .write_alloc => |v| {
                    // Sender transferred ownership; free after writing.
                    defer v.alloc.free(v.data);
                    try self.impl.queueWrite(v.data);
                },
            }
        }
    }

    // Trigger a redraw after we've drained so we don't waste cycles
    // messaging a redraw.
    if (redraw) {
        try self.impl.renderer_wakeup.send();
    }
}
|
||||
|
||||
/// Async callback for the wakeup handle. Producers wake this thread after
/// publishing to the mailbox, so a wakeup means messages may be pending.
fn wakeupCallback(h: *libuv.Async) void {
    if (h.getData(Thread)) |t| {
        t.drainMailbox() catch |err|
            log.err("error draining mailbox err={}", .{err});
    } else {
        // This shouldn't happen so we log it.
        log.warn("wakeup callback fired without data set", .{});
    }
}
|
||||
|
||||
/// Async callback for the stop handle: stops the loop, which makes the
/// thread's loop.run() return and the thread exit.
fn stopCallback(h: *libuv.Async) void {
    var loop = h.loop();
    loop.stop();
}
|
118
src/termio/message.zig
Normal file
118
src/termio/message.zig
Normal file
@ -0,0 +1,118 @@
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const renderer = @import("../renderer.zig");
|
||||
const terminal = @import("../terminal/main.zig");
|
||||
|
||||
/// The messages that can be sent to an IO thread.
///
/// This is not a tiny structure (~40 bytes at the time of writing this comment),
/// but the messages an IO thread sends are also very few. At the current size
/// we can queue 26,000 messages before consuming a MB of RAM.
pub const Message = union(enum) {
    /// Resize the window.
    resize: struct {
        grid_size: renderer.GridSize,
        screen_size: renderer.ScreenSize,
    },

    /// Write where the data fits in the union.
    write_small: WriteReq.Small,

    /// Write where the data pointer is stable.
    write_stable: []const u8,

    /// Write where the data is allocated and must be freed.
    write_alloc: WriteReq.Alloc,

    /// Return a write request for the given data. This will use
    /// write_small if it fits or write_alloc otherwise. This should NOT
    /// be used for stable pointers which can be manually set to write_stable.
    ///
    /// `data` must be a []u8-like slice (asserted at comptime). For the
    /// write_alloc case the receiver owns the duplicated data and must
    /// free it with the given allocator.
    pub fn writeReq(alloc: Allocator, data: anytype) !Message {
        switch (@typeInfo(@TypeOf(data))) {
            .Pointer => |info| {
                assert(info.size == .Slice);
                assert(info.child == u8);

                // If it fits in our small request, do that.
                if (data.len <= WriteReq.Small.Max) {
                    var buf: WriteReq.Small.Array = undefined;
                    std.mem.copy(u8, &buf, data);
                    return Message{
                        .write_small = .{
                            .data = buf,
                            .len = @intCast(u8, data.len),
                        },
                    };
                }

                // Otherwise, allocate
                var buf = try alloc.dupe(u8, data);
                errdefer alloc.free(buf);
                return Message{
                    .write_alloc = .{
                        .alloc = alloc,
                        .data = buf,
                    },
                };
            },

            else => unreachable,
        }
    }

    /// Represents a write request.
    pub const WriteReq = union(enum) {
        pub const Small = struct {
            // Sized so Message stays at 40 bytes (see the size test below).
            pub const Max = 38;
            pub const Array = [Max]u8;
            data: Array,
            len: u8,
        };

        pub const Alloc = struct {
            alloc: Allocator,
            data: []u8,
        };

        /// A small write where the data fits into this union size.
        small: Small,

        /// A stable pointer so we can just pass the slice directly through.
        /// This is useful i.e. for const data.
        stable: []const u8,

        /// Allocated and must be freed with the provided allocator. This
        /// should be rarely used.
        alloc: Alloc,
    };
};
|
||||
|
||||
// Reference every declaration so nested tests are discovered and run.
test {
    const t = std.testing;
    t.refAllDecls(@This());
}
|
||||
|
||||
// Guard against accidentally growing the IO message size: every extra byte
// is multiplied across each slot of the bounded message queues.
test {
    try std.testing.expectEqual(@as(usize, 40), @sizeOf(Message));
}
|
||||
|
||||
test "Message.writeReq small" {
    // A short payload must be stored inline as a write_small message.
    const alloc = std.testing.allocator;
    const msg = try Message.writeReq(alloc, @as([]const u8, "hello!"));
    try std.testing.expect(msg == .write_small);
}
|
||||
|
||||
test "Message.writeReq alloc" {
    // A payload larger than the inline buffer must become write_alloc.
    const alloc = std.testing.allocator;
    const big = "hello! " ** 100;
    const msg = try Message.writeReq(alloc, @as([]const u8, big));
    try std.testing.expect(msg == .write_alloc);

    // Free with the allocator carried inside the message itself.
    msg.write_alloc.alloc.free(msg.write_alloc.data);
}
|
Reference in New Issue
Block a user