Multi-threaded Rendering

This moves the OpenGL renderer out to a dedicated thread. The immediate effect
is noticeably improved FPS under high load scenarios. But the future impact is
also important: (1) required for multiple windows, tabs, etc. one day (2) forced
a refactor to support pluggable renderers.

== Architectural Notes

Windowing events and IO remain on the main thread for now, but I plan to move
IO to a dedicated thread in a future PR. Windowing events have to remain on the
main thread for eternity since some platforms force all Window events onto one
thread.

This is one of the reasons that this will be required for multi-window. As more
windows are opened, the windowing system will send more and more window events
to our main thread, taking more and more time away from other tasks on the main
thread. Windowing events are effectively free, so if the thread is ONLY handling
windowing events, this can scale basically forever.

Further, with OpenGL, each window must have its own "context." A context is the
state used to draw. OpenGL mandates at most one context is active at any given
time _per OS thread_. For multiple windows, this means that a single thread
would have to draw one window at a time in serial. With multiple rendering
threads, each thread can have its own context activated and draw in parallel.

The renderer thread and main thread **do have shared state** and will have some
shared state forever. The terminal screen state for example is shared memory.
An immutable data structure or double buffering would be too expensive (I think,
I'll check one day) so we use a mutex to protect access to the terminal state.

The primary instigator of terminal state change is PTY read. This is the
primary motivator to eventually move IO to its own dedicated thread as well.
As multiple windows come up, each PTY under heavy load conditions will block
only its own thread and hold the lock only for its own terminal state (and
therefore its own renderer thread).

We can continue to optimize critical areas from this point forward though and
lower any contention.

== Performance Notes

I haven't done any hard benchmarking on this, only did some cursory
memory measurements, CPU measurements, etc. I also ran superficial test
programs such as `cat`-ing large files, scrolling through large buffers
in vim, etc.

In any case, it seems that responsiveness is generally higher, although it
appears that `cat`-ing large files slowed down by roughly 10%. This is
probably due to the lock overhead with the renderer and optimizing for
higher framerates with this setup.
This commit is contained in:
Mitchell Hashimoto
2022-10-24 11:17:43 -07:00
21 changed files with 1621 additions and 1908 deletions

66
pkg/libuv/Idle.zig Normal file
View File

@ -0,0 +1,66 @@
//! Idle handles will run the given callback once per loop iteration, right
//! before the uv_prepare_t handles.
const Idle = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const c = @import("c.zig");
const errors = @import("error.zig");
const Loop = @import("Loop.zig");
const Handle = @import("handle.zig").Handle;
handle: *c.uv_idle_t,
pub usingnamespace Handle(Idle);
/// Initialize a new idle handle attached to the given loop. The uv_idle_t
/// struct is heap-allocated with `alloc` so it has a stable address for
/// libuv; the caller must pair this with `deinit` using the same allocator.
pub fn init(alloc: Allocator, loop: Loop) !Idle {
    var handle = try alloc.create(c.uv_idle_t);
    // Free the allocation if registering the handle with libuv fails.
    errdefer alloc.destroy(handle);
    try errors.convertError(c.uv_idle_init(loop.loop, handle));
    return Idle{ .handle = handle };
}
/// Release the memory backing the handle. This only frees our allocation;
/// it does not close the handle with libuv, so the handle is expected to
/// have been closed already (see the test below, which calls close first).
pub fn deinit(self: *Idle, alloc: Allocator) void {
    alloc.destroy(self.handle);
    // Poison the value so any use-after-deinit is caught in safe builds.
    self.* = undefined;
}
/// Start the handle with the given callback. This function always succeeds,
/// except when cb is NULL.
pub fn start(self: Idle, comptime cb: fn (*Idle) void) !void {
    // Bridge from the C callback convention back into our Zig callback:
    // rewrap the raw handle pointer in an Idle and invoke `cb` inline.
    const Thunk = struct {
        pub fn callback(raw: [*c]c.uv_idle_t) callconv(.C) void {
            var idle: Idle = .{ .handle = raw };
            @call(.{ .modifier = .always_inline }, cb, .{&idle});
        }
    };

    try errors.convertError(c.uv_idle_start(self.handle, Thunk.callback));
}
/// Stop the handle; the callback will no longer be called.
pub fn stop(self: Idle) !void {
    try errors.convertError(c.uv_idle_stop(self.handle));
}
// End-to-end check: an idle handle's callback runs once the loop spins,
// and it can signal back through the handle's user data.
test "Idle" {
    var loop = try Loop.init(testing.allocator);
    defer loop.deinit(testing.allocator);
    var h = try init(testing.allocator, loop);
    defer h.deinit(testing.allocator);

    // The callback flips this flag via the handle's user data pointer.
    var called: bool = false;
    h.setData(&called);
    try h.start((struct {
        fn callback(t: *Idle) void {
            t.getData(bool).?.* = true;
            // Close the handle so the loop can drain and run() returns.
            t.close(null);
        }
    }).callback);

    _ = try loop.run(.default);

    try testing.expect(called);
}

View File

@ -1,8 +1,10 @@
const std = @import("std");
const stream = @import("stream.zig");
pub const c = @import("c.zig");
pub const Loop = @import("Loop.zig");
pub const Async = @import("Async.zig");
pub const Idle = @import("Idle.zig");
pub const Pipe = @import("Pipe.zig");
pub const Timer = @import("Timer.zig");
pub const Tty = @import("Tty.zig");
@ -27,6 +29,7 @@ test {
_ = Loop;
_ = Async;
_ = Idle;
_ = Pipe;
_ = Timer;
_ = Tty;

View File

@ -27,7 +27,7 @@ window: ?*Window = null,
/// Update the state associated with the dev mode. This should generally
/// only be called paired with a render since it otherwise wastes CPU
/// cycles.
pub fn update(self: *DevMode) !void {
pub fn update(self: *const DevMode) !void {
imgui.ImplOpenGL3.newFrame();
imgui.ImplGlfw.newFrame();
imgui.newFrame();
@ -82,7 +82,7 @@ fn helpMarker(desc: [:0]const u8) void {
}
}
fn atlasInfo(self: *DevMode, atlas: *Atlas, tex: ?usize) !void {
fn atlasInfo(self: *const DevMode, atlas: *Atlas, tex: ?usize) !void {
_ = self;
imgui.text("Dimensions: %d x %d", atlas.size, atlas.size);

View File

@ -19,14 +19,11 @@ const font = @import("font/main.zig");
const Command = @import("Command.zig");
const SegmentedPool = @import("segmented_pool.zig").SegmentedPool;
const trace = @import("tracy").trace;
const max_timer = @import("max_timer.zig");
const terminal = @import("terminal/main.zig");
const Config = @import("config.zig").Config;
const input = @import("input.zig");
const DevMode = @import("DevMode.zig");
const RenderTimer = max_timer.MaxTimer(renderTimerCallback);
const log = std.log.scoped(.window);
// The preallocation size for the write request pool. This should be big
@ -50,12 +47,18 @@ cursor: glfw.Cursor,
/// Imgui context
imgui_ctx: if (DevMode.enabled) *imgui.Context else void,
/// Whether the window is currently focused
focused: bool,
/// The renderer for this window.
renderer: renderer.OpenGL,
/// The render state
renderer_state: renderer.State,
/// The renderer thread manager
renderer_thread: renderer.Thread,
/// The actual thread
renderer_thr: std.Thread,
/// The underlying pty for this window.
pty: Pty,
@ -77,8 +80,8 @@ terminal_stream: terminal.Stream(*Window),
/// Cursor state.
terminal_cursor: Cursor,
/// Render at least 60fps.
render_timer: RenderTimer,
/// The dimensions of the grid in rows and columns.
grid_size: renderer.GridSize,
/// The reader/writer stream for the pty.
pty_stream: libuv.Tty,
@ -117,19 +120,6 @@ const Cursor = struct {
/// Timer for cursor blinking.
timer: libuv.Timer,
/// Current cursor style. This can be set by escape sequences. To get
/// the default style, the config has to be referenced.
style: terminal.CursorStyle = .default,
/// Whether the cursor is visible at all. This should not be used for
/// "blink" settings, see "blink" for that. This is used to turn the
/// cursor ON or OFF.
visible: bool = true,
/// Whether the cursor is currently blinking. If it is blinking, then
/// the cursor will not be rendered.
blink: bool = false,
/// Start (or restart) the timer. This is idempotent.
pub fn startTimer(self: Cursor) !void {
try self.timer.start(
@ -242,12 +232,12 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
// Culling, probably not necessary. We have to change the winding
// order since our 0,0 is top-left.
gl.c.glEnable(gl.c.GL_CULL_FACE);
gl.c.glFrontFace(gl.c.GL_CW);
try gl.enable(gl.c.GL_CULL_FACE);
try gl.frontFace(gl.c.GL_CW);
// Blending for text
gl.c.glEnable(gl.c.GL_BLEND);
gl.c.glBlendFunc(gl.c.GL_SRC_ALPHA, gl.c.GL_ONE_MINUS_SRC_ALPHA);
try gl.enable(gl.c.GL_BLEND);
try gl.blendFunc(gl.c.GL_SRC_ALPHA, gl.c.GL_ONE_MINUS_SRC_ALPHA);
// The font size we desire along with the DPI determined for the window
const font_size: font.face.DesiredSize = .{
@ -368,8 +358,11 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
// Create our terminal grid with the initial window size
const window_size = try window.getSize();
const screen_size: renderer.ScreenSize = .{
.width = window_size.width,
.height = window_size.height,
};
var renderer_impl = try renderer.OpenGL.init(alloc, font_group);
try renderer_impl.setScreenSize(.{ .width = window_size.width, .height = window_size.height });
renderer_impl.background = .{
.r = config.background.r,
.g = config.background.g,
@ -381,6 +374,9 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
.b = config.foreground.b,
};
// Calculate our grid size based on known dimensions.
const grid_size = renderer.GridSize.init(screen_size, renderer_impl.cell_size);
// Set a minimum size that is cols=10 h=4. This matches Mac's Terminal.app
// but is otherwise somewhat arbitrary.
try window.setSizeLimits(.{
@ -390,8 +386,8 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
// Create our pty
var pty = try Pty.open(.{
.ws_row = @intCast(u16, renderer_impl.size.rows),
.ws_col = @intCast(u16, renderer_impl.size.columns),
.ws_row = @intCast(u16, grid_size.rows),
.ws_col = @intCast(u16, grid_size.columns),
.ws_xpixel = @intCast(u16, window_size.width),
.ws_ypixel = @intCast(u16, window_size.height),
});
@ -434,7 +430,7 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
try stream.readStart(ttyReadAlloc, ttyRead);
// Create our terminal
var term = try terminal.Terminal.init(alloc, renderer_impl.size.columns, renderer_impl.size.rows);
var term = try terminal.Terminal.init(alloc, grid_size.columns, grid_size.rows);
errdefer term.deinit(alloc);
// Setup a timer for blinking the cursor
@ -455,6 +451,20 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
var io_arena = std.heap.ArenaAllocator.init(alloc);
errdefer io_arena.deinit();
// The mutex used to protect our renderer state.
var mutex = try alloc.create(std.Thread.Mutex);
mutex.* = .{};
errdefer alloc.destroy(mutex);
// Create the renderer thread
var render_thread = try renderer.Thread.init(
alloc,
window,
&self.renderer,
&self.renderer_state,
);
errdefer render_thread.deinit();
self.* = .{
.alloc = alloc,
.alloc_io_arena = io_arena,
@ -462,18 +472,28 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
.font_group = font_group,
.window = window,
.cursor = cursor,
.focused = false,
.renderer = renderer_impl,
.renderer_thread = render_thread,
.renderer_state = .{
.mutex = mutex,
.focused = false,
.resize_screen = screen_size,
.cursor = .{
.style = .blinking_block,
.visible = true,
.blink = false,
},
.terminal = &self.terminal,
.devmode = if (!DevMode.enabled) null else &DevMode.instance,
},
.renderer_thr = undefined,
.pty = pty,
.command = cmd,
.mouse = .{},
.terminal = term,
.terminal_stream = .{ .handler = self },
.terminal_cursor = .{
.timer = timer,
.style = .blinking_block,
},
.render_timer = try RenderTimer.init(loop, self, 6, 12),
.terminal_cursor = .{ .timer = timer },
.grid_size = grid_size,
.pty_stream = stream,
.config = config,
.bg_r = @intToFloat(f32, config.background.r) / 255.0,
@ -535,10 +555,33 @@ pub fn create(alloc: Allocator, loop: libuv.Loop, config: *const Config) !*Windo
DevMode.instance.window = self;
}
// Unload our context prior to switching over to the renderer thread
// because OpenGL requires it to be unloaded.
gl.glad.unload();
try glfw.makeContextCurrent(null);
// Start our renderer thread
self.renderer_thr = try std.Thread.spawn(
.{},
renderer.Thread.threadMain,
.{&self.renderer_thread},
);
return self;
}
pub fn destroy(self: *Window) void {
{
// Stop rendering thread
self.renderer_thread.stop.send() catch |err|
log.err("error notifying renderer thread to stop, may stall err={}", .{err});
self.renderer_thr.join();
// We need to become the active rendering thread again
self.renderer.threadEnter(self.window) catch unreachable;
self.renderer_thread.deinit();
}
if (DevMode.enabled) {
// Clear the window
DevMode.instance.window = null;
@ -566,8 +609,6 @@ pub fn destroy(self: *Window) void {
}
}).callback);
self.render_timer.deinit();
// We have to dealloc our window in the close callback because
// we can't free some of the memory associated with the window
// until the stream is closed.
@ -591,6 +632,7 @@ pub fn destroy(self: *Window) void {
self.font_lib.deinit();
self.alloc_io_arena.deinit();
self.alloc.destroy(self.renderer_state.mutex);
}
pub fn shouldClose(self: Window) bool {
@ -617,6 +659,13 @@ fn queueWrite(self: *Window, data: []const u8) !void {
}
}
/// This queues a render operation with the renderer thread. The render
/// isn't guaranteed to happen immediately but it will happen as soon as
/// practical.
fn queueRender(self: *const Window) !void {
    // Signal the renderer thread's wakeup handle; the actual drawing
    // happens on the renderer thread when it processes the wakeup.
    try self.renderer_thread.wakeup.send();
}
/// The cursor position from glfw directly is in screen coordinates but
/// all our internal state works in pixels.
fn cursorPosToPixels(self: Window, pos: glfw.Window.CursorPos) glfw.Window.CursorPos {
@ -660,33 +709,40 @@ fn sizeCallback(window: glfw.Window, width: i32, height: i32) void {
};
};
// Update our grid so that the projections on render are correct.
const win = window.getUserPointer(Window) orelse return;
win.renderer.setScreenSize(.{
const screen_size: renderer.ScreenSize = .{
.width = px_size.width,
.height = px_size.height,
}) catch |err| log.err("error updating grid screen size err={}", .{err});
};
// Update the size of our terminal state
win.terminal.resize(win.alloc, win.renderer.size.columns, win.renderer.size.rows) catch |err|
log.err("error updating terminal size: {}", .{err});
const win = window.getUserPointer(Window) orelse return;
// Resize usually forces a redraw
win.queueRender() catch |err|
log.err("error scheduling render timer in sizeCallback err={}", .{err});
// Recalculate our grid size
win.grid_size.update(screen_size, win.renderer.cell_size);
// Update the size of our pty
win.pty.setSize(.{
.ws_row = @intCast(u16, win.renderer.size.rows),
.ws_col = @intCast(u16, win.renderer.size.columns),
.ws_row = @intCast(u16, win.grid_size.rows),
.ws_col = @intCast(u16, win.grid_size.columns),
.ws_xpixel = @intCast(u16, width),
.ws_ypixel = @intCast(u16, height),
}) catch |err| log.err("error updating pty screen size err={}", .{err});
// Update our viewport for this context to be the entire window.
// OpenGL works in pixels, so we have to use the pixel size.
gl.viewport(0, 0, @intCast(i32, px_size.width), @intCast(i32, px_size.height)) catch |err|
log.err("error updating OpenGL viewport err={}", .{err});
// Enter the critical area that we want to keep small
{
win.renderer_state.mutex.lock();
defer win.renderer_state.mutex.unlock();
// Draw
win.render_timer.schedule() catch |err|
log.err("error scheduling render timer in sizeCallback err={}", .{err});
// We need to setup our render state to store our new pending size
win.renderer_state.resize_screen = screen_size;
// Update the size of our terminal state
win.terminal.resize(win.alloc, win.grid_size.columns, win.grid_size.rows) catch |err|
log.err("error updating terminal size: {}", .{err});
}
}
fn charCallback(window: glfw.Window, codepoint: u21) void {
@ -700,7 +756,7 @@ fn charCallback(window: glfw.Window, codepoint: u21) void {
// If the event was handled by imgui, ignore it.
if (imgui.IO.get()) |io| {
if (io.cval().WantCaptureKeyboard) {
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render timer err={}", .{err});
}
} else |_| {}
@ -715,7 +771,7 @@ fn charCallback(window: glfw.Window, codepoint: u21) void {
// Anytime a character is created, we have to clear the selection
if (win.terminal.selection != null) {
win.terminal.selection = null;
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render in charCallback err={}", .{err});
}
@ -723,7 +779,7 @@ fn charCallback(window: glfw.Window, codepoint: u21) void {
// TODO: detect if we're at the bottom to avoid the render call here.
win.terminal.scrollViewport(.{ .bottom = {} }) catch |err|
log.err("error scrolling viewport err={}", .{err});
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render in charCallback err={}", .{err});
// Write the character to the pty
@ -748,7 +804,7 @@ fn keyCallback(
// If the event was handled by imgui, ignore it.
if (imgui.IO.get()) |io| {
if (io.cval().WantCaptureKeyboard) {
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render timer err={}", .{err});
}
} else |_| {}
@ -869,7 +925,7 @@ fn keyCallback(
.toggle_dev_mode => if (DevMode.enabled) {
DevMode.instance.visible = !DevMode.instance.visible;
win.render_timer.schedule() catch unreachable;
win.queueRender() catch unreachable;
} else log.warn("dev mode was not compiled into this binary", .{}),
}
@ -943,22 +999,20 @@ fn focusCallback(window: glfw.Window, focused: bool) void {
const win = window.getUserPointer(Window) orelse return;
// If we aren't changing focus state, do nothing. I don't think this
// can happen but it costs very little to check.
if (win.focused == focused) return;
// We have to schedule a render because no matter what we're changing
// the cursor. If we're focused its reappearing, if we're not then
// its changing to hollow and not blinking.
win.render_timer.schedule() catch unreachable;
// Set our focused state on the window.
win.focused = focused;
win.queueRender() catch unreachable;
if (focused)
win.terminal_cursor.startTimer() catch unreachable
else
win.terminal_cursor.stopTimer() catch unreachable;
// We are modifying renderer state from here on out
win.renderer_state.mutex.lock();
defer win.renderer_state.mutex.unlock();
win.renderer_state.focused = focused;
}
fn refreshCallback(window: glfw.Window) void {
@ -968,7 +1022,7 @@ fn refreshCallback(window: glfw.Window) void {
const win = window.getUserPointer(Window) orelse return;
// The point of this callback is to schedule a render, so do that.
win.render_timer.schedule() catch unreachable;
win.queueRender() catch unreachable;
}
fn scrollCallback(window: glfw.Window, xoff: f64, yoff: f64) void {
@ -980,7 +1034,7 @@ fn scrollCallback(window: glfw.Window, xoff: f64, yoff: f64) void {
// If our dev mode window is visible then we always schedule a render on
// cursor move because the cursor might touch our windows.
if (DevMode.enabled and DevMode.instance.visible) {
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render timer err={}", .{err});
// If the mouse event was handled by imgui, ignore it.
@ -1007,7 +1061,7 @@ fn scrollCallback(window: glfw.Window, xoff: f64, yoff: f64) void {
// Positive is up
const sign: isize = if (yoff > 0) -1 else 1;
const delta: isize = sign * @max(@divFloor(win.renderer.size.rows, 15), 1);
const delta: isize = sign * @max(@divFloor(win.grid_size.rows, 15), 1);
log.info("scroll: delta={}", .{delta});
win.terminal.scrollViewport(.{ .delta = delta }) catch |err|
log.err("error scrolling viewport err={}", .{err});
@ -1015,7 +1069,7 @@ fn scrollCallback(window: glfw.Window, xoff: f64, yoff: f64) void {
// Schedule render since scrolling usually does something.
// TODO(perf): we can only schedule render if we know scrolling
// did something
win.render_timer.schedule() catch unreachable;
win.queueRender() catch unreachable;
}
/// The type of action to report for a mouse event.
@ -1200,7 +1254,7 @@ fn mouseButtonCallback(
// If our dev mode window is visible then we always schedule a render on
// cursor move because the cursor might touch our windows.
if (DevMode.enabled and DevMode.instance.visible) {
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render timer in cursorPosCallback err={}", .{err});
// If the mouse event was handled by imgui, ignore it.
@ -1270,7 +1324,7 @@ fn mouseButtonCallback(
// Selection is always cleared
if (win.terminal.selection != null) {
win.terminal.selection = null;
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render in mouseButtinCallback err={}", .{err});
}
}
@ -1289,7 +1343,7 @@ fn cursorPosCallback(
// If our dev mode window is visible then we always schedule a render on
// cursor move because the cursor might touch our windows.
if (DevMode.enabled and DevMode.instance.visible) {
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render timer in cursorPosCallback err={}", .{err});
// If the mouse event was handled by imgui, ignore it.
@ -1324,7 +1378,7 @@ fn cursorPosCallback(
if (win.mouse.click_state[@enumToInt(input.MouseButton.left)] != .press) return;
// All roads lead to requiring a re-render at this point.
win.render_timer.schedule() catch |err|
win.queueRender() catch |err|
log.err("error scheduling render timer in cursorPosCallback err={}", .{err});
// Convert to pixels from screen coords
@ -1467,13 +1521,17 @@ fn cursorTimerCallback(t: *libuv.Timer) void {
const win = t.getData(Window) orelse return;
// We are modifying renderer state from here on out
win.renderer_state.mutex.lock();
defer win.renderer_state.mutex.unlock();
// If the cursor is currently invisible, then we do nothing. Ideally
// in this state the timer would be cancelled but no big deal.
if (!win.terminal_cursor.visible) return;
if (!win.renderer_state.cursor.visible) return;
// Swap blink state and schedule a render
win.terminal_cursor.blink = !win.terminal_cursor.blink;
win.render_timer.schedule() catch unreachable;
win.renderer_state.cursor.blink = !win.renderer_state.cursor.blink;
win.queueRender() catch unreachable;
}
fn ttyReadAlloc(t: *libuv.Tty, size: usize) ?[]u8 {
@ -1510,15 +1568,19 @@ fn ttyRead(t: *libuv.Tty, n: isize, buf: []const u8) void {
return;
};
// We are modifying terminal state from here on out
win.renderer_state.mutex.lock();
defer win.renderer_state.mutex.unlock();
// Whenever a character is typed, we ensure the cursor is in the
// non-blink state so it is rendered if visible.
win.terminal_cursor.blink = false;
win.renderer_state.cursor.blink = false;
if (win.terminal_cursor.timer.isActive() catch false) {
_ = win.terminal_cursor.timer.again() catch null;
}
// Schedule a render
win.render_timer.schedule() catch unreachable;
win.queueRender() catch unreachable;
// Process the terminal data. This is an extremely hot part of the
// terminal emulator, so we do some abstraction leakage to avoid
@ -1575,84 +1637,6 @@ fn ttyWrite(req: *libuv.WriteReq, status: i32) void {
//log.info("WROTE: {d}", .{status});
}
/// Render timer callback: draws one complete frame for the window
/// (cursor state, background clear, cell rebuild, grid render, optional
/// dev-mode UI, buffer swap), then records the run on the render timer.
fn renderTimerCallback(t: *libuv.Timer) void {
    const tracy = trace(@src());
    tracy.color(0x006E7F); // blue-ish
    defer tracy.end();

    const win = t.getData(Window).?;

    // Setup our cursor settings
    if (win.focused) {
        // Focused: honor visibility and blink state; fall back to a solid
        // box when the terminal style has no renderer equivalent.
        win.renderer.cursor_visible = win.terminal_cursor.visible and !win.terminal_cursor.blink;
        win.renderer.cursor_style = renderer.OpenGL.CursorStyle.fromTerminal(win.terminal_cursor.style) orelse .box;
    } else {
        // Unfocused: always draw a hollow box, never blink.
        win.renderer.cursor_visible = true;
        win.renderer.cursor_style = .box_hollow;
    }

    // Calculate foreground and background colors
    const bg = win.renderer.background;
    const fg = win.renderer.foreground;
    // Restore the original colors on exit so a reverse-video frame does
    // not permanently swap the renderer's color state.
    defer {
        win.renderer.background = bg;
        win.renderer.foreground = fg;
    }
    if (win.terminal.modes.reverse_colors) {
        win.renderer.background = fg;
        win.renderer.foreground = bg;
    }

    // Set our background
    const gl_bg: struct {
        r: f32,
        g: f32,
        b: f32,
        a: f32,
    } = if (win.terminal.modes.reverse_colors) .{
        // Reverse video: clear with the foreground color, normalized
        // from 0-255 channels to 0-1 floats.
        .r = @intToFloat(f32, fg.r) / 255,
        .g = @intToFloat(f32, fg.g) / 255,
        .b = @intToFloat(f32, fg.b) / 255,
        .a = 1.0,
    } else .{
        // Normal: use the precomputed normalized background channels.
        .r = win.bg_r,
        .g = win.bg_g,
        .b = win.bg_b,
        .a = win.bg_a,
    };
    gl.clearColor(gl_bg.r, gl_bg.g, gl_bg.b, gl_bg.a);
    gl.clear(gl.c.GL_COLOR_BUFFER_BIT);

    // For now, rebuild all cells
    win.renderer.rebuildCells(&win.terminal) catch |err|
        log.err("error calling rebuildCells in render timer err={}", .{err});

    // Finalize the cells prior to render
    win.renderer.finalizeCells(&win.terminal) catch |err|
        log.err("error calling updateCells in render timer err={}", .{err});

    // Render the grid
    win.renderer.render() catch |err| {
        log.err("error rendering grid: {}", .{err});
        return;
    };

    if (DevMode.enabled and DevMode.instance.visible) {
        // Dev mode UI is drawn on top of the grid via imgui.
        DevMode.instance.update() catch unreachable;
        const data = DevMode.instance.render() catch unreachable;
        imgui.ImplOpenGL3.renderDrawData(data);
    }

    // Swap
    win.window.swapBuffers() catch |err| {
        log.err("error swapping buffers: {}", .{err});
        return;
    };

    // Record our run
    win.render_timer.tick();
}
//-------------------------------------------------------------------
// Stream Callbacks
@ -1721,7 +1705,7 @@ pub fn eraseDisplay(self: *Window, mode: terminal.EraseDisplay) !void {
if (mode == .complete) {
// Whenever we erase the full display, scroll to bottom.
try self.terminal.scrollViewport(.{ .bottom = {} });
try self.render_timer.schedule();
try self.queueRender();
}
self.terminal.eraseDisplay(mode);
@ -1774,7 +1758,7 @@ pub fn setMode(self: *Window, mode: terminal.Mode, enabled: bool) !void {
self.terminal.modes.reverse_colors = enabled;
// Schedule a render since we changed colors
try self.render_timer.schedule();
try self.queueRender();
},
.origin => {
@ -1787,7 +1771,7 @@ pub fn setMode(self: *Window, mode: terminal.Mode, enabled: bool) !void {
},
.cursor_visible => {
self.terminal_cursor.visible = enabled;
self.renderer_state.cursor.visible = enabled;
},
.alt_screen_save_cursor_clear_enter => {
@ -1802,7 +1786,7 @@ pub fn setMode(self: *Window, mode: terminal.Mode, enabled: bool) !void {
self.terminal.primaryScreen(opts);
// Schedule a render since we changed screens
try self.render_timer.schedule();
try self.queueRender();
},
.bracketed_paste => self.bracketed_paste = true,
@ -1812,7 +1796,7 @@ pub fn setMode(self: *Window, mode: terminal.Mode, enabled: bool) !void {
self.terminal.setDeccolmSupported(enabled);
// Force resize back to the window size
self.terminal.resize(self.alloc, self.renderer.size.columns, self.renderer.size.rows) catch |err|
self.terminal.resize(self.alloc, self.grid_size.columns, self.grid_size.rows) catch |err|
log.err("error updating terminal size: {}", .{err});
},
@ -1900,7 +1884,7 @@ pub fn setCursorStyle(
self: *Window,
style: terminal.CursorStyle,
) !void {
self.terminal_cursor.style = style;
self.renderer_state.cursor.style = style;
}
pub fn decaln(self: *Window) !void {

View File

@ -1,109 +0,0 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const libuv = @import("libuv");
/// A coalescing timer that forces a run after a certain maximum time
/// since the last run. This is used for example by the renderer to try
/// to render at a high FPS but gracefully fall back under high IO load so
/// that we can process more data and increase throughput.
pub fn MaxTimer(comptime cb: fn (*libuv.Timer) void) type {
    return struct {
        const Self = @This();

        /// The underlying libuv timer.
        timer: libuv.Timer,

        /// The maximum time between timer calls. This is best effort based on
        /// event loop load. If the event loop is busy, the timer will be run on
        /// the next available tick.
        max: u64,

        /// The fastest the timer will ever run.
        min: u64,

        /// The last time this timer ran.
        last: u64 = 0,

        /// This handle is used to wake up the event loop when the timer
        /// is restarted.
        async_h: libuv.Async,

        /// Initialize the timer on the given loop. `data` is stored as the
        /// timer's user data so the callback can retrieve it. `min` is the
        /// fastest tick interval and `max` the longest allowed gap between
        /// runs (libuv timer units). Returns error.MaxShorterThanTimer if
        /// max < min.
        pub fn init(
            loop: libuv.Loop,
            data: ?*anyopaque,
            min: u64,
            max: u64,
        ) !Self {
            // The maximum time can't be less than the minimum interval,
            // otherwise this will just constantly fire. Validate before
            // creating any libuv handles so this error path can't leak.
            // (Previously this check had been swallowed into a comment
            // and was never executed.)
            if (max < min) return error.MaxShorterThanTimer;

            const alloc = loop.getData(Allocator).?.*;
            var timer = try libuv.Timer.init(alloc, loop);
            timer.setData(data);

            // The async handle is used to wake up the event loop. This is
            // necessary since stop/starting a timer doesn't trigger the
            // poll on the backend fd.
            // NOTE(review): if Async.init fails here the timer handle is
            // not closed/freed; proper cleanup requires an async close
            // callback — TODO address separately.
            var async_h = try libuv.Async.init(alloc, loop, (struct {
                fn callback(_: *libuv.Async) void {}
            }).callback);

            return Self{
                .timer = timer,
                .min = min,
                .max = max,
                .async_h = async_h,
            };
        }

        /// Close both handles. Deallocation happens inside the close
        /// callbacks since libuv closes handles asynchronously.
        pub fn deinit(self: *Self) void {
            self.async_h.close((struct {
                fn callback(h: *libuv.Async) void {
                    const alloc = h.loop().getData(Allocator).?.*;
                    h.deinit(alloc);
                }
            }).callback);

            self.timer.close((struct {
                fn callback(t: *libuv.Timer) void {
                    const alloc = t.loop().getData(Allocator).?.*;
                    t.deinit(alloc);
                }
            }).callback);
        }

        /// This should be called from the callback to update the last called time.
        pub fn tick(self: *Self) void {
            // Refresh the loop's cached time so `last` is accurate even
            // if the loop has been busy.
            self.timer.loop().updateTime();
            self.last = self.timer.loop().now();
            self.timer.stop() catch unreachable;
        }

        /// Schedule the timer to run. If the timer is not started, it'll
        /// run on the next min tick. If the timer is started, this will
        /// delay the timer up to max time since the last run.
        pub fn schedule(self: *Self) !void {
            // If the timer hasn't been started, start it now and schedule
            // a tick as soon as possible.
            if (!try self.timer.isActive()) {
                try self.timer.start(cb, self.min, self.min);

                // We have to send an async message to wake up the
                // event loop. Starting a timer doesn't write to the fd.
                try self.async_h.send();
                return;
            }

            // If we are past the max time, we run the timer now.
            try self.timer.stop();
            self.timer.loop().updateTime();
            const timeout = if (self.timer.loop().now() - self.last > self.max)
                0
            else
                self.min;

            // We still have time, restart the timer so that it is min time away.
            try self.timer.start(cb, timeout, 0);
        }
    };
}

View File

@ -3,6 +3,7 @@ const Buffer = @This();
const std = @import("std");
const c = @import("c.zig");
const errors = @import("errors.zig");
const glad = @import("glad.zig");
id: c.GLuint,
@ -41,7 +42,7 @@ pub const Binding = struct {
usage: Usage,
) !void {
const info = dataInfo(&data);
c.glBufferData(@enumToInt(b.target), info.size, info.ptr, @enumToInt(usage));
glad.context.BufferData.?(@enumToInt(b.target), info.size, info.ptr, @enumToInt(usage));
try errors.getError();
}
@ -53,7 +54,7 @@ pub const Binding = struct {
data: anytype,
) !void {
const info = dataInfo(data);
c.glBufferSubData(@enumToInt(b.target), @intCast(c_long, offset), info.size, info.ptr);
glad.context.BufferSubData.?(@enumToInt(b.target), @intCast(c_long, offset), info.size, info.ptr);
try errors.getError();
}
@ -65,7 +66,7 @@ pub const Binding = struct {
comptime T: type,
usage: Usage,
) !void {
c.glBufferData(@enumToInt(b.target), @sizeOf(T), null, @enumToInt(usage));
glad.context.BufferData.?(@enumToInt(b.target), @sizeOf(T), null, @enumToInt(usage));
try errors.getError();
}
@ -75,7 +76,7 @@ pub const Binding = struct {
size: usize,
usage: Usage,
) !void {
c.glBufferData(@enumToInt(b.target), @intCast(c_long, size), null, @enumToInt(usage));
glad.context.BufferData.?(@enumToInt(b.target), @intCast(c_long, size), null, @enumToInt(usage));
try errors.getError();
}
@ -106,7 +107,7 @@ pub const Binding = struct {
}
pub inline fn enableAttribArray(_: Binding, idx: c.GLuint) !void {
c.glEnableVertexAttribArray(idx);
glad.context.EnableVertexAttribArray.?(idx);
}
/// Shorthand for vertexAttribPointer that is specialized towards the
@ -153,7 +154,7 @@ pub const Binding = struct {
/// VertexAttribDivisor
pub fn attributeDivisor(_: Binding, idx: c.GLuint, divisor: c.GLuint) !void {
c.glVertexAttribDivisor(idx, divisor);
glad.context.VertexAttribDivisor.?(idx, divisor);
try errors.getError();
}
@ -172,7 +173,7 @@ pub const Binding = struct {
else
null;
c.glVertexAttribPointer(idx, size, typ, normalized_c, stride, offsetPtr);
glad.context.VertexAttribPointer.?(idx, size, typ, normalized_c, stride, offsetPtr);
try errors.getError();
}
@ -189,12 +190,12 @@ pub const Binding = struct {
else
null;
c.glVertexAttribIPointer(idx, size, typ, stride, offsetPtr);
glad.context.VertexAttribIPointer.?(idx, size, typ, stride, offsetPtr);
try errors.getError();
}
pub inline fn unbind(b: *Binding) void {
c.glBindBuffer(@enumToInt(b.target), 0);
glad.context.BindBuffer.?(@enumToInt(b.target), 0);
b.* = undefined;
}
};
@ -202,16 +203,16 @@ pub const Binding = struct {
/// Create a single buffer.
pub inline fn create() !Buffer {
var vbo: c.GLuint = undefined;
c.glGenBuffers(1, &vbo);
glad.context.GenBuffers.?(1, &vbo);
return Buffer{ .id = vbo };
}
/// glBindBuffer
pub inline fn bind(v: Buffer, target: Target) !Binding {
c.glBindBuffer(@enumToInt(target), v.id);
glad.context.BindBuffer.?(@enumToInt(target), v.id);
return Binding{ .target = target };
}
pub inline fn destroy(v: Buffer) void {
c.glDeleteBuffers(1, &v.id);
glad.context.DeleteBuffers.?(1, &v.id);
}

View File

@ -7,17 +7,18 @@ const log = std.log.scoped(.opengl);
const c = @import("c.zig");
const Shader = @import("Shader.zig");
const errors = @import("errors.zig");
const glad = @import("glad.zig");
id: c.GLuint,
const Binding = struct {
pub inline fn unbind(_: Binding) void {
c.glUseProgram(0);
glad.context.UseProgram.?(0);
}
};
pub inline fn create() !Program {
const id = c.glCreateProgram();
const id = glad.context.CreateProgram.?();
if (id == 0) try errors.mustError();
log.debug("program created id={}", .{id});
@ -44,16 +45,16 @@ pub inline fn createVF(vsrc: [:0]const u8, fsrc: [:0]const u8) !Program {
}
pub inline fn attachShader(p: Program, s: Shader) !void {
c.glAttachShader(p.id, s.id);
glad.context.AttachShader.?(p.id, s.id);
try errors.getError();
}
pub inline fn link(p: Program) !void {
c.glLinkProgram(p.id);
glad.context.LinkProgram.?(p.id);
// Check if linking succeeded
var success: c_int = undefined;
c.glGetProgramiv(p.id, c.GL_LINK_STATUS, &success);
glad.context.GetProgramiv.?(p.id, c.GL_LINK_STATUS, &success);
if (success == c.GL_TRUE) {
log.debug("program linked id={}", .{p.id});
return;
@ -67,7 +68,7 @@ pub inline fn link(p: Program) !void {
}
pub inline fn use(p: Program) !Binding {
c.glUseProgram(p.id);
glad.context.UseProgram.?(p.id);
return Binding{};
}
@ -77,7 +78,7 @@ pub inline fn setUniform(
n: [:0]const u8,
value: anytype,
) !void {
const loc = c.glGetUniformLocation(
const loc = glad.context.GetUniformLocation.?(
p.id,
@ptrCast([*c]const u8, n.ptr),
);
@ -88,12 +89,12 @@ pub inline fn setUniform(
// Perform the correct call depending on the type of the value.
switch (@TypeOf(value)) {
comptime_int => c.glUniform1i(loc, value),
f32 => c.glUniform1f(loc, value),
@Vector(2, f32) => c.glUniform2f(loc, value[0], value[1]),
@Vector(3, f32) => c.glUniform3f(loc, value[0], value[1], value[2]),
@Vector(4, f32) => c.glUniform4f(loc, value[0], value[1], value[2], value[3]),
[4]@Vector(4, f32) => c.glUniformMatrix4fv(
comptime_int => glad.context.Uniform1i.?(loc, value),
f32 => glad.context.Uniform1f.?(loc, value),
@Vector(2, f32) => glad.context.Uniform2f.?(loc, value[0], value[1]),
@Vector(3, f32) => glad.context.Uniform3f.?(loc, value[0], value[1], value[2]),
@Vector(4, f32) => glad.context.Uniform4f.?(loc, value[0], value[1], value[2], value[3]),
[4]@Vector(4, f32) => glad.context.UniformMatrix4fv.?(
loc,
1,
c.GL_FALSE,
@ -112,12 +113,12 @@ pub inline fn setUniform(
// if we ever need it.
pub inline fn getInfoLog(s: Program) [512]u8 {
var msg: [512]u8 = undefined;
c.glGetProgramInfoLog(s.id, msg.len, null, &msg);
glad.context.GetProgramInfoLog.?(s.id, msg.len, null, &msg);
return msg;
}
pub inline fn destroy(p: Program) void {
assert(p.id != 0);
c.glDeleteProgram(p.id);
glad.context.DeleteProgram.?(p.id);
log.debug("program destroyed id={}", .{p.id});
}

View File

@ -6,11 +6,12 @@ const log = std.log.scoped(.opengl);
const c = @import("c.zig");
const errors = @import("errors.zig");
const glad = @import("glad.zig");
id: c.GLuint,
pub inline fn create(typ: c.GLenum) errors.Error!Shader {
const id = c.glCreateShader(typ);
const id = glad.context.CreateShader.?(typ);
if (id == 0) {
try errors.mustError();
unreachable;
@ -22,12 +23,12 @@ pub inline fn create(typ: c.GLenum) errors.Error!Shader {
/// Set the source and compile a shader.
pub inline fn setSourceAndCompile(s: Shader, source: [:0]const u8) !void {
c.glShaderSource(s.id, 1, &@ptrCast([*c]const u8, source), null);
c.glCompileShader(s.id);
glad.context.ShaderSource.?(s.id, 1, &@ptrCast([*c]const u8, source), null);
glad.context.CompileShader.?(s.id);
// Check if compilation succeeded
var success: c_int = undefined;
c.glGetShaderiv(s.id, c.GL_COMPILE_STATUS, &success);
glad.context.GetShaderiv.?(s.id, c.GL_COMPILE_STATUS, &success);
if (success == c.GL_TRUE) return;
log.err("shader compilation failure id={} message={s}", .{
s.id,
@ -44,12 +45,12 @@ pub inline fn setSourceAndCompile(s: Shader, source: [:0]const u8) !void {
// if we ever need it.
pub inline fn getInfoLog(s: Shader) [512]u8 {
var msg: [512]u8 = undefined;
c.glGetShaderInfoLog(s.id, msg.len, null, &msg);
glad.context.GetShaderInfoLog.?(s.id, msg.len, null, &msg);
return msg;
}
pub inline fn destroy(s: Shader) void {
assert(s.id != 0);
c.glDeleteShader(s.id);
glad.context.DeleteShader.?(s.id);
log.debug("shader destroyed id={}", .{s.id});
}

View File

@ -3,11 +3,12 @@ const Texture = @This();
const std = @import("std");
const c = @import("c.zig");
const errors = @import("errors.zig");
const glad = @import("glad.zig");
id: c.GLuint,
pub inline fn active(target: c.GLenum) !void {
c.glActiveTexture(target);
glad.context.ActiveTexture.?(target);
}
/// Enun for possible texture binding targets.
@ -74,17 +75,17 @@ pub const Binding = struct {
target: Target,
pub inline fn unbind(b: *Binding) void {
c.glBindTexture(@enumToInt(b.target), 0);
glad.context.BindTexture.?(@enumToInt(b.target), 0);
b.* = undefined;
}
pub fn generateMipmap(b: Binding) void {
c.glGenerateMipmap(@enumToInt(b.target));
glad.context.GenerateMipmap.?(@enumToInt(b.target));
}
pub fn parameter(b: Binding, name: Parameter, value: anytype) !void {
switch (@TypeOf(value)) {
c.GLint => c.glTexParameteri(
c.GLint => glad.context.TexParameteri.?(
@enumToInt(b.target),
@enumToInt(name),
value,
@ -104,7 +105,7 @@ pub const Binding = struct {
typ: DataType,
data: ?*const anyopaque,
) !void {
c.glTexImage2D(
glad.context.TexImage2D.?(
@enumToInt(b.target),
level,
@enumToInt(internal_format),
@ -128,7 +129,7 @@ pub const Binding = struct {
typ: DataType,
data: ?*const anyopaque,
) !void {
c.glTexSubImage2D(
glad.context.TexSubImage2D.?(
@enumToInt(b.target),
level,
xoffset,
@ -145,16 +146,16 @@ pub const Binding = struct {
/// Create a single texture.
pub inline fn create() !Texture {
var id: c.GLuint = undefined;
c.glGenTextures(1, &id);
glad.context.GenTextures.?(1, &id);
return Texture{ .id = id };
}
/// glBindTexture
pub inline fn bind(v: Texture, target: Target) !Binding {
c.glBindTexture(@enumToInt(target), v.id);
glad.context.BindTexture.?(@enumToInt(target), v.id);
return Binding{ .target = target };
}
pub inline fn destroy(v: Texture) void {
c.glDeleteTextures(1, &v.id);
glad.context.DeleteTextures.?(1, &v.id);
}

View File

@ -1,26 +1,27 @@
const VertexArray = @This();
const c = @import("c.zig");
const glad = @import("glad.zig");
id: c.GLuint,
/// Create a single vertex array object.
pub inline fn create() !VertexArray {
var vao: c.GLuint = undefined;
c.glGenVertexArrays(1, &vao);
glad.context.GenVertexArrays.?(1, &vao);
return VertexArray{ .id = vao };
}
// Unbind any active vertex array.
pub inline fn unbind() !void {
c.glBindVertexArray(0);
glad.context.BindVertexArray.?(0);
}
/// glBindVertexArray
pub inline fn bind(v: VertexArray) !void {
c.glBindVertexArray(v.id);
glad.context.BindVertexArray.?(v.id);
}
pub inline fn destroy(v: VertexArray) void {
c.glDeleteVertexArrays(1, &v.id);
glad.context.DeleteVertexArrays.?(1, &v.id);
}

View File

@ -1,22 +1,23 @@
const c = @import("c.zig");
const errors = @import("errors.zig");
const glad = @import("glad.zig");
pub fn clearColor(r: f32, g: f32, b: f32, a: f32) void {
c.glClearColor(r, g, b, a);
glad.context.ClearColor.?(r, g, b, a);
}
pub fn clear(mask: c.GLbitfield) void {
c.glClear(mask);
glad.context.Clear.?(mask);
}
pub fn drawArrays(mode: c.GLenum, first: c.GLint, count: c.GLsizei) !void {
c.glDrawArrays(mode, first, count);
glad.context.DrawArrays.?(mode, first, count);
try errors.getError();
}
pub fn drawElements(mode: c.GLenum, count: c.GLsizei, typ: c.GLenum, offset: usize) !void {
const offsetPtr = if (offset == 0) null else @intToPtr(*const anyopaque, offset);
c.glDrawElements(mode, count, typ, offsetPtr);
glad.context.DrawElements.?(mode, count, typ, offsetPtr);
try errors.getError();
}
@ -26,17 +27,32 @@ pub fn drawElementsInstanced(
typ: c.GLenum,
primcount: usize,
) !void {
c.glDrawElementsInstanced(mode, count, typ, null, @intCast(c.GLsizei, primcount));
glad.context.DrawElementsInstanced.?(mode, count, typ, null, @intCast(c.GLsizei, primcount));
try errors.getError();
}
pub fn enable(cap: c.GLenum) !void {
glad.context.Enable.?(cap);
try errors.getError();
}
pub fn frontFace(mode: c.GLenum) !void {
glad.context.FrontFace.?(mode);
try errors.getError();
}
pub fn blendFunc(sfactor: c.GLenum, dfactor: c.GLenum) !void {
glad.context.BlendFunc.?(sfactor, dfactor);
try errors.getError();
}
pub fn viewport(x: c.GLint, y: c.GLint, width: c.GLsizei, height: c.GLsizei) !void {
c.glViewport(x, y, width, height);
glad.context.Viewport.?(x, y, width, height);
}
pub fn pixelStore(mode: c.GLenum, value: anytype) !void {
switch (@typeInfo(@TypeOf(value))) {
.ComptimeInt, .Int => c.glPixelStorei(mode, value),
.ComptimeInt, .Int => glad.context.PixelStorei.?(mode, value),
else => unreachable,
}
try errors.getError();

View File

@ -1,5 +1,6 @@
const std = @import("std");
const c = @import("c.zig");
const glad = @import("glad.zig");
pub const Error = error{
InvalidEnum,
@ -13,7 +14,7 @@ pub const Error = error{
/// getError returns the error (if any) from the last OpenGL operation.
pub fn getError() Error!void {
return switch (c.glGetError()) {
return switch (glad.context.GetError.?()) {
c.GL_NO_ERROR => {},
c.GL_INVALID_ENUM => Error.InvalidEnum,
c.GL_INVALID_VALUE => Error.InvalidValue,

View File

@ -1,11 +1,12 @@
const std = @import("std");
const c = @import("c.zig");
const errors = @import("errors.zig");
const glad = @import("glad.zig");
/// Returns the number of extensions.
pub fn len() !u32 {
var n: c.GLint = undefined;
c.glGetIntegerv(c.GL_NUM_EXTENSIONS, &n);
glad.context.GetIntegerv.?(c.GL_NUM_EXTENSIONS, &n);
try errors.getError();
return @intCast(u32, n);
}
@ -23,7 +24,7 @@ pub const Iterator = struct {
pub fn next(self: *Iterator) !?[]const u8 {
if (self.i >= self.len) return null;
const res = c.glGetStringi(c.GL_EXTENSIONS, self.i);
const res = glad.context.GetStringi.?(c.GL_EXTENSIONS, self.i);
try errors.getError();
self.i += 1;
return std.mem.sliceTo(res, 0);

View File

@ -1,6 +1,14 @@
const std = @import("std");
const c = @import("c.zig");
pub const Context = c.GladGLContext;
/// This is the current context. Set this var manually prior to calling
/// any of this package's functions. I know its nasty to have a global but
/// this makes it match OpenGL API styles where it also operates on a
/// threadlocal global.
pub threadlocal var context: Context = undefined;
/// Initialize Glad. This is guaranteed to succeed if no errors are returned.
/// The getProcAddress param is an anytype so that we can accept multiple
/// forms of the function depending on what we're interfacing with.
@ -10,18 +18,23 @@ pub fn load(getProcAddress: anytype) !c_int {
const res = switch (@TypeOf(getProcAddress)) {
// glfw
GlfwFn => c.gladLoadGL(@ptrCast(
GlfwFn => c.gladLoadGLContext(&context, @ptrCast(
std.meta.FnPtr(fn ([*c]const u8) callconv(.C) ?GlProc),
getProcAddress,
)),
// try as-is. If this introduces a compiler error, then add a new case.
else => c.gladLoadGL(getProcAddress),
else => c.gladLoadGLContext(&context, getProcAddress),
};
if (res == 0) return error.GLInitFailed;
return res;
}
/// Unload the thread-local Glad context, releasing its loaded function
/// pointers. Should only be called after a successful `load` on the same
/// thread.
pub fn unload() void {
    c.gladLoaderUnloadGLContext(&context);
    // Reset so that accidental use after unload is caught in safe builds
    // rather than calling through stale pointers.
    context = undefined;
}
pub fn versionMajor(res: c_int) c_uint {
// https://github.com/ziglang/zig/issues/13162
// return c.GLAD_VERSION_MAJOR(res);

View File

@ -7,7 +7,10 @@
//! APIs. The renderers in this package assume that the renderer is already
//! setup (OpenGL has a context, Vulkan has a surface, etc.)
pub usingnamespace @import("renderer/size.zig");
pub const OpenGL = @import("renderer/OpenGL.zig");
pub const Thread = @import("renderer/Thread.zig");
pub const State = @import("renderer/State.zig");
test {
@import("std").testing.refAllDecls(@This());

View File

@ -2,11 +2,15 @@
pub const OpenGL = @This();
const std = @import("std");
const builtin = @import("builtin");
const glfw = @import("glfw");
const assert = std.debug.assert;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const Atlas = @import("../Atlas.zig");
const font = @import("../font/main.zig");
const imgui = @import("imgui");
const renderer = @import("../renderer.zig");
const terminal = @import("../terminal/main.zig");
const Terminal = terminal.Terminal;
const gl = @import("../opengl.zig");
@ -27,11 +31,8 @@ const CellsLRU = lru.AutoHashMap(struct {
alloc: std.mem.Allocator,
/// Current dimensions for this grid.
size: GridSize,
/// Current cell dimensions for this grid.
cell_size: CellSize,
cell_size: renderer.CellSize,
/// The current set of cells to render.
cells: std.ArrayListUnmanaged(GPUCell),
@ -289,7 +290,6 @@ pub fn init(alloc: Allocator, font_group: *font.GroupCache) !OpenGL {
.cells = .{},
.cells_lru = CellsLRU.init(0),
.cell_size = .{ .width = metrics.cell_width, .height = metrics.cell_height },
.size = .{ .rows = 0, .columns = 0 },
.program = program,
.vao = vao,
.ebo = ebo,
@ -320,6 +320,135 @@ pub fn deinit(self: *OpenGL) void {
self.* = undefined;
}
/// Callback called by renderer.Thread when it begins running on the
/// dedicated render thread. Claims the window's OpenGL context for this
/// thread and loads the GL function pointers into the thread-local glad
/// context. The prior owning thread MUST have detached the context before
/// this is called.
pub fn threadEnter(self: *const OpenGL, window: glfw.Window) !void {
    _ = self;

    // We need to make the OpenGL context current. OpenGL requires
    // that a single thread own a single OpenGL context (if any). This
    // ensures that the context switches over to our thread. Important:
    // the prior thread MUST have detached the context prior to calling
    // this entrypoint.
    try glfw.makeContextCurrent(window);
    errdefer glfw.makeContextCurrent(null) catch |err|
        log.warn("failed to cleanup OpenGL context err={}", .{err});
    // Enable vsync (swap on every vertical refresh).
    try glfw.swapInterval(1);

    // Load OpenGL bindings. This API is context-aware so this sets
    // a threadlocal context for these pointers.
    const version = try gl.glad.load(switch (builtin.zig_backend) {
        .stage1 => glfw.getProcAddress,
        else => &glfw.getProcAddress,
    });
    errdefer gl.glad.unload();
    log.info("loaded OpenGL {}.{}", .{
        gl.glad.versionMajor(version),
        gl.glad.versionMinor(version),
    });
}
/// Callback called by renderer.Thread when it exits. Unloads the
/// thread-local GL bindings and detaches the context so another thread
/// may claim it. The detach error is deliberately ignored: this is
/// best-effort cleanup on thread shutdown.
pub fn threadExit(self: *const OpenGL) void {
    _ = self;

    gl.glad.unload();
    glfw.makeContextCurrent(null) catch {};
}
/// The primary render callback that is completely thread-safe.
///
/// The state mutex is held only long enough to derive the data needed to
/// draw (cells, cursor, colors, devmode draw data); the actual OpenGL
/// drawing happens outside the critical section so the terminal/IO side
/// is blocked as briefly as possible.
pub fn render(
    self: *OpenGL,
    window: glfw.Window,
    state: *renderer.State,
) !void {
    // Data we extract out of the critical area.
    const Critical = struct {
        gl_bg: terminal.color.RGB,
        devmode_data: ?*imgui.DrawData,
        screen_size: ?renderer.ScreenSize,
    };

    // Update all our data as tightly as possible within the mutex.
    const critical: Critical = critical: {
        state.mutex.lock();
        defer state.mutex.unlock();

        // If we're resizing, then handle that now. The request is cleared
        // by the defer below (still under the mutex) once handled.
        if (state.resize_screen) |size| try self.setScreenSize(size);
        defer state.resize_screen = null;

        // Setup our cursor state
        if (state.focused) {
            self.cursor_visible = state.cursor.visible and !state.cursor.blink;
            self.cursor_style = CursorStyle.fromTerminal(state.cursor.style) orelse .box;
        } else {
            // Unfocused windows always show a hollow box cursor.
            self.cursor_visible = true;
            self.cursor_style = .box_hollow;
        }

        // Swap bg/fg if the terminal is reversed. The originals are
        // restored by the defer after the cells are built.
        const bg = self.background;
        const fg = self.foreground;
        defer {
            self.background = bg;
            self.foreground = fg;
        }
        if (state.terminal.modes.reverse_colors) {
            self.background = fg;
            self.foreground = bg;
        }

        // Build our GPU cells
        try self.rebuildCells(state.terminal);
        try self.finalizeCells(state.terminal);

        // Build our devmode draw data
        const devmode_data = devmode_data: {
            if (state.devmode) |dm| {
                if (dm.visible) {
                    try dm.update();
                    break :devmode_data try dm.render();
                }
            }

            break :devmode_data null;
        };

        // NOTE: the defers above run only after this break expression is
        // evaluated, so screen_size and gl_bg observe the pre-defer values.
        break :critical .{
            .gl_bg = self.background,
            .devmode_data = devmode_data,
            .screen_size = state.resize_screen,
        };
    };

    // If we are resizing we need to update the viewport
    if (critical.screen_size) |size| {
        // Update our viewport for this context to be the entire window.
        // OpenGL works in pixels, so we have to use the pixel size.
        try gl.viewport(0, 0, @intCast(i32, size.width), @intCast(i32, size.height));
    }

    // Clear the surface, normalizing 0-255 channels to 0.0-1.0.
    gl.clearColor(
        @intToFloat(f32, critical.gl_bg.r) / 255,
        @intToFloat(f32, critical.gl_bg.g) / 255,
        @intToFloat(f32, critical.gl_bg.b) / 255,
        1.0,
    );
    gl.clear(gl.c.GL_COLOR_BUFFER_BIT);

    // We're out of the critical path now. Let's first render our terminal.
    try self.draw();

    // If we have devmode, then render that
    if (critical.devmode_data) |data| {
        imgui.ImplOpenGL3.renderDrawData(data);
    }

    // Swap our window buffers
    try window.swapBuffers();
}
/// rebuildCells rebuilds all the GPU cells from our CPU state. This is a
/// slow operation but ensures that the GPU state exactly matches the CPU state.
/// In steady-state operation, we use some GPU tricks to send down stale data
@ -690,7 +819,7 @@ pub fn updateCell(
/// Set the screen size for rendering. This will update the projection
/// used for the shader so that the scaling of the grid is correct.
pub fn setScreenSize(self: *OpenGL, dim: ScreenSize) !void {
fn setScreenSize(self: *OpenGL, dim: renderer.ScreenSize) !void {
// Update the projection uniform within our shader
const bind = try self.program.use();
defer bind.unbind();
@ -707,21 +836,21 @@ pub fn setScreenSize(self: *OpenGL, dim: ScreenSize) !void {
);
// Recalculate the rows/columns.
self.size.update(dim, self.cell_size);
const grid_size = renderer.GridSize.init(dim, self.cell_size);
// Update our LRU. We arbitrarily support a certain number of pages here.
// We also always support a minimum number of caching in case a user
// is resizing tiny then growing again we can save some of the renders.
const evicted = try self.cells_lru.resize(self.alloc, @max(80, self.size.rows * 10));
const evicted = try self.cells_lru.resize(self.alloc, @max(80, grid_size.rows * 10));
if (evicted) |list| for (list) |*value| value.deinit(self.alloc);
// Update our shaper
var shape_buf = try self.alloc.alloc(font.Shaper.Cell, self.size.columns * 2);
var shape_buf = try self.alloc.alloc(font.Shaper.Cell, grid_size.columns * 2);
errdefer self.alloc.free(shape_buf);
self.alloc.free(self.font_shaper.cell_buf);
self.font_shaper.cell_buf = shape_buf;
log.debug("screen size screen={} grid={}, cell={}", .{ dim, self.size, self.cell_size });
log.debug("screen size screen={} grid={}, cell={}", .{ dim, grid_size, self.cell_size });
}
/// Updates the font texture atlas if it is dirty.
@ -797,7 +926,7 @@ fn flushAtlas(self: *OpenGL) !void {
/// Render renders the current cell state. This will not modify any of
/// the cells.
pub fn render(self: *OpenGL) !void {
pub fn draw(self: *OpenGL) !void {
const t = trace(@src());
defer t.end();
@ -863,71 +992,3 @@ pub fn render(self: *OpenGL) !void {
self.cells.items.len,
);
}
/// The dimensions of a single "cell" in the terminal grid.
///
/// The dimensions are dependent on the current loaded set of font glyphs.
/// We calculate the width based on the widest character and the height based
/// on the height requirement for an underscore (the "lowest" -- visually --
/// character).
///
/// The units for the width and height are in world space. They have to
/// be normalized using the screen projection.
///
/// TODO(mitchellh): we should recalculate cell dimensions when new glyphs
/// are loaded.
const CellSize = struct {
width: f32,
height: f32,
};
/// The dimensions of the screen that the grid is rendered to. This is the
/// terminal screen, so it is likely a subset of the window size. The dimensions
/// should be in pixels.
const ScreenSize = struct {
width: u32,
height: u32,
};
/// The dimensions of the grid itself, in rows/columns units.
const GridSize = struct {
const Unit = u32;
columns: Unit = 0,
rows: Unit = 0,
/// Update the columns/rows for the grid based on the given screen and
/// cell size.
fn update(self: *GridSize, screen: ScreenSize, cell: CellSize) void {
self.columns = @floatToInt(Unit, @intToFloat(f32, screen.width) / cell.width);
self.rows = @floatToInt(Unit, @intToFloat(f32, screen.height) / cell.height);
}
};
test "GridSize update exact" {
var grid: GridSize = .{};
grid.update(.{
.width = 100,
.height = 40,
}, .{
.width = 5,
.height = 10,
});
try testing.expectEqual(@as(GridSize.Unit, 20), grid.columns);
try testing.expectEqual(@as(GridSize.Unit, 4), grid.rows);
}
test "GridSize update rounding" {
var grid: GridSize = .{};
grid.update(.{
.width = 20,
.height = 40,
}, .{
.width = 6,
.height = 15,
});
try testing.expectEqual(@as(GridSize.Unit, 3), grid.columns);
try testing.expectEqual(@as(GridSize.Unit, 2), grid.rows);
}

42
src/renderer/State.zig Normal file
View File

@ -0,0 +1,42 @@
//! This is the render state that is given to a renderer.
const std = @import("std");
const DevMode = @import("../DevMode.zig");
const terminal = @import("../terminal/main.zig");
const renderer = @import("../renderer.zig");
/// The mutex that must be held while reading any of the data in the
/// members of this state. Note that the state itself is NOT protected
/// by the mutex and is NOT thread-safe, only the members values of the
/// state (i.e. the terminal, devmode, etc. values).
mutex: *std.Thread.Mutex,
/// True if the window is focused
focused: bool,
/// A new screen size if the screen was resized.
resize_screen: ?renderer.ScreenSize,
/// Cursor configuration for rendering
cursor: Cursor,
/// The terminal data.
terminal: *terminal.Terminal,
/// The devmode data.
devmode: ?*const DevMode = null,
/// Cursor state needed by a renderer to draw the cursor. Like all other
/// members of the render state, reads/writes must hold the state mutex.
pub const Cursor = struct {
    /// Current cursor style. This can be set by escape sequences. To get
    /// the default style, the config has to be referenced.
    style: terminal.CursorStyle = .default,

    /// Whether the cursor is visible at all. This should not be used for
    /// "blink" settings, see "blink" for that. This is used to turn the
    /// cursor ON or OFF.
    visible: bool = true,

    /// Whether the cursor is currently blinking. If it is blinking, then
    /// the cursor will not be rendered.
    blink: bool = false,
};

212
src/renderer/Thread.zig Normal file
View File

@ -0,0 +1,212 @@
//! Represents the renderer thread logic. The renderer thread is able to
//! be woken up to render.
pub const Thread = @This();
const std = @import("std");
const builtin = @import("builtin");
const glfw = @import("glfw");
const libuv = @import("libuv");
const renderer = @import("../renderer.zig");
const gl = @import("../opengl.zig");
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.renderer_thread);
/// The main event loop for the application. The user data of this loop
/// is always the allocator used to create the loop. This is a convenience
/// so that users of the loop always have an allocator.
loop: libuv.Loop,
/// This can be used to wake up the renderer and force a render safely from
/// any thread.
wakeup: libuv.Async,
/// This can be used to stop the renderer on the next loop iteration.
stop: libuv.Async,
/// The timer used for rendering
render_h: libuv.Timer,
/// The window we're rendering to.
window: glfw.Window,
/// The underlying renderer implementation.
renderer: *renderer.OpenGL,
/// Pointer to the shared state that is used to generate the final render.
state: *renderer.State,
/// Initialize the thread. This does not START the thread. This only sets
/// up all the internal state necessary prior to starting the thread. It
/// is up to the caller to start the thread with the threadMain entrypoint.
///
/// On error, all partially-created handles are scheduled to close via the
/// errdefers below so no libuv resources leak.
pub fn init(
    alloc: Allocator,
    window: glfw.Window,
    renderer_impl: *renderer.OpenGL,
    state: *renderer.State,
) !Thread {
    // We always store allocator pointer on the loop data so that
    // handles can use our global allocator.
    const allocPtr = try alloc.create(Allocator);
    errdefer alloc.destroy(allocPtr);
    allocPtr.* = alloc;

    // Create our event loop.
    var loop = try libuv.Loop.init(alloc);
    errdefer loop.deinit(alloc);
    loop.setData(allocPtr);

    // This async handle is used to "wake up" the renderer and force a render.
    // The close callback recovers the allocator from the loop data to free
    // the handle (libuv handles must be closed before being freed).
    var wakeup_h = try libuv.Async.init(alloc, loop, wakeupCallback);
    errdefer wakeup_h.close((struct {
        fn callback(h: *libuv.Async) void {
            const loop_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(loop_alloc);
        }
    }).callback);

    // This async handle is used to stop the loop and force the thread to end.
    var stop_h = try libuv.Async.init(alloc, loop, stopCallback);
    errdefer stop_h.close((struct {
        fn callback(h: *libuv.Async) void {
            const loop_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(loop_alloc);
        }
    }).callback);

    // The primary timer for rendering.
    var render_h = try libuv.Timer.init(alloc, loop);
    errdefer render_h.close((struct {
        fn callback(h: *libuv.Timer) void {
            const loop_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(loop_alloc);
        }
    }).callback);

    return Thread{
        .loop = loop,
        .wakeup = wakeup_h,
        .stop = stop_h,
        .render_h = render_h,
        .window = window,
        .renderer = renderer_impl,
        .state = state,
    };
}
/// Clean up the thread. This is only safe to call once the thread
/// completes executing; the caller must join prior to this.
///
/// Closes every libuv handle, pumps the loop once more so the close
/// callbacks actually run, then tears down the loop and the stored
/// allocator copy.
pub fn deinit(self: *Thread) void {
    // Get a copy to our allocator
    const alloc_ptr = self.loop.getData(Allocator).?;
    const alloc = alloc_ptr.*;

    // Schedule our handles to close
    self.stop.close((struct {
        fn callback(h: *libuv.Async) void {
            const handle_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(handle_alloc);
        }
    }).callback);
    self.wakeup.close((struct {
        fn callback(h: *libuv.Async) void {
            const handle_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(handle_alloc);
        }
    }).callback);
    self.render_h.close((struct {
        fn callback(h: *libuv.Timer) void {
            const handle_alloc = h.loop().getData(Allocator).?.*;
            h.deinit(handle_alloc);
        }
    }).callback);

    // Run the loop one more time, because destroying our other things
    // like windows usually cancel all our event loop stuff and we need
    // one more run through to finalize all the closes.
    _ = self.loop.run(.default) catch |err|
        log.err("error finalizing event loop: {}", .{err});

    // Dealloc our allocator copy
    alloc.destroy(alloc_ptr);

    self.loop.deinit(alloc);
}
/// The main entrypoint for the thread. Thread entrypoints cannot return
/// errors, so failures from the inner function are logged and dropped.
pub fn threadMain(self: *Thread) void {
    // Call child function so we can use errors...
    self.threadMain_() catch |err| {
        // In the future, we should expose this on the thread struct.
        log.warn("error in renderer err={}", .{err});
    };
}
/// Inner thread body; separated from threadMain so it can use `try`.
/// Performs per-thread renderer setup, wires the handles to this Thread,
/// queues an initial render, and then blocks in the libuv loop until
/// stopCallback stops it.
fn threadMain_(self: *Thread) !void {
    // Get a copy to our allocator
    // const alloc_ptr = self.loop.getData(Allocator).?;
    // const alloc = alloc_ptr.*;

    // Run our thread start/end callbacks. This is important because some
    // renderers have to do per-thread setup. For example, OpenGL has to set
    // some thread-local state since that is how it works.
    const Renderer = RendererType();
    if (@hasDecl(Renderer, "threadEnter")) try self.renderer.threadEnter(self.window);
    defer if (@hasDecl(Renderer, "threadExit")) self.renderer.threadExit();

    // Set up our async handler to support rendering
    self.wakeup.setData(self);
    defer self.wakeup.setData(null);

    // Set up our timer and start it for rendering
    self.render_h.setData(self);
    defer self.render_h.setData(null);

    // Kick off an initial render immediately.
    try self.wakeup.send();

    // Run
    log.debug("starting renderer thread", .{});
    defer log.debug("exiting renderer thread", .{});
    _ = try self.loop.run(.default);
}
/// Callback for the wakeup async handle. Arms the render timer (if not
/// already armed) so a render happens shortly; the short delay coalesces
/// bursts of wakeups into a single render.
fn wakeupCallback(h: *libuv.Async) void {
    const t = h.getData(Thread) orelse {
        // This shouldn't happen so we log it.
        // BUGFIX: this previously logged "render callback fired ...",
        // copy-pasted from renderCallback, which made logs misleading.
        log.warn("wakeup callback fired without data set", .{});
        return;
    };

    // If the timer is already active then a render is already queued
    // and we don't have to do anything.
    const active = t.render_h.isActive() catch true;
    if (active) return;

    // Timer is not active, let's start it
    t.render_h.start(renderCallback, 10, 0) catch |err|
        log.warn("render timer failed to start err={}", .{err});
}
/// Callback for the render timer. Invokes the renderer implementation's
/// thread-safe render entrypoint; errors are logged and dropped since
/// there is no caller to propagate them to from a timer.
fn renderCallback(h: *libuv.Timer) void {
    const t = h.getData(Thread) orelse {
        // This shouldn't happen so we log it.
        log.warn("render callback fired without data set", .{});
        return;
    };

    t.renderer.render(t.window, t.state) catch |err|
        log.warn("error rendering err={}", .{err});
}
/// Callback for the stop async handle. Stops the libuv loop, which makes
/// threadMain_ return and the renderer thread exit.
fn stopCallback(h: *libuv.Async) void {
    h.loop().stop();
}
// This is unnecessary right now but is logic we'll need for when we
// abstract renderers out.
/// Returns the concrete renderer type behind the `renderer` field,
/// unwrapping one level of pointer if present.
fn RendererType() type {
    // @TypeOf never evaluates its operand, so `undefined` is safe here;
    // it exists only so we can name the field.
    const self: Thread = undefined;
    return switch (@typeInfo(@TypeOf(self.renderer))) {
        // Stored as a pointer: the renderer type is the pointee.
        .Pointer => |p| p.child,
        // BUGFIX: previously `.Struct => |s| s`, which binds the
        // std.builtin.Type.Struct *info* payload — not a `type` — and
        // would fail to compile if the field were ever a value type.
        .Struct => @TypeOf(self.renderer),
        else => unreachable,
    };
}

101
src/renderer/size.zig Normal file
View File

@ -0,0 +1,101 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const font = @import("../font/main.zig");
const log = std.log.scoped(.renderer_size);
/// The dimensions of a single "cell" in the terminal grid.
///
/// These depend on the currently loaded font glyphs: width comes from the
/// advance of a regular ASCII glyph and height from the face metrics. The
/// units are in world space and must be normalized by each renderer
/// implementation.
pub const CellSize = struct {
    width: f32,
    height: f32,

    /// Initialize the cell size information from a font group. This ensures
    /// that all renderers use the same cell sizing information for the same
    /// fonts.
    pub fn init(alloc: Allocator, group: *font.GroupCache) !CellSize {
        // Measure using a regular-style ASCII 'M'. The exact character is
        // unimportant; any normal ASCII codepoint guarantees we hit the
        // regular text font.
        const index = (try group.indexForCodepoint(alloc, 'M', .regular, .text)).?;
        const face = try group.group.faceFromIndex(index);
        const metrics = face.metrics;
        log.debug("cell dimensions={}", .{metrics});

        return CellSize{
            .width = metrics.cell_width,
            .height = metrics.cell_height,
        };
    }
};
/// The dimensions of the screen that the grid is rendered to. This is the
/// terminal screen, so it is likely a subset of the window size. The dimensions
/// should be in pixels.
pub const ScreenSize = struct {
    // Width in pixels.
    width: u32,
    // Height in pixels.
    height: u32,
};
/// The dimensions of the grid itself, in rows/columns units.
pub const GridSize = struct {
    const Unit = u32;

    columns: Unit = 0,
    rows: Unit = 0,

    /// Build a grid size directly from a screen and cell size.
    pub fn init(screen: ScreenSize, cell: CellSize) GridSize {
        var self: GridSize = .{};
        self.update(screen, cell);
        return self;
    }

    /// Recompute columns/rows for the given screen and cell size.
    /// Fractional cells are dropped (truncation toward zero).
    pub fn update(self: *GridSize, screen: ScreenSize, cell: CellSize) void {
        const cols = @intToFloat(f32, screen.width) / cell.width;
        const rows = @intToFloat(f32, screen.height) / cell.height;
        self.columns = @floatToInt(Unit, cols);
        self.rows = @floatToInt(Unit, rows);
    }
};
test "GridSize update exact" {
    const testing = std.testing;

    // 100/5 = 20 columns and 40/10 = 4 rows: both divide evenly.
    var grid: GridSize = .{};
    grid.update(.{
        .width = 100,
        .height = 40,
    }, .{
        .width = 5,
        .height = 10,
    });

    try testing.expectEqual(@as(GridSize.Unit, 20), grid.columns);
    try testing.expectEqual(@as(GridSize.Unit, 4), grid.rows);
}
test "GridSize update rounding" {
    const testing = std.testing;

    // 20/6 ≈ 3.33 and 40/15 ≈ 2.66: fractional cells must be dropped,
    // yielding 3 columns and 2 rows.
    var grid: GridSize = .{};
    grid.update(.{
        .width = 20,
        .height = 40,
    }, .{
        .width = 6,
        .height = 15,
    });

    try testing.expectEqual(@as(GridSize.Unit, 3), grid.columns);
    try testing.expectEqual(@as(GridSize.Unit, 2), grid.rows);
}

File diff suppressed because it is too large Load Diff

1220
vendor/glad/src/gl.c vendored

File diff suppressed because it is too large Load Diff