Mirror of https://github.com/ghostty-org/ghostty.git (synced 2025-07-23 12:16:11 +03:00)
implement LRU row GPU cell caching
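The idea behind this change: every screen row carries a stable ID and a dirty flag, so the renderer can key an LRU map on the row ID and skip font shaping (the source of the "high CPU activity" the new field comment mentions) for rows that have not changed since they were last shaped. A rough sketch of the per-row decision this diff adds to rebuildCells, condensed from the hunks below rather than quoted verbatim:

    const gop = try self.cells_lru.getOrPut(self.alloc, row.getId());
    if (!row.isDirty() and gop.found_existing) {
        // Cache hit: copy the previously shaped GPU cells straight into the
        // render list and skip the shaper entirely.
    } else {
        // Miss, or the row changed: shape the row, store the freshly built
        // GPU cells in the LRU under the row ID, and clear the dirty flag.
    }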
src/Grid.zig (43 lines changed)
@@ -12,9 +12,12 @@ const Terminal = terminal.Terminal;
const gl = @import("opengl.zig");
const trace = @import("tracy").trace;
const math = @import("math.zig");
const lru = @import("lru.zig");

const log = std.log.scoped(.grid);

const CellsLRU = lru.AutoHashMap(terminal.Screen.RowHeader.Id, std.ArrayListUnmanaged(GPUCell));

alloc: std.mem.Allocator,

/// Current dimensions for this grid.

@@ -26,6 +29,10 @@ cell_size: CellSize,
/// The current set of cells to render.
cells: std.ArrayListUnmanaged(GPUCell),

/// The LRU that stores our GPU cells cached by row IDs. This is used to
/// prevent high CPU activity when shaping rows.
cells_lru: CellsLRU,

/// The size of the cells list that was sent to the GPU. This is used
/// to detect when the cells array was reallocated/resized and handle that
/// accordingly.
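For orientation, these are the lru.AutoHashMap operations the rest of this diff relies on. The signatures are paraphrased from the call sites in these hunks, not quoted from lru.zig, and the variable names are placeholders:

    var cache = CellsLRU.init(0);                     // start empty; capacity is raised later via resize
    defer cache.deinit(alloc);
    const gop = try cache.getOrPut(alloc, row_id);    // exposes gop.found_existing and gop.value_ptr
    const evicted = try cache.resize(alloc, new_cap); // ?[]V of evicted values; the caller frees the slice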
@@ -303,6 +310,7 @@ pub fn init(
    return Grid{
        .alloc = alloc,
        .cells = .{},
        .cells_lru = CellsLRU.init(0),
        .cell_size = .{ .width = metrics.cell_width, .height = metrics.cell_height },
        .size = .{ .rows = 0, .columns = 0 },
        .program = program,

@@ -333,6 +341,7 @@ pub fn deinit(self: *Grid) void {
    self.ebo.destroy();
    self.vao.destroy();
    self.program.destroy();
    self.cells_lru.deinit(self.alloc);
    self.cells.deinit(self.alloc);
    self.* = undefined;
}

@@ -369,6 +378,22 @@ pub fn rebuildCells(self: *Grid, term: *Terminal) !void {
    while (rowIter.next()) |row| {
        defer y += 1;

        // Get our value from the cache.
        const gop = try self.cells_lru.getOrPut(self.alloc, row.getId());
        if (!row.isDirty() and gop.found_existing) {
            var i: usize = self.cells.items.len;
            for (gop.value_ptr.items) |cell| {
                self.cells.appendAssumeCapacity(cell);
                self.cells.items[i].grid_row = @intCast(u16, y);
                i += 1;
            }

            continue;
        }

        // Get the starting index for our row so we can cache any new GPU cells.
        const start = self.cells.items.len;

        // Split our row into runs and shape each one.
        var iter = self.font_shaper.runIterator(&self.font_group, row);
        while (try iter.next(self.alloc)) |run| {
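One subtlety in the cache-hit path above: a GPUCell stores its own grid_row, and a row with the same ID can be displayed at a different viewport y than when its cells were cached (after scrolling, for example), so each cached cell's row coordinate is rewritten to the current y as it is copied back into the render list. Only the shaping result is reused, not the on-screen position.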
@@ -383,6 +408,18 @@ pub fn rebuildCells(self: *Grid, term: *Terminal) !void {
                ));
            }
        }

        // Initialize our list
        if (!gop.found_existing) gop.value_ptr.* = .{};
        var row_cells = gop.value_ptr;

        // Get our new length and cache the cells.
        try row_cells.ensureTotalCapacity(self.alloc, term.screen.cols);
        row_cells.clearRetainingCapacity();
        row_cells.appendSliceAssumeCapacity(self.cells.items[start..]);

        // Set row is not dirty anymore
        row.setDirty(false);
    }

    // Add the cursor

@@ -625,6 +662,12 @@ pub fn setScreenSize(self: *Grid, dim: ScreenSize) !void {
    // Recalculate the rows/columns.
    self.size.update(dim, self.cell_size);

    // Update our LRU. We arbitrarily support a certain number of pages here.
    // We also always support a minimum number of caching in case a user
    // is resizing tiny then growing again we can save some of the renders.
    const evicted = try self.cells_lru.resize(self.alloc, @maximum(80, self.size.rows * 10));
    if (evicted) |list| for (list) |*value| value.deinit(self.alloc);

    // Update our shaper
    var shape_buf = try self.alloc.alloc(font.Shaper.Cell, self.size.columns * 2);
    errdefer self.alloc.free(shape_buf);
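Two things worth noting in the setScreenSize hunk above. First, per the source comments, the cache capacity is set to @maximum(80, rows * 10), roughly ten screens' worth of rows with a floor of 80, so that briefly shrinking the window to a tiny size does not throw the whole cache away. Second, each cached value is a std.ArrayListUnmanaged(GPUCell) that owns heap memory, so every value evicted by the resize has to be deinited by the caller, which is what the `for (list) |*value| value.deinit(self.alloc);` loop does.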
src/lru.zig (21 lines changed)
@@ -164,26 +164,30 @@ pub fn HashMap(
        }

        /// Resize the LRU. If this shrinks the LRU then LRU items will be
        /// deallocated.
        pub fn resize(self: *Self, alloc: Allocator, capacity: Map.Size) void {
        /// deallocated. The deallocated items are returned in the slice. This
        /// slice must be freed by the caller.
        pub fn resize(self: *Self, alloc: Allocator, capacity: Map.Size) Allocator.Error!?[]V {
            // Fastest
            if (capacity >= self.capacity) {
                self.capacity = capacity;
                return;
                return null;
            }

            // If we're shrinking but we're smaller than the new capacity,
            // then we don't have to do anything.
            if (self.map.count() <= capacity) {
                self.capacity = capacity;
                return;
                return null;
            }

            // We're shrinking and we have more items than the new capacity
            const delta = self.map.count() - capacity;
            var evicted = try alloc.alloc(V, delta);

            var i: Map.Size = 0;
            while (i < delta) : (i += 1) {
                var node = self.queue.first.?;
                evicted[i] = node.data.value;
                self.queue.remove(node);
                _ = self.map.remove(node.data.key);
                alloc.destroy(node);

@@ -191,6 +195,8 @@ pub fn HashMap(

            self.capacity = capacity;
            assert(self.map.count() == capacity);

            return evicted;
        }
    };
}
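A minimal caller-side sketch of the new resize contract, assuming placeholder names (lru_map, new_capacity) and a value type that owns no resources; when it does own resources, each evicted value must also be cleaned up, as Grid.setScreenSize does above:

    // resize may evict least-recently-used entries when shrinking; the
    // returned slice of evicted values is owned by the caller.
    if (try lru_map.resize(alloc, new_capacity)) |evicted| {
        defer alloc.free(evicted);
        // deinit each evicted value here if V owns heap memory
    }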
@@ -281,7 +287,8 @@ test "resize shrink without removal" {
    }

    // Shrink
    m.resize(alloc, 1);
    const evicted = try m.resize(alloc, 1);
    try testing.expect(evicted == null);
    {
        const gop = try m.getOrPut(alloc, 1);
        try testing.expect(gop.found_existing);

@@ -311,7 +318,9 @@ test "resize shrink and remove" {
    }

    // Shrink
    m.resize(alloc, 1);
    const evicted = try m.resize(alloc, 1);
    defer alloc.free(evicted.?);
    try testing.expectEqual(@as(usize, 1), evicted.?.len);
    {
        const gop = try m.getOrPut(alloc, 1);
        try testing.expect(!gop.found_existing);
src/terminal/Screen.zig

@@ -113,7 +113,7 @@ const StorageCell = union {
/// The row header is at the start of every row within the storage buffer.
/// It can store row-specific data.
pub const RowHeader = struct {
    const Id = u32;
    pub const Id = u32;

    /// The ID of this row, used to uniquely identify this row. The cells
    /// are also ID'd by id + cell index (0-indexed). This will wrap around
@@ -2463,13 +2463,22 @@ test "Screen: resize (no reflow) more rows" {
    defer s.deinit();
    const str = "1ABCD\n2EFGH\n3IJKL";
    try s.testWriteString(str);
    try s.resizeWithoutReflow(10, 5);

    // Clear dirty rows
    var iter = s.rowIterator(.viewport);
    while (iter.next()) |row| row.setDirty(false);

    // Resize
    try s.resizeWithoutReflow(10, 5);
    {
        var contents = try s.testString(alloc, .viewport);
        defer alloc.free(contents);
        try testing.expectEqualStrings(str, contents);
    }

    // Everything should be dirty
    iter = s.rowIterator(.viewport);
    while (iter.next()) |row| try testing.expect(row.isDirty());
}

test "Screen: resize (no reflow) less rows" {