From 08b7a866b6fd685eb63886d1643219756825e6a6 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 10 Sep 2022 10:59:57 -0700
Subject: [PATCH 1/6] Screen dirty tracking

---
 src/terminal/Screen.zig | 150 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 149 insertions(+), 1 deletion(-)

diff --git a/src/terminal/Screen.zig b/src/terminal/Screen.zig
index 395d8aab9..43bbf3901 100644
--- a/src/terminal/Screen.zig
+++ b/src/terminal/Screen.zig
@@ -127,6 +127,11 @@ pub const RowHeader = struct {
         /// row is a continuation of this row.
         wrap: bool = false,
 
+        /// True if this row has had changes. It is up to the caller to
+        /// set this to false. See the methods on Row to see what will set
+        /// this to true.
+        dirty: bool = false,
+
         /// True if any cell in this row has a grapheme associated with it.
         grapheme: bool = false,
     } = .{},
@@ -239,7 +244,7 @@ pub const Row = struct {
     /// Returns the ID for this row. You can turn this into a cell ID
     /// by adding the cell offset plus 1 (so it is 1-indexed).
-    pub fn getId(self: Row) RowHeader.Id {
+    pub inline fn getId(self: Row) RowHeader.Id {
         return self.storage[0].header.id;
     }
@@ -249,6 +254,16 @@
         self.storage[0].header.flags.wrap = v;
     }
 
+    /// Set a row as dirty or not. Generally you only set a row as NOT dirty.
+    /// Various Row functions manage flagging dirty to true.
+    pub fn setDirty(self: Row, v: bool) void {
+        self.storage[0].header.flags.dirty = v;
+    }
+
+    pub inline fn isDirty(self: Row) bool {
+        return self.storage[0].header.flags.dirty;
+    }
+
     /// Retrieve the header for this row.
     pub fn header(self: Row) RowHeader {
         return self.storage[0].header;
     }
@@ -276,6 +291,9 @@
         assert(len <= self.storage.len - 1);
         assert(!cell.attrs.grapheme); // you can't fill with graphemes
 
+        // Always mark the row as dirty for this.
+        self.storage[0].header.flags.dirty = true;
+
         // If our row has no graphemes, then this is a fast copy
         if (!self.storage[0].header.flags.grapheme) {
             std.mem.set(StorageCell, self.storage[start + 1 .. len + 1], .{ .cell = cell });
@@ -308,6 +326,10 @@
     /// this should be done prior.
     pub fn getCellPtr(self: Row, x: usize) *Cell {
         assert(x < self.storage.len - 1);
+
+        // Always mark the row as dirty for this.
+        self.storage[0].header.flags.dirty = true;
+
         return &self.storage[x + 1].cell;
     }
 
@@ -323,6 +345,9 @@
         // Our row now has a grapheme
         self.storage[0].header.flags.grapheme = true;
 
+        // Our row is now dirty
+        self.storage[0].header.flags.dirty = true;
+
         // If we weren't previously a grapheme and we found an existing value
         // it means that it is old grapheme data. Just delete that.
         if (!cell.attrs.grapheme and gop.found_existing) {
@@ -346,6 +371,9 @@
     /// Removes all graphemes associated with a cell.
     pub fn clearGraphemes(self: Row, x: usize) void {
+        // Our row is now dirty
+        self.storage[0].header.flags.dirty = true;
+
         const cell = &self.storage[x + 1].cell;
         const key = self.getId() + x + 1;
         cell.attrs.grapheme = false;
@@ -357,6 +385,9 @@
         // If we have graphemes, clear first to unset them.
        if (self.storage[0].header.flags.grapheme) self.clear(.{});
 
+        // Always mark the row as dirty for this.
+        self.storage[0].header.flags.dirty = true;
+
         // If the source has no graphemes (likely) then this is fast.
         const end = @minimum(src.storage.len, self.storage.len);
         if (!src.storage[0].header.flags.grapheme) {
@@ -2304,6 +2335,123 @@ test "Screen: selectionString wide char with header" {
     }
 }
 
+test "Screen: dirty with getCellPtr" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    var s = try init(alloc, 3, 5, 0);
+    defer s.deinit();
+    try s.testWriteString("1ABCD\n2EFGH\n3IJKL");
+    try testing.expect(s.viewportIsBottom());
+
+    // Ensure all are dirty. Clear em.
+    var iter = s.rowIterator(.viewport);
+    while (iter.next()) |row| {
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+
+    // Reset our cursor onto the second row.
+    s.cursor.x = 0;
+    s.cursor.y = 1;
+
+    try s.testWriteString("foo");
+    {
+        const row = s.getRow(.{ .active = 0 });
+        try testing.expect(!row.isDirty());
+    }
+    {
+        const row = s.getRow(.{ .active = 1 });
+        try testing.expect(row.isDirty());
+    }
+    {
+        const row = s.getRow(.{ .active = 2 });
+        try testing.expect(!row.isDirty());
+
+        _ = row.getCell(0);
+        try testing.expect(!row.isDirty());
+    }
+}
+
+test "Screen: dirty with clear, fill, fillSlice, copyRow" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    var s = try init(alloc, 3, 5, 0);
+    defer s.deinit();
+    try s.testWriteString("1ABCD\n2EFGH\n3IJKL");
+    try testing.expect(s.viewportIsBottom());
+
+    // Ensure all are dirty. Clear em.
+    var iter = s.rowIterator(.viewport);
+    while (iter.next()) |row| {
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+
+    {
+        const row = s.getRow(.{ .active = 0 });
+        try testing.expect(!row.isDirty());
+        row.clear(.{});
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+
+    {
+        const row = s.getRow(.{ .active = 0 });
+        try testing.expect(!row.isDirty());
+        row.fill(.{ .char = 'A' });
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+
+    {
+        const row = s.getRow(.{ .active = 0 });
+        try testing.expect(!row.isDirty());
+        row.fillSlice(.{ .char = 'A' }, 0, 2);
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+
+    {
+        const src = s.getRow(.{ .active = 0 });
+        const row = s.getRow(.{ .active = 1 });
+        try testing.expect(!row.isDirty());
+        try row.copyRow(src);
+        try testing.expect(!src.isDirty());
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+}
+
+test "Screen: dirty with graphemes" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    var s = try init(alloc, 3, 5, 0);
+    defer s.deinit();
+    try s.testWriteString("1ABCD\n2EFGH\n3IJKL");
+    try testing.expect(s.viewportIsBottom());
+
+    // Ensure all are dirty. Clear em.
+    var iter = s.rowIterator(.viewport);
+    while (iter.next()) |row| {
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+
+    {
+        const row = s.getRow(.{ .active = 0 });
+        try testing.expect(!row.isDirty());
+        try row.attachGrapheme(0, 0xFE0F);
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+        row.clearGraphemes(0);
+        try testing.expect(row.isDirty());
+        row.setDirty(false);
+    }
+}
+
 test "Screen: resize (no reflow) more rows" {
     const testing = std.testing;
     const alloc = testing.allocator;
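The flag added above is purely advisory: every mutating Row method flips it on, and it stays on until the consumer clears it with setDirty(false). A renderer sitting on top of this would typically walk the viewport once per frame, skip rows that are still clean, and clear the flag after repainting. A minimal sketch of that consumer loop, assuming a hypothetical renderRow callback and an already-initialized screen (neither is part of this patch):

    // Repaint only rows that changed since the last frame. Clearing the
    // dirty flag is the consumer's job; the screen never unsets it.
    var iter = screen.rowIterator(.viewport);
    var y: usize = 0;
    while (iter.next()) |row| : (y += 1) {
        if (!row.isDirty()) continue; // unchanged, skip the repaint
        try renderRow(row, y); // hypothetical render callback
        row.setDirty(false);
    }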
From 8995e74e234f73c106145a4b4ce40b566800b1c6 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Sat, 10 Sep 2022 11:01:40 -0700
Subject: [PATCH 2/6] rows are dirty when initialized

---
 src/terminal/Screen.zig | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/terminal/Screen.zig b/src/terminal/Screen.zig
index 43bbf3901..c6cf467e3 100644
--- a/src/terminal/Screen.zig
+++ b/src/terminal/Screen.zig
@@ -818,6 +818,9 @@ pub fn getRow(self: *Screen, index: RowIndex) Row {
     // Store the header
     row.storage[0].header.id = id;
 
+    // Mark that we're dirty since we're a new row
+    row.storage[0].header.flags.dirty = true;
+
     // We only need to fill with runtime safety because unions are
     // tag-checked. Otherwise, the default value of zero will be valid.
     if (std.debug.runtime_safety) row.fill(.{});
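Initializing rows as dirty guarantees that every row is rendered at least once even if nothing is ever written to it, for example the blank rows that appear when the screen grows. A short check in the style of the tests above (a sketch, not one of the patch's tests):

    var s = try init(alloc, 3, 5, 0);
    defer s.deinit();

    // This row has never been written to, but the first getRow access
    // initializes it, which marks it dirty so a renderer paints it once.
    const row = s.getRow(.{ .active = 2 });
    try testing.expect(row.isDirty());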
From 4f6c67fe9d7c5922eecb5a9fbfd1968552fe1bc3 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 12 Sep 2022 10:21:18 -0700
Subject: [PATCH 3/6] add LRU

---
 src/lru.zig  | 234 +++++++++++++++++++++++++++++++++++++++++++++++++++
 src/main.zig |   1 +
 2 files changed, 235 insertions(+)
 create mode 100644 src/lru.zig

diff --git a/src/lru.zig b/src/lru.zig
new file mode 100644
index 000000000..39b97006c
--- /dev/null
+++ b/src/lru.zig
@@ -0,0 +1,234 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+
+/// Create a HashMap for a key type that can be automatically hashed.
+/// If you want finer-grained control, use HashMap directly.
+pub fn AutoHashMap(comptime K: type, comptime V: type) type {
+    return HashMap(
+        K,
+        V,
+        std.hash_map.AutoContext(K),
+        std.hash_map.default_max_load_percentage,
+    );
+}
+
+/// HashMap implementation that supports least-recently-used eviction.
+///
+/// Note: This is a really elementary CS101 version of an LRU right now.
+/// This is done initially to get something working. Once we have it working,
+/// we can benchmark and improve if this ends up being a source of slowness.
+pub fn HashMap(
+    comptime K: type,
+    comptime V: type,
+    comptime Context: type,
+    comptime max_load_percentage: u64,
+) type {
+    return struct {
+        const Self = @This();
+        const Map = std.HashMapUnmanaged(K, *Queue.Node, Context, max_load_percentage);
+        const Queue = std.TailQueue(KV);
+
+        /// Map to maintain our entries.
+        map: Map,
+
+        /// Queue to maintain LRU order.
+        queue: Queue,
+
+        /// The capacity of our map. If this capacity is reached, cache
+        /// misses will begin evicting entries.
+        capacity: Map.Size,
+
+        pub const KV = struct {
+            key: K,
+            value: V,
+        };
+
+        /// The result of a getOrPut operation.
+        pub const GetOrPutResult = struct {
+            /// The entry that was retrieved. If found_existing is false,
+            /// then this is a pointer to allocated space to store a V.
+            /// If found_existing is true, the pointer value is valid, but
+            /// can be overwritten.
+            value_ptr: *V,
+
+            /// Whether an existing value was found or not.
+            found_existing: bool,
+
+            /// If another entry had to be evicted to make space for this
+            /// put operation, then this is the value that was evicted.
+            evicted: ?KV,
+        };
+
+        pub fn init(capacity: Map.Size) Self {
+            return .{
+                .map = .{},
+                .queue = .{},
+                .capacity = capacity,
+            };
+        }
+
+        pub fn deinit(self: *Self, alloc: Allocator) void {
+            // Important: use our queue as a source of truth for dealloc
+            // because we might keep items in the queue around that aren't
+            // present in our LRU anymore to prevent future allocations.
+            var it = self.queue.first;
+            while (it) |node| {
+                it = node.next;
+                alloc.destroy(node);
+            }
+
+            self.map.deinit(alloc);
+            self.* = undefined;
+        }
+
+        /// Get or put a value for a key. See GetOrPutResult on how to check
+        /// if an existing value was found, if an existing value was evicted,
+        /// etc.
+        pub fn getOrPut(self: *Self, allocator: Allocator, key: K) Allocator.Error!GetOrPutResult {
+            if (@sizeOf(Context) != 0)
+                @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call getOrPutContext instead.");
+            return self.getOrPutContext(allocator, key, undefined);
+        }
+
+        /// See getOrPut
+        pub fn getOrPutContext(
+            self: *Self,
+            alloc: Allocator,
+            key: K,
+            ctx: Context,
+        ) Allocator.Error!GetOrPutResult {
+            const map_gop = try self.map.getOrPutContext(alloc, key, ctx);
+            if (map_gop.found_existing) {
+                // Move to end to mark as most recently used
+                self.queue.remove(map_gop.value_ptr.*);
+                self.queue.append(map_gop.value_ptr.*);
+
+                return GetOrPutResult{
+                    .found_existing = true,
+                    .value_ptr = &map_gop.value_ptr.*.data.value,
+                    .evicted = null,
+                };
+            }
+            errdefer _ = self.map.remove(key);
+
+            // We're evicting if our map insertion pushed us over our capacity.
+            const evict = self.map.count() > self.capacity;
+
+            // Get our node. If we're not evicting then we allocate a new
+            // node. If we are evicting then we avoid allocation by just
+            // reusing the node we would've evicted.
+            var node = if (!evict) try alloc.create(Queue.Node) else node: {
+                // Our first node is the least recently used.
+                var least_used = self.queue.first.?;
+
+                // Remove the least recently used from the queue. It is
+                // re-appended below as the most recently used.
+                self.queue.remove(least_used);
+
+                // Remove the least used from the map
+                _ = self.map.remove(least_used.data.key);
+
+                break :node least_used;
+            };
+            errdefer if (!evict) alloc.destroy(node);
+
+            // Store our node in the map.
+            map_gop.value_ptr.* = node;
+
+            // Mark the node as most recently used
+            self.queue.append(node);
+
+            // Capture any evicted entry before we overwrite our key.
+            const evicted: ?KV = if (!evict) null else node.data;
+            node.data.key = key;
+            return GetOrPutResult{
+                .found_existing = map_gop.found_existing,
+                .value_ptr = &node.data.value,
+                .evicted = evicted,
+            };
+        }
+
+        /// Get a value for a key.
+        pub fn get(self: *Self, key: K) ?V {
+            if (@sizeOf(Context) != 0) {
+                @compileError("getContext must be used.");
+            }
+            return self.getContext(key, undefined);
+        }
+
+        /// See get
+        pub fn getContext(self: *Self, key: K, ctx: Context) ?V {
+            const node = self.map.getContext(key, ctx) orelse return null;
+            return node.data.value;
+        }
+    };
+}
+
+test "getOrPut" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    const Map = AutoHashMap(u32, u8);
+    var m = Map.init(2);
+    defer m.deinit(alloc);
+
+    // Insert cap values, should be hits
+    {
+        const gop = try m.getOrPut(alloc, 1);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted == null);
+        gop.value_ptr.* = 1;
+    }
+    {
+        const gop = try m.getOrPut(alloc, 2);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted == null);
+        gop.value_ptr.* = 2;
+    }
+
+    // 1 is LRU
+    try testing.expect((try m.getOrPut(alloc, 1)).found_existing);
+    try testing.expect((try m.getOrPut(alloc, 2)).found_existing);
+
+    // Next should evict
+    {
+        const gop = try m.getOrPut(alloc, 3);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted != null);
+        try testing.expect(gop.evicted.?.value == 1);
+        gop.value_ptr.* = 3;
+    }
+
+    // Currently: 2 is LRU, let's make 3 LRU
+    try testing.expect((try m.getOrPut(alloc, 2)).found_existing);
+
+    // Next should evict
+    {
+        const gop = try m.getOrPut(alloc, 4);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted != null);
+        try testing.expect(gop.evicted.?.value == 3);
+        gop.value_ptr.* = 4;
+    }
+}
+
+test "get" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    const Map = AutoHashMap(u32, u8);
+    var m = Map.init(2);
+    defer m.deinit(alloc);
+
+    // Insert cap values, should be hits
+    {
+        const gop = try m.getOrPut(alloc, 1);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted == null);
+        gop.value_ptr.* = 1;
+    }
+
+    try testing.expect(m.get(1) != null);
+    try testing.expect(m.get(1).? == 1);
+    try testing.expect(m.get(2) == null);
+}
diff --git a/src/main.zig b/src/main.zig
index 188ff1fef..53c401d24 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -119,4 +119,5 @@ test {
     // TODO
     _ = @import("config.zig");
     _ = @import("cli_args.zig");
+    _ = @import("lru.zig");
 }
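Because getOrPut can evict once the map is at capacity, callers that store heap-owning values must check the evicted field and release the old value, or its allocation leaks; the map only frees its own queue nodes. A minimal sketch of that pattern, with an illustrative value type (the real consumer of this appears later in this series):

    const std = @import("std");
    const lru = @import("lru.zig");

    // Values that own allocations must be cleaned up when the LRU
    // evicts them; the map itself only destroys its queue nodes.
    const Cache = lru.AutoHashMap(u32, std.ArrayListUnmanaged(u8));

    fn put(cache: *Cache, alloc: std.mem.Allocator, key: u32, byte: u8) !void {
        const gop = try cache.getOrPut(alloc, key);

        // An eviction hands us the old key/value pair; we own it now
        // and must free anything it allocated.
        if (gop.evicted) |kv| {
            var old = kv.value;
            old.deinit(alloc);
        }

        // A fresh (or evicted-and-reused) slot holds stale value
        // memory, so initialize it before use.
        if (!gop.found_existing) gop.value_ptr.* = .{};
        try gop.value_ptr.append(alloc, byte);
    }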
From 3e27120e8c6b34c29ef3058561c9a58e00b2ffe4 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 12 Sep 2022 11:01:22 -0700
Subject: [PATCH 4/6] lru resize

---
 src/lru.zig | 87 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)

diff --git a/src/lru.zig b/src/lru.zig
index 39b97006c..1b252b393 100644
--- a/src/lru.zig
+++ b/src/lru.zig
@@ -1,4 +1,5 @@
 const std = @import("std");
+const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
 
 /// Create a HashMap for a key type that can be automatically hashed.
@@ -161,6 +162,36 @@ pub fn HashMap(
             const node = self.map.getContext(key, ctx) orelse return null;
             return node.data.value;
         }
+
+        /// Resize the LRU. If this shrinks the LRU then LRU items will be
+        /// deallocated.
+        pub fn resize(self: *Self, alloc: Allocator, capacity: Map.Size) void {
+            // Fastest
+            if (capacity >= self.capacity) {
+                self.capacity = capacity;
+                return;
+            }
+
+            // If we're shrinking but we're smaller than the new capacity,
+            // then we don't have to do anything.
+            if (self.map.count() <= capacity) {
+                self.capacity = capacity;
+                return;
+            }
+
+            // We're shrinking and we have more items than the new capacity
+            const delta = self.map.count() - capacity;
+            var i: Map.Size = 0;
+            while (i < delta) : (i += 1) {
+                var node = self.queue.first.?;
+                self.queue.remove(node);
+                _ = self.map.remove(node.data.key);
+                alloc.destroy(node);
+            }
+
+            self.capacity = capacity;
+            assert(self.map.count() == capacity);
+        }
     };
 }
 
@@ -232,3 +263,59 @@ test "get" {
     try testing.expect(m.get(1).? == 1);
     try testing.expect(m.get(2) == null);
 }
+
+test "resize shrink without removal" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    const Map = AutoHashMap(u32, u8);
+    var m = Map.init(2);
+    defer m.deinit(alloc);
+
+    // Insert cap values, LRU is 1
+    {
+        const gop = try m.getOrPut(alloc, 1);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted == null);
+        gop.value_ptr.* = 1;
+    }
+
+    // Shrink
+    m.resize(alloc, 1);
+    {
+        const gop = try m.getOrPut(alloc, 1);
+        try testing.expect(gop.found_existing);
+    }
+}
+
+test "resize shrink and remove" {
+    const testing = std.testing;
+    const alloc = testing.allocator;
+
+    const Map = AutoHashMap(u32, u8);
+    var m = Map.init(2);
+    defer m.deinit(alloc);
+
+    // Insert cap values, LRU is 1
+    {
+        const gop = try m.getOrPut(alloc, 1);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted == null);
+        gop.value_ptr.* = 1;
+    }
+    {
+        const gop = try m.getOrPut(alloc, 2);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted == null);
+        gop.value_ptr.* = 2;
+    }
+
+    // Shrink
+    m.resize(alloc, 1);
+    {
+        const gop = try m.getOrPut(alloc, 1);
+        try testing.expect(!gop.found_existing);
+        try testing.expect(gop.evicted.?.value == 2);
+        gop.value_ptr.* = 1;
+    }
+}
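Growing is just a capacity bump; shrinking below the live item count walks the queue from the front, evicting in least-recently-used order. Note that this version destroys the evicted nodes outright, so it is only safe for plain-data values; the next patch changes the signature so evicted values are handed back to the caller. A small usage sketch of the semantics at this point in the series:

    const assert = std.debug.assert;

    var m = AutoHashMap(u32, u8).init(4);
    defer m.deinit(alloc);

    // Fill to capacity in key order, so key 0 is least recently used.
    var i: u32 = 0;
    while (i < 4) : (i += 1) {
        const gop = try m.getOrPut(alloc, i);
        gop.value_ptr.* = @intCast(u8, i);
    }

    // Shrinking 4 -> 2 evicts keys 0 and 1 (LRU first); 2 and 3 survive.
    m.resize(alloc, 2);
    assert(m.get(0) == null);
    assert(m.get(3) != null);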
From 662b6562185c5023897230d7cc743324208c224b Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 12 Sep 2022 11:24:34 -0700
Subject: [PATCH 5/6] implement LRU row GPU cell caching

---
 src/Grid.zig            | 43 +++++++++++++++++++++++++++++++++++++++
 src/lru.zig             | 21 ++++++++++++------
 src/terminal/Screen.zig | 13 +++++++++++--
 3 files changed, 69 insertions(+), 8 deletions(-)

diff --git a/src/Grid.zig b/src/Grid.zig
index 89fe26a42..0b214fc6f 100644
--- a/src/Grid.zig
+++ b/src/Grid.zig
@@ -12,9 +12,12 @@ const Terminal = terminal.Terminal;
 const gl = @import("opengl.zig");
 const trace = @import("tracy").trace;
 const math = @import("math.zig");
+const lru = @import("lru.zig");
 
 const log = std.log.scoped(.grid);
 
+const CellsLRU = lru.AutoHashMap(terminal.Screen.RowHeader.Id, std.ArrayListUnmanaged(GPUCell));
+
 alloc: std.mem.Allocator,
 
 /// Current dimensions for this grid.
@@ -26,6 +29,10 @@ cell_size: CellSize,
 /// The current set of cells to render.
 cells: std.ArrayListUnmanaged(GPUCell),
 
+/// The LRU that stores our GPU cells cached by row IDs. This is used to
+/// prevent high CPU activity when shaping rows.
+cells_lru: CellsLRU,
+
 /// The size of the cells list that was sent to the GPU. This is used
 /// to detect when the cells array was reallocated/resized and handle that
 /// accordingly.
@@ -303,6 +310,7 @@ pub fn init(
     return Grid{
         .alloc = alloc,
         .cells = .{},
+        .cells_lru = CellsLRU.init(0),
         .cell_size = .{ .width = metrics.cell_width, .height = metrics.cell_height },
         .size = .{ .rows = 0, .columns = 0 },
         .program = program,
@@ -333,6 +341,7 @@ pub fn deinit(self: *Grid) void {
     self.ebo.destroy();
     self.vao.destroy();
     self.program.destroy();
+    self.cells_lru.deinit(self.alloc);
     self.cells.deinit(self.alloc);
     self.* = undefined;
 }
@@ -369,6 +378,22 @@ pub fn rebuildCells(self: *Grid, term: *Terminal) !void {
     while (rowIter.next()) |row| {
         defer y += 1;
 
+        // Get our value from the cache.
+        const gop = try self.cells_lru.getOrPut(self.alloc, row.getId());
+        if (!row.isDirty() and gop.found_existing) {
+            var i: usize = self.cells.items.len;
+            for (gop.value_ptr.items) |cell| {
+                self.cells.appendAssumeCapacity(cell);
+                self.cells.items[i].grid_row = @intCast(u16, y);
+                i += 1;
+            }
+
+            continue;
+        }
+
+        // Get the starting index for our row so we can cache any new GPU cells.
+        const start = self.cells.items.len;
+
         // Split our row into runs and shape each one.
         var iter = self.font_shaper.runIterator(&self.font_group, row);
         while (try iter.next(self.alloc)) |run| {
@@ -383,6 +408,18 @@
                 ));
             }
         }
+
+        // Initialize our list
+        if (!gop.found_existing) gop.value_ptr.* = .{};
+        var row_cells = gop.value_ptr;
+
+        // Get our new length and cache the cells.
+        try row_cells.ensureTotalCapacity(self.alloc, term.screen.cols);
+        row_cells.clearRetainingCapacity();
+        row_cells.appendSliceAssumeCapacity(self.cells.items[start..]);
+
+        // Set row is not dirty anymore
+        row.setDirty(false);
     }
 
     // Add the cursor
@@ -625,6 +662,12 @@ pub fn setScreenSize(self: *Grid, dim: ScreenSize) !void {
     // Recalculate the rows/columns.
     self.size.update(dim, self.cell_size);
 
+    // Update our LRU. We arbitrarily cache a number of screen pages here.
+    // We also always keep a minimum amount of cache so that if a user
+    // resizes tiny and then grows again, we can save some of the renders.
+    const evicted = try self.cells_lru.resize(self.alloc, @maximum(80, self.size.rows * 10));
+    if (evicted) |list| { for (list) |*value| value.deinit(self.alloc); self.alloc.free(list); }
+
     // Update our shaper
     var shape_buf = try self.alloc.alloc(font.Shaper.Cell, self.size.columns * 2);
     errdefer self.alloc.free(shape_buf);
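Two details of the Grid changes above are worth spelling out. First, the cache is keyed on the row ID rather than the y position: scrolling moves a row on screen without changing its ID, so a clean row's shaped cells can be replayed verbatim, with only grid_row patched to the new position. Second, ownership: the cached cell lists own heap memory, and the getOrPut in rebuildCells can evict one on a cache miss once the map is at capacity. The hunk above ignores gop.evicted in that case, so an evicted list would leak; a small helper along these lines (a sketch, not part of the patch) covers it:

    // Release a cell list evicted by CellsLRU.getOrPut. The LRU hands
    // ownership of the evicted key/value pair back to the caller.
    fn releaseEvicted(alloc: std.mem.Allocator, evicted: ?CellsLRU.KV) void {
        if (evicted) |kv| {
            var list = kv.value; // a std.ArrayListUnmanaged(GPUCell)
            list.deinit(alloc);
        }
    }

Calling releaseEvicted(self.alloc, gop.evicted) immediately after the getOrPut in rebuildCells would plug that hole without changing the hit/miss logic.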
diff --git a/src/lru.zig b/src/lru.zig
index 1b252b393..e4344eeba 100644
--- a/src/lru.zig
+++ b/src/lru.zig
@@ -164,26 +164,30 @@ pub fn HashMap(
         }
 
         /// Resize the LRU. If this shrinks the LRU then LRU items will be
-        /// deallocated.
-        pub fn resize(self: *Self, alloc: Allocator, capacity: Map.Size) void {
+        /// deallocated. The deallocated items are returned in the slice. This
+        /// slice must be freed by the caller.
+        pub fn resize(self: *Self, alloc: Allocator, capacity: Map.Size) Allocator.Error!?[]V {
             // Fastest
             if (capacity >= self.capacity) {
                 self.capacity = capacity;
-                return;
+                return null;
             }
 
             // If we're shrinking but we're smaller than the new capacity,
             // then we don't have to do anything.
             if (self.map.count() <= capacity) {
                 self.capacity = capacity;
-                return;
+                return null;
             }
 
             // We're shrinking and we have more items than the new capacity
             const delta = self.map.count() - capacity;
+            var evicted = try alloc.alloc(V, delta);
+
             var i: Map.Size = 0;
             while (i < delta) : (i += 1) {
                 var node = self.queue.first.?;
+                evicted[i] = node.data.value;
                 self.queue.remove(node);
                 _ = self.map.remove(node.data.key);
                 alloc.destroy(node);
@@ -191,6 +195,8 @@
 
             self.capacity = capacity;
             assert(self.map.count() == capacity);
+
+            return evicted;
         }
     };
 }
@@ -281,7 +287,8 @@
     }
 
     // Shrink
-    m.resize(alloc, 1);
+    const evicted = try m.resize(alloc, 1);
+    try testing.expect(evicted == null);
     {
         const gop = try m.getOrPut(alloc, 1);
         try testing.expect(gop.found_existing);
@@ -311,7 +318,9 @@
     }
 
     // Shrink
-    m.resize(alloc, 1);
+    const evicted = try m.resize(alloc, 1);
+    defer alloc.free(evicted.?);
+    try testing.expectEqual(@as(usize, 1), evicted.?.len);
     {
         const gop = try m.getOrPut(alloc, 1);
         try testing.expect(!gop.found_existing);
diff --git a/src/terminal/Screen.zig b/src/terminal/Screen.zig
index c6cf467e3..821d64215 100644
--- a/src/terminal/Screen.zig
+++ b/src/terminal/Screen.zig
@@ -113,7 +113,7 @@ const StorageCell = union {
 /// The row header is at the start of every row within the storage buffer.
 /// It can store row-specific data.
 pub const RowHeader = struct {
-    const Id = u32;
+    pub const Id = u32;
 
     /// The ID of this row, used to uniquely identify this row. The cells
     /// are also ID'd by id + cell index (0-indexed). This will wrap around
@@ -2463,13 +2463,22 @@ test "Screen: resize (no reflow) more rows" {
     defer s.deinit();
     const str = "1ABCD\n2EFGH\n3IJKL";
     try s.testWriteString(str);
-    try s.resizeWithoutReflow(10, 5);
 
+    // Clear dirty rows
+    var iter = s.rowIterator(.viewport);
+    while (iter.next()) |row| row.setDirty(false);
+
+    // Resize
+    try s.resizeWithoutReflow(10, 5);
     {
         var contents = try s.testString(alloc, .viewport);
         defer alloc.free(contents);
         try testing.expectEqualStrings(str, contents);
    }
+
+    // Everything should be dirty
+    iter = s.rowIterator(.viewport);
+    while (iter.next()) |row| try testing.expect(row.isDirty());
 }
 
 test "Screen: resize (no reflow) less rows" {
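The Screen change in this patch is a visibility widening rather than a behavior change: the LRU in Grid.zig is keyed on RowHeader.Id, so the type has to be reachable from outside the Screen module. Any other consumer can now key a cache by row identity the same way. A sketch (the import paths here are assumptions for illustration):

    const terminal = @import("terminal/main.zig"); // assumed path
    const lru = @import("lru.zig");

    // A cache keyed by row ID survives scrolling, because a row keeps
    // its ID as it moves through the viewport.
    const WidthCache = lru.AutoHashMap(terminal.Screen.RowHeader.Id, usize);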
From b694afa425e6b01b701b272ffd38c46f87853623 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 12 Sep 2022 11:39:20 -0700
Subject: [PATCH 6/6] update TODO

---
 TODO.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/TODO.md b/TODO.md
index b16c37b8d..7a629328e 100644
--- a/TODO.md
+++ b/TODO.md
@@ -12,7 +12,6 @@ Performance:
   screen data structure.
 * Screen cell structure should be rethought to use some data oriented design,
   also bring it closer to GPU cells, perhaps.
-* Cache text shaping results and only invalidate if the line becomes dirty.
 
 Correctness: