Merge pull request #1726 from mitchellh/renderer-dirty

Dirty Tracking
This commit is contained in:
Mitchell Hashimoto
2024-05-04 14:49:35 -07:00
committed by GitHub
14 changed files with 1148 additions and 28 deletions

View File

@ -83,9 +83,13 @@ pub const WebCanvas = struct {
pub fn deinit(self: *DeferredFace) void {
switch (options.backend) {
.fontconfig_freetype => if (self.fc) |*fc| fc.deinit(),
.coretext, .coretext_freetype, .coretext_harfbuzz => if (self.ct) |*ct| ct.deinit(),
.freetype => {},
.web_canvas => if (self.wc) |*wc| wc.deinit(),
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
=> if (self.ct) |*ct| ct.deinit(),
}
self.* = undefined;
}
@ -98,7 +102,11 @@ pub fn familyName(self: DeferredFace, buf: []u8) ![]const u8 {
.fontconfig_freetype => if (self.fc) |fc|
return (try fc.pattern.get(.family, 0)).string,
.coretext, .coretext_freetype, .coretext_harfbuzz => if (self.ct) |ct| {
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
=> if (self.ct) |ct| {
const family_name = ct.font.copyAttribute(.family_name);
return family_name.cstringPtr(.utf8) orelse unsupported: {
break :unsupported family_name.cstring(buf, .utf8) orelse
@ -121,7 +129,11 @@ pub fn name(self: DeferredFace, buf: []u8) ![]const u8 {
.fontconfig_freetype => if (self.fc) |fc|
return (try fc.pattern.get(.fullname, 0)).string,
.coretext, .coretext_freetype, .coretext_harfbuzz => if (self.ct) |ct| {
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
=> if (self.ct) |ct| {
const display_name = ct.font.copyDisplayName();
return display_name.cstringPtr(.utf8) orelse unsupported: {
// "NULL if the internal storage of theString does not allow
@ -147,7 +159,7 @@ pub fn load(
) !Face {
return switch (options.backend) {
.fontconfig_freetype => try self.loadFontconfig(lib, opts),
.coretext, .coretext_harfbuzz => try self.loadCoreText(lib, opts),
.coretext, .coretext_harfbuzz, .coretext_noshape => try self.loadCoreText(lib, opts),
.coretext_freetype => try self.loadCoreTextFreetype(lib, opts),
.web_canvas => try self.loadWebCanvas(opts),
@ -262,7 +274,11 @@ pub fn hasCodepoint(self: DeferredFace, cp: u32, p: ?Presentation) bool {
}
},
.coretext, .coretext_freetype, .coretext_harfbuzz => {
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
=> {
// If we are using coretext, we check the loaded CT font.
if (self.ct) |ct| {
if (p) |desired_p| {

View File

@ -14,8 +14,12 @@ const log = std.log.scoped(.discovery);
pub const Discover = switch (options.backend) {
.freetype => void, // no discovery
.fontconfig_freetype => Fontconfig,
.coretext, .coretext_freetype, .coretext_harfbuzz => CoreText,
.web_canvas => void, // no discovery
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
=> CoreText,
};
/// Descriptor is used to search for fonts. The only required field

View File

@ -13,7 +13,11 @@ pub const Face = switch (options.backend) {
.coretext_freetype,
=> freetype.Face,
.coretext, .coretext_harfbuzz => coretext.Face,
.coretext,
.coretext_harfbuzz,
.coretext_noshape,
=> coretext.Face,
.web_canvas => web_canvas.Face,
};

View File

@ -16,6 +16,7 @@ pub const Library = switch (options.backend) {
// Some backends such as CT and Canvas don't have a "library"
.coretext,
.coretext_harfbuzz,
.coretext_noshape,
.web_canvas,
=> NoopLibrary,
};

View File

@ -61,6 +61,9 @@ pub const Backend = enum {
/// CoreText for font discovery and rendering, HarfBuzz for shaping
coretext_harfbuzz,
/// CoreText for font discovery and rendering, no shaping.
coretext_noshape,
/// Use the browser font system and the Canvas API (wasm). This limits
/// the available fonts to browser fonts (anything Canvas natively
/// supports).
@ -97,6 +100,7 @@ pub const Backend = enum {
.coretext,
.coretext_harfbuzz,
.coretext_noshape,
.web_canvas,
=> false,
};
@ -107,6 +111,7 @@ pub const Backend = enum {
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
=> true,
.freetype,
@ -124,6 +129,7 @@ pub const Backend = enum {
.coretext,
.coretext_freetype,
.coretext_harfbuzz,
.coretext_noshape,
.web_canvas,
=> false,
};
@ -138,6 +144,7 @@ pub const Backend = enum {
=> true,
.coretext,
.coretext_noshape,
.web_canvas,
=> false,
};

View File

@ -1,5 +1,6 @@
const builtin = @import("builtin");
const options = @import("main.zig").options;
pub const noop = @import("shaper/noop.zig");
pub const harfbuzz = @import("shaper/harfbuzz.zig");
pub const coretext = @import("shaper/coretext.zig");
pub const web_canvas = @import("shaper/web_canvas.zig");
@ -19,6 +20,8 @@ pub const Shaper = switch (options.backend) {
// font faces.
.coretext => coretext.Shaper,
.coretext_noshape => noop.Shaper,
.web_canvas => web_canvas.Shaper,
};
@ -61,4 +64,7 @@ pub const Options = struct {
test {
_ = Cache;
_ = Shaper;
// Always test noop
_ = noop;
}

143
src/font/shaper/noop.zig Normal file
View File

@ -0,0 +1,143 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const trace = @import("tracy").trace;
const font = @import("../main.zig");
const Face = font.Face;
const Collection = font.Collection;
const DeferredFace = font.DeferredFace;
const Group = font.Group;
const GroupCache = font.GroupCache;
const Library = font.Library;
const SharedGrid = font.SharedGrid;
const Style = font.Style;
const Presentation = font.Presentation;
const terminal = @import("../../terminal/main.zig");
const log = std.log.scoped(.font_shaper);
/// Shaper that doesn't do any shaping. Each individual codepoint is mapped
/// directly to the detected text run font's glyph index.
/// A no-op shaper: each codepoint in a detected text run is mapped 1:1
/// to the glyph index reported by the run's font face. There is no
/// ligature handling or contextual substitution; this is used by
/// backends that disable shaping entirely.
pub const Shaper = struct {
    /// Allocator backing both the run state and the cell buffer.
    alloc: Allocator,

    /// Accumulated state for the run currently being shaped.
    run_state: RunState,

    /// Reusable buffer holding the results of the most recent shape()
    /// call. Invalidated by the next call to shape().
    cell_buf: CellBuf,

    const CellBuf = std.ArrayListUnmanaged(font.shape.Cell);
    const CodepointList = std.ArrayListUnmanaged(Codepoint);

    /// A single codepoint recorded by the run iterator together with
    /// the cluster (cell column) it belongs to.
    const Codepoint = struct {
        codepoint: u32,
        cluster: u32,
    };

    /// Collects the codepoints of the run being shaped.
    const RunState = struct {
        codepoints: CodepointList = .{},

        fn deinit(self: *RunState, alloc: Allocator) void {
            self.codepoints.deinit(alloc);
        }

        // Error union retained for signature parity with other shaper
        // implementations; clearing retained capacity cannot fail.
        fn reset(self: *RunState) !void {
            self.codepoints.clearRetainingCapacity();
        }
    };

    /// Initialize a no-op shaper. The options are accepted for
    /// interface compatibility with other shapers but are unused.
    pub fn init(alloc: Allocator, opts: font.shape.Options) !Shaper {
        _ = opts;
        return Shaper{
            .alloc = alloc,
            .cell_buf = .{},
            .run_state = .{},
        };
    }

    pub fn deinit(self: *Shaper) void {
        self.cell_buf.deinit(self.alloc);
        self.run_state.deinit(self.alloc);
    }

    /// Return an iterator over the text runs of the given row. The
    /// iterator feeds codepoints back to this shaper via RunIteratorHook.
    pub fn runIterator(
        self: *Shaper,
        grid: *SharedGrid,
        screen: *const terminal.Screen,
        row: terminal.Pin,
        selection: ?terminal.Selection,
        cursor_x: ?usize,
    ) font.shape.RunIterator {
        return .{
            .hooks = .{ .shaper = self },
            .grid = grid,
            .screen = screen,
            .row = row,
            .selection = selection,
            .cursor_x = cursor_x,
        };
    }

    /// "Shape" the run by mapping each codepoint directly to a glyph
    /// index. The returned slice is owned by the shaper and is only
    /// valid until the next call to shape().
    pub fn shape(self: *Shaper, run: font.shape.TextRun) ![]const font.shape.Cell {
        const cps = self.run_state.codepoints.items;
        self.cell_buf.clearRetainingCapacity();

        // Special fonts (e.g. sprites) are never shaped: for those the
        // codepoint itself doubles as the glyph index, so we can emit
        // cells directly without consulting a face.
        if (run.font_index.special() != null) {
            try self.cell_buf.ensureTotalCapacity(self.alloc, cps.len);
            for (cps) |cp| self.cell_buf.appendAssumeCapacity(.{
                .x = @intCast(cp.cluster),
                .glyph_index = @intCast(cp.codepoint),
            });
            return self.cell_buf.items;
        }

        // Regular fonts: ask the resolved face for each codepoint's
        // glyph index. Note: this digs into some internal details; we
        // should maybe expose a public API for this.
        const face = try run.grid.resolver.collection.getFace(run.font_index);
        for (cps) |cp| {
            try self.cell_buf.append(self.alloc, .{
                .x = @intCast(cp.cluster),
                .glyph_index = face.glyphIndex(cp.codepoint),
            });
        }
        return self.cell_buf.items;
    }

    /// Hooks invoked by RunIterator while it walks a row.
    pub const RunIteratorHook = struct {
        shaper: *Shaper,

        /// Called before a new run begins; clears any prior codepoints.
        pub fn prepare(self: *RunIteratorHook) !void {
            try self.shaper.run_state.reset();
        }

        /// Record one codepoint and its cluster for the current run.
        pub fn addCodepoint(self: RunIteratorHook, cp: u32, cluster: u32) !void {
            try self.shaper.run_state.codepoints.append(self.shaper.alloc, .{
                .codepoint = cp,
                .cluster = cluster,
            });
        }

        /// No finalization work is needed for the no-op shaper.
        pub fn finalize(self: RunIteratorHook) !void {
            _ = self;
        }
    };
};
// Reference all declarations in this file so their tests are included
// in the test build.
test {
    const testing = @import("std").testing;
    testing.refAllDecls(@This());
}

View File

@ -92,6 +92,9 @@ current_background_color: terminal.color.RGB,
/// cells goes into a separate shader.
cells: mtl_cell.Contents,
/// If this is true, we do a full cell rebuild on the next frame.
cells_rebuild: bool = true,
/// The current GPU uniform values.
uniforms: mtl_shaders.Uniforms,
@ -129,8 +132,7 @@ pub const GPUState = struct {
// is comptime because there isn't a good reason to change this at
// runtime and there is a lot of complexity to support it. For comptime,
// this is useful for debugging.
// TODO(mitchellh): enable triple-buffering when we improve our frame times
const BufferCount = 1;
const BufferCount = 3;
/// The frame data, the current frame index, and the semaphore protecting
/// the frame data. This is used to implement double/triple/etc. buffering.
@ -786,6 +788,36 @@ pub fn updateFrame(
try self.prepKittyGraphics(state.terminal);
}
// If we have any terminal dirty flags set then we need to rebuild
// the entire screen. This can be optimized in the future.
{
const Int = @typeInfo(terminal.Terminal.Dirty).Struct.backing_integer.?;
const v: Int = @bitCast(state.terminal.flags.dirty);
if (v > 0) self.cells_rebuild = true;
}
{
const Int = @typeInfo(terminal.Screen.Dirty).Struct.backing_integer.?;
const v: Int = @bitCast(state.terminal.screen.dirty);
if (v > 0) self.cells_rebuild = true;
}
// Reset the dirty flags in the terminal and screen. We assume
// that our rebuild will be successful, so we optimize for
// success and reset while we hold the lock. This is much easier
// than coordinating row by row or as changes are persisted.
state.terminal.flags.dirty = .{};
{
var it = state.terminal.screen.pages.pageIterator(
.right_down,
.{ .screen = .{} },
null,
);
while (it.next()) |chunk| {
var dirty_set = chunk.page.data.dirtyBitSet();
dirty_set.unsetAll();
}
}
break :critical .{
.bg = self.background_color,
.screen = screen_copy,
@ -1711,6 +1743,10 @@ fn rebuildCells(
while (row_it.next()) |row| {
y = y - 1;
// Only rebuild if we are doing a full rebuild or this row is dirty.
// if (row.isDirty()) std.log.warn("dirty y={}", .{y});
if (!self.cells_rebuild and !row.isDirty()) continue;
// If we're rebuilding a row, then we always clear the cells
self.cells.clear(y);
@ -1856,6 +1892,9 @@ fn rebuildCells(
}
}
// We always mark our rebuild flag as false since we're done.
self.cells_rebuild = false;
// Log some things
// log.debug("rebuildCells complete cached_runs={}", .{
// self.font_shaper_cache.count(),

View File

@ -64,7 +64,12 @@ pub fn Buffer(comptime T: type) type {
return ptr[0..len];
}
/// Sync new contents to the buffer.
/// Sync new contents to the buffer. The data is expected to be the
/// complete contents of the buffer. If the amount of data is larger
/// than the buffer length, the buffer will be reallocated.
///
/// If the amount of data is smaller than the buffer length, the
/// remaining data in the buffer is left untouched.
pub fn sync(self: *Self, device: objc.Object, data: []const T) !void {
// If we need more bytes than our buffer has, we need to reallocate.
const req_bytes = data.len * @sizeOf(T);

View File

@ -1917,6 +1917,7 @@ fn createPage(
self: *PageList,
cap: Capacity,
) !*List.Node {
// log.debug("create page cap={}", .{cap});
return try createPageExt(&self.pool, cap, &self.page_size);
}
@ -2101,6 +2102,10 @@ pub fn eraseRowBounded(
page.data.clearCells(&rows[pn.y], 0, page.data.size.cols);
fastmem.rotateOnce(Row, rows[pn.y..][0 .. limit + 1]);
// Set all the rows as dirty
var dirty = page.data.dirtyBitSet();
dirty.setRangeValue(.{ .start = pn.y, .end = pn.y + limit }, true);
// Update pins in the shifted region.
var pin_it = self.tracked_pins.keyIterator();
while (pin_it.next()) |p_ptr| {
@ -2122,6 +2127,12 @@ pub fn eraseRowBounded(
fastmem.rotateOnce(Row, rows[pn.y..page.data.size.rows]);
// All the rows in the page are dirty below the erased row.
{
var dirty = page.data.dirtyBitSet();
dirty.setRangeValue(.{ .start = pn.y, .end = page.data.size.rows }, true);
}
// We need to keep track of how many rows we've shifted so that we can
// determine at what point we need to do a partial shift on subsequent
// pages.
@ -2164,6 +2175,10 @@ pub fn eraseRowBounded(
page.data.clearCells(&rows[0], 0, page.data.size.cols);
fastmem.rotateOnce(Row, rows[0 .. shifted_limit + 1]);
// Set all the rows as dirty
var dirty = page.data.dirtyBitSet();
dirty.setRangeValue(.{ .start = 0, .end = shifted_limit }, true);
// Update pins in the shifted region.
var pin_it = self.tracked_pins.keyIterator();
while (pin_it.next()) |p_ptr| {
@ -2182,6 +2197,10 @@ pub fn eraseRowBounded(
fastmem.rotateOnce(Row, rows[0..page.data.size.rows]);
// Set all the rows as dirty
var dirty = page.data.dirtyBitSet();
dirty.setRangeValue(.{ .start = 0, .end = page.data.size.rows }, true);
// Account for the rows shifted in this page.
shifted += page.data.size.rows;
@ -2927,6 +2946,27 @@ fn growRows(self: *PageList, n: usize) !void {
}
}
/// Clear all dirty bits on all pages. This is not efficient since it
/// traverses the entire list of pages. This is used for testing/debugging.
pub fn clearDirty(self: *PageList) void {
var page = self.pages.first;
while (page) |p| {
var set = p.data.dirtyBitSet();
set.unsetAll();
page = p.next;
}
}
/// Returns true if the point is dirty, used for testing.
pub fn isDirty(self: *const PageList, pt: point.Point) bool {
return self.getCell(pt).?.isDirty();
}
/// Mark a point as dirty, used for testing.
fn markDirty(self: *PageList, pt: point.Point) void {
self.pin(pt).?.markDirty();
}
/// Represents an exact x/y coordinate within the screen. This is called
/// a "pin" because it is a fixed point within the pagelist direct to
/// a specific page pointer and memory offset. The benefit is that this
@ -2985,6 +3025,17 @@ pub const Pin = struct {
).?.*;
}
/// Check if this pin is dirty.
pub fn isDirty(self: Pin) bool {
return self.page.data.isRowDirty(self.y);
}
/// Mark this pin location as dirty.
pub fn markDirty(self: Pin) void {
var set = self.page.data.dirtyBitSet();
set.set(self.y);
}
/// Iterators. These are the same as PageList iterator funcs but operate
/// on pins rather than points. This is MUCH more efficient than calling
/// pointFromPin and building up the iterator from points.
@ -3218,6 +3269,14 @@ const Cell = struct {
row_idx: size.CellCountInt,
col_idx: size.CellCountInt,
/// Returns true if this cell is marked as dirty.
///
/// This is not very performant this is primarily used for assertions
/// and testing.
pub fn isDirty(self: Cell) bool {
return self.page.data.isRowDirty(self.row_idx);
}
/// Get the cell style.
///
/// Not meant for non-test usage since this is inefficient.
@ -4486,6 +4545,13 @@ test "PageList eraseRowBounded less than full row" {
try s.eraseRowBounded(.{ .active = .{ .y = 5 } }, 3);
try testing.expectEqual(s.rows, s.totalRows());
// The erased rows should be dirty
try testing.expect(!s.isDirty(.{ .active = .{ .x = 0, .y = 4 } }));
try testing.expect(s.isDirty(.{ .active = .{ .x = 0, .y = 5 } }));
try testing.expect(s.isDirty(.{ .active = .{ .x = 0, .y = 6 } }));
try testing.expect(s.isDirty(.{ .active = .{ .x = 0, .y = 7 } }));
try testing.expect(!s.isDirty(.{ .active = .{ .x = 0, .y = 8 } }));
try testing.expectEqual(s.pages.first.?, p_top.page);
try testing.expectEqual(@as(usize, 4), p_top.y);
try testing.expectEqual(@as(usize, 0), p_top.x);
@ -4514,6 +4580,12 @@ test "PageList eraseRowBounded with pin at top" {
try s.eraseRowBounded(.{ .active = .{ .y = 0 } }, 3);
try testing.expectEqual(s.rows, s.totalRows());
// The erased rows should be dirty
try testing.expect(s.isDirty(.{ .active = .{ .x = 0, .y = 0 } }));
try testing.expect(s.isDirty(.{ .active = .{ .x = 0, .y = 1 } }));
try testing.expect(s.isDirty(.{ .active = .{ .x = 0, .y = 2 } }));
try testing.expect(!s.isDirty(.{ .active = .{ .x = 0, .y = 3 } }));
try testing.expectEqual(s.pages.first.?, p_top.page);
try testing.expectEqual(@as(usize, 0), p_top.y);
try testing.expectEqual(@as(usize, 0), p_top.x);
@ -4536,6 +4608,13 @@ test "PageList eraseRowBounded full rows single page" {
try s.eraseRowBounded(.{ .active = .{ .y = 5 } }, 10);
try testing.expectEqual(s.rows, s.totalRows());
// The erased rows should be dirty
try testing.expect(!s.isDirty(.{ .active = .{ .x = 0, .y = 4 } }));
for (5..10) |y| try testing.expect(s.isDirty(.{ .active = .{
.x = 0,
.y = @intCast(y),
} }));
// Our pin should move to the first page
try testing.expectEqual(s.pages.first.?, p_in.page);
try testing.expectEqual(@as(usize, 6), p_in.y);
@ -4593,6 +4672,13 @@ test "PageList eraseRowBounded full rows two pages" {
// Erase only a few rows in our active
try s.eraseRowBounded(.{ .active = .{ .y = 4 } }, 4);
// The erased rows should be dirty
try testing.expect(!s.isDirty(.{ .active = .{ .x = 0, .y = 3 } }));
for (4..8) |y| try testing.expect(s.isDirty(.{ .active = .{
.x = 0,
.y = @intCast(y),
} }));
// In page in first page is shifted
try testing.expectEqual(s.pages.last.?.prev.?, p_first.page);
try testing.expectEqual(@as(usize, p_first.page.data.size.rows - 2), p_first.y);
@ -4753,6 +4839,34 @@ test "PageList clone remap tracked pin not in cloned area" {
try testing.expect(pin_remap.get(p) == null);
}
test "PageList clone full dirty" {
const testing = std.testing;
const alloc = testing.allocator;
var s = try init(alloc, 80, 24, null);
defer s.deinit();
try testing.expectEqual(@as(usize, s.rows), s.totalRows());
// Mark a row as dirty
s.markDirty(.{ .active = .{ .x = 0, .y = 0 } });
s.markDirty(.{ .active = .{ .x = 0, .y = 12 } });
s.markDirty(.{ .active = .{ .x = 0, .y = 23 } });
var s2 = try s.clone(.{
.top = .{ .screen = .{} },
.memory = .{ .alloc = alloc },
});
defer s2.deinit();
try testing.expectEqual(@as(usize, s.rows), s2.totalRows());
// Should still be dirty
try testing.expect(s2.isDirty(.{ .active = .{ .x = 0, .y = 0 } }));
try testing.expect(!s2.isDirty(.{ .active = .{ .x = 0, .y = 1 } }));
try testing.expect(s2.isDirty(.{ .active = .{ .x = 0, .y = 12 } }));
try testing.expect(!s2.isDirty(.{ .active = .{ .x = 0, .y = 14 } }));
try testing.expect(s2.isDirty(.{ .active = .{ .x = 0, .y = 23 } }));
}
test "PageList resize (no reflow) more rows" {
const testing = std.testing;
const alloc = testing.allocator;

View File

@ -62,6 +62,16 @@ kitty_keyboard: kitty.KeyFlagStack = .{},
/// Kitty graphics protocol state.
kitty_images: kitty.graphics.ImageStorage = .{},
/// Dirty flags for the renderer.
dirty: Dirty = .{},
/// See Terminal.Dirty. This behaves the same way.
pub const Dirty = packed struct {
/// Set when the selection is set or unset, regardless of if the
/// selection is changed or not.
selection: bool = false,
};
/// The cursor position.
pub const Cursor = struct {
// The x/y position within the viewport.
@ -362,6 +372,7 @@ pub fn clonePool(
.no_scrollback = self.no_scrollback,
.cursor = cursor,
.selection = sel,
.dirty = self.dirty,
};
result.assertIntegrity();
return result;
@ -719,6 +730,12 @@ fn cursorChangePin(self: *Screen, new: Pin) void {
};
}
/// Mark the cursor position as dirty.
/// TODO: test
pub fn cursorMarkDirty(self: *Screen) void {
self.cursor.page_pin.markDirty();
}
/// Options for scrolling the viewport of the terminal grid. The reason
/// we have this in addition to PageList.Scroll is because we have additional
/// scroll behaviors that are not part of the PageList.Scroll enum.
@ -803,6 +820,10 @@ pub fn clearRows(
var it = self.pages.pageIterator(.right_down, tl, bl);
while (it.next()) |chunk| {
// Mark everything in this chunk as dirty
var dirty = chunk.page.data.dirtyBitSet();
dirty.setRangeValue(.{ .start = chunk.start, .end = chunk.end }, true);
for (chunk.rows()) |*row| {
const cells_offset = row.cells;
const cells_multi: [*]Cell = row.cells.ptr(chunk.page.data.memory);
@ -1318,12 +1339,14 @@ pub fn select(self: *Screen, sel_: ?Selection) !void {
// Untrack prior selection
if (self.selection) |*old| old.deinit(self);
self.selection = tracked_sel;
self.dirty.selection = true;
}
/// Same as select(null) but can't fail.
pub fn clearSelection(self: *Screen) void {
if (self.selection) |*sel| sel.deinit(self);
self.selection = null;
self.dirty.selection = true;
}
pub const SelectionString = struct {
@ -2502,6 +2525,7 @@ test "Screen clearRows active one line" {
try s.testWriteString("hello, world");
s.clearRows(.{ .active = .{} }, null, false);
try testing.expect(s.pages.isDirty(.{ .active = .{ .x = 0, .y = 0 } }));
const str = try s.dumpStringAlloc(alloc, .{ .screen = .{} });
defer alloc.free(str);
try testing.expectEqualStrings("", str);
@ -2516,6 +2540,8 @@ test "Screen clearRows active multi line" {
try s.testWriteString("hello\nworld");
s.clearRows(.{ .active = .{} }, null, false);
try testing.expect(s.pages.isDirty(.{ .active = .{ .x = 0, .y = 0 } }));
try testing.expect(s.pages.isDirty(.{ .active = .{ .x = 0, .y = 1 } }));
const str = try s.dumpStringAlloc(alloc, .{ .screen = .{} });
defer alloc.free(str);
try testing.expectEqualStrings("", str);

File diff suppressed because it is too large Load Diff

View File

@ -91,6 +91,44 @@ pub const Page = struct {
/// The available set of styles in use on this page.
styles: style.Set,
/// The offset to the first mask of dirty bits in the page.
///
/// The dirty bits is a contiguous array of usize where each bit represents
/// a row in the page, in order. If the bit is set, then the row is dirty
/// and requires a redraw. Dirty status is only ever meant to convey that
/// a cell has changed visually. A cell which changes in a way that doesn't
/// affect the visual representation may not be marked as dirty.
///
/// Dirty tracking may have false positives but should never have false
/// negatives. A false negative would result in a visual artifact on the
/// screen.
///
/// Dirty bits are only ever unset by consumers of a page. The page
/// structure itself does not unset dirty bits since the page does not
/// know when a cell has been redrawn.
///
/// As implementation background: it may seem that dirty bits should be
/// stored elsewhere and not on the page itself, because the only data
/// that could possibly change is in the active area of a terminal
/// historically and that area is small compared to the typical scrollback.
/// My original thinking was to put the dirty bits on Screen instead and
/// have them only track the active area. However, I decided to put them
/// into the page directly for a few reasons:
///
/// 1. It's simpler. The page is a self-contained unit and it's nice
/// to have all the data for a page in one place.
///
/// 2. It's cheap. Even a very large page might have 1000 rows and
/// that's only ~128 bytes of 64-bit integers to track all the dirty
/// bits. Compared to the hundreds of kilobytes a typical page
/// consumes, this is nothing.
///
/// 3. It's more flexible. If we ever want to implement new terminal
/// features that allow non-active area to be dirty, we can do that
/// with minimal dirty-tracking work.
///
dirty: Offset(usize),
/// The current dimensions of the page. The capacity may be larger
/// than this. This allows us to allocate a larger page than necessary
/// and also to resize a page smaller without reallocating.
@ -155,6 +193,7 @@ pub const Page = struct {
.memory = @alignCast(buf.start()[0..l.total_size]),
.rows = rows,
.cells = cells,
.dirty = buf.member(usize, l.dirty_start),
.styles = style.Set.init(
buf.add(l.styles_start),
l.styles_layout,
@ -461,11 +500,12 @@ pub const Page = struct {
const other_rows = other.rows.ptr(other.memory)[y_start..y_end];
const rows = self.rows.ptr(self.memory)[0 .. y_end - y_start];
for (rows, other_rows) |*dst_row, *src_row| try self.cloneRowFrom(
other,
dst_row,
src_row,
);
const other_dirty_set = other.dirtyBitSet();
var dirty_set = self.dirtyBitSet();
for (rows, 0.., other_rows, y_start..) |*dst_row, dst_y, *src_row, src_y| {
try self.cloneRowFrom(other, dst_row, src_row);
if (other_dirty_set.isSet(src_y)) dirty_set.set(dst_y);
}
// We should remain consistent
self.assertIntegrity();
@ -866,12 +906,40 @@ pub const Page = struct {
return self.grapheme_map.map(self.memory).count();
}
/// Returns the bitset for the dirty bits on this page.
///
/// The returned value is a DynamicBitSetUnmanaged but it is NOT
/// actually dynamic; do NOT call resize on this. It is safe to
/// read and write but do not resize it.
pub fn dirtyBitSet(self: *const Page) std.DynamicBitSetUnmanaged {
return .{
.bit_length = self.capacity.rows,
.masks = self.dirty.ptr(self.memory),
};
}
/// Returns true if the given row is dirty. This is NOT very
/// efficient if you're checking many rows and you should use
/// dirtyBitSet directly instead.
pub fn isRowDirty(self: *const Page, y: usize) bool {
return self.dirtyBitSet().isSet(y);
}
/// Returns true if this page is dirty at all. If you plan on
/// checking any additional rows, you should use dirtyBitSet and
/// check this on your own so you have the set available.
pub fn isDirty(self: *const Page) bool {
return self.dirtyBitSet().findFirstSet() != null;
}
pub const Layout = struct {
total_size: usize,
rows_start: usize,
rows_size: usize,
cells_start: usize,
cells_size: usize,
dirty_start: usize,
dirty_size: usize,
styles_start: usize,
styles_layout: style.Set.Layout,
grapheme_alloc_start: usize,
@ -892,8 +960,19 @@ pub const Page = struct {
const cells_start = alignForward(usize, rows_end, @alignOf(Cell));
const cells_end = cells_start + (cells_count * @sizeOf(Cell));
// The division below cannot fail because our row count cannot
// exceed the maximum value of usize.
const dirty_bit_length: usize = rows_count;
const dirty_usize_length: usize = std.math.divCeil(
usize,
dirty_bit_length,
@bitSizeOf(usize),
) catch unreachable;
const dirty_start = alignForward(usize, cells_end, @alignOf(usize));
const dirty_end: usize = dirty_start + (dirty_usize_length * @sizeOf(usize));
const styles_layout = style.Set.layout(cap.styles);
const styles_start = alignForward(usize, cells_end, style.Set.base_align);
const styles_start = alignForward(usize, dirty_end, style.Set.base_align);
const styles_end = styles_start + styles_layout.total_size;
const grapheme_alloc_layout = GraphemeAlloc.layout(cap.grapheme_bytes);
@ -913,6 +992,8 @@ pub const Page = struct {
.rows_size = rows_end - rows_start,
.cells_start = cells_start,
.cells_size = cells_end - cells_start,
.dirty_start = dirty_start,
.dirty_size = dirty_end - dirty_start,
.styles_start = styles_start,
.styles_layout = styles_layout,
.grapheme_alloc_start = grapheme_alloc_start,
@ -981,9 +1062,18 @@ pub const Capacity = struct {
const grapheme_alloc_start = alignBackward(usize, grapheme_map_start - layout.grapheme_alloc_layout.total_size, GraphemeAlloc.base_align);
const styles_start = alignBackward(usize, grapheme_alloc_start - layout.styles_layout.total_size, style.Set.base_align);
const available_size = styles_start;
const size_per_row = @sizeOf(Row) + (@sizeOf(Cell) * @as(usize, @intCast(cols)));
const new_rows = @divFloor(available_size, size_per_row);
// The size per row is:
// - The row metadata itself
// - The cells per row (n=cols)
// - 1 bit for dirty tracking
const bits_per_row: usize = size: {
var bits: usize = @bitSizeOf(Row); // Row metadata
bits += @bitSizeOf(Cell) * @as(usize, @intCast(cols)); // Cells (n=cols)
bits += 1; // The dirty bit
break :size bits;
};
const available_bits: usize = styles_start * 8;
const new_rows: usize = @divFloor(available_bits, bits_per_row);
// If our rows go to zero then we can't fit any row metadata
// for the desired number of columns.
@ -1315,6 +1405,10 @@ test "Page init" {
.styles = 32,
});
defer page.deinit();
// Dirty set should be empty
const dirty = page.dirtyBitSet();
try std.testing.expectEqual(@as(usize, 0), dirty.count());
}
test "Page read and write cells" {

View File

@ -397,6 +397,7 @@ pub fn changeConfig(self: *Exec, config: *DerivedConfig) !void {
for (0..config.palette.len) |i| {
if (!self.terminal.color_palette.mask.isSet(i)) {
self.terminal.color_palette.colors[i] = config.palette[i];
self.terminal.flags.dirty.palette = true;
}
}
@ -2166,7 +2167,10 @@ const StreamHandler = struct {
.autorepeat => {},
// Schedule a render since we changed colors
.reverse_colors => try self.queueRender(),
.reverse_colors => {
self.terminal.flags.dirty.reverse_colors = true;
try self.queueRender();
},
// Origin resets cursor pos. This is called whether or not
// we're enabling or disabling origin mode and whether or
@ -2792,6 +2796,7 @@ const StreamHandler = struct {
switch (kind) {
.palette => |i| {
self.terminal.flags.dirty.palette = true;
self.terminal.color_palette.colors[i] = color;
self.terminal.color_palette.mask.set(i);
},
@ -2829,6 +2834,7 @@ const StreamHandler = struct {
// reset those indices to the default palette
var it = mask.iterator(.{});
while (it.next()) |i| {
self.terminal.flags.dirty.palette = true;
self.terminal.color_palette.colors[i] = self.terminal.default_palette[i];
mask.unset(i);
}
@ -2838,6 +2844,7 @@ const StreamHandler = struct {
// Skip invalid parameters
const i = std.fmt.parseUnsigned(u8, param, 10) catch continue;
if (mask.isSet(i)) {
self.terminal.flags.dirty.palette = true;
self.terminal.color_palette.colors[i] = self.terminal.default_palette[i];
mask.unset(i);
}