terminal/new: clone can take a shared pool

Mitchell Hashimoto
2024-02-29 09:30:18 -08:00
parent 0c888af470
commit 06376fcb0b
7 changed files with 125 additions and 39 deletions
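The gist of the change: Screen.clone keeps its old behavior, but a caller that clones repeatedly can now preheat a single PageList.MemoryPool and hand it to the new Screen.clonePool, so each clone reuses pooled list nodes and page buffers instead of going back to the page allocator every time. A minimal sketch of that pattern, modeled on the benchNewPooled benchmark added below; the function name, the terminal import, and the iteration count are placeholders, and std is assumed to be imported as usual.

// Sketch of the pooled-clone pattern this commit enables. `terminal` refers
// to ghostty's terminal package; adjust the import for your build.
fn cloneScreenRepeatedly(alloc: std.mem.Allocator, t: *terminal.new.Terminal) !void {
    // One shared pool: list nodes come from `alloc`, page buffers from the
    // page allocator, preheated with a few pages up front.
    var pool = try terminal.new.PageList.MemoryPool.init(
        alloc,
        std.heap.page_allocator,
        4,
    );
    defer pool.deinit();

    for (0..1_000) |_| {
        // Each clone draws its nodes and pages from the shared pool.
        var s = try t.screen.clonePool(alloc, &pool, .{ .active = .{} }, null);
        // For a pooled clone, deinit resets the shared pool (retaining its
        // capacity) rather than freeing pages back to the OS.
        defer s.deinit();
    }
}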

View File

@@ -1360,7 +1360,7 @@ fn benchSteps(
         .target = target,
         // We always want our benchmarks to be in release mode.
-        .optimize = .ReleaseFast,
+        .optimize = .Debug,
     });
     c_exe.linkLibC();
     if (install) b.installArtifact(c_exe);

View File

@@ -7,6 +7,8 @@ hyperfine \
   --warmup 10 \
   -n new \
   "./zig-out/bin/bench-screen-copy --mode=new${ARGS}" \
+  -n new-pooled \
+  "./zig-out/bin/bench-screen-copy --mode=new-pooled${ARGS}" \
   -n old \
   "./zig-out/bin/bench-screen-copy --mode=old${ARGS}"

View File

@@ -11,7 +11,7 @@ const Args = struct {
     mode: Mode = .old,

     /// The number of times to loop.
-    count: usize = 5_000,
+    count: usize = 2500,

     /// Rows and cols in the terminal.
     rows: usize = 100,

@@ -32,6 +32,7 @@ const Mode = enum {
     /// Use a memory pool to allocate pages from a backing buffer.
     new,
+    @"new-pooled",
 };

 pub const std_options: std.Options = .{

@@ -68,6 +69,16 @@ pub fn main() !void {
             defer t.deinit(alloc);
             try benchNew(alloc, &t, args);
         },
+
+        .@"new-pooled" => {
+            var t = try terminal.new.Terminal.init(
+                alloc,
+                @intCast(args.cols),
+                @intCast(args.rows),
+            );
+            defer t.deinit(alloc);
+            try benchNewPooled(alloc, &t, args);
+        },
     }
 }

@@ -104,3 +115,21 @@ noinline fn benchNew(alloc: Allocator, t: *terminal.new.Terminal, args: Args) !v
         errdefer s.deinit();
     }
 }
+
+noinline fn benchNewPooled(alloc: Allocator, t: *terminal.new.Terminal, args: Args) !void {
+    // We fill the terminal with letters.
+    for (0..args.rows) |row| {
+        for (0..args.cols) |col| {
+            t.setCursorPos(row + 1, col + 1);
+            try t.print('A');
+        }
+    }
+
+    var pool = try terminal.new.PageList.MemoryPool.init(alloc, std.heap.page_allocator, 4);
+    defer pool.deinit();
+
+    for (0..args.count) |_| {
+        var s = try t.screen.clonePool(alloc, &pool, .{ .active = .{} }, null);
+        errdefer s.deinit();
+    }
+}

View File

@@ -1136,6 +1136,7 @@ pub fn clone(self: *Screen, alloc: Allocator, top: RowIndex, bottom: RowIndex) !
     assert(dst[1].len == 0);

     // Perform the copy
+    std.log.warn("copy bytes={}", .{src[0].len + src[1].len});
     fastmem.copy(StorageCell, dst[0], src[0]);
     fastmem.copy(StorageCell, dst[0][src[0].len..], src[1]);

View File

@@ -30,7 +30,7 @@ const page_preheat = 4;
 const List = std.DoublyLinkedList(Page);

 /// The memory pool we get page nodes from.
-const Pool = std.heap.MemoryPool(List.Node);
+const NodePool = std.heap.MemoryPool(List.Node);

 const std_capacity = pagepkg.std_capacity;
@@ -42,13 +42,40 @@ const PagePool = std.heap.MemoryPoolAligned(
     std.mem.page_size,
 );

-/// The allocator to use for pages.
-alloc: Allocator,
-
-/// The memory pool we get page nodes for the linked list from.
-pool: Pool,
-page_pool: PagePool,
+/// The pool of memory used for a pagelist. This can be shared between
+/// multiple pagelists but it is not threadsafe.
+pub const MemoryPool = struct {
+    nodes: NodePool,
+    pages: PagePool,
+
+    pub const ResetMode = std.heap.ArenaAllocator.ResetMode;
+
+    pub fn init(
+        gen_alloc: Allocator,
+        page_alloc: Allocator,
+        preheat: usize,
+    ) !MemoryPool {
+        var pool = try NodePool.initPreheated(gen_alloc, preheat);
+        errdefer pool.deinit();
+
+        var page_pool = try PagePool.initPreheated(page_alloc, preheat);
+        errdefer page_pool.deinit();
+
+        return .{ .nodes = pool, .pages = page_pool };
+    }
+
+    pub fn deinit(self: *MemoryPool) void {
+        self.pages.deinit();
+        self.nodes.deinit();
+    }
+
+    pub fn reset(self: *MemoryPool, mode: ResetMode) void {
+        _ = self.pages.reset(mode);
+        _ = self.nodes.reset(mode);
+    }
+};
+
+/// The memory pool we get page nodes, pages from.
+pool: MemoryPool,
+pool_owned: bool,

 /// The list of pages in the screen.
 pages: List,
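The pool type above is deliberately small: two std.heap memory pools behind one init/deinit/reset surface, with the node pool fed by a general-purpose allocator and the page pool fed (in practice) by std.heap.page_allocator, since PagePool is page-size aligned. A short usage sketch of just the pool, assuming `gpa` is some general-purpose allocator and `page_preheat` is the preheat count used elsewhere in this file:

// Sketch only: driving MemoryPool directly, outside of PageList.init/clone.
var pool = try MemoryPool.init(gpa, std.heap.page_allocator, page_preheat);
defer pool.deinit();

// Hand &pool to one or more clonePool() calls (single-threaded only), then
// recycle its memory between uses instead of freeing it:
pool.reset(.{ .retain_capacity = {} });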
@@ -120,14 +147,10 @@ pub fn init(
     // The screen starts with a single page that is the entire viewport,
     // and we'll split it thereafter if it gets too large and add more as
     // necessary.
-    var pool = try Pool.initPreheated(alloc, page_preheat);
-    errdefer pool.deinit();
-
-    var page_pool = try PagePool.initPreheated(std.heap.page_allocator, page_preheat);
-    errdefer page_pool.deinit();
-
-    var page = try pool.create();
-    const page_buf = try page_pool.create();
+    var pool = try MemoryPool.init(alloc, std.heap.page_allocator, page_preheat);
+
+    var page = try pool.nodes.create();
+    const page_buf = try pool.pages.create();
     // no errdefer because the pool deinit will clean these up

     // In runtime safety modes we have to memset because the Zig allocator
@@ -160,11 +183,10 @@
     );

     return .{
-        .alloc = alloc,
         .cols = cols,
         .rows = rows,
         .pool = pool,
-        .page_pool = page_pool,
+        .pool_owned = true,
         .pages = page_list,
         .page_size = page_size,
         .max_size = max_size_actual,
@@ -172,11 +194,16 @@
     };
 }

+/// Deinit the pagelist. If you own the memory pool (used clonePool) then
+/// this will reset the pool and retain capacity.
 pub fn deinit(self: *PageList) void {
     // Deallocate all the pages. We don't need to deallocate the list or
     // nodes because they all reside in the pool.
-    self.page_pool.deinit();
-    self.pool.deinit();
+    if (self.pool_owned) {
+        self.pool.deinit();
+    } else {
+        self.pool.reset(.{ .retain_capacity = {} });
+    }
 }

 /// Clone this pagelist from the top to bottom (inclusive).
@@ -192,32 +219,44 @@ pub fn clone(
     top: point.Point,
     bot: ?point.Point,
 ) !PageList {
-    var it = self.pageIterator(top, bot);
-
     // First, count our pages so our preheat is exactly what we need.
+    var it = self.pageIterator(top, bot);
     const page_count: usize = page_count: {
-        // Copy the iterator so we don't mutate our original.
-        var count_it = it;
         var count: usize = 0;
-        while (count_it.next()) |_| count += 1;
+        while (it.next()) |_| count += 1;
         break :page_count count;
     };

     // Setup our pools
-    var pool = try Pool.initPreheated(alloc, page_count);
+    var pool = try MemoryPool.init(alloc, std.heap.page_allocator, page_count);
     errdefer pool.deinit();
-    var page_pool = try PagePool.initPreheated(std.heap.page_allocator, page_count);
-    errdefer page_pool.deinit();
+
+    var result = try self.clonePool(&pool, top, bot);
+    result.pool_owned = true;
+    return result;
+}
+
+/// Like clone, but specify your own memory pool. This is advanced but
+/// lets you avoid expensive syscalls to allocate memory.
+pub fn clonePool(
+    self: *const PageList,
+    pool: *MemoryPool,
+    top: point.Point,
+    bot: ?point.Point,
+) !PageList {
+    var it = self.pageIterator(top, bot);

     // Copy our pages
     var page_list: List = .{};
     var total_rows: usize = 0;
+    var page_count: usize = 0;
     while (it.next()) |chunk| {
         // Clone the page
-        const page = try pool.create();
-        const page_buf = try page_pool.create();
+        const page = try pool.nodes.create();
+        const page_buf = try pool.pages.create();
         page.* = .{ .data = chunk.page.data.cloneBuf(page_buf) };
         page_list.append(page);
+        page_count += 1;

         // If this is a full page then we're done.
         if (chunk.fullPage()) {
@@ -251,9 +290,8 @@ pub fn clone(
     }

     var result: PageList = .{
-        .alloc = alloc,
-        .pool = pool,
-        .page_pool = page_pool,
+        .pool = pool.*,
+        .pool_owned = false,
         .pages = page_list,
         .page_size = PagePool.item_size * page_count,
         .max_size = self.max_size,
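The ownership rule that falls out of the two constructors: clone() builds its own pool, delegates to clonePool(), and then flips pool_owned back to true, while a PageList that comes straight from clonePool() keeps pool_owned = false, so its deinit only resets the caller's pool and never frees it. A short sketch of that contract at the PageList level, assuming `original` is an existing PageList and `alloc` an allocator:

// The caller owns the pool and is responsible for freeing it.
var pool = try MemoryPool.init(alloc, std.heap.page_allocator, 4);
defer pool.deinit();

var copy = try original.clonePool(&pool, .{ .active = .{} }, null);
// pool_owned == false: this resets `pool` (retaining capacity), it does not free it.
defer copy.deinit();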
@@ -434,11 +472,11 @@ pub fn grow(self: *PageList) !?*List.Node {
 /// Create a new page node. This does not add it to the list and this
 /// does not do any memory size accounting with max_size/page_size.
 fn createPage(self: *PageList) !*List.Node {
-    var page = try self.pool.create();
-    errdefer self.pool.destroy(page);
-
-    const page_buf = try self.page_pool.create();
-    errdefer self.page_pool.destroy(page_buf);
+    var page = try self.pool.nodes.create();
+    errdefer self.pool.nodes.destroy(page);
+
+    const page_buf = try self.pool.pages.create();
+    errdefer self.pool.pages.destroy(page_buf);
     if (comptime std.debug.runtime_safety) @memset(page_buf, 0);

     page.* = .{
@@ -548,8 +586,8 @@ fn erasePage(self: *PageList, page: *List.Node) void {
     // Reset the page memory and return it back to the pool.
     @memset(page.data.memory, 0);
-    self.page_pool.destroy(@ptrCast(page.data.memory.ptr));
-    self.pool.destroy(page);
+    self.pool.pages.destroy(@ptrCast(page.data.memory.ptr));
+    self.pool.nodes.destroy(page);
 }

 /// Get the top-left of the screen for the given tag.

View File

@@ -194,7 +194,22 @@ pub fn clone(
     top: point.Point,
     bot: ?point.Point,
 ) !Screen {
-    var pages = try self.pages.clone(alloc, top, bot);
+    return try self.clonePool(alloc, null, top, bot);
+}
+
+/// Same as clone but you can specify a custom memory pool to use for
+/// the screen.
+pub fn clonePool(
+    self: *const Screen,
+    alloc: Allocator,
+    pool: ?*PageList.MemoryPool,
+    top: point.Point,
+    bot: ?point.Point,
+) !Screen {
+    var pages = if (pool) |p|
+        try self.pages.clonePool(p, top, bot)
+    else
+        try self.pages.clone(alloc, top, bot);
     errdefer pages.deinit();

     return .{
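Because the pool argument is optional, existing callers of Screen.clone are unaffected and passing null simply takes the allocating path. A tiny sketch of both calls, assuming `screen`, `alloc`, and a MemoryPool named `pool` are already in scope:

// No pool: same behavior as the old clone(), pages allocated fresh.
var plain = try screen.clonePool(alloc, null, .{ .active = .{} }, null);
defer plain.deinit();

// Shared pool: list nodes and page buffers come from `pool`.
var pooled = try screen.clonePool(alloc, &pool, .{ .active = .{} }, null);
defer pooled.deinit();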

View File

@@ -200,6 +200,7 @@ pub const Page = struct {
         // This is a memcpy. We may want to investigate if there are
         // faster ways to do this (i.e. copy-on-write tricks) but I suspect
         // they'll be slower. I haven't experimented though.
+        std.log.warn("copy bytes={}", .{self.memory.len});
         fastmem.copy(u8, result.memory, self.memory);

         return result;