Mirror of https://github.com/ghostty-org/ghostty.git (synced 2025-07-18 17:56:09 +03:00)

Commit: benchmark: a new package and framework for benchmarking
This commit is contained in:

New file: src/benchmark/Benchmark.zig (165 lines)
@@ -0,0 +1,165 @@
|
|||||||
|
//! A single benchmark case.
|
||||||
|
const Benchmark = @This();
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const builtin = @import("builtin");
|
||||||
|
const assert = std.debug.assert;
|
||||||
|
const macos = @import("macos");
|
||||||
|
const build_config = @import("../build_config.zig");
|
||||||
|
|
||||||
|
ptr: *anyopaque,
|
||||||
|
vtable: VTable,
|
||||||
|
|
||||||
|
/// Construct a Benchmark from an implementation pointer and its vtable.
///
/// Typically invoked by benchmark implementations themselves rather
/// than by code that merely runs benchmarks.
pub fn init(
    pointer: anytype,
    vtable: VTable,
) Benchmark {
    const info = @typeInfo(@TypeOf(pointer));
    assert(info == .pointer); // must be a pointer
    assert(info.pointer.size == .one); // must be a single-item pointer
    assert(@typeInfo(info.pointer.child) == .@"struct"); // must point to a struct
    return .{ .vtable = vtable, .ptr = pointer };
}
|
||||||
|
|
||||||
|
/// Run the benchmark.
///
/// Calls the optional setup hook, executes the step function according
/// to `mode`, and always runs the optional teardown hook on exit.
/// Returns error.BenchmarkFailed if setup, a step, or reading the
/// monotonic clock fails.
pub fn run(
    self: Benchmark,
    mode: RunMode,
) Error!RunResult {
    // Whether to emit os_signpost intervals so results are easy to find
    // in Instruments. Comptime-known so the feature compiles away on
    // other platforms. NOTE: the condition is now a single const used
    // everywhere; previously the variable's type was gated on
    // isDarwin() while its initializer and cleanup checked == .macos,
    // which would fail to compile (and skip cleanup) on Darwin targets
    // that aren't macOS.
    const use_signpost = comptime builtin.os.tag == .macos;

    // Run our setup function if it exists. We do this first because
    // we don't want this part of our benchmark and we want to fail fast.
    if (self.vtable.setupFn) |func| try func(self.ptr);
    defer if (self.vtable.teardownFn) |func| func(self.ptr);

    // Our result accumulator. This will be returned at the end of the run.
    var result: RunResult = .{};

    const signpost_name = "Ghostty Benchmark";
    const signpost: if (use_signpost) struct {
        log: *macos.os.Log,
        id: macos.os.signpost.Id,
    } else void = if (use_signpost) macos: {
        const log = macos.os.Log.create(
            build_config.bundle_id,
            macos.os.signpost.Category.points_of_interest,
        );
        const id = macos.os.signpost.Id.generate(log);
        macos.os.signpost.intervalBegin(log, id, signpost_name);
        break :macos .{ .log = log, .id = id };
    } else {};
    defer if (use_signpost) {
        macos.os.signpost.intervalEnd(
            signpost.log,
            signpost.id,
            signpost_name,
        );
        signpost.log.release();
    };

    const start = std.time.Instant.now() catch return error.BenchmarkFailed;
    while (true) {
        // Run our step function. If it fails, we return the error.
        try self.vtable.stepFn(self.ptr);
        result.iterations += 1;

        // Get our current monotonic time and check our exit conditions.
        const now = std.time.Instant.now() catch return error.BenchmarkFailed;
        const exit = switch (mode) {
            .once => true,
            .duration => |ns| now.since(start) >= ns,
        };

        if (exit) {
            result.duration = now.since(start);
            return result;
        }
    }

    // We exit within the loop body.
    unreachable;
}
|
||||||
|
|
||||||
|
/// How a benchmark run is executed.
pub const RunMode = union(enum) {
    /// Execute the step function a single time.
    once,

    /// Keep executing steps until at least this many nanoseconds
    /// have elapsed. A step in progress is never interrupted, so if
    /// a single step is coarse relative to the duration the results
    /// may be inaccurate.
    duration: u64,
};
|
||||||
|
|
||||||
|
/// The outcome of a single benchmark run.
pub const RunResult = struct {
    /// How many times the step function executed. Always 1 for the
    /// "once" run mode.
    iterations: u32 = 0,

    /// Total wall-clock time of the run in nanoseconds. For the
    /// "duration" run mode this lands relatively close to the
    /// requested duration.
    duration: u64 = 0,
};
|
||||||
|
|
||||||
|
/// Errors that any stage of a benchmark may produce. Currently just a
/// single generic failure, which ends the benchmark.
pub const Error = error{BenchmarkFailed};
|
||||||
|
|
||||||
|
/// The function table an implementation provides so the generic
/// Benchmark can invoke the real implementation.
pub const VTable = struct {
    /// Executes one unit of the work under test. May be invoked many
    /// times in a single run when measuring throughput.
    stepFn: *const fn (ptr: *anyopaque) Error!void,

    /// Optional hooks invoked once before the first step and once
    /// after the last step. Their cost is excluded from the benchmark
    /// results (unless you're benchmarking the full binary).
    setupFn: ?*const fn (ptr: *anyopaque) Error!void = null,
    teardownFn: ?*const fn (ptr: *anyopaque) void = null,
};
|
||||||
|
|
||||||
|
test Benchmark {
    const testing = std.testing;

    // Minimal mock implementation that just counts hook invocations.
    const Mock = struct {
        const Self = @This();

        setup_i: usize = 0,
        step_i: usize = 0,

        pub fn benchmark(self: *Self) Benchmark {
            return .init(self, .{
                .setupFn = setup,
                .stepFn = step,
            });
        }

        fn setup(ptr: *anyopaque) Error!void {
            const self: *Self = @ptrCast(@alignCast(ptr));
            self.setup_i += 1;
        }

        fn step(ptr: *anyopaque) Error!void {
            const self: *Self = @ptrCast(@alignCast(ptr));
            self.step_i += 1;
        }
    };

    var mock: Mock = .{};
    const bench = mock.benchmark();
    const result = try bench.run(.once);

    // One setup, one step, one iteration, nonzero elapsed time.
    try testing.expectEqual(1, mock.setup_i);
    try testing.expectEqual(1, mock.step_i);
    try testing.expectEqual(1, result.iterations);
    try testing.expect(result.duration > 0);
}
|
New file: src/benchmark/TerminalStream.zig (97 lines)
@@ -0,0 +1,97 @@
|
|||||||
|
//! This benchmark tests the performance of the terminal stream
|
||||||
|
//! handler from input to terminal state update. This is useful to
|
||||||
|
//! test general throughput of VT parsing and handling.
|
||||||
|
//!
|
||||||
|
//! Note that the handler used for this benchmark isn't the full
|
||||||
|
//! terminal handler, since that requires a significant amount of
|
||||||
|
//! state. This is a simplified version that only handles specific
|
||||||
|
//! terminal operations like printing characters. We should expand
|
||||||
|
//! this to include more operations to improve the accuracy of the
|
||||||
|
//! benchmark.
|
||||||
|
//!
|
||||||
|
//! It is a fairly broad benchmark that can be used to determine
|
||||||
|
//! if we need to optimize something more specific (e.g. the parser).
|
||||||
|
const TerminalStream = @This();
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
const terminalpkg = @import("../terminal/main.zig");
|
||||||
|
const Benchmark = @import("Benchmark.zig");
|
||||||
|
const Terminal = terminalpkg.Terminal;
|
||||||
|
const Stream = terminalpkg.Stream(*Handler);
|
||||||
|
|
||||||
|
terminal: Terminal,
|
||||||
|
handler: Handler,
|
||||||
|
stream: Stream,
|
||||||
|
|
||||||
|
pub const Options = struct {
    /// Terminal dimensions. These influence benchmarking whenever
    /// soft line wrapping comes into play, as well as the memory
    /// impact of page sizes.
    @"terminal-rows": u16 = 80,
    @"terminal-cols": u16 = 120,
};
|
||||||
|
|
||||||
|
/// Allocate and initialize a terminal stream benchmark with the given
/// arguments. Caller owns the result and must release it via destroy.
pub fn create(
    alloc: Allocator,
    args: Options,
) !*TerminalStream {
    const self = try alloc.create(TerminalStream);
    errdefer alloc.destroy(self);

    // Handler and stream hold pointers back into this heap allocation,
    // which is why this type is create/destroy rather than init/deinit.
    self.* = .{
        .terminal = try .init(alloc, .{
            .cols = args.@"terminal-cols",
            .rows = args.@"terminal-rows",
        }),
        .handler = .{ .t = &self.terminal },
        .stream = .{ .handler = &self.handler },
    };

    return self;
}
|
||||||
|
|
||||||
|
/// Release all resources owned by this benchmark, including the
/// struct's own allocation. Pass the same allocator used in create.
pub fn destroy(self: *TerminalStream, alloc: Allocator) void {
    // Terminal state must be torn down before the backing allocation
    // that holds the pointers into it goes away.
    self.terminal.deinit(alloc);
    alloc.destroy(self);
}
|
||||||
|
|
||||||
|
/// Expose this implementation through the generic Benchmark interface.
pub fn benchmark(self: *TerminalStream) Benchmark {
    return .init(self, .{
        .setupFn = setup,
        .stepFn = step,
    });
}
|
||||||
|
|
||||||
|
/// Benchmark setup hook: reset the terminal to a pristine state so
/// every run starts from identical conditions.
fn setup(ptr: *anyopaque) Benchmark.Error!void {
    const self: *TerminalStream = @ptrCast(@alignCast(ptr));
    self.terminal.fullReset();
}
|
||||||
|
|
||||||
|
/// Benchmark step hook. Currently a stub that performs no work; the
/// actual stream-feeding logic is not implemented yet.
fn step(ptr: *anyopaque) Benchmark.Error!void {
    const self: *TerminalStream = @ptrCast(@alignCast(ptr));
    _ = self;
}
|
||||||
|
|
||||||
|
/// Handler interface implementation for terminal.Stream. Only a
/// minimal set of operations is supported today; expanding it would
/// make the benchmark more realistic.
const Handler = struct {
    t: *Terminal,

    pub fn print(self: *Handler, cp: u21) !void {
        try self.t.print(cp);
    }
};
|
||||||
|
|
||||||
|
test TerminalStream {
    const testing = std.testing;
    const alloc = testing.allocator;

    // Smoke test: a single create/run/destroy cycle must succeed.
    const impl: *TerminalStream = try .create(alloc, .{});
    defer impl.destroy(alloc);

    const bench = impl.benchmark();
    _ = try bench.run(.once);
}
|
New file: src/benchmark/main.zig (6 lines)
@@ -0,0 +1,6 @@
|
|||||||
|
pub const Benchmark = @import("Benchmark.zig");
|
||||||
|
pub const TerminalStream = @import("TerminalStream.zig");
|
||||||
|
|
||||||
|
test {
    // Reference every declaration so nested tests are discovered.
    // refAllDecls returns void, so no discard is needed.
    @import("std").testing.refAllDecls(@This());
}
|
@ -182,6 +182,7 @@ test {
|
|||||||
_ = @import("surface_mouse.zig");
|
_ = @import("surface_mouse.zig");
|
||||||
|
|
||||||
// Libraries
|
// Libraries
|
||||||
|
_ = @import("benchmark/main.zig");
|
||||||
_ = @import("crash/main.zig");
|
_ = @import("crash/main.zig");
|
||||||
_ = @import("datastruct/main.zig");
|
_ = @import("datastruct/main.zig");
|
||||||
_ = @import("inspector/main.zig");
|
_ = @import("inspector/main.zig");
|
||||||
|
Reference in New Issue
Block a user