Mirror of https://github.com/ghostty-org/ghostty.git

terminal/new: comment, remove some pubs
@@ -41,21 +41,24 @@ const Wyhash = std.hash.Wyhash;
 const Offset = @import("size.zig").Offset;
 
-pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
-    return HashMapUnmanaged(K, V, AutoContext(K));
-}
-
 pub fn AutoOffsetHashMap(comptime K: type, comptime V: type) type {
     return OffsetHashMap(K, V, AutoContext(K));
 }
 
-pub fn AutoContext(comptime K: type) type {
+fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
+    return HashMapUnmanaged(K, V, AutoContext(K));
+}
+
+fn AutoContext(comptime K: type) type {
     return struct {
         pub const hash = std.hash_map.getAutoHashFn(K, @This());
         pub const eql = std.hash_map.getAutoEqlFn(K, @This());
     };
 }
 
+/// A HashMap type that uses offsets rather than pointers, making it
+/// possible to efficiently move around the backing memory without
+/// invalidating the HashMap.
 pub fn OffsetHashMap(
     comptime K: type,
     comptime V: type,
     comptime Context: type,
@@ -64,19 +67,37 @@ pub fn OffsetHashMap(
     return struct {
         const Self = @This();
 
+        /// This is the pointer-based map that we're wrapping.
         pub const Unmanaged = HashMapUnmanaged(K, V, Context);
 
+        /// This is the alignment that the base pointer must have.
+        pub const base_align = Unmanaged.max_align;
+
         metadata: Offset(Unmanaged.Metadata) = .{},
         size: Unmanaged.Size = 0,
 
+        /// Returns the total size of the backing memory required for a
+        /// HashMap with the given capacity. The base ptr must also be
+        /// aligned to base_align.
+        pub fn bufferSize(cap: Unmanaged.Size) usize {
+            const layout = Unmanaged.layoutForCapacity(cap);
+            return layout.total_size;
+        }
+
+        /// Initialize a new HashMap with the given capacity and backing
+        /// memory. The backing memory must be aligned to base_align.
         pub fn init(cap: Unmanaged.Size, buf: []u8) Self {
+            assert(@intFromPtr(buf.ptr) % base_align == 0);
+
             const m = Unmanaged.init(cap, buf);
+            const offset = @intFromPtr(m.metadata.?) - @intFromPtr(buf.ptr);
             return .{
-                .metadata = .{ .offset = @intCast(@intFromPtr(m.metadata.?) - @intFromPtr(buf.ptr)) },
+                .metadata = .{ .offset = @intCast(offset) },
                 .size = m.size,
             };
         }
 
+        /// Returns the pointer-based map from a base pointer.
        pub fn map(self: Self, base: anytype) Unmanaged {
            return .{
                .metadata = self.metadata.ptr(base),
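The new `bufferSize`/`init`/`map` API above implies a particular call-site shape: allocate a `base_align`-aligned buffer of `bufferSize(cap)` bytes, initialize the offset map into it, then rehydrate a pointer-based `Unmanaged` map from the current base pointer whenever you need to operate on it. A hedged sketch of that flow, assuming the fork keeps the stdlib `putAssumeCapacity`/`get` methods and that capacity must be a power of two (as `layoutForCapacity` asserts):

const std = @import("std");

// Usage sketch, not code from the commit: `AutoOffsetHashMap`, `base_align`,
// `bufferSize`, `init`, and `map` come from the diff above; `putAssumeCapacity`
// and `get` are assumed to carry over from the stdlib HashMapUnmanaged fork.
test "offset hash map usage sketch" {
    const Map = AutoOffsetHashMap(u32, u32);
    const cap: u32 = 16; // power of two, per layoutForCapacity's assert

    const alloc = std.testing.allocator;
    const buf = try alloc.alignedAlloc(u8, Map.base_align, Map.bufferSize(cap));
    defer alloc.free(buf);

    const offset_map = Map.init(cap, buf);

    // Rehydrate a pointer-based view from the buffer's current address.
    var m = offset_map.map(buf.ptr);
    m.putAssumeCapacity(42, 100);
    try std.testing.expectEqual(@as(?u32, 100), m.get(42));

    // If the whole buffer is later moved or remapped, only the base pointer
    // passed to map() changes; the stored offsets remain valid.
}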
@@ -86,16 +107,11 @@ pub fn OffsetHashMap(
     };
 }
 
-/// A HashMap based on open addressing and linear probing.
-/// A lookup or modification typically incurs only 2 cache misses.
-/// No order is guaranteed and any modification invalidates live iterators.
-/// It achieves good performance with quite high load factors (by default,
-/// grow is triggered at 80% full) and only one byte of overhead per element.
-/// The struct itself is only 16 bytes for a small footprint. This comes at
-/// the price of handling size with u32, which should be reasonable enough
-/// for almost all uses.
-/// Deletions are achieved with tombstones.
-pub fn HashMapUnmanaged(
+/// Fork of stdlib.HashMap as of Zig 0.12 modified to use offsets
+/// for the key/values pointer. The metadata is still a pointer to limit
+/// the amount of arithmetic required to access it. See the file comment
+/// for full details.
+fn HashMapUnmanaged(
     comptime K: type,
     comptime V: type,
     comptime Context: type,
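The `Offset` type imported from size.zig at the top of the diff is what makes the key/values pointers relocatable: it stores a byte distance from a base pointer rather than an absolute address. A minimal conceptual sketch, with the field and method names assumed rather than taken from size.zig:

// Conceptual sketch only: the real Offset lives in size.zig and its exact
// fields and methods are not shown in this diff.
fn OffsetSketch(comptime T: type) type {
    return struct {
        /// Byte distance from the buffer's base pointer.
        offset: u32 = 0,

        /// Rebase onto wherever the backing memory currently lives.
        pub fn ptr(self: @This(), base: anytype) [*]T {
            return @ptrFromInt(@intFromPtr(base) + self.offset);
        }
    };
}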
@@ -294,10 +310,6 @@ pub fn HashMapUnmanaged(
             return map;
         }
 
-        pub fn capacityForSize(size: Size) Size {
-            return math.ceilPowerOfTwo(u32, size + 1) catch unreachable;
-        }
-
         pub fn ensureTotalCapacity(self: *Self, new_size: Size) Allocator.Error!void {
             if (new_size > self.size) try self.growIfNeeded(new_size - self.size);
         }
@@ -828,7 +840,7 @@ pub fn HashMapUnmanaged(
         }
 
         /// The memory layout for the underlying buffer for a given capacity.
-        pub const Layout = struct {
+        const Layout = struct {
             /// The total size of the buffer required. The buffer is expected
             /// to be aligned to `max_align`.
             total_size: usize,
@@ -844,7 +856,7 @@ pub fn HashMapUnmanaged(
         /// The actual size may be able to fit more than the given capacity
         /// because capacity is rounded up to the next power of two. This is
         /// a design requirement for this hash map implementation.
-        pub fn layoutForCapacity(new_capacity: Size) Layout {
+        fn layoutForCapacity(new_capacity: Size) Layout {
             assert(std.math.isPowerOfTwo(new_capacity));
             const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata);
             comptime assert(@alignOf(Metadata) == 1);
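For intuition, `layoutForCapacity` is computing a flat memory layout: a header, then one metadata byte per slot (hence the `@alignOf(Metadata) == 1` assert), then the key and value arrays each aligned forward to their natural alignment. A hedged sketch of that arithmetic with illustrative names (the real function also fills in the full `Layout` struct and accounts for `max_align` padding):

const std = @import("std");

// Illustrative only: mirrors the shape of layoutForCapacity's math, not its
// exact code. `Header` stands in for the map's real header type.
fn totalSizeSketch(comptime Header: type, comptime K: type, comptime V: type, cap: u32) usize {
    std.debug.assert(std.math.isPowerOfTwo(cap));

    // Header followed by one metadata byte per slot.
    const meta_size: usize = @sizeOf(Header) + cap * @sizeOf(u8);

    // Keys start at the next K-aligned offset after the metadata...
    const keys_start = std.mem.alignForward(usize, meta_size, @alignOf(K));
    const keys_end = keys_start + cap * @sizeOf(K);

    // ...and values at the next V-aligned offset after the keys.
    const vals_start = std.mem.alignForward(usize, keys_end, @alignOf(V));
    return vals_start + cap * @sizeOf(V);
}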